Columns: id (string, 25 to 30 chars), content (string, 14 to 942k chars), max_stars_repo_path (string, 49 to 55 chars)
crossvul-cpp_data_bad_1819_2
/* * linux/fs/ext4/file.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext4 fs regular file handling primitives * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) */ #include <linux/time.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/path.h> #include <linux/dax.h> #include <linux/quotaops.h> #include <linux/pagevec.h> #include <linux/uio.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" /* * Called when an inode is released. Note that this is different * from ext4_file_open: open gets called at every open, but release * gets called only when /all/ the files are closed. */ static int ext4_release_file(struct inode *inode, struct file *filp) { if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) { ext4_alloc_da_blocks(inode); ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); } /* if we are the last writer on the inode, drop the block reservation */ if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1) && !EXT4_I(inode)->i_reserved_data_blocks) { down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); up_write(&EXT4_I(inode)->i_data_sem); } if (is_dx(inode) && filp->private_data) ext4_htree_free_dir_info(filp->private_data); return 0; } static void ext4_unwritten_wait(struct inode *inode) { wait_queue_head_t *wq = ext4_ioend_wq(inode); wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0)); } /* * This tests whether the IO in question is block-aligned or not. * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they * are converted to written only after the IO is complete. Until they are * mapped, these blocks appear as holes, so dio_zero_block() will assume that * it needs to zero out portions of the start and/or end block. If 2 AIO * threads are at work on the same unwritten block, they must be synchronized * or one thread will zero the other's data, causing corruption. */ static int ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos) { struct super_block *sb = inode->i_sb; int blockmask = sb->s_blocksize - 1; if (pos >= i_size_read(inode)) return 0; if ((pos | iov_iter_alignment(from)) & blockmask) return 1; return 0; } static ssize_t ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(iocb->ki_filp); struct mutex *aio_mutex = NULL; struct blk_plug plug; int o_direct = iocb->ki_flags & IOCB_DIRECT; int overwrite = 0; ssize_t ret; /* * Unaligned direct AIO must be serialized; see comment above * In the case of O_APPEND, assume that we must always serialize */ if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) && !is_sync_kiocb(iocb) && (iocb->ki_flags & IOCB_APPEND || ext4_unaligned_aio(inode, from, iocb->ki_pos))) { aio_mutex = ext4_aio_mutex(inode); mutex_lock(aio_mutex); ext4_unwritten_wait(inode); } mutex_lock(&inode->i_mutex); ret = generic_write_checks(iocb, from); if (ret <= 0) goto out; /* * If we have encountered a bitmap-format file, the size limit * is smaller than s_maxbytes, which is for extent-mapped files. 
*/ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) { ret = -EFBIG; goto out; } iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos); } iocb->private = &overwrite; if (o_direct) { size_t length = iov_iter_count(from); loff_t pos = iocb->ki_pos; blk_start_plug(&plug); /* check whether we do a DIO overwrite or not */ if (ext4_should_dioread_nolock(inode) && !aio_mutex && !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) { struct ext4_map_blocks map; unsigned int blkbits = inode->i_blkbits; int err, len; map.m_lblk = pos >> blkbits; map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits) - map.m_lblk; len = map.m_len; err = ext4_map_blocks(NULL, inode, &map, 0); /* * 'err==len' means that all of blocks has * been preallocated no matter they are * initialized or not. For excluding * unwritten extents, we need to check * m_flags. There are two conditions that * indicate for initialized extents. 1) If we * hit extent cache, EXT4_MAP_MAPPED flag is * returned; 2) If we do a real lookup, * non-flags are returned. So we should check * these two conditions. */ if (err == len && (map.m_flags & EXT4_MAP_MAPPED)) overwrite = 1; } } ret = __generic_file_write_iter(iocb, from); mutex_unlock(&inode->i_mutex); if (ret > 0) { ssize_t err; err = generic_write_sync(file, iocb->ki_pos - ret, ret); if (err < 0) ret = err; } if (o_direct) blk_finish_plug(&plug); if (aio_mutex) mutex_unlock(aio_mutex); return ret; out: mutex_unlock(&inode->i_mutex); if (aio_mutex) mutex_unlock(aio_mutex); return ret; } #ifdef CONFIG_FS_DAX static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate) { struct inode *inode = bh->b_assoc_map->host; /* XXX: breaks on 32-bit > 16TB. Is that even supported? 
*/ loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits; int err; if (!uptodate) return; WARN_ON(!buffer_unwritten(bh)); err = ext4_convert_unwritten_extents(NULL, inode, offset, bh->b_size); } static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { int result; handle_t *handle = NULL; struct super_block *sb = file_inode(vma->vm_file)->i_sb; bool write = vmf->flags & FAULT_FLAG_WRITE; if (write) { sb_start_pagefault(sb); file_update_time(vma->vm_file); handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, EXT4_DATA_TRANS_BLOCKS(sb)); } if (IS_ERR(handle)) result = VM_FAULT_SIGBUS; else result = __dax_fault(vma, vmf, ext4_get_block_dax, ext4_end_io_unwritten); if (write) { if (!IS_ERR(handle)) ext4_journal_stop(handle); sb_end_pagefault(sb); } return result; } static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags) { int result; handle_t *handle = NULL; struct inode *inode = file_inode(vma->vm_file); struct super_block *sb = inode->i_sb; bool write = flags & FAULT_FLAG_WRITE; if (write) { sb_start_pagefault(sb); file_update_time(vma->vm_file); handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, ext4_chunk_trans_blocks(inode, PMD_SIZE / PAGE_SIZE)); } if (IS_ERR(handle)) result = VM_FAULT_SIGBUS; else result = __dax_pmd_fault(vma, addr, pmd, flags, ext4_get_block_dax, ext4_end_io_unwritten); if (write) { if (!IS_ERR(handle)) ext4_journal_stop(handle); sb_end_pagefault(sb); } return result; } static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { return dax_mkwrite(vma, vmf, ext4_get_block_dax, ext4_end_io_unwritten); } static const struct vm_operations_struct ext4_dax_vm_ops = { .fault = ext4_dax_fault, .pmd_fault = ext4_dax_pmd_fault, .page_mkwrite = ext4_dax_mkwrite, .pfn_mkwrite = dax_pfn_mkwrite, }; #else #define ext4_dax_vm_ops ext4_file_vm_ops #endif static const struct vm_operations_struct ext4_file_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = ext4_page_mkwrite, }; static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file->f_mapping->host; if (ext4_encrypted_inode(inode)) { int err = ext4_get_encryption_info(inode); if (err) return 0; if (ext4_encryption_info(inode) == NULL) return -ENOKEY; } file_accessed(file); if (IS_DAX(file_inode(file))) { vma->vm_ops = &ext4_dax_vm_ops; vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; } else { vma->vm_ops = &ext4_file_vm_ops; } return 0; } static int ext4_file_open(struct inode * inode, struct file * filp) { struct super_block *sb = inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct vfsmount *mnt = filp->f_path.mnt; struct path path; char buf[64], *cp; int ret; if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) && !(sb->s_flags & MS_RDONLY))) { sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED; /* * Sample where the filesystem has been mounted and * store it in the superblock for sysadmin convenience * when trying to sort through large numbers of block * devices or filesystem images. 
*/ memset(buf, 0, sizeof(buf)); path.mnt = mnt; path.dentry = mnt->mnt_root; cp = d_path(&path, buf, sizeof(buf)); if (!IS_ERR(cp)) { handle_t *handle; int err; handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1); if (IS_ERR(handle)) return PTR_ERR(handle); BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) { ext4_journal_stop(handle); return err; } strlcpy(sbi->s_es->s_last_mounted, cp, sizeof(sbi->s_es->s_last_mounted)); ext4_handle_dirty_super(handle, sb); ext4_journal_stop(handle); } } if (ext4_encrypted_inode(inode)) { ret = ext4_get_encryption_info(inode); if (ret) return -EACCES; if (ext4_encryption_info(inode) == NULL) return -ENOKEY; } /* * Set up the jbd2_inode if we are opening the inode for * writing and the journal is present */ if (filp->f_mode & FMODE_WRITE) { ret = ext4_inode_attach_jinode(inode); if (ret < 0) return ret; } return dquot_file_open(inode, filp); } /* * Here we use ext4_map_blocks() to get a block mapping for a extent-based * file rather than ext4_ext_walk_space() because we can introduce * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped file at the same * function. When extent status tree has been fully implemented, it will * track all extent status for a file and we can directly use it to * retrieve the offset for SEEK_DATA/SEEK_HOLE. */ /* * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we would need to * lookup page cache to check whether or not there has some data between * [startoff, endoff] because, if this range contains an unwritten extent, * we determine this extent as a data or a hole according to whether the * page cache has data or not. */ static int ext4_find_unwritten_pgoff(struct inode *inode, int whence, struct ext4_map_blocks *map, loff_t *offset) { struct pagevec pvec; unsigned int blkbits; pgoff_t index; pgoff_t end; loff_t endoff; loff_t startoff; loff_t lastoff; int found = 0; blkbits = inode->i_sb->s_blocksize_bits; startoff = *offset; lastoff = startoff; endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; index = startoff >> PAGE_CACHE_SHIFT; end = endoff >> PAGE_CACHE_SHIFT; pagevec_init(&pvec, 0); do { int i, num; unsigned long nr_pages; num = min_t(pgoff_t, end - index, PAGEVEC_SIZE); nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, (pgoff_t)num); if (nr_pages == 0) { if (whence == SEEK_DATA) break; BUG_ON(whence != SEEK_HOLE); /* * If this is the first time to go into the loop and * offset is not beyond the end offset, it will be a * hole at this offset */ if (lastoff == startoff || lastoff < endoff) found = 1; break; } /* * If this is the first time to go into the loop and * offset is smaller than the first page offset, it will be a * hole at this offset. */ if (lastoff == startoff && whence == SEEK_HOLE && lastoff < page_offset(pvec.pages[0])) { found = 1; break; } for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; struct buffer_head *bh, *head; /* * If the current offset is not beyond the end of given * range, it will be a hole. 
*/ if (lastoff < endoff && whence == SEEK_HOLE && page->index > end) { found = 1; *offset = lastoff; goto out; } lock_page(page); if (unlikely(page->mapping != inode->i_mapping)) { unlock_page(page); continue; } if (!page_has_buffers(page)) { unlock_page(page); continue; } if (page_has_buffers(page)) { lastoff = page_offset(page); bh = head = page_buffers(page); do { if (buffer_uptodate(bh) || buffer_unwritten(bh)) { if (whence == SEEK_DATA) found = 1; } else { if (whence == SEEK_HOLE) found = 1; } if (found) { *offset = max_t(loff_t, startoff, lastoff); unlock_page(page); goto out; } lastoff += bh->b_size; bh = bh->b_this_page; } while (bh != head); } lastoff = page_offset(page) + PAGE_SIZE; unlock_page(page); } /* * The no. of pages is less than our desired, that would be a * hole in there. */ if (nr_pages < num && whence == SEEK_HOLE) { found = 1; *offset = lastoff; break; } index = pvec.pages[i - 1]->index + 1; pagevec_release(&pvec); } while (index <= end); out: pagevec_release(&pvec); return found; } /* * ext4_seek_data() retrieves the offset for SEEK_DATA. */ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) { struct inode *inode = file->f_mapping->host; struct ext4_map_blocks map; struct extent_status es; ext4_lblk_t start, last, end; loff_t dataoff, isize; int blkbits; int ret = 0; mutex_lock(&inode->i_mutex); isize = i_size_read(inode); if (offset >= isize) { mutex_unlock(&inode->i_mutex); return -ENXIO; } blkbits = inode->i_sb->s_blocksize_bits; start = offset >> blkbits; last = start; end = isize >> blkbits; dataoff = offset; do { map.m_lblk = last; map.m_len = end - last + 1; ret = ext4_map_blocks(NULL, inode, &map, 0); if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { if (last != start) dataoff = (loff_t)last << blkbits; break; } /* * If there is a delay extent at this offset, * it will be as a data. */ ext4_es_find_delayed_extent_range(inode, last, last, &es); if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { if (last != start) dataoff = (loff_t)last << blkbits; break; } /* * If there is a unwritten extent at this offset, * it will be as a data or a hole according to page * cache that has data or not. */ if (map.m_flags & EXT4_MAP_UNWRITTEN) { int unwritten; unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA, &map, &dataoff); if (unwritten) break; } last++; dataoff = (loff_t)last << blkbits; } while (last <= end); mutex_unlock(&inode->i_mutex); if (dataoff > isize) return -ENXIO; return vfs_setpos(file, dataoff, maxsize); } /* * ext4_seek_hole() retrieves the offset for SEEK_HOLE. */ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) { struct inode *inode = file->f_mapping->host; struct ext4_map_blocks map; struct extent_status es; ext4_lblk_t start, last, end; loff_t holeoff, isize; int blkbits; int ret = 0; mutex_lock(&inode->i_mutex); isize = i_size_read(inode); if (offset >= isize) { mutex_unlock(&inode->i_mutex); return -ENXIO; } blkbits = inode->i_sb->s_blocksize_bits; start = offset >> blkbits; last = start; end = isize >> blkbits; holeoff = offset; do { map.m_lblk = last; map.m_len = end - last + 1; ret = ext4_map_blocks(NULL, inode, &map, 0); if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { last += ret; holeoff = (loff_t)last << blkbits; continue; } /* * If there is a delay extent at this offset, * we will skip this extent. 
*/ ext4_es_find_delayed_extent_range(inode, last, last, &es); if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { last = es.es_lblk + es.es_len; holeoff = (loff_t)last << blkbits; continue; } /* * If there is a unwritten extent at this offset, * it will be as a data or a hole according to page * cache that has data or not. */ if (map.m_flags & EXT4_MAP_UNWRITTEN) { int unwritten; unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE, &map, &holeoff); if (!unwritten) { last += ret; holeoff = (loff_t)last << blkbits; continue; } } /* find a hole */ break; } while (last <= end); mutex_unlock(&inode->i_mutex); if (holeoff > isize) holeoff = isize; return vfs_setpos(file, holeoff, maxsize); } /* * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values * by calling generic_file_llseek_size() with the appropriate maxbytes * value for each. */ loff_t ext4_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t maxbytes; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; else maxbytes = inode->i_sb->s_maxbytes; switch (whence) { case SEEK_SET: case SEEK_CUR: case SEEK_END: return generic_file_llseek_size(file, offset, whence, maxbytes, i_size_read(inode)); case SEEK_DATA: return ext4_seek_data(file, offset, maxbytes); case SEEK_HOLE: return ext4_seek_hole(file, offset, maxbytes); } return -EINVAL; } const struct file_operations ext4_file_operations = { .llseek = ext4_llseek, .read_iter = generic_file_read_iter, .write_iter = ext4_file_write_iter, .unlocked_ioctl = ext4_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, #endif .mmap = ext4_file_mmap, .open = ext4_file_open, .release = ext4_release_file, .fsync = ext4_sync_file, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, .fallocate = ext4_fallocate, }; const struct inode_operations ext4_file_inode_operations = { .setattr = ext4_setattr, .getattr = ext4_getattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, .get_acl = ext4_get_acl, .set_acl = ext4_set_acl, .fiemap = ext4_fiemap, };
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1819_2
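The "bad" sample above (CWE-362) hinges on the direct-IO write path: ext4_unaligned_aio() decides whether an AIO write must be serialized by testing block alignment with a bitmask. Below is a minimal user-space sketch of just that bitmask test; io_is_unaligned, block_size, and the main() driver are illustrative names, not kernel APIs, and the kernel's extra "pos >= i_size" short-circuit is omitted.

/*
 * Minimal sketch of the alignment test in ext4_unaligned_aio() above:
 * an IO is "unaligned" when either its starting offset or its length
 * is not a multiple of the filesystem block size.  Illustrative only.
 */
#include <stdio.h>

static int io_is_unaligned(long long pos, long long len, long long block_size)
{
    long long blockmask = block_size - 1;  /* block_size must be a power of two */

    /* Same trick as (pos | iov_iter_alignment(from)) & blockmask:
     * OR the offset and the length, then test the low bits once. */
    return ((pos | len) & blockmask) != 0;
}

int main(void)
{
    /* With 4 KiB blocks: an 8 KiB write at offset 4096 is aligned,
     * a 100-byte write at offset 4096 is not. */
    printf("%d\n", io_is_unaligned(4096, 8192, 4096)); /* prints 0 */
    printf("%d\n", io_is_unaligned(4096, 100, 4096));  /* prints 1 */
    return 0;
}

ORing the offset and the length before masking checks both values against the block boundary in a single branch, which is why the kernel folds iov_iter_alignment() into the same expression.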
crossvul-cpp_data_good_3460_0
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 1997-2003 Erez Zadok * Copyright (C) 2001-2003 Stony Brook University * Copyright (C) 2004-2007 International Business Machines Corp. * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> * Michael C. Thompson <mcthomps@us.ibm.com> * Tyler Hicks <tyhicks@ou.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/dcache.h> #include <linux/file.h> #include <linux/module.h> #include <linux/namei.h> #include <linux/skbuff.h> #include <linux/crypto.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/key.h> #include <linux/parser.h> #include <linux/fs_stack.h> #include <linux/slab.h> #include <linux/magic.h> #include "ecryptfs_kernel.h" /** * Module parameter that defines the ecryptfs_verbosity level. */ int ecryptfs_verbosity = 0; module_param(ecryptfs_verbosity, int, 0); MODULE_PARM_DESC(ecryptfs_verbosity, "Initial verbosity level (0 or 1; defaults to " "0, which is Quiet)"); /** * Module parameter that defines the number of message buffer elements */ unsigned int ecryptfs_message_buf_len = ECRYPTFS_DEFAULT_MSG_CTX_ELEMS; module_param(ecryptfs_message_buf_len, uint, 0); MODULE_PARM_DESC(ecryptfs_message_buf_len, "Number of message buffer elements"); /** * Module parameter that defines the maximum guaranteed amount of time to wait * for a response from ecryptfsd. The actual sleep time will be, more than * likely, a small amount greater than this specified value, but only less if * the message successfully arrives. */ signed long ecryptfs_message_wait_timeout = ECRYPTFS_MAX_MSG_CTX_TTL / HZ; module_param(ecryptfs_message_wait_timeout, long, 0); MODULE_PARM_DESC(ecryptfs_message_wait_timeout, "Maximum number of seconds that an operation will " "sleep while waiting for a message response from " "userspace"); /** * Module parameter that is an estimate of the maximum number of users * that will be concurrently using eCryptfs. Set this to the right * value to balance performance and memory use. */ unsigned int ecryptfs_number_of_users = ECRYPTFS_DEFAULT_NUM_USERS; module_param(ecryptfs_number_of_users, uint, 0); MODULE_PARM_DESC(ecryptfs_number_of_users, "An estimate of the number of " "concurrent users of eCryptfs"); void __ecryptfs_printk(const char *fmt, ...) { va_list args; va_start(args, fmt); if (fmt[1] == '7') { /* KERN_DEBUG */ if (ecryptfs_verbosity >= 1) vprintk(fmt, args); } else vprintk(fmt, args); va_end(args); } /** * ecryptfs_init_lower_file * @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with * the lower dentry and the lower mount set * * eCryptfs only ever keeps a single open file for every lower * inode. All I/O operations to the lower inode occur through that * file. 
When the first eCryptfs dentry that interposes with the first * lower dentry for that inode is created, this function creates the * lower file struct and associates it with the eCryptfs * inode. When all eCryptfs files associated with the inode are released, the * file is closed. * * The lower file will be opened with read/write permissions, if * possible. Otherwise, it is opened read-only. * * This function does nothing if a lower file is already * associated with the eCryptfs inode. * * Returns zero on success; non-zero otherwise */ static int ecryptfs_init_lower_file(struct dentry *dentry, struct file **lower_file) { const struct cred *cred = current_cred(); struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); int rc; rc = ecryptfs_privileged_open(lower_file, lower_dentry, lower_mnt, cred); if (rc) { printk(KERN_ERR "Error opening lower file " "for lower_dentry [0x%p] and lower_mnt [0x%p]; " "rc = [%d]\n", lower_dentry, lower_mnt, rc); (*lower_file) = NULL; } return rc; } int ecryptfs_get_lower_file(struct dentry *dentry, struct inode *inode) { struct ecryptfs_inode_info *inode_info; int count, rc = 0; inode_info = ecryptfs_inode_to_private(inode); mutex_lock(&inode_info->lower_file_mutex); count = atomic_inc_return(&inode_info->lower_file_count); if (WARN_ON_ONCE(count < 1)) rc = -EINVAL; else if (count == 1) { rc = ecryptfs_init_lower_file(dentry, &inode_info->lower_file); if (rc) atomic_set(&inode_info->lower_file_count, 0); } mutex_unlock(&inode_info->lower_file_mutex); return rc; } void ecryptfs_put_lower_file(struct inode *inode) { struct ecryptfs_inode_info *inode_info; inode_info = ecryptfs_inode_to_private(inode); if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count, &inode_info->lower_file_mutex)) { fput(inode_info->lower_file); inode_info->lower_file = NULL; mutex_unlock(&inode_info->lower_file_mutex); } } enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher, ecryptfs_opt_ecryptfs_key_bytes, ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata, ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig, ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes, ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only, ecryptfs_opt_check_dev_ruid, ecryptfs_opt_err }; static const match_table_t tokens = { {ecryptfs_opt_sig, "sig=%s"}, {ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"}, {ecryptfs_opt_cipher, "cipher=%s"}, {ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"}, {ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"}, {ecryptfs_opt_passthrough, "ecryptfs_passthrough"}, {ecryptfs_opt_xattr_metadata, "ecryptfs_xattr_metadata"}, {ecryptfs_opt_encrypted_view, "ecryptfs_encrypted_view"}, {ecryptfs_opt_fnek_sig, "ecryptfs_fnek_sig=%s"}, {ecryptfs_opt_fn_cipher, "ecryptfs_fn_cipher=%s"}, {ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"}, {ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"}, {ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"}, {ecryptfs_opt_check_dev_ruid, "ecryptfs_check_dev_ruid"}, {ecryptfs_opt_err, NULL} }; static int ecryptfs_init_global_auth_toks( struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { struct ecryptfs_global_auth_tok *global_auth_tok; struct ecryptfs_auth_tok *auth_tok; int rc = 0; list_for_each_entry(global_auth_tok, &mount_crypt_stat->global_auth_tok_list, mount_crypt_stat_list) { rc = ecryptfs_keyring_auth_tok_for_sig( &global_auth_tok->global_auth_tok_key, &auth_tok, 
global_auth_tok->sig); if (rc) { printk(KERN_ERR "Could not find valid key in user " "session keyring for sig specified in mount " "option: [%s]\n", global_auth_tok->sig); global_auth_tok->flags |= ECRYPTFS_AUTH_TOK_INVALID; goto out; } else { global_auth_tok->flags &= ~ECRYPTFS_AUTH_TOK_INVALID; up_write(&(global_auth_tok->global_auth_tok_key)->sem); } } out: return rc; } static void ecryptfs_init_mount_crypt_stat( struct ecryptfs_mount_crypt_stat *mount_crypt_stat) { memset((void *)mount_crypt_stat, 0, sizeof(struct ecryptfs_mount_crypt_stat)); INIT_LIST_HEAD(&mount_crypt_stat->global_auth_tok_list); mutex_init(&mount_crypt_stat->global_auth_tok_list_mutex); mount_crypt_stat->flags |= ECRYPTFS_MOUNT_CRYPT_STAT_INITIALIZED; } /** * ecryptfs_parse_options * @sb: The ecryptfs super block * @options: The options passed to the kernel * @check_ruid: set to 1 if device uid should be checked against the ruid * * Parse mount options: * debug=N - ecryptfs_verbosity level for debug output * sig=XXX - description(signature) of the key to use * * Returns the dentry object of the lower-level (lower/interposed) * directory; We want to mount our stackable file system on top of * that lower directory. * * The signature of the key to use must be the description of a key * already in the keyring. Mounting will fail if the key can not be * found. * * Returns zero on success; non-zero on error */ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, uid_t *check_ruid) { char *p; int rc = 0; int sig_set = 0; int cipher_name_set = 0; int fn_cipher_name_set = 0; int cipher_key_bytes; int cipher_key_bytes_set = 0; int fn_cipher_key_bytes; int fn_cipher_key_bytes_set = 0; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = &sbi->mount_crypt_stat; substring_t args[MAX_OPT_ARGS]; int token; char *sig_src; char *cipher_name_dst; char *cipher_name_src; char *fn_cipher_name_dst; char *fn_cipher_name_src; char *fnek_dst; char *fnek_src; char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; *check_ruid = 0; if (!options) { rc = -EINVAL; goto out; } ecryptfs_init_mount_crypt_stat(mount_crypt_stat); while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case ecryptfs_opt_sig: case ecryptfs_opt_ecryptfs_sig: sig_src = args[0].from; rc = ecryptfs_add_global_auth_tok(mount_crypt_stat, sig_src, 0); if (rc) { printk(KERN_ERR "Error attempting to register " "global sig; rc = [%d]\n", rc); goto out; } sig_set = 1; break; case ecryptfs_opt_cipher: case ecryptfs_opt_ecryptfs_cipher: cipher_name_src = args[0].from; cipher_name_dst = mount_crypt_stat-> global_default_cipher_name; strncpy(cipher_name_dst, cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; cipher_name_set = 1; break; case ecryptfs_opt_ecryptfs_key_bytes: cipher_key_bytes_src = args[0].from; cipher_key_bytes = (int)simple_strtol(cipher_key_bytes_src, &cipher_key_bytes_src, 0); mount_crypt_stat->global_default_cipher_key_size = cipher_key_bytes; cipher_key_bytes_set = 1; break; case ecryptfs_opt_passthrough: mount_crypt_stat->flags |= ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED; break; case ecryptfs_opt_xattr_metadata: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; break; case ecryptfs_opt_encrypted_view: mount_crypt_stat->flags |= ECRYPTFS_XATTR_METADATA_ENABLED; mount_crypt_stat->flags |= ECRYPTFS_ENCRYPTED_VIEW_ENABLED; break; case ecryptfs_opt_fnek_sig: fnek_src = args[0].from; fnek_dst = 
mount_crypt_stat->global_default_fnek_sig; strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX); mount_crypt_stat->global_default_fnek_sig[ ECRYPTFS_SIG_SIZE_HEX] = '\0'; rc = ecryptfs_add_global_auth_tok( mount_crypt_stat, mount_crypt_stat->global_default_fnek_sig, ECRYPTFS_AUTH_TOK_FNEK); if (rc) { printk(KERN_ERR "Error attempting to register " "global fnek sig [%s]; rc = [%d]\n", mount_crypt_stat->global_default_fnek_sig, rc); goto out; } mount_crypt_stat->flags |= (ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES | ECRYPTFS_GLOBAL_ENCFN_USE_MOUNT_FNEK); break; case ecryptfs_opt_fn_cipher: fn_cipher_name_src = args[0].from; fn_cipher_name_dst = mount_crypt_stat->global_default_fn_cipher_name; strncpy(fn_cipher_name_dst, fn_cipher_name_src, ECRYPTFS_MAX_CIPHER_NAME_SIZE); mount_crypt_stat->global_default_fn_cipher_name[ ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0'; fn_cipher_name_set = 1; break; case ecryptfs_opt_fn_cipher_key_bytes: fn_cipher_key_bytes_src = args[0].from; fn_cipher_key_bytes = (int)simple_strtol(fn_cipher_key_bytes_src, &fn_cipher_key_bytes_src, 0); mount_crypt_stat->global_default_fn_cipher_key_bytes = fn_cipher_key_bytes; fn_cipher_key_bytes_set = 1; break; case ecryptfs_opt_unlink_sigs: mount_crypt_stat->flags |= ECRYPTFS_UNLINK_SIGS; break; case ecryptfs_opt_mount_auth_tok_only: mount_crypt_stat->flags |= ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY; break; case ecryptfs_opt_check_dev_ruid: *check_ruid = 1; break; case ecryptfs_opt_err: default: printk(KERN_WARNING "%s: eCryptfs: unrecognized option [%s]\n", __func__, p); } } if (!sig_set) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "You must supply at least one valid " "auth tok signature as a mount " "parameter; see the eCryptfs README\n"); goto out; } if (!cipher_name_set) { int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE); strcpy(mount_crypt_stat->global_default_cipher_name, ECRYPTFS_DEFAULT_CIPHER); } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_name_set) strcpy(mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_cipher_name); if (!cipher_key_bytes_set) mount_crypt_stat->global_default_cipher_key_size = 0; if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !fn_cipher_key_bytes_set) mount_crypt_stat->global_default_fn_cipher_key_bytes = mount_crypt_stat->global_default_cipher_key_size; mutex_lock(&key_tfm_list_mutex); if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_cipher_name, mount_crypt_stat->global_default_cipher_key_size, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) && !ecryptfs_tfm_exists( mount_crypt_stat->global_default_fn_cipher_name, NULL)) { rc = ecryptfs_add_new_key_tfm( NULL, mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes); if (rc) { printk(KERN_ERR "Error attempting to initialize " "cipher with name = [%s] and key size = [%td]; " "rc = [%d]\n", mount_crypt_stat->global_default_fn_cipher_name, mount_crypt_stat->global_default_fn_cipher_key_bytes, rc); rc = -EINVAL; mutex_unlock(&key_tfm_list_mutex); goto out; } } 
mutex_unlock(&key_tfm_list_mutex); rc = ecryptfs_init_global_auth_toks(mount_crypt_stat); if (rc) printk(KERN_WARNING "One or more global auth toks could not " "properly register; rc = [%d]\n", rc); out: return rc; } struct kmem_cache *ecryptfs_sb_info_cache; static struct file_system_type ecryptfs_fs_type; /** * ecryptfs_get_sb * @fs_type * @flags * @dev_name: The path to mount over * @raw_data: The options passed into the kernel */ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { struct super_block *s; struct ecryptfs_sb_info *sbi; struct ecryptfs_dentry_info *root_info; const char *err = "Getting sb failed"; struct inode *inode; struct path path; uid_t check_ruid; int rc; sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); if (!sbi) { rc = -ENOMEM; goto out; } rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); if (rc) { err = "Error parsing options"; goto out; } s = sget(fs_type, NULL, set_anon_super, NULL); if (IS_ERR(s)) { rc = PTR_ERR(s); goto out; } s->s_flags = flags; rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); if (rc) goto out1; ecryptfs_set_superblock_private(s, sbi); s->s_bdi = &sbi->bdi; /* ->kill_sb() will take care of sbi after that point */ sbi = NULL; s->s_op = &ecryptfs_sops; s->s_d_op = &ecryptfs_dops; err = "Reading sb failed"; rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); if (rc) { ecryptfs_printk(KERN_WARNING, "kern_path() failed\n"); goto out1; } if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) { rc = -EINVAL; printk(KERN_ERR "Mount on filesystem of type " "eCryptfs explicitly disallowed due to " "known incompatibilities\n"); goto out_free; } if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) { rc = -EPERM; printk(KERN_ERR "Mount of device (uid: %d) not owned by " "requested user (uid: %d)\n", path.dentry->d_inode->i_uid, current_uid()); goto out_free; } ecryptfs_set_superblock_lower(s, path.dentry->d_sb); s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; s->s_magic = ECRYPTFS_SUPER_MAGIC; inode = ecryptfs_get_inode(path.dentry->d_inode, s); rc = PTR_ERR(inode); if (IS_ERR(inode)) goto out_free; s->s_root = d_alloc_root(inode); if (!s->s_root) { iput(inode); rc = -ENOMEM; goto out_free; } rc = -ENOMEM; root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL); if (!root_info) goto out_free; /* ->kill_sb() will take care of root_info */ ecryptfs_set_dentry_private(s->s_root, root_info); ecryptfs_set_dentry_lower(s->s_root, path.dentry); ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt); s->s_flags |= MS_ACTIVE; return dget(s->s_root); out_free: path_put(&path); out1: deactivate_locked_super(s); out: if (sbi) { ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat); kmem_cache_free(ecryptfs_sb_info_cache, sbi); } printk(KERN_ERR "%s; rc = [%d]\n", err, rc); return ERR_PTR(rc); } /** * ecryptfs_kill_block_super * @sb: The ecryptfs super block * * Used to bring the superblock down and free the private data. 
*/ static void ecryptfs_kill_block_super(struct super_block *sb) { struct ecryptfs_sb_info *sb_info = ecryptfs_superblock_to_private(sb); kill_anon_super(sb); if (!sb_info) return; ecryptfs_destroy_mount_crypt_stat(&sb_info->mount_crypt_stat); bdi_destroy(&sb_info->bdi); kmem_cache_free(ecryptfs_sb_info_cache, sb_info); } static struct file_system_type ecryptfs_fs_type = { .owner = THIS_MODULE, .name = "ecryptfs", .mount = ecryptfs_mount, .kill_sb = ecryptfs_kill_block_super, .fs_flags = 0 }; /** * inode_info_init_once * * Initializes the ecryptfs_inode_info_cache when it is created */ static void inode_info_init_once(void *vptr) { struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; inode_init_once(&ei->vfs_inode); } static struct ecryptfs_cache_info { struct kmem_cache **cache; const char *name; size_t size; void (*ctor)(void *obj); } ecryptfs_cache_infos[] = { { .cache = &ecryptfs_auth_tok_list_item_cache, .name = "ecryptfs_auth_tok_list_item", .size = sizeof(struct ecryptfs_auth_tok_list_item), }, { .cache = &ecryptfs_file_info_cache, .name = "ecryptfs_file_cache", .size = sizeof(struct ecryptfs_file_info), }, { .cache = &ecryptfs_dentry_info_cache, .name = "ecryptfs_dentry_info_cache", .size = sizeof(struct ecryptfs_dentry_info), }, { .cache = &ecryptfs_inode_info_cache, .name = "ecryptfs_inode_cache", .size = sizeof(struct ecryptfs_inode_info), .ctor = inode_info_init_once, }, { .cache = &ecryptfs_sb_info_cache, .name = "ecryptfs_sb_cache", .size = sizeof(struct ecryptfs_sb_info), }, { .cache = &ecryptfs_header_cache, .name = "ecryptfs_headers", .size = PAGE_CACHE_SIZE, }, { .cache = &ecryptfs_xattr_cache, .name = "ecryptfs_xattr_cache", .size = PAGE_CACHE_SIZE, }, { .cache = &ecryptfs_key_record_cache, .name = "ecryptfs_key_record_cache", .size = sizeof(struct ecryptfs_key_record), }, { .cache = &ecryptfs_key_sig_cache, .name = "ecryptfs_key_sig_cache", .size = sizeof(struct ecryptfs_key_sig), }, { .cache = &ecryptfs_global_auth_tok_cache, .name = "ecryptfs_global_auth_tok_cache", .size = sizeof(struct ecryptfs_global_auth_tok), }, { .cache = &ecryptfs_key_tfm_cache, .name = "ecryptfs_key_tfm_cache", .size = sizeof(struct ecryptfs_key_tfm), }, { .cache = &ecryptfs_open_req_cache, .name = "ecryptfs_open_req_cache", .size = sizeof(struct ecryptfs_open_req), }, }; static void ecryptfs_free_kmem_caches(void) { int i; for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) { struct ecryptfs_cache_info *info; info = &ecryptfs_cache_infos[i]; if (*(info->cache)) kmem_cache_destroy(*(info->cache)); } } /** * ecryptfs_init_kmem_caches * * Returns zero on success; non-zero otherwise */ static int ecryptfs_init_kmem_caches(void) { int i; for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) { struct ecryptfs_cache_info *info; info = &ecryptfs_cache_infos[i]; *(info->cache) = kmem_cache_create(info->name, info->size, 0, SLAB_HWCACHE_ALIGN, info->ctor); if (!*(info->cache)) { ecryptfs_free_kmem_caches(); ecryptfs_printk(KERN_WARNING, "%s: " "kmem_cache_create failed\n", info->name); return -ENOMEM; } } return 0; } static struct kobject *ecryptfs_kobj; static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buff) { return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK); } static struct kobj_attribute version_attr = __ATTR_RO(version); static struct attribute *attributes[] = { &version_attr.attr, NULL, }; static struct attribute_group attr_group = { .attrs = attributes, }; static int do_sysfs_registration(void) { int rc; ecryptfs_kobj = 
kobject_create_and_add("ecryptfs", fs_kobj); if (!ecryptfs_kobj) { printk(KERN_ERR "Unable to create ecryptfs kset\n"); rc = -ENOMEM; goto out; } rc = sysfs_create_group(ecryptfs_kobj, &attr_group); if (rc) { printk(KERN_ERR "Unable to create ecryptfs version attributes\n"); kobject_put(ecryptfs_kobj); } out: return rc; } static void do_sysfs_unregistration(void) { sysfs_remove_group(ecryptfs_kobj, &attr_group); kobject_put(ecryptfs_kobj); } static int __init ecryptfs_init(void) { int rc; if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is " "larger than the host's page size, and so " "eCryptfs cannot run on this system. The " "default eCryptfs extent size is [%u] bytes; " "the page size is [%lu] bytes.\n", ECRYPTFS_DEFAULT_EXTENT_SIZE, (unsigned long)PAGE_CACHE_SIZE); goto out; } rc = ecryptfs_init_kmem_caches(); if (rc) { printk(KERN_ERR "Failed to allocate one or more kmem_cache objects\n"); goto out; } rc = register_filesystem(&ecryptfs_fs_type); if (rc) { printk(KERN_ERR "Failed to register filesystem\n"); goto out_free_kmem_caches; } rc = do_sysfs_registration(); if (rc) { printk(KERN_ERR "sysfs registration failed\n"); goto out_unregister_filesystem; } rc = ecryptfs_init_kthread(); if (rc) { printk(KERN_ERR "%s: kthread initialization failed; " "rc = [%d]\n", __func__, rc); goto out_do_sysfs_unregistration; } rc = ecryptfs_init_messaging(); if (rc) { printk(KERN_ERR "Failure occurred while attempting to " "initialize the communications channel to " "ecryptfsd\n"); goto out_destroy_kthread; } rc = ecryptfs_init_crypto(); if (rc) { printk(KERN_ERR "Failure whilst attempting to init crypto; " "rc = [%d]\n", rc); goto out_release_messaging; } if (ecryptfs_verbosity > 0) printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values " "will be written to the syslog!\n", ecryptfs_verbosity); goto out; out_release_messaging: ecryptfs_release_messaging(); out_destroy_kthread: ecryptfs_destroy_kthread(); out_do_sysfs_unregistration: do_sysfs_unregistration(); out_unregister_filesystem: unregister_filesystem(&ecryptfs_fs_type); out_free_kmem_caches: ecryptfs_free_kmem_caches(); out: return rc; } static void __exit ecryptfs_exit(void) { int rc; rc = ecryptfs_destroy_crypto(); if (rc) printk(KERN_ERR "Failure whilst attempting to destroy crypto; " "rc = [%d]\n", rc); ecryptfs_release_messaging(); ecryptfs_destroy_kthread(); do_sysfs_unregistration(); unregister_filesystem(&ecryptfs_fs_type); ecryptfs_free_kmem_caches(); } MODULE_AUTHOR("Michael A. Halcrow <mhalcrow@us.ibm.com>"); MODULE_DESCRIPTION("eCryptfs"); MODULE_LICENSE("GPL"); module_init(ecryptfs_init) module_exit(ecryptfs_exit)
./CrossVul/dataset_final_sorted/CWE-362/c/good_3460_0
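The "good" eCryptfs sample keeps a single open lower file per inode, guarded by lower_file_mutex plus a use count, so the first ecryptfs_get_lower_file() opens it and the last ecryptfs_put_lower_file() closes it without racing. The sketch below shows the same invariant in user space with a plain counter held under a pthread mutex; the kernel version keeps the count atomic so the put side can use atomic_dec_and_mutex_lock(), but the locking discipline is the same. All names here (get_resource, put_resource, resource_open) are illustrative.

/*
 * User-space sketch of the refcounted-singleton pattern from the
 * eCryptfs sample: open on the 0 -> 1 transition, close on the
 * 1 -> 0 transition, both under the same mutex so the open and the
 * close can never race.  Compile with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount;       /* guarded by lock */
static int resource_open;  /* stands in for inode_info->lower_file */

static void get_resource(void)
{
    pthread_mutex_lock(&lock);
    if (refcount++ == 0) {
        /* First user: do the one-time open under the mutex. */
        resource_open = 1;
        printf("opened lower file\n");
    }
    pthread_mutex_unlock(&lock);
}

static void put_resource(void)
{
    pthread_mutex_lock(&lock);
    if (--refcount == 0) {
        /* Last user: safe to close, since any re-opener must first
         * take the same mutex and see refcount == 0 handled here. */
        resource_open = 0;
        printf("closed lower file\n");
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    get_resource();
    get_resource();  /* second user: no re-open */
    put_resource();
    put_resource();  /* last user: close happens here */
    return 0;
}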
crossvul-cpp_data_good_1803_0
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * * This file is part of the SCTP kernel implementation * * These functions work with the state functions in sctp_sm_statefuns.c * to implement that state operations. These functions implement the * steps which require modifying existing data structures. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@austin.ibm.com> * Hui Huang <hui.huang@nokia.com> * Dajiang Zhang <dajiang.zhang@nokia.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <sri@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/skbuff.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/ip.h> #include <linux/gfp.h> #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, gfp_t gfp); static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, gfp_t gfp); static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, struct sctp_transport *t); /******************************************************************** * Helper functions ********************************************************************/ /* A helper function for delayed processing of INET ECN CE bit. */ static void sctp_do_ecn_ce_work(struct sctp_association *asoc, __u32 lowest_tsn) { /* Save the TSN away for comparison when we receive CWR */ asoc->last_ecne_tsn = lowest_tsn; asoc->need_ecne = 1; } /* Helper function for delayed processing of SCTP ECNE chunk. */ /* RFC 2960 Appendix A * * RFC 2481 details a specific bit for a sender to send in * the header of its next outbound TCP segment to indicate to * its peer that it has reduced its congestion window. This * is termed the CWR bit. For SCTP the same indication is made * by including the CWR chunk. This chunk contains one data * element, i.e. the TSN number that was sent in the ECNE chunk. * This element represents the lowest TSN number in the datagram * that was originally marked with the CE bit. 
*/ static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc, __u32 lowest_tsn, struct sctp_chunk *chunk) { struct sctp_chunk *repl; /* Our previously transmitted packet ran into some congestion * so we should take action by reducing cwnd and ssthresh * and then ACK our peer that we we've done so by * sending a CWR. */ /* First, try to determine if we want to actually lower * our cwnd variables. Only lower them if the ECNE looks more * recent than the last response. */ if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) { struct sctp_transport *transport; /* Find which transport's congestion variables * need to be adjusted. */ transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn); /* Update the congestion variables. */ if (transport) sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_ECNE); asoc->last_cwr_tsn = lowest_tsn; } /* Always try to quiet the other end. In case of lost CWR, * resend last_cwr_tsn. */ repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); /* If we run out of memory, it will look like a lost CWR. We'll * get back in sync eventually. */ return repl; } /* Helper function to do delayed processing of ECN CWR chunk. */ static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, __u32 lowest_tsn) { /* Turn off ECNE getting auto-prepended to every outgoing * packet */ asoc->need_ecne = 0; } /* Generate SACK if necessary. We call this at the end of a packet. */ static int sctp_gen_sack(struct sctp_association *asoc, int force, sctp_cmd_seq_t *commands) { __u32 ctsn, max_tsn_seen; struct sctp_chunk *sack; struct sctp_transport *trans = asoc->peer.last_data_from; int error = 0; if (force || (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) || (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE))) asoc->peer.sack_needed = 1; ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); /* From 12.2 Parameters necessary per association (i.e. the TCB): * * Ack State : This flag indicates if the next received packet * : is to be responded to with a SACK. ... * : When DATA chunks are out of order, SACK's * : are not delayed (see Section 6). * * [This is actually not mentioned in Section 6, but we * implement it here anyway. --piggy] */ if (max_tsn_seen != ctsn) asoc->peer.sack_needed = 1; /* From 6.2 Acknowledgement on Reception of DATA Chunks: * * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, * an acknowledgement SHOULD be generated for at least every * second packet (not every second DATA chunk) received, and * SHOULD be generated within 200 ms of the arrival of any * unacknowledged DATA chunk. ... */ if (!asoc->peer.sack_needed) { asoc->peer.sack_cnt++; /* Set the SACK delay timeout based on the * SACK delay for the last transport * data was received from, or the default * for the association. */ if (trans) { /* We will need a SACK for the next packet. */ if (asoc->peer.sack_cnt >= trans->sackfreq - 1) asoc->peer.sack_needed = 1; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = trans->sackdelay; } else { /* We will need a SACK for the next packet. */ if (asoc->peer.sack_cnt >= asoc->sackfreq - 1) asoc->peer.sack_needed = 1; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; } /* Restart the SACK timer. 
*/ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); } else { asoc->a_rwnd = asoc->rwnd; sack = sctp_make_sack(asoc); if (!sack) goto nomem; asoc->peer.sack_needed = 0; asoc->peer.sack_cnt = 0; sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); /* Stop the SACK timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); } return error; nomem: error = -ENOMEM; return error; } /* When the T3-RTX timer expires, it calls this function to create the * relevant state machine event. */ void sctp_generate_t3_rtx_event(unsigned long peer) { int error; struct sctp_transport *transport = (struct sctp_transport *) peer; struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); /* Check whether a task is in the sock. */ bh_lock_sock(sk); if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) sctp_transport_hold(transport); goto out_unlock; } /* Is this transport really dead and just waiting around for * the timer to let go of the reference? */ if (transport->dead) goto out_unlock; /* Run through the state machine. */ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); if (error) sk->sk_err = -error; out_unlock: bh_unlock_sock(sk); sctp_transport_put(transport); } /* This is a sa interface for producing timeout events. It works * for timeouts which use the association as their parameter. */ static void sctp_generate_timeout_event(struct sctp_association *asoc, sctp_event_timeout_t timeout_type) { struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); int error = 0; bh_lock_sock(sk); if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy: timer %d\n", __func__, timeout_type); /* Try again later. */ if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20))) sctp_association_hold(asoc); goto out_unlock; } /* Is this association really dead and just waiting around for * the timer to let go of the reference? */ if (asoc->base.dead) goto out_unlock; /* Run through the state machine. 
*/ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, SCTP_ST_TIMEOUT(timeout_type), asoc->state, asoc->ep, asoc, (void *)timeout_type, GFP_ATOMIC); if (error) sk->sk_err = -error; out_unlock: bh_unlock_sock(sk); sctp_association_put(asoc); } static void sctp_generate_t1_cookie_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE); } static void sctp_generate_t1_init_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT); } static void sctp_generate_t2_shutdown_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN); } static void sctp_generate_t4_rto_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO); } static void sctp_generate_t5_shutdown_guard_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *)data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD); } /* sctp_generate_t5_shutdown_guard_event() */ static void sctp_generate_autoclose_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE); } /* Generate a heart beat event. If the sock is busy, reschedule. Make * sure that the transport is still valid. */ void sctp_generate_heartbeat_event(unsigned long data) { int error = 0; struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) sctp_transport_hold(transport); goto out_unlock; } /* Is this structure just waiting around for us to actually * get destroyed? */ if (transport->dead) goto out_unlock; error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); if (error) sk->sk_err = -error; out_unlock: bh_unlock_sock(sk); sctp_transport_put(transport); } /* Handle the timeout of the ICMP protocol unreachable timer. Trigger * the correct state machine transition that will close the association. */ void sctp_generate_proto_unreach_event(unsigned long data) { struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ if (!mod_timer(&transport->proto_unreach_timer, jiffies + (HZ/20))) sctp_association_hold(asoc); goto out_unlock; } /* Is this structure just waiting around for us to actually * get destroyed? */ if (asoc->base.dead) goto out_unlock; sctp_do_sm(net, SCTP_EVENT_T_OTHER, SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); out_unlock: bh_unlock_sock(sk); sctp_association_put(asoc); } /* Inject a SACK Timeout event into the state machine. 
*/ static void sctp_generate_sack_event(unsigned long data) { struct sctp_association *asoc = (struct sctp_association *) data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); } sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { NULL, sctp_generate_t1_cookie_event, sctp_generate_t1_init_event, sctp_generate_t2_shutdown_event, NULL, sctp_generate_t4_rto_event, sctp_generate_t5_shutdown_guard_event, NULL, sctp_generate_sack_event, sctp_generate_autoclose_event, }; /* RFC 2960 8.2 Path Failure Detection * * When its peer endpoint is multi-homed, an endpoint should keep a * error counter for each of the destination transport addresses of the * peer endpoint. * * Each time the T3-rtx timer expires on any address, or when a * HEARTBEAT sent to an idle address is not acknowledged within a RTO, * the error counter of that destination address will be incremented. * When the value in the error counter exceeds the protocol parameter * 'Path.Max.Retrans' of that destination address, the endpoint should * mark the destination transport address as inactive, and a * notification SHOULD be sent to the upper layer. * */ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, struct sctp_association *asoc, struct sctp_transport *transport, int is_hb) { /* The check for association's overall error counter exceeding the * threshold is done in the state function. */ /* We are here due to a timer expiration. If the timer was * not a HEARTBEAT, then normal error tracking is done. * If the timer was a heartbeat, we only increment error counts * when we already have an outstanding HEARTBEAT that has not * been acknowledged. * Additionally, some tranport states inhibit error increments. */ if (!is_hb) { asoc->overall_error_count++; if (transport->state != SCTP_INACTIVE) transport->error_count++; } else if (transport->hb_sent) { if (transport->state != SCTP_UNCONFIRMED) asoc->overall_error_count++; if (transport->state != SCTP_INACTIVE) transport->error_count++; } /* If the transport error count is greater than the pf_retrans * threshold, and less than pathmaxrtx, and if the current state * is SCTP_ACTIVE, then mark this transport as Partially Failed, * see SCTP Quick Failover Draft, section 5.1 */ if ((transport->state == SCTP_ACTIVE) && (asoc->pf_retrans < transport->pathmaxrxt) && (transport->error_count > asoc->pf_retrans)) { sctp_assoc_control_transport(asoc, transport, SCTP_TRANSPORT_PF, 0); /* Update the hb timer to resend a heartbeat every rto */ sctp_cmd_hb_timer_update(commands, transport); } if (transport->state != SCTP_INACTIVE && (transport->error_count > transport->pathmaxrxt)) { pr_debug("%s: association:%p transport addr:%pISpc failed\n", __func__, asoc, &transport->ipaddr.sa); sctp_assoc_control_transport(asoc, transport, SCTP_TRANSPORT_DOWN, SCTP_FAILED_THRESHOLD); } /* E2) For the destination address for which the timer * expires, set RTO <- RTO * 2 ("back off the timer"). The * maximum value discussed in rule C7 above (RTO.max) may be * used to provide an upper bound to this doubling operation. * * Special Case: the first HB doesn't trigger exponential backoff. * The first unacknowledged HB triggers it. We do this with a flag * that indicates that we have an outstanding HB. */ if (!is_hb || transport->hb_sent) { transport->rto = min((transport->rto * 2), transport->asoc->rto_max); sctp_max_rto(asoc, transport); } } /* Worker routine to handle INIT command failure. 
*/ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, struct sctp_association *asoc, unsigned int error) { struct sctp_ulpevent *event; event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC, (__u16)error, 0, 0, NULL, GFP_ATOMIC); if (event) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); /* SEND_FAILED sent later when cleaning up the association. */ asoc->outqueue.error = error; sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); } /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, struct sctp_association *asoc, sctp_event_t event_type, sctp_subtype_t subtype, struct sctp_chunk *chunk, unsigned int error) { struct sctp_ulpevent *event; struct sctp_chunk *abort; /* Cancel any partial delivery in progress. */ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT) event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, (__u16)error, 0, 0, chunk, GFP_ATOMIC); else event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, (__u16)error, 0, 0, NULL, GFP_ATOMIC); if (event) sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event)); if (asoc->overall_error_count >= asoc->max_retrans) { abort = sctp_make_violation_max_retrans(asoc, chunk); if (abort) sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); } sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); /* SEND_FAILED sent later when cleaning up the association. */ asoc->outqueue.error = error; sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); } /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT * inside the cookie. In reality, this is only used for INIT-ACK processing * since all other cases use "temporary" associations and can do all * their work in statefuns directly. */ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_init_chunk_t *peer_init, gfp_t gfp) { int error; /* We only process the init as a sideeffect in a single * case. This is when we process the INIT-ACK. If we * fail during INIT processing (due to malloc problems), * just return the error and stop processing the stack. */ if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp)) error = -ENOMEM; else error = 0; return error; } /* Helper function to break out starting up of heartbeat timers. */ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, struct sctp_association *asoc) { struct sctp_transport *t; /* Start a heartbeat timer for each transport on the association. * hold a reference on the transport to make sure none of * the needed data structures go away. */ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) sctp_transport_hold(t); } } static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, struct sctp_association *asoc) { struct sctp_transport *t; /* Stop all heartbeat timers. 
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer.  */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);
}

/* Helper function to handle the reception of a HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	sctp_sender_hb_info_t *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING. If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being
	 * reached to shutdown the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	if (t->dst)
		dst_confirm(t->dst);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer.  */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}

/* Helper function to process the SACK command.  */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		struct net *net = sock_net(asoc->base.sk);

		/* There are no more TSNs awaiting SACK.  */
		err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
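 *
 * If the chunk has no transport yet, an alternate path is picked with
 * sctp_assoc_choose_alter_transport(); the chosen transport is recorded
 * in asoc->shutdown_last_sent_to and the T2 timeout is seeded from that
 * transport's current RTO.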
*/ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { struct sctp_transport *t; if (chunk->transport) t = chunk->transport; else { t = sctp_assoc_choose_alter_transport(asoc, asoc->shutdown_last_sent_to); chunk->transport = t; } asoc->shutdown_last_sent_to = t; asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; } /* Helper function to change the state of an association. */ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, sctp_state_t state) { struct sock *sk = asoc->base.sk; asoc->state = state; pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]); if (sctp_style(sk, TCP)) { /* Change the sk->sk_state of a TCP-style socket that has * successfully completed a connect() call. */ if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) sk->sk_state = SCTP_SS_ESTABLISHED; /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */ if (sctp_state(asoc, SHUTDOWN_RECEIVED) && sctp_sstate(sk, ESTABLISHED)) sk->sk_shutdown |= RCV_SHUTDOWN; } if (sctp_state(asoc, COOKIE_WAIT)) { /* Reset init timeouts since they may have been * increased due to timer expirations. */ asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial; asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial; } if (sctp_state(asoc, ESTABLISHED) || sctp_state(asoc, CLOSED) || sctp_state(asoc, SHUTDOWN_RECEIVED)) { /* Wake up any processes waiting in the asoc's wait queue in * sctp_wait_for_connect() or sctp_wait_for_sndbuf(). */ if (waitqueue_active(&asoc->wait)) wake_up_interruptible(&asoc->wait); /* Wake up any processes waiting in the sk's sleep queue of * a TCP-style or UDP-style peeled-off socket in * sctp_wait_for_accept() or sctp_wait_for_packet(). * For a UDP-style socket, the waiters are woken up by the * notifications. */ if (!sctp_style(sk, UDP)) sk->sk_state_change(sk); } } /* Helper function to delete an association. */ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; /* If it is a non-temporary association belonging to a TCP-style * listening socket that is not closed, do not free it so that accept() * can pick it up later. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) && (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) return; sctp_unhash_established(asoc); sctp_association_free(asoc); } /* * ADDIP Section 4.1 ASCONF Chunk Procedures * A4) Start a T-4 RTO timer, using the RTO value of the selected * destination address (we use active path instead of primary path just * because primary path may be inactive. */ static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { struct sctp_transport *t; t = sctp_assoc_choose_alter_transport(asoc, chunk->transport); asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; chunk->transport = t; } /* Process an incoming Operation Error Chunk. 
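 *
 * Each error cause in the chunk is surfaced to the ULP as a remote
 * error event; per ADDIP A9, an "unrecognized chunk" cause that names
 * ASCONF additionally clears peer.asconf_capable and stops the T4
 * timer.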
 */
static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		sctp_ulpq_tail_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			sctp_chunkhdr_t *unk_chunk_hdr;

			unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Process variable FWDTSN chunk information. */
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
				    struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk) {
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
	}
}

/* Helper function to remove the association non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;
	struct list_head *temp;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);
	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}

static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     sctp_event_timeout_t timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}

}

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
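 *
 * For example, a user message fragmented into three DATA chunks is
 * queued with three consecutive sctp_outq_tail() calls below; the loop
 * stops at the first error, leaving later fragments unqueued.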
 */
static int sctp_cmd_send_msg(struct sctp_association *asoc,
				struct sctp_datamsg *msg)
{
	struct sctp_chunk *chunk;
	int error = 0;

	list_for_each_entry(chunk, &msg->chunks, frag_list) {
		error = sctp_outq_tail(&asoc->outqueue, chunk);
		if (error)
			break;
	}

	return error;
}

/* Send the next ASCONF packet currently stored in the association.
 * This happens after the ASCONF_ACK was successfully processed.
 */
static void sctp_cmd_send_asconf(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);

	/* Send the next asconf chunk from the addip chunk
	 * queue.
	 */
	if (!list_empty(&asoc->addip_chunk_list)) {
		struct list_head *entry = asoc->addip_chunk_list.next;
		struct sctp_chunk *asconf = list_entry(entry,
						struct sctp_chunk, list);
		list_del_init(entry);

		/* Hold the chunk until an ASCONF_ACK is received. */
		sctp_chunk_hold(asconf);
		if (sctp_primitive_ASCONF(net, asoc, asconf))
			sctp_chunk_free(asconf);
		else
			asoc->addip_last_asconf = asconf;
	}
}

/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
	       sctp_state_t state,
	       struct sctp_endpoint *ep,
	       struct sctp_association *asoc,
	       void *event_arg,
	       gfp_t gfp)
{
	sctp_cmd_seq_t commands;
	const sctp_sm_table_entry_t *state_fn;
	sctp_disposition_t status;
	int error = 0;
	typedef const char *(printfn_t)(sctp_subtype_t);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     sctp_state_t state,
			     struct sctp_endpoint *ep,
			     struct sctp_association *asoc,
			     void *event_arg,
			     sctp_disposition_t status,
			     sctp_cmd_seq_t *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
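	 *
	 * As written, the commands queued by the state function are
	 * always interpreted first; the switch on status below only
	 * post-processes the exceptional dispositions.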
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging... */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
		/* This should now be a command. */
		break;

	case SCTP_DISPOSITION_CONSUME:
	case SCTP_DISPOSITION_ABORT:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter.  */
static int sctp_cmd_interpreter(sctp_event_t event_type,
				sctp_subtype_t subtype,
				sctp_state_t state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				sctp_disposition_t status,
				sctp_cmd_seq_t *commands,
				gfp_t gfp)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
	struct sctp_chunk *new_obj;
	struct sctp_chunk *chunk = NULL;
	struct sctp_packet *packet;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	struct sctp_sackhdr sackh;
	int local_cork = 0;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *	while (cmds)
	 *		cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}

			/* Register with the endpoint.  */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			sctp_hash_established(asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_assoc_update(asoc, cmd->obj.asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}
			/* Delete the current association.  */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state.  */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN.  */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			/* Move the Cumulative TSN Ack ahead.
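			 *
			 * Everything at or before this TSN is considered
			 * delivered: the map is advanced, the reassembly
			 * queue is flushed up to that point and any partial
			 * delivery in progress is aborted.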
			 */
			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);

			/* purge the fragmentation queue */
			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);

			/* Abort any in progress partial delivery. */
			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK.  */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk.  */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk.  */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enable COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios, the following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk.  */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer.  */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer.  */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If a caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer.  */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer.
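			 *
			 * Used for out-of-the-blue replies: the packet is
			 * transmitted immediately and then released with
			 * sctp_ootb_pkt_free().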
*/ packet = cmd->obj.packet; sctp_packet_transmit(packet); sctp_ootb_pkt_free(packet); break; case SCTP_CMD_T1_RETRAN: /* Mark a transport for retransmission. */ sctp_retransmit(&asoc->outqueue, cmd->obj.transport, SCTP_RTXR_T1_RTX); break; case SCTP_CMD_RETRAN: /* Mark a transport for retransmission. */ sctp_retransmit(&asoc->outqueue, cmd->obj.transport, SCTP_RTXR_T3_RTX); break; case SCTP_CMD_ECN_CE: /* Do delayed CE processing. */ sctp_do_ecn_ce_work(asoc, cmd->obj.u32); break; case SCTP_CMD_ECN_ECNE: /* Do delayed ECNE processing. */ new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32, chunk); if (new_obj) sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(new_obj)); break; case SCTP_CMD_ECN_CWR: /* Do delayed CWR processing. */ sctp_do_ecn_cwr_work(asoc, cmd->obj.u32); break; case SCTP_CMD_SETUP_T2: sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk); break; case SCTP_CMD_TIMER_START_ONCE: timer = &asoc->timers[cmd->obj.to]; if (timer_pending(timer)) break; /* fall through */ case SCTP_CMD_TIMER_START: timer = &asoc->timers[cmd->obj.to]; timeout = asoc->timeouts[cmd->obj.to]; BUG_ON(!timeout); timer->expires = jiffies + timeout; sctp_association_hold(asoc); add_timer(timer); break; case SCTP_CMD_TIMER_RESTART: timer = &asoc->timers[cmd->obj.to]; timeout = asoc->timeouts[cmd->obj.to]; if (!mod_timer(timer, jiffies + timeout)) sctp_association_hold(asoc); break; case SCTP_CMD_TIMER_STOP: timer = &asoc->timers[cmd->obj.to]; if (del_timer(timer)) sctp_association_put(asoc); break; case SCTP_CMD_INIT_CHOOSE_TRANSPORT: chunk = cmd->obj.chunk; t = sctp_assoc_choose_alter_transport(asoc, asoc->init_last_sent_to); asoc->init_last_sent_to = t; chunk->transport = t; t->init_sent_count++; /* Set the new transport as primary */ sctp_assoc_set_primary(asoc, t); break; case SCTP_CMD_INIT_RESTART: /* Do the needed accounting and updates * associated with restarting an initialization * timer. Only multiply the timeout by two if * all transports have been tried at the current * timeout. */ sctp_cmd_t1_timer_update(asoc, SCTP_EVENT_TIMEOUT_T1_INIT, "INIT"); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); break; case SCTP_CMD_COOKIEECHO_RESTART: /* Do the needed accounting and updates * associated with restarting an initialization * timer. Only multiply the timeout by two if * all transports have been tried at the current * timeout. */ sctp_cmd_t1_timer_update(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE, "COOKIE"); /* If we've sent any data bundled with * COOKIE-ECHO we need to resend. */ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { sctp_retransmit_mark(&asoc->outqueue, t, SCTP_RTXR_T1_RTX); } sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); break; case SCTP_CMD_INIT_FAILED: sctp_cmd_init_failed(commands, asoc, cmd->obj.err); break; case SCTP_CMD_ASSOC_FAILED: sctp_cmd_assoc_failed(commands, asoc, event_type, subtype, chunk, cmd->obj.err); break; case SCTP_CMD_INIT_COUNTER_INC: asoc->init_err_counter++; break; case SCTP_CMD_INIT_COUNTER_RESET: asoc->init_err_counter = 0; asoc->init_cycle = 0; list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { t->init_sent_count = 0; } break; case SCTP_CMD_REPORT_DUP: sctp_tsnmap_mark_dup(&asoc->peer.tsn_map, cmd->obj.u32); break; case SCTP_CMD_REPORT_BAD_TAG: pr_debug("%s: vtag mismatch!\n", __func__); break; case SCTP_CMD_STRIKE: /* Mark one strike against a transport. 
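			 *
			 * This invokes the RFC 2960 8.2 path-failure logic
			 * above with is_hb = 0, so both the transport and
			 * the overall error counters are bumped.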
*/ sctp_do_8_2_transport_strike(commands, asoc, cmd->obj.transport, 0); break; case SCTP_CMD_TRANSPORT_IDLE: t = cmd->obj.transport; sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE); break; case SCTP_CMD_TRANSPORT_HB_SENT: t = cmd->obj.transport; sctp_do_8_2_transport_strike(commands, asoc, t, 1); t->hb_sent = 1; break; case SCTP_CMD_TRANSPORT_ON: t = cmd->obj.transport; sctp_cmd_transport_on(commands, asoc, t, chunk); break; case SCTP_CMD_HB_TIMERS_START: sctp_cmd_hb_timers_start(commands, asoc); break; case SCTP_CMD_HB_TIMER_UPDATE: t = cmd->obj.transport; sctp_cmd_hb_timer_update(commands, t); break; case SCTP_CMD_HB_TIMERS_STOP: sctp_cmd_hb_timers_stop(commands, asoc); break; case SCTP_CMD_REPORT_ERROR: error = cmd->obj.error; break; case SCTP_CMD_PROCESS_CTSN: /* Dummy up a SACK for processing. */ sackh.cum_tsn_ack = cmd->obj.be32; sackh.a_rwnd = asoc->peer.rwnd + asoc->outqueue.outstanding_bytes; sackh.num_gap_ack_blocks = 0; sackh.num_dup_tsns = 0; chunk->subh.sack_hdr = &sackh; sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk)); break; case SCTP_CMD_DISCARD_PACKET: /* We need to discard the whole packet. * Uncork the queue since there might be * responses pending */ chunk->pdiscard = 1; if (asoc) { sctp_outq_uncork(&asoc->outqueue); local_cork = 0; } break; case SCTP_CMD_RTO_PENDING: t = cmd->obj.transport; t->rto_pending = 1; break; case SCTP_CMD_PART_DELIVER: sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC); break; case SCTP_CMD_RENEGE: sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk, GFP_ATOMIC); break; case SCTP_CMD_SETUP_T4: sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk); break; case SCTP_CMD_PROCESS_OPERR: sctp_cmd_process_operr(commands, asoc, chunk); break; case SCTP_CMD_CLEAR_INIT_TAG: asoc->peer.i.init_tag = 0; break; case SCTP_CMD_DEL_NON_PRIMARY: sctp_cmd_del_non_primary(asoc); break; case SCTP_CMD_T3_RTX_TIMERS_STOP: sctp_cmd_t3_rtx_timers_stop(commands, asoc); break; case SCTP_CMD_FORCE_PRIM_RETRAN: t = asoc->peer.retran_path; asoc->peer.retran_path = asoc->peer.primary_path; error = sctp_outq_uncork(&asoc->outqueue); local_cork = 0; asoc->peer.retran_path = t; break; case SCTP_CMD_SET_SK_ERR: sctp_cmd_set_sk_err(asoc, cmd->obj.error); break; case SCTP_CMD_ASSOC_CHANGE: sctp_cmd_assoc_change(commands, asoc, cmd->obj.u8); break; case SCTP_CMD_ADAPTATION_IND: sctp_cmd_adaptation_ind(commands, asoc); break; case SCTP_CMD_ASSOC_SHKEY: error = sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); break; case SCTP_CMD_UPDATE_INITTAG: asoc->peer.i.init_tag = cmd->obj.u32; break; case SCTP_CMD_SEND_MSG: if (!asoc->outqueue.cork) { sctp_outq_cork(&asoc->outqueue); local_cork = 1; } error = sctp_cmd_send_msg(asoc, cmd->obj.msg); break; case SCTP_CMD_SEND_NEXT_ASCONF: sctp_cmd_send_asconf(asoc); break; case SCTP_CMD_PURGE_ASCONF_QUEUE: sctp_asconf_queue_teardown(asoc); break; case SCTP_CMD_SET_ASOC: asoc = cmd->obj.asoc; break; default: pr_warn("Impossible command: %u\n", cmd->verb); break; } if (error) break; } out: /* If this is in response to a received chunk, wait until * we are done with the packet to open the queue so that we don't * send multiple packets in response to a single request. */ if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { if (chunk->end_of_packet || chunk->singleton) error = sctp_outq_uncork(&asoc->outqueue); } else if (local_cork) error = sctp_outq_uncork(&asoc->outqueue); return error; nomem: error = -ENOMEM; goto out; }
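
/* Illustrative sketch (not part of the original file): how an event is fed
 * into the state machine above.  The timer handlers at the top of this file
 * funnel into sctp_do_sm() in roughly this shape; the exact locking and the
 * sctp_generate_timeout_event() wrapper are assumptions inferred from the
 * callers visible in this file rather than quoted from them:
 *
 *	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 *			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_SACK),
 *			   asoc->state, asoc->ep, asoc,
 *			   (void *)SCTP_EVENT_TIMEOUT_SACK, GFP_ATOMIC);
 *
 * The state table lookup picks the handler, the handler queues side-effect
 * commands, and sctp_side_effects()/sctp_cmd_interpreter() then run them.
 */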
/* * linux/kernel/signal.c * * Copyright (C) 1991, 1992 Linus Torvalds * * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson * * 2003-06-02 Jim Houston - Concurrent Computer Corp. * Changes to use preallocated sigqueue structures * to allow signals to be sent reliably. */ #include <linux/slab.h> #include <linux/export.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/coredump.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/signalfd.h> #include <linux/ratelimit.h> #include <linux/tracehook.h> #include <linux/capability.h> #include <linux/freezer.h> #include <linux/pid_namespace.h> #include <linux/nsproxy.h> #include <linux/user_namespace.h> #include <linux/uprobes.h> #include <linux/compat.h> #define CREATE_TRACE_POINTS #include <trace/events/signal.h> #include <asm/param.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <asm/siginfo.h> #include <asm/cacheflush.h> #include "audit.h" /* audit_signal_info() */ /* * SLAB caches for signal bits. */ static struct kmem_cache *sigqueue_cachep; int print_fatal_signals __read_mostly; static void __user *sig_handler(struct task_struct *t, int sig) { return t->sighand->action[sig - 1].sa.sa_handler; } static int sig_handler_ignored(void __user *handler, int sig) { /* Is it explicitly or implicitly ignored? */ return handler == SIG_IGN || (handler == SIG_DFL && sig_kernel_ignore(sig)); } static int sig_task_ignored(struct task_struct *t, int sig, bool force) { void __user *handler; handler = sig_handler(t, sig); if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && handler == SIG_DFL && !force) return 1; return sig_handler_ignored(handler, sig); } static int sig_ignored(struct task_struct *t, int sig, bool force) { /* * Blocked signals are never ignored, since the * signal handler may change by the time it is * unblocked. */ if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) return 0; if (!sig_task_ignored(t, sig, force)) return 0; /* * Tracers may want to know about even ignored signals. */ return !t->ptrace; } /* * Re-calculate pending state from the set of locally pending * signals, globally pending signals, and blocked signals. */ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) { unsigned long ready; long i; switch (_NSIG_WORDS) { default: for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) ready |= signal->sig[i] &~ blocked->sig[i]; break; case 4: ready = signal->sig[3] &~ blocked->sig[3]; ready |= signal->sig[2] &~ blocked->sig[2]; ready |= signal->sig[1] &~ blocked->sig[1]; ready |= signal->sig[0] &~ blocked->sig[0]; break; case 2: ready = signal->sig[1] &~ blocked->sig[1]; ready |= signal->sig[0] &~ blocked->sig[0]; break; case 1: ready = signal->sig[0] &~ blocked->sig[0]; } return ready != 0; } #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) static int recalc_sigpending_tsk(struct task_struct *t) { if ((t->jobctl & JOBCTL_PENDING_MASK) || PENDING(&t->pending, &t->blocked) || PENDING(&t->signal->shared_pending, &t->blocked)) { set_tsk_thread_flag(t, TIF_SIGPENDING); return 1; } /* * We must never clear the flag in another thread, or in current * when it's possible the current syscall is returning -ERESTART*. * So we don't clear it here, and only callers who know they should do. */ return 0; } /* * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. 
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
*/ void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask) { BUG_ON(mask & ~JOBCTL_PENDING_MASK); if (mask & JOBCTL_STOP_PENDING) mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; task->jobctl &= ~mask; if (!(task->jobctl & JOBCTL_PENDING_MASK)) task_clear_jobctl_trapping(task); } /** * task_participate_group_stop - participate in a group stop * @task: task participating in a group stop * * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. * Group stop states are cleared and the group stop count is consumed if * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group * stop, the appropriate %SIGNAL_* flags are set. * * CONTEXT: * Must be called with @task->sighand->siglock held. * * RETURNS: * %true if group stop completion should be notified to the parent, %false * otherwise. */ static bool task_participate_group_stop(struct task_struct *task) { struct signal_struct *sig = task->signal; bool consume = task->jobctl & JOBCTL_STOP_CONSUME; WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); if (!consume) return false; if (!WARN_ON_ONCE(sig->group_stop_count == 0)) sig->group_stop_count--; /* * Tell the caller to notify completion iff we are entering into a * fresh group stop. Read comment in do_signal_stop() for details. */ if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { sig->flags = SIGNAL_STOP_STOPPED; return true; } return false; } /* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an * appropriate lock must be held to stop the target task from exiting */ static struct sigqueue * __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) { struct sigqueue *q = NULL; struct user_struct *user; /* * Protect access to @t credentials. This can go away when all * callers hold rcu read lock. */ rcu_read_lock(); user = get_uid(__task_cred(t)->user); atomic_inc(&user->sigpending); rcu_read_unlock(); if (override_rlimit || atomic_read(&user->sigpending) <= task_rlimit(t, RLIMIT_SIGPENDING)) { q = kmem_cache_alloc(sigqueue_cachep, flags); } else { print_dropped_signal(sig); } if (unlikely(q == NULL)) { atomic_dec(&user->sigpending); free_uid(user); } else { INIT_LIST_HEAD(&q->list); q->flags = 0; q->user = user; } return q; } static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) return; atomic_dec(&q->user->sigpending); free_uid(q->user); kmem_cache_free(sigqueue_cachep, q); } void flush_sigqueue(struct sigpending *queue) { struct sigqueue *q; sigemptyset(&queue->signal); while (!list_empty(&queue->list)) { q = list_entry(queue->list.next, struct sigqueue , list); list_del_init(&q->list); __sigqueue_free(q); } } /* * Flush all pending signals for a task. 
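 *
 * Both the per-task and the shared pending queues are drained and
 * TIF_SIGPENDING is cleared; the flush_signals() wrapper below takes
 * the siglock around this.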
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */
void unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.
		 * This must be a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
* By using wake_up_state, we ensure the process will wake up and * handle its death signal. */ if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) kick_process(t); } /* * Remove signals in mask from the pending set and queue. * Returns 1 if any signals were found. * * All callers must be holding the siglock. * * This version takes a sigset mask and looks at all signals, * not just those in the first mask word. */ static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) { struct sigqueue *q, *n; sigset_t m; sigandsets(&m, mask, &s->signal); if (sigisemptyset(&m)) return 0; sigandnsets(&s->signal, &s->signal, mask); list_for_each_entry_safe(q, n, &s->list, list) { if (sigismember(mask, q->info.si_signo)) { list_del_init(&q->list); __sigqueue_free(q); } } return 1; } /* * Remove signals in mask from the pending set and queue. * Returns 1 if any signals were found. * * All callers must be holding the siglock. */ static int rm_from_queue(unsigned long mask, struct sigpending *s) { struct sigqueue *q, *n; if (!sigtestsetmask(&s->signal, mask)) return 0; sigdelsetmask(&s->signal, mask); list_for_each_entry_safe(q, n, &s->list, list) { if (q->info.si_signo < SIGRTMIN && (mask & sigmask(q->info.si_signo))) { list_del_init(&q->list); __sigqueue_free(q); } } return 1; } static inline int is_si_special(const struct siginfo *info) { return info <= SEND_SIG_FORCED; } static inline bool si_fromuser(const struct siginfo *info) { return info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)); } /* * called with RCU read lock from check_kill_permission() */ static int kill_ok_by_cred(struct task_struct *t) { const struct cred *cred = current_cred(); const struct cred *tcred = __task_cred(t); if (uid_eq(cred->euid, tcred->suid) || uid_eq(cred->euid, tcred->uid) || uid_eq(cred->uid, tcred->suid) || uid_eq(cred->uid, tcred->uid)) return 1; if (ns_capable(tcred->user_ns, CAP_KILL)) return 1; return 0; } /* * Bad permissions for sending the signal * - the caller must hold the RCU read lock */ static int check_kill_permission(int sig, struct siginfo *info, struct task_struct *t) { struct pid *sid; int error; if (!valid_signal(sig)) return -EINVAL; if (!si_fromuser(info)) return 0; error = audit_signal_info(sig, t); /* Let audit system see the signal */ if (error) return error; if (!same_thread_group(current, t) && !kill_ok_by_cred(t)) { switch (sig) { case SIGCONT: sid = task_session(t); /* * We don't return the error if sid == NULL. The * task was unhashed, the caller must notice this. */ if (!sid || sid == task_session(current)) break; default: return -EPERM; } } return security_task_kill(t, info, sig, 0); } /** * ptrace_trap_notify - schedule trap to notify ptracer * @t: tracee wanting to notify tracer * * This function schedules sticky ptrace trap which is cleared on the next * TRAP_STOP to notify ptracer of an event. @t must have been seized by * ptracer. * * If @t is running, STOP trap will be taken. If trapped for STOP and * ptracer is listening for events, tracee is woken up so that it can * re-trap for the new event. If trapped otherwise, STOP trap will be * eventually taken without returning to userland after the existing traps * are finished by PTRACE_CONT. * * CONTEXT: * Must be called with @task->sighand->siglock held. 
*/ static void ptrace_trap_notify(struct task_struct *t) { WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); assert_spin_locked(&t->sighand->siglock); task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); } /* * Handle magic process-wide effects of stop/continue signals. Unlike * the signal actions, these happen immediately at signal-generation * time regardless of blocking, ignoring, or handling. This does the * actual continuing for SIGCONT, but not the actual stopping for stop * signals. The process stop is done as a signal action for SIG_DFL. * * Returns true if the signal should be actually delivered, otherwise * it should be dropped. */ static int prepare_signal(int sig, struct task_struct *p, bool force) { struct signal_struct *signal = p->signal; struct task_struct *t; if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { /* * The process is in the middle of dying, nothing to do. */ } else if (sig_kernel_stop(sig)) { /* * This is a stop signal. Remove SIGCONT from all queues. */ rm_from_queue(sigmask(SIGCONT), &signal->shared_pending); t = p; do { rm_from_queue(sigmask(SIGCONT), &t->pending); } while_each_thread(p, t); } else if (sig == SIGCONT) { unsigned int why; /* * Remove all stop signals from all queues, wake all threads. */ rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); t = p; do { task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); if (likely(!(t->ptrace & PT_SEIZED))) wake_up_state(t, __TASK_STOPPED); else ptrace_trap_notify(t); } while_each_thread(p, t); /* * Notify the parent with CLD_CONTINUED if we were stopped. * * If we were in the middle of a group stop, we pretend it * was already finished, and then continued. Since SIGCHLD * doesn't queue we report only CLD_STOPPED, as if the next * CLD_CONTINUED was dropped. */ why = 0; if (signal->flags & SIGNAL_STOP_STOPPED) why |= SIGNAL_CLD_CONTINUED; else if (signal->group_stop_count) why |= SIGNAL_CLD_STOPPED; if (why) { /* * The first thread which returns from do_signal_stop() * will take ->siglock, notice SIGNAL_CLD_MASK, and * notify its parent. See get_signal_to_deliver(). */ signal->flags = why | SIGNAL_STOP_CONTINUED; signal->group_stop_count = 0; signal->group_exit_code = 0; } } return !sig_ignored(p, sig, force); } /* * Test if P wants to take SIG. After we've checked all threads with this, * it's equivalent to finding no threads not blocking SIG. Any threads not * blocking SIG were ruled out because they are not running and already * have pending signals. Such threads will dequeue from the shared queue * as soon as they're available, so putting the signal on the shared queue * will be equivalent to sending it to one such thread. */ static inline int wants_signal(int sig, struct task_struct *p) { if (sigismember(&p->blocked, sig)) return 0; if (p->flags & PF_EXITING) return 0; if (sig == SIGKILL) return 1; if (task_is_stopped_or_traced(p)) return 0; return task_curr(p) || !signal_pending(p); } static void complete_signal(int sig, struct task_struct *p, int group) { struct signal_struct *signal = p->signal; struct task_struct *t; /* * Now find a thread we can wake up to take the signal off the queue. * * If the main thread wants the signal, it gets first crack. * Probably the least surprising to the average bear. */ if (wants_signal(sig, p)) t = p; else if (!group || thread_group_empty(p)) /* * There is just one thread and it does not need to be woken. * It will dequeue unblocked signals before it runs again. 
*/ return; else { /* * Otherwise try to find a suitable thread. */ t = signal->curr_target; while (!wants_signal(sig, t)) { t = next_thread(t); if (t == signal->curr_target) /* * No thread needs to be woken. * Any eligible threads will see * the signal in the queue soon. */ return; } signal->curr_target = t; } /* * Found a killable thread. If the signal will be fatal, * then start taking the whole group down immediately. */ if (sig_fatal(p, sig) && !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && !sigismember(&t->real_blocked, sig) && (sig == SIGKILL || !t->ptrace)) { /* * This signal will be fatal to the whole group. */ if (!sig_kernel_coredump(sig)) { /* * Start a group exit and wake everybody up. * This way we don't have other threads * running and doing things after a slower * thread has the fatal signal pending. */ signal->flags = SIGNAL_GROUP_EXIT; signal->group_exit_code = sig; signal->group_stop_count = 0; t = p; do { task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); } while_each_thread(p, t); return; } } /* * The signal is already in the shared-pending queue. * Tell the chosen thread to wake up and dequeue it. */ signal_wake_up(t, sig == SIGKILL); return; } static inline int legacy_queue(struct sigpending *signals, int sig) { return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); } #ifdef CONFIG_USER_NS static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) { if (current_user_ns() == task_cred_xxx(t, user_ns)) return; if (SI_FROMKERNEL(info)) return; rcu_read_lock(); info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns), make_kuid(current_user_ns(), info->si_uid)); rcu_read_unlock(); } #else static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t) { return; } #endif static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, int group, int from_ancestor_ns) { struct sigpending *pending; struct sigqueue *q; int override_rlimit; int ret = 0, result; assert_spin_locked(&t->sighand->siglock); result = TRACE_SIGNAL_IGNORED; if (!prepare_signal(sig, t, from_ancestor_ns || (info == SEND_SIG_FORCED))) goto ret; pending = group ? &t->signal->shared_pending : &t->pending; /* * Short-circuit ignored signals and support queuing * exactly one non-rt signal, so that we can get more * detailed information about the cause of the signal. */ result = TRACE_SIGNAL_ALREADY_PENDING; if (legacy_queue(pending, sig)) goto ret; result = TRACE_SIGNAL_DELIVERED; /* * fast-pathed signals for kernel-internal things like SIGSTOP * or SIGKILL. */ if (info == SEND_SIG_FORCED) goto out_set; /* * Real-time signals must be queued if sent by sigqueue, or * some other real-time mechanism. It is implementation * defined whether kill() does so. We attempt to do so, on * the principle of least surprise, but since kill is not * allowed to fail with EAGAIN when low on memory we just * make sure at least one signal gets delivered and don't * pass on the info struct. 
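	 *
	 * Concretely: if __sigqueue_alloc() fails below for a non-RT
	 * signal sent by kill(), the signal bit is still set in the
	 * pending mask at out_set, so delivery still happens but the
	 * queued siginfo detail is lost (TRACE_SIGNAL_LOSE_INFO).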
*/ if (sig < SIGRTMIN) override_rlimit = (is_si_special(info) || info->si_code >= 0); else override_rlimit = 0; q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, override_rlimit); if (q) { list_add_tail(&q->list, &pending->list); switch ((unsigned long) info) { case (unsigned long) SEND_SIG_NOINFO: q->info.si_signo = sig; q->info.si_errno = 0; q->info.si_code = SI_USER; q->info.si_pid = task_tgid_nr_ns(current, task_active_pid_ns(t)); q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); break; case (unsigned long) SEND_SIG_PRIV: q->info.si_signo = sig; q->info.si_errno = 0; q->info.si_code = SI_KERNEL; q->info.si_pid = 0; q->info.si_uid = 0; break; default: copy_siginfo(&q->info, info); if (from_ancestor_ns) q->info.si_pid = 0; break; } userns_fixup_signal_uid(&q->info, t); } else if (!is_si_special(info)) { if (sig >= SIGRTMIN && info->si_code != SI_USER) { /* * Queue overflow, abort. We may abort if the * signal was rt and sent by user using something * other than kill(). */ result = TRACE_SIGNAL_OVERFLOW_FAIL; ret = -EAGAIN; goto ret; } else { /* * This is a silent loss of information. We still * send the signal, but the *info bits are lost. */ result = TRACE_SIGNAL_LOSE_INFO; } } out_set: signalfd_notify(t, sig); sigaddset(&pending->signal, sig); complete_signal(sig, t, group); ret: trace_signal_generate(sig, info, t, group, result); return ret; } static int send_signal(int sig, struct siginfo *info, struct task_struct *t, int group) { int from_ancestor_ns = 0; #ifdef CONFIG_PID_NS from_ancestor_ns = si_fromuser(info) && !task_pid_nr_ns(current, task_active_pid_ns(t)); #endif return __send_signal(sig, info, t, group, from_ancestor_ns); } static void print_fatal_signal(int signr) { struct pt_regs *regs = signal_pt_regs(); printk("%s/%d: potentially unexpected fatal signal %d.\n", current->comm, task_pid_nr(current), signr); #if defined(__i386__) && !defined(__arch_um__) printk("code at %08lx: ", regs->ip); { int i; for (i = 0; i < 16; i++) { unsigned char insn; if (get_user(insn, (unsigned char *)(regs->ip + i))) break; printk("%02x ", insn); } } #endif printk("\n"); preempt_disable(); show_regs(regs); preempt_enable(); } static int __init setup_print_fatal_signals(char *str) { get_option (&str, &print_fatal_signals); return 1; } __setup("print-fatal-signals=", setup_print_fatal_signals); int __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { return send_signal(sig, info, p, 1); } static int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) { return send_signal(sig, info, t, 0); } int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, bool group) { unsigned long flags; int ret = -ESRCH; if (lock_task_sighand(p, &flags)) { ret = send_signal(sig, info, p, group); unlock_task_sighand(p, &flags); } return ret; } /* * Force a signal that the process can't ignore: if necessary * we unblock the signal and change any SIG_IGN to SIG_DFL. * * Note: If we unblock the signal, we always reset it to SIG_DFL, * since we do not want to have a signal handler that was blocked * be invoked when user space had explicitly blocked it. * * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE. 
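 *
 * SIGNAL_UNKILLABLE is only dropped below once the handler has been
 * reset to SIG_DFL, so even an init-like task cannot swallow the
 * forced signal.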
*/ int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) { unsigned long int flags; int ret, blocked, ignored; struct k_sigaction *action; spin_lock_irqsave(&t->sighand->siglock, flags); action = &t->sighand->action[sig-1]; ignored = action->sa.sa_handler == SIG_IGN; blocked = sigismember(&t->blocked, sig); if (blocked || ignored) { action->sa.sa_handler = SIG_DFL; if (blocked) { sigdelset(&t->blocked, sig); recalc_sigpending_and_wake(t); } } if (action->sa.sa_handler == SIG_DFL) t->signal->flags &= ~SIGNAL_UNKILLABLE; ret = specific_send_sig_info(sig, info, t); spin_unlock_irqrestore(&t->sighand->siglock, flags); return ret; } /* * Nuke all other threads in the group. */ int zap_other_threads(struct task_struct *p) { struct task_struct *t = p; int count = 0; p->signal->group_stop_count = 0; while_each_thread(p, t) { task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); count++; /* Don't bother with already dead threads */ if (t->exit_state) continue; sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); } return count; } struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, unsigned long *flags) { struct sighand_struct *sighand; for (;;) { local_irq_save(*flags); rcu_read_lock(); sighand = rcu_dereference(tsk->sighand); if (unlikely(sighand == NULL)) { rcu_read_unlock(); local_irq_restore(*flags); break; } spin_lock(&sighand->siglock); if (likely(sighand == tsk->sighand)) { rcu_read_unlock(); break; } spin_unlock(&sighand->siglock); rcu_read_unlock(); local_irq_restore(*flags); } return sighand; } /* * send signal info to all the members of a group */ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { int ret; rcu_read_lock(); ret = check_kill_permission(sig, info, p); rcu_read_unlock(); if (!ret && sig) ret = do_send_sig_info(sig, info, p, true); return ret; } /* * __kill_pgrp_info() sends a signal to a process group: this is what the tty * control characters do (^C, ^Z etc) * - the caller must hold at least a readlock on tasklist_lock */ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) { struct task_struct *p = NULL; int retval, success; success = 0; retval = -ESRCH; do_each_pid_task(pgrp, PIDTYPE_PGID, p) { int err = group_send_sig_info(sig, info, p); success |= !err; retval = err; } while_each_pid_task(pgrp, PIDTYPE_PGID, p); return success ? 0 : retval; } int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) { int error = -ESRCH; struct task_struct *p; rcu_read_lock(); retry: p = pid_task(pid, PIDTYPE_PID); if (p) { error = group_send_sig_info(sig, info, p); if (unlikely(error == -ESRCH)) /* * The task was unhashed in between, try again. * If it is dead, pid_task() will return NULL, * if we race with de_thread() it will find the * new leader. 
*/ goto retry; } rcu_read_unlock(); return error; } int kill_proc_info(int sig, struct siginfo *info, pid_t pid) { int error; rcu_read_lock(); error = kill_pid_info(sig, info, find_vpid(pid)); rcu_read_unlock(); return error; } static int kill_as_cred_perm(const struct cred *cred, struct task_struct *target) { const struct cred *pcred = __task_cred(target); if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) && !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid)) return 0; return 1; } /* like kill_pid_info(), but doesn't use uid/euid of "current" */ int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid, const struct cred *cred, u32 secid) { int ret = -EINVAL; struct task_struct *p; unsigned long flags; if (!valid_signal(sig)) return ret; rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (!p) { ret = -ESRCH; goto out_unlock; } if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) { ret = -EPERM; goto out_unlock; } ret = security_task_kill(p, info, sig, secid); if (ret) goto out_unlock; if (sig) { if (lock_task_sighand(p, &flags)) { ret = __send_signal(sig, info, p, 1, 0); unlock_task_sighand(p, &flags); } else ret = -ESRCH; } out_unlock: rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(kill_pid_info_as_cred); /* * kill_something_info() interprets pid in interesting ways just like kill(2). * * POSIX specifies that kill(-1,sig) is unspecified, but what we have * is probably wrong. Should make it like BSD or SYSV. */ static int kill_something_info(int sig, struct siginfo *info, pid_t pid) { int ret; if (pid > 0) { rcu_read_lock(); ret = kill_pid_info(sig, info, find_vpid(pid)); rcu_read_unlock(); return ret; } read_lock(&tasklist_lock); if (pid != -1) { ret = __kill_pgrp_info(sig, info, pid ? find_vpid(-pid) : task_pgrp(current)); } else { int retval = 0, count = 0; struct task_struct * p; for_each_process(p) { if (task_pid_vnr(p) > 1 && !same_thread_group(p, current)) { int err = group_send_sig_info(sig, info, p); ++count; if (err != -EPERM) retval = err; } } ret = count ? retval : -ESRCH; } read_unlock(&tasklist_lock); return ret; } /* * These are for backward compatibility with the rest of the kernel source. */ int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { /* * Make sure legacy kernel users don't send in bad values * (normal paths check this in check_kill_permission). */ if (!valid_signal(sig)) return -EINVAL; return do_send_sig_info(sig, info, p, false); } #define __si_special(priv) \ ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) int send_sig(int sig, struct task_struct *p, int priv) { return send_sig_info(sig, __si_special(priv), p); } void force_sig(int sig, struct task_struct *p) { force_sig_info(sig, SEND_SIG_PRIV, p); } /* * When things go south during signal handling, we * will force a SIGSEGV. And if the signal that caused * the problem was already a SIGSEGV, we'll want to * make sure we don't even try to deliver the signal.. 
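 *
 * A hedged sketch of the classic caller (the function name below is
 * illustrative, not quoted from arch code): if writing the signal
 * frame to the user stack fails, arch delivery code bails out along
 * the lines of
 *
 *	if (setup_rt_frame(sig, ka, info, oldset, regs) < 0)
 *		force_sigsegv(sig, current);
 *
 * so a broken stack cannot make delivery loop forever.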
*/ int force_sigsegv(int sig, struct task_struct *p) { if (sig == SIGSEGV) { unsigned long flags; spin_lock_irqsave(&p->sighand->siglock, flags); p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; spin_unlock_irqrestore(&p->sighand->siglock, flags); } force_sig(SIGSEGV, p); return 0; } int kill_pgrp(struct pid *pid, int sig, int priv) { int ret; read_lock(&tasklist_lock); ret = __kill_pgrp_info(sig, __si_special(priv), pid); read_unlock(&tasklist_lock); return ret; } EXPORT_SYMBOL(kill_pgrp); int kill_pid(struct pid *pid, int sig, int priv) { return kill_pid_info(sig, __si_special(priv), pid); } EXPORT_SYMBOL(kill_pid); /* * These functions support sending signals using preallocated sigqueue * structures. This is needed "because realtime applications cannot * afford to lose notifications of asynchronous events, like timer * expirations or I/O completions". In the case of POSIX Timers * we allocate the sigqueue structure from timer_create(). If this * allocation fails we are able to report the failure to the * application with an EAGAIN error. */ struct sigqueue *sigqueue_alloc(void) { struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); if (q) q->flags |= SIGQUEUE_PREALLOC; return q; } void sigqueue_free(struct sigqueue *q) { unsigned long flags; spinlock_t *lock = &current->sighand->siglock; BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); /* * We must hold ->siglock while testing q->list * to serialize with collect_signal() or with * __exit_signal()->flush_sigqueue(). */ spin_lock_irqsave(lock, flags); q->flags &= ~SIGQUEUE_PREALLOC; /* * If it is queued it will be freed when dequeued, * like the "regular" sigqueue. */ if (!list_empty(&q->list)) q = NULL; spin_unlock_irqrestore(lock, flags); if (q) __sigqueue_free(q); } int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) { int sig = q->info.si_signo; struct sigpending *pending; unsigned long flags; int ret, result; BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); ret = -1; if (!likely(lock_task_sighand(t, &flags))) goto ret; ret = 1; /* the signal is ignored */ result = TRACE_SIGNAL_IGNORED; if (!prepare_signal(sig, t, false)) goto out; ret = 0; if (unlikely(!list_empty(&q->list))) { /* * If an SI_TIMER entry is already queued, just increment * the overrun count. */ BUG_ON(q->info.si_code != SI_TIMER); q->info.si_overrun++; result = TRACE_SIGNAL_ALREADY_PENDING; goto out; } q->info.si_overrun = 0; signalfd_notify(t, sig); pending = group ? &t->signal->shared_pending : &t->pending; list_add_tail(&q->list, &pending->list); sigaddset(&pending->signal, sig); complete_signal(sig, t, group); result = TRACE_SIGNAL_DELIVERED; out: trace_signal_generate(sig, &q->info, t, group, result); unlock_task_sighand(t, &flags); ret: return ret; } /* * Let a parent know about the death of a child. * For a stopped/continued status change, use do_notify_parent_cldstop instead. * * Returns true if our parent ignored us and so we've switched to * self-reaping. */ bool do_notify_parent(struct task_struct *tsk, int sig) { struct siginfo info; unsigned long flags; struct sighand_struct *psig; bool autoreap = false; BUG_ON(sig == -1); /* do_notify_parent_cldstop should have been called instead. */ BUG_ON(task_is_stopped_or_traced(tsk)); BUG_ON(!tsk->ptrace && (tsk->group_leader != tsk || !thread_group_empty(tsk))); if (sig != SIGCHLD) { /* * This is only possible if parent == real_parent. * Check if it has changed security domain.
*/ if (tsk->parent_exec_id != tsk->parent->self_exec_id) sig = SIGCHLD; } info.si_signo = sig; info.si_errno = 0; /* * We are under tasklist_lock here so our parent is tied to * us and cannot change. * * task_active_pid_ns will always return the same pid namespace * until a task passes through release_task. * * write_lock() currently calls preempt_disable() which is the * same as rcu_read_lock(), but according to Oleg, this is not * correct to rely on this */ rcu_read_lock(); info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), task_uid(tsk)); rcu_read_unlock(); info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime); info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime); info.si_status = tsk->exit_code & 0x7f; if (tsk->exit_code & 0x80) info.si_code = CLD_DUMPED; else if (tsk->exit_code & 0x7f) info.si_code = CLD_KILLED; else { info.si_code = CLD_EXITED; info.si_status = tsk->exit_code >> 8; } psig = tsk->parent->sighand; spin_lock_irqsave(&psig->siglock, flags); if (!tsk->ptrace && sig == SIGCHLD && (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { /* * We are exiting and our parent doesn't care. POSIX.1 * defines special semantics for setting SIGCHLD to SIG_IGN * or setting the SA_NOCLDWAIT flag: we should be reaped * automatically and not left for our parent's wait4 call. * Rather than having the parent do it as a magic kind of * signal handler, we just set this to tell do_exit that we * can be cleaned up without becoming a zombie. Note that * we still call __wake_up_parent in this case, because a * blocked sys_wait4 might now return -ECHILD. * * Whether we send SIGCHLD or not for SA_NOCLDWAIT * is implementation-defined: we do (if you don't want * it, just use SIG_IGN instead). */ autoreap = true; if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) sig = 0; } if (valid_signal(sig) && sig) __group_send_sig_info(sig, &info, tsk->parent); __wake_up_parent(tsk, tsk->parent); spin_unlock_irqrestore(&psig->siglock, flags); return autoreap; } /** * do_notify_parent_cldstop - notify parent of stopped/continued state change * @tsk: task reporting the state change * @for_ptracer: the notification is for ptracer * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report * * Notify @tsk's parent that the stopped/continued state has changed. If * @for_ptracer is %false, @tsk's group leader notifies to its real parent. * If %true, @tsk reports to @tsk->parent which should be the ptracer. * * CONTEXT: * Must be called with tasklist_lock at least read locked. 
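 *
 * The calling convention, exactly as exercised by the callers later
 * in this file (see do_signal_stop() and exit_signals()):
 *
 *	read_lock(&tasklist_lock);
 *	do_notify_parent_cldstop(current, false, CLD_STOPPED);
 *	read_unlock(&tasklist_lock);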
*/ static void do_notify_parent_cldstop(struct task_struct *tsk, bool for_ptracer, int why) { struct siginfo info; unsigned long flags; struct task_struct *parent; struct sighand_struct *sighand; if (for_ptracer) { parent = tsk->parent; } else { tsk = tsk->group_leader; parent = tsk->real_parent; } info.si_signo = SIGCHLD; info.si_errno = 0; /* * see comment in do_notify_parent() about the following 4 lines */ rcu_read_lock(); info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent)); info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); rcu_read_unlock(); info.si_utime = cputime_to_clock_t(tsk->utime); info.si_stime = cputime_to_clock_t(tsk->stime); info.si_code = why; switch (why) { case CLD_CONTINUED: info.si_status = SIGCONT; break; case CLD_STOPPED: info.si_status = tsk->signal->group_exit_code & 0x7f; break; case CLD_TRAPPED: info.si_status = tsk->exit_code & 0x7f; break; default: BUG(); } sighand = parent->sighand; spin_lock_irqsave(&sighand->siglock, flags); if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) __group_send_sig_info(SIGCHLD, &info, parent); /* * Even if SIGCHLD is not generated, we must wake up wait4 calls. */ __wake_up_parent(tsk, parent); spin_unlock_irqrestore(&sighand->siglock, flags); } static inline int may_ptrace_stop(void) { if (!likely(current->ptrace)) return 0; /* * Are we in the middle of do_coredump? * If so and our tracer is also part of the coredump stopping * is a deadlock situation, and pointless because our tracer * is dead so don't allow us to stop. * If SIGKILL was already sent before the caller unlocked * ->siglock we must see ->core_state != NULL. Otherwise it * is safe to enter schedule(). * * This is almost outdated, a task with the pending SIGKILL can't * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported * after SIGKILL was already dequeued. */ if (unlikely(current->mm->core_state) && unlikely(current->mm == current->parent->mm)) return 0; return 1; } /* * Return non-zero if there is a SIGKILL that should be waking us up. * Called with the siglock held. */ static int sigkill_pending(struct task_struct *tsk) { return sigismember(&tsk->pending.signal, SIGKILL) || sigismember(&tsk->signal->shared_pending.signal, SIGKILL); } /* * This must be called with current->sighand->siglock held. * * This should be the path for all ptrace stops. * We always set current->last_siginfo while stopped here. * That makes it a way to test a stopped process for * being ptrace-stopped vs being job-control-stopped. * * If we actually decide not to stop at all because the tracer * is gone, we keep current->exit_code unless clear_code. */ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) __releases(&current->sighand->siglock) __acquires(&current->sighand->siglock) { bool gstop_done = false; if (arch_ptrace_stop_needed(exit_code, info)) { /* * The arch code has something special to do before a * ptrace stop. This is allowed to block, e.g. for faults * on user stack pages. We can't keep the siglock while * calling arch_ptrace_stop, so we must release it now. * To preserve proper semantics, we must do this before * any signal bookkeeping like checking group_stop_count. * Meanwhile, a SIGKILL could come in before we retake the * siglock. That must prevent us from sleeping in TASK_TRACED. * So after regaining the lock, we must check for SIGKILL. 
*/ spin_unlock_irq(&current->sighand->siglock); arch_ptrace_stop(exit_code, info); spin_lock_irq(&current->sighand->siglock); if (sigkill_pending(current)) return; } /* * We're committing to trapping. TRACED should be visible before * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). * Also, transition to TRACED and updates to ->jobctl should be * atomic with respect to siglock and should be done after the arch * hook as siglock is released and regrabbed across it. */ set_current_state(TASK_TRACED); current->last_siginfo = info; current->exit_code = exit_code; /* * If @why is CLD_STOPPED, we're trapping to participate in a group * stop. Do the bookkeeping. Note that if SIGCONT was delivered * across siglock relocks since INTERRUPT was scheduled, PENDING * could be clear now. We act as if SIGCONT is received after * TASK_TRACED is entered - ignore it. */ if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) gstop_done = task_participate_group_stop(current); /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); /* entering a trap, clear TRAPPING */ task_clear_jobctl_trapping(current); spin_unlock_irq(&current->sighand->siglock); read_lock(&tasklist_lock); if (may_ptrace_stop()) { /* * Notify parents of the stop. * * While ptraced, there are two parents - the ptracer and * the real_parent of the group_leader. The ptracer should * know about every stop while the real parent is only * interested in the completion of group stop. The states * for the two don't interact with each other. Notify * separately unless they're gonna be duplicates. */ do_notify_parent_cldstop(current, true, why); if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); /* * Don't want to allow preemption here, because * sys_ptrace() needs this task to be inactive. * * XXX: implement read_unlock_no_resched(). */ preempt_disable(); read_unlock(&tasklist_lock); preempt_enable_no_resched(); freezable_schedule(); } else { /* * By the time we got the lock, our tracer went away. * Don't drop the lock yet, another tracer may come. * * If @gstop_done, the ptracer went away between group stop * completion and here. During detach, it would have set * JOBCTL_STOP_PENDING on us and we'll re-enter * TASK_STOPPED in do_signal_stop() on return, so notifying * the real parent of the group stop completion is enough. */ if (gstop_done) do_notify_parent_cldstop(current, false, why); /* tasklist protects us from ptrace_freeze_traced() */ __set_current_state(TASK_RUNNING); if (clear_code) current->exit_code = 0; read_unlock(&tasklist_lock); } /* * We are back. Now reacquire the siglock before touching * last_siginfo, so that we are sure to have synchronized with * any signal-sending on another CPU that wants to examine it. */ spin_lock_irq(&current->sighand->siglock); current->last_siginfo = NULL; /* LISTENING can be set only during STOP traps, clear it */ current->jobctl &= ~JOBCTL_LISTENING; /* * Queued signals ignored us while we were stopped for tracing. * So check for any that we should take before resuming user mode. * This sets TIF_SIGPENDING, but never clears it.
*/ recalc_sigpending_tsk(current); } static void ptrace_do_notify(int signr, int exit_code, int why) { siginfo_t info; memset(&info, 0, sizeof info); info.si_signo = signr; info.si_code = exit_code; info.si_pid = task_pid_vnr(current); info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); /* Let the debugger run. */ ptrace_stop(exit_code, why, 1, &info); } void ptrace_notify(int exit_code) { BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); if (unlikely(current->task_works)) task_work_run(); spin_lock_irq(&current->sighand->siglock); ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); spin_unlock_irq(&current->sighand->siglock); } /** * do_signal_stop - handle group stop for SIGSTOP and other stop signals * @signr: signr causing group stop if initiating * * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr * and participate in it. If already set, participate in the existing * group stop. If participated in a group stop (and thus slept), %true is * returned with siglock released. * * If ptraced, this function doesn't handle stop itself. Instead, * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock * untouched. The caller must ensure that INTERRUPT trap handling takes * place afterwards. * * CONTEXT: * Must be called with @current->sighand->siglock held, which is released * on %true return. * * RETURNS: * %false if group stop is already cancelled or ptrace trap is scheduled. * %true if participated in group stop. */ static bool do_signal_stop(int signr) __releases(&current->sighand->siglock) { struct signal_struct *sig = current->signal; if (!(current->jobctl & JOBCTL_STOP_PENDING)) { unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; struct task_struct *t; /* signr will be recorded in task->jobctl for retries */ WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || unlikely(signal_group_exit(sig))) return false; /* * There is no group stop already in progress. We must * initiate one now. * * While ptraced, a task may be resumed while group stop is * still in effect and then receive a stop signal and * initiate another group stop. This deviates from the * usual behavior as two consecutive stop signals can't * cause two group stops when !ptraced. That is why we * also check !task_is_stopped(t) below. * * The condition can be distinguished by testing whether * SIGNAL_STOP_STOPPED is already set. Don't generate * group_exit_code in such case. * * This is not necessary for SIGNAL_STOP_CONTINUED because * an intervening stop signal is required to cause two * continued events regardless of ptrace. */ if (!(sig->flags & SIGNAL_STOP_STOPPED)) sig->group_exit_code = signr; sig->group_stop_count = 0; if (task_set_jobctl_pending(current, signr | gstop)) sig->group_stop_count++; for (t = next_thread(current); t != current; t = next_thread(t)) { /* * Setting state to TASK_STOPPED for a group * stop is always done with the siglock held, * so this check has no races. */ if (!task_is_stopped(t) && task_set_jobctl_pending(t, signr | gstop)) { sig->group_stop_count++; if (likely(!(t->ptrace & PT_SEIZED))) signal_wake_up(t, 0); else ptrace_trap_notify(t); } } } if (likely(!current->ptrace)) { int notify = 0; /* * If there are no other threads in the group, or if there * is a group stop in progress and we are the last to stop, * report to the parent.
*/ if (task_participate_group_stop(current)) notify = CLD_STOPPED; __set_current_state(TASK_STOPPED); spin_unlock_irq(&current->sighand->siglock); /* * Notify the parent of the group stop completion. Because * we're not holding either the siglock or tasklist_lock * here, ptracer may attach inbetween; however, this is for * group stop and should always be delivered to the real * parent of the group leader. The new ptracer will get * its notification when this task transitions into * TASK_TRACED. */ if (notify) { read_lock(&tasklist_lock); do_notify_parent_cldstop(current, false, notify); read_unlock(&tasklist_lock); } /* Now we don't run again until woken by SIGCONT or SIGKILL */ freezable_schedule(); return true; } else { /* * While ptraced, group stop is handled by STOP trap. * Schedule it and let the caller deal with it. */ task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); return false; } } /** * do_jobctl_trap - take care of ptrace jobctl traps * * When PT_SEIZED, it's used for both group stop and explicit * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with * accompanying siginfo. If stopped, lower eight bits of exit_code contain * the stop signal; otherwise, %SIGTRAP. * * When !PT_SEIZED, it's used only for group stop trap with stop signal * number as exit_code and no siginfo. * * CONTEXT: * Must be called with @current->sighand->siglock held, which may be * released and re-acquired before returning with intervening sleep. */ static void do_jobctl_trap(void) { struct signal_struct *signal = current->signal; int signr = current->jobctl & JOBCTL_STOP_SIGMASK; if (current->ptrace & PT_SEIZED) { if (!signal->group_stop_count && !(signal->flags & SIGNAL_STOP_STOPPED)) signr = SIGTRAP; WARN_ON_ONCE(!signr); ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), CLD_STOPPED); } else { WARN_ON_ONCE(!signr); ptrace_stop(signr, CLD_STOPPED, 0, NULL); current->exit_code = 0; } } static int ptrace_signal(int signr, siginfo_t *info) { ptrace_signal_deliver(); /* * We do not check sig_kernel_stop(signr) but set this marker * unconditionally because we do not know whether debugger will * change signr. This flag has no meaning unless we are going * to stop after return from ptrace_stop(). In this case it will * be checked in do_signal_stop(), we should only stop if it was * not cleared by SIGCONT while we were sleeping. See also the * comment in dequeue_signal(). */ current->jobctl |= JOBCTL_STOP_DEQUEUED; ptrace_stop(signr, CLD_TRAPPED, 0, info); /* We're back. Did the debugger cancel the sig? */ signr = current->exit_code; if (signr == 0) return signr; current->exit_code = 0; /* * Update the siginfo structure if the signal has * changed. If the debugger wanted something * specific in the siginfo structure then it should * have updated *info via PTRACE_SETSIGINFO. */ if (signr != info->si_signo) { info->si_signo = signr; info->si_errno = 0; info->si_code = SI_USER; rcu_read_lock(); info->si_pid = task_pid_vnr(current->parent); info->si_uid = from_kuid_munged(current_user_ns(), task_uid(current->parent)); rcu_read_unlock(); } /* If the (new) signal is now blocked, requeue it. 
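 *
 * For orientation, a hedged sketch of the tracer side (not code from
 * this file): the "new" signal is whatever the debugger resumed us
 * with, e.g.
 *
 *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);	// replace the signal
 *	ptrace(PTRACE_CONT, pid, 0, 0);		// or suppress it
 *
 * which is why signr here may differ from what was dequeued.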
*/ if (sigismember(&current->blocked, signr)) { specific_send_sig_info(signr, info, current); signr = 0; } return signr; } int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie) { struct sighand_struct *sighand = current->sighand; struct signal_struct *signal = current->signal; int signr; if (unlikely(current->task_works)) task_work_run(); if (unlikely(uprobe_deny_signal())) return 0; /* * Do this once, we can't return to user-mode if freezing() == T. * do_signal_stop() and ptrace_stop() do freezable_schedule() and * thus do not need another check after return. */ try_to_freeze(); relock: spin_lock_irq(&sighand->siglock); /* * Every stopped thread goes here after wakeup. Check to see if * we should notify the parent, prepare_signal(SIGCONT) encodes * the CLD_ si_code into SIGNAL_CLD_MASK bits. */ if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { int why; if (signal->flags & SIGNAL_CLD_CONTINUED) why = CLD_CONTINUED; else why = CLD_STOPPED; signal->flags &= ~SIGNAL_CLD_MASK; spin_unlock_irq(&sighand->siglock); /* * Notify the parent that we're continuing. This event is * always per-process and doesn't make whole lot of sense * for ptracers, who shouldn't consume the state via * wait(2) either, but, for backward compatibility, notify * the ptracer of the group leader too unless it's gonna be * a duplicate. */ read_lock(&tasklist_lock); do_notify_parent_cldstop(current, false, why); if (ptrace_reparented(current->group_leader)) do_notify_parent_cldstop(current->group_leader, true, why); read_unlock(&tasklist_lock); goto relock; } for (;;) { struct k_sigaction *ka; if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && do_signal_stop(0)) goto relock; if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { do_jobctl_trap(); spin_unlock_irq(&sighand->siglock); goto relock; } signr = dequeue_signal(current, &current->blocked, info); if (!signr) break; /* will return 0 */ if (unlikely(current->ptrace) && signr != SIGKILL) { signr = ptrace_signal(signr, info); if (!signr) continue; } ka = &sighand->action[signr-1]; /* Trace actually delivered signals. */ trace_signal_deliver(signr, info, ka); if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ continue; if (ka->sa.sa_handler != SIG_DFL) { /* Run the handler. */ *return_ka = *ka; if (ka->sa.sa_flags & SA_ONESHOT) ka->sa.sa_handler = SIG_DFL; break; /* will return non-zero "signr" value */ } /* * Now we are doing the default action for this signal. */ if (sig_kernel_ignore(signr)) /* Default is nothing. */ continue; /* * Global init gets no signals it doesn't want. * Container-init gets no signals it doesn't want from same * container. * * Note that if global/container-init sees a sig_kernel_only() * signal here, the signal must have been generated internally * or must have come from an ancestor namespace. In either * case, the signal cannot be dropped. */ if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && !sig_kernel_only(signr)) continue; if (sig_kernel_stop(signr)) { /* * The default action is to stop all threads in * the thread group. The job control signals * do nothing in an orphaned pgrp, but SIGSTOP * always works. Note that siglock needs to be * dropped during the call to is_orphaned_pgrp() * because of lock ordering with tasklist_lock. * This allows an intervening SIGCONT to be posted. * We need to check for that and bail out if necessary. 
*/ if (signr != SIGSTOP) { spin_unlock_irq(&sighand->siglock); /* signals can be posted during this window */ if (is_current_pgrp_orphaned()) goto relock; spin_lock_irq(&sighand->siglock); } if (likely(do_signal_stop(info->si_signo))) { /* It released the siglock. */ goto relock; } /* * We didn't actually stop, due to a race * with SIGCONT or something like that. */ continue; } spin_unlock_irq(&sighand->siglock); /* * Anything else is fatal, maybe with a core dump. */ current->flags |= PF_SIGNALED; if (sig_kernel_coredump(signr)) { if (print_fatal_signals) print_fatal_signal(info->si_signo); /* * If it was able to dump core, this kills all * other threads in the group and synchronizes with * their demise. If we lost the race with another * thread getting here, it set group_exit_code * first and our do_group_exit call below will use * that value and ignore the one we pass it. */ do_coredump(info); } /* * Death signals, no core dump. */ do_group_exit(info->si_signo); /* NOTREACHED */ } spin_unlock_irq(&sighand->siglock); return signr; } /** * signal_delivered - update state after a signal has been delivered * @sig: number of signal being delivered * @info: siginfo_t of signal being delivered * @ka: sigaction setting that chose the handler * @regs: user register state * @stepping: nonzero if debugger single-step or block-step in use * * This function should be called when a signal has successfully been * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask * is always blocked, and the signal itself is blocked unless %SA_NODEFER * is set in @ka->sa.sa_flags). Tracing is notified. */ void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int stepping) { sigset_t blocked; /* A signal was successfully delivered, and the saved sigmask was stored on the signal frame, and will be restored by sigreturn. So we can simply clear the restore sigmask flag. */ clear_restore_sigmask(); sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&blocked, sig); set_current_blocked(&blocked); tracehook_signal_handler(sig, info, ka, regs, stepping); } /* * It could be that complete_signal() picked us to notify about the * group-wide signal. Other threads should be notified now to take * the shared signals in @which since we will not. */ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) { sigset_t retarget; struct task_struct *t; sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); if (sigisemptyset(&retarget)) return; t = tsk; while_each_thread(tsk, t) { if (t->flags & PF_EXITING) continue; if (!has_pending_signals(&retarget, &t->blocked)) continue; /* Remove the signals this thread can handle. */ sigandsets(&retarget, &retarget, &t->blocked); if (!signal_pending(t)) signal_wake_up(t, 0); if (sigisemptyset(&retarget)) break; } } void exit_signals(struct task_struct *tsk) { int group_stop = 0; sigset_t unblocked; /* * @tsk is about to have PF_EXITING set - lock out users which * expect stable threadgroup. */ threadgroup_change_begin(tsk); if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { tsk->flags |= PF_EXITING; threadgroup_change_end(tsk); return; } spin_lock_irq(&tsk->sighand->siglock); /* * From now this task is not visible for group-wide signals, * see wants_signal(), do_signal_stop().
*/ tsk->flags |= PF_EXITING; threadgroup_change_end(tsk); if (!signal_pending(tsk)) goto out; unblocked = tsk->blocked; signotset(&unblocked); retarget_shared_pending(tsk, &unblocked); if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && task_participate_group_stop(tsk)) group_stop = CLD_STOPPED; out: spin_unlock_irq(&tsk->sighand->siglock); /* * If group stop has completed, deliver the notification. This * should always go to the real parent of the group leader. */ if (unlikely(group_stop)) { read_lock(&tasklist_lock); do_notify_parent_cldstop(tsk, false, group_stop); read_unlock(&tasklist_lock); } } EXPORT_SYMBOL(recalc_sigpending); EXPORT_SYMBOL_GPL(dequeue_signal); EXPORT_SYMBOL(flush_signals); EXPORT_SYMBOL(force_sig); EXPORT_SYMBOL(send_sig); EXPORT_SYMBOL(send_sig_info); EXPORT_SYMBOL(sigprocmask); EXPORT_SYMBOL(block_all_signals); EXPORT_SYMBOL(unblock_all_signals); /* * System call entry points. */ /** * sys_restart_syscall - restart a system call */ SYSCALL_DEFINE0(restart_syscall) { struct restart_block *restart = &current_thread_info()->restart_block; return restart->fn(restart); } long do_no_restart_syscall(struct restart_block *param) { return -EINTR; } static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) { if (signal_pending(tsk) && !thread_group_empty(tsk)) { sigset_t newblocked; /* A set of now blocked but previously unblocked signals. */ sigandnsets(&newblocked, newset, &current->blocked); retarget_shared_pending(tsk, &newblocked); } tsk->blocked = *newset; recalc_sigpending(); } /** * set_current_blocked - change current->blocked mask * @newset: new mask * * It is wrong to change ->blocked directly, this helper should be used * to ensure the process can't miss a shared signal we are going to block. */ void set_current_blocked(sigset_t *newset) { sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); __set_current_blocked(newset); } void __set_current_blocked(const sigset_t *newset) { struct task_struct *tsk = current; spin_lock_irq(&tsk->sighand->siglock); __set_task_blocked(tsk, newset); spin_unlock_irq(&tsk->sighand->siglock); } /* * This is also useful for kernel threads that want to temporarily * (or permanently) block certain signals. * * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel * interface happily blocks "unblockable" signals like SIGKILL * and friends. */ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) { struct task_struct *tsk = current; sigset_t newset; /* Lockless, only current can change ->blocked, never from irq */ if (oldset) *oldset = tsk->blocked; switch (how) { case SIG_BLOCK: sigorsets(&newset, &tsk->blocked, set); break; case SIG_UNBLOCK: sigandnsets(&newset, &tsk->blocked, set); break; case SIG_SETMASK: newset = *set; break; default: return -EINVAL; } __set_current_blocked(&newset); return 0; } /** * sys_rt_sigprocmask - change the list of currently blocked signals * @how: whether to add, remove, or set signals * @nset: the new signal mask to apply according to @how, if non-null * @oset: previous value of signal mask if non-null * @sigsetsize: size of sigset_t type */ SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, sigset_t __user *, oset, size_t, sigsetsize) { sigset_t old_set, new_set; int error; /* XXX: Don't preclude handling different sized sigset_t's.
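 *
 * Userspace view, as a hedged example (glibc's sigprocmask() wraps
 * this syscall on most configurations). Note that SIGKILL and SIGSTOP
 * are silently dropped from @nset below instead of causing an error:
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigaddset(&set, SIGKILL);		// silently ignored
 *	sigprocmask(SIG_BLOCK, &set, &old);	// SIGINT now blocked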
*/ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; old_set = current->blocked; if (nset) { if (copy_from_user(&new_set, nset, sizeof(sigset_t))) return -EFAULT; sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); error = sigprocmask(how, &new_set, NULL); if (error) return error; } if (oset) { if (copy_to_user(oset, &old_set, sizeof(sigset_t))) return -EFAULT; } return 0; } long do_sigpending(void __user *set, unsigned long sigsetsize) { long error = -EINVAL; sigset_t pending; if (sigsetsize > sizeof(sigset_t)) goto out; spin_lock_irq(&current->sighand->siglock); sigorsets(&pending, &current->pending.signal, &current->signal->shared_pending.signal); spin_unlock_irq(&current->sighand->siglock); /* Outside the lock because only this thread touches it. */ sigandsets(&pending, &current->blocked, &pending); error = -EFAULT; if (!copy_to_user(set, &pending, sigsetsize)) error = 0; out: return error; } /** * sys_rt_sigpending - examine a pending signal that has been raised * while blocked * @set: stores pending signals * @sigsetsize: size of sigset_t type or larger */ SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) { return do_sigpending(set, sigsetsize); } #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) { int err; if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; if (from->si_code < 0) return __copy_to_user(to, from, sizeof(siginfo_t)) ? -EFAULT : 0; /* * If you change siginfo_t structure, please be sure * this code is fixed accordingly. * Please remember to update the signalfd_copyinfo() function * inside fs/signalfd.c too, in case siginfo_t changes. * It should never copy any pad contained in the structure * to avoid security leaks, but must copy the generic * 3 ints plus the relevant union member. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); err |= __put_user((short)from->si_code, &to->si_code); switch (from->si_code & __SI_MASK) { case __SI_KILL: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; case __SI_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_ptr, &to->si_ptr); break; case __SI_POLL: err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_fd, &to->si_fd); break; case __SI_FAULT: err |= __put_user(from->si_addr, &to->si_addr); #ifdef __ARCH_SI_TRAPNO err |= __put_user(from->si_trapno, &to->si_trapno); #endif #ifdef BUS_MCEERR_AO /* * Other callers might not initialize the si_lsb field, * so check explicitly for the right codes here. */ if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif break; case __SI_CHLD: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_status, &to->si_status); err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); break; case __SI_RT: /* This is not generated by the kernel as of now. 
*/ case __SI_MESGQ: /* But this is */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_ptr, &to->si_ptr); break; #ifdef __ARCH_SIGSYS case __SI_SYS: err |= __put_user(from->si_call_addr, &to->si_call_addr); err |= __put_user(from->si_syscall, &to->si_syscall); err |= __put_user(from->si_arch, &to->si_arch); break; #endif default: /* this is just in case for now ... */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; } return err; } #endif /** * do_sigtimedwait - wait for queued signals specified in @which * @which: queued signals to wait for * @info: if non-null, the signal's siginfo is returned here * @ts: upper bound on process time suspension */ int do_sigtimedwait(const sigset_t *which, siginfo_t *info, const struct timespec *ts) { struct task_struct *tsk = current; long timeout = MAX_SCHEDULE_TIMEOUT; sigset_t mask = *which; int sig; if (ts) { if (!timespec_valid(ts)) return -EINVAL; timeout = timespec_to_jiffies(ts); /* * We can be close to the next tick, add another one * to ensure we will wait at least the time asked for. */ if (ts->tv_sec || ts->tv_nsec) timeout++; } /* * Invert the set of allowed signals to get those we want to block. */ sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); signotset(&mask); spin_lock_irq(&tsk->sighand->siglock); sig = dequeue_signal(tsk, &mask, info); if (!sig && timeout) { /* * None ready, temporarily unblock those we're interested * while we are sleeping in so that we'll be awakened when * they arrive. Unblocking is always fine, we can avoid * set_current_blocked(). */ tsk->real_blocked = tsk->blocked; sigandsets(&tsk->blocked, &tsk->blocked, &mask); recalc_sigpending(); spin_unlock_irq(&tsk->sighand->siglock); timeout = schedule_timeout_interruptible(timeout); spin_lock_irq(&tsk->sighand->siglock); __set_task_blocked(tsk, &tsk->real_blocked); siginitset(&tsk->real_blocked, 0); sig = dequeue_signal(tsk, &mask, info); } spin_unlock_irq(&tsk->sighand->siglock); if (sig) return sig; return timeout ? -EINTR : -EAGAIN; } /** * sys_rt_sigtimedwait - synchronously wait for queued signals specified * in @uthese * @uthese: queued signals to wait for * @uinfo: if non-null, the signal's siginfo is returned here * @uts: upper bound on process time suspension * @sigsetsize: size of sigset_t type */ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, siginfo_t __user *, uinfo, const struct timespec __user *, uts, size_t, sigsetsize) { sigset_t these; struct timespec ts; siginfo_t info; int ret; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&these, uthese, sizeof(these))) return -EFAULT; if (uts) { if (copy_from_user(&ts, uts, sizeof(ts))) return -EFAULT; } ret = do_sigtimedwait(&these, &info, uts ? 
&ts : NULL); if (ret > 0 && uinfo) { if (copy_siginfo_to_user(uinfo, &info)) ret = -EFAULT; } return ret; } /** * sys_kill - send a signal to a process * @pid: the PID of the process * @sig: signal to be sent */ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) { struct siginfo info; info.si_signo = sig; info.si_errno = 0; info.si_code = SI_USER; info.si_pid = task_tgid_vnr(current); info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); return kill_something_info(sig, &info, pid); } static int do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) { struct task_struct *p; int error = -ESRCH; rcu_read_lock(); p = find_task_by_vpid(pid); if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { error = check_kill_permission(sig, info, p); /* * The null signal is a permissions and process existence * probe. No signal is actually delivered. */ if (!error && sig) { error = do_send_sig_info(sig, info, p, false); /* * If lock_task_sighand() failed we pretend the task * dies after receiving the signal. The window is tiny, * and the signal is private anyway. */ if (unlikely(error == -ESRCH)) error = 0; } } rcu_read_unlock(); return error; } static int do_tkill(pid_t tgid, pid_t pid, int sig) { struct siginfo info; info.si_signo = sig; info.si_errno = 0; info.si_code = SI_TKILL; info.si_pid = task_tgid_vnr(current); info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); return do_send_specific(tgid, pid, sig, &info); } /** * sys_tgkill - send signal to one specific thread * @tgid: the thread group ID of the thread * @pid: the PID of the thread * @sig: signal to be sent * * This syscall also checks the @tgid and returns -ESRCH even if the PID * exists but it's not belonging to the target process anymore. This * method solves the problem of threads exiting and PIDs getting reused. */ SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) { /* This is only valid for single tasks */ if (pid <= 0 || tgid <= 0) return -EINVAL; return do_tkill(tgid, pid, sig); } /** * sys_tkill - send signal to one specific task * @pid: the PID of the task * @sig: signal to be sent * * Send a signal to only one task, even if it's a CLONE_THREAD task. */ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) { /* This is only valid for single tasks */ if (pid <= 0) return -EINVAL; return do_tkill(0, pid, sig); } /** * sys_rt_sigqueueinfo - send signal information to a signal * @pid: the PID of the thread * @sig: signal to be sent * @uinfo: signal info to be sent */ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { siginfo_t info; if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) return -EFAULT; /* Not even root can pretend to send signals from the kernel. * Nor can they impersonate a kill()/tgkill(), which adds source info. */ if (info.si_code >= 0 || info.si_code == SI_TKILL) { /* We used to allow any < 0 si_code */ WARN_ON_ONCE(info.si_code < 0); return -EPERM; } info.si_signo = sig; /* POSIX.1b doesn't mention process groups. */ return kill_proc_info(sig, &info, pid); } long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) { /* This is only valid for single tasks */ if (pid <= 0 || tgid <= 0) return -EINVAL; /* Not even root can pretend to send signals from the kernel. * Nor can they impersonate a kill()/tgkill(), which adds source info. 
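 *
 * Hedged note: glibc's sigqueue() is the intended caller here; it
 * fills in si_code = SI_QUEUE (a negative value), so it passes the
 * check below, while a forged non-negative si_code earns -EPERM:
 *
 *	union sigval val = { .sival_int = 7 };	// payload is illustrative
 *	sigqueue(pid, SIGRTMIN, val);		// ends up in this syscall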
*/ if (info->si_code >= 0 || info->si_code == SI_TKILL) { /* We used to allow any < 0 si_code */ WARN_ON_ONCE(info->si_code < 0); return -EPERM; } info->si_signo = sig; return do_send_specific(tgid, pid, sig, info); } SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { siginfo_t info; if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) return -EFAULT; return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); } int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) { struct task_struct *t = current; struct k_sigaction *k; sigset_t mask; if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) return -EINVAL; k = &t->sighand->action[sig-1]; spin_lock_irq(&current->sighand->siglock); if (oact) *oact = *k; if (act) { sigdelsetmask(&act->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); *k = *act; /* * POSIX 3.3.1.3: * "Setting a signal action to SIG_IGN for a signal that is * pending shall cause the pending signal to be discarded, * whether or not it is blocked." * * "Setting a signal action to SIG_DFL for a signal that is * pending and whose default action is to ignore the signal * (for example, SIGCHLD), shall cause the pending signal to * be discarded, whether or not it is blocked" */ if (sig_handler_ignored(sig_handler(t, sig), sig)) { sigemptyset(&mask); sigaddset(&mask, sig); rm_from_queue_full(&mask, &t->signal->shared_pending); do { rm_from_queue_full(&mask, &t->pending); t = next_thread(t); } while (t != current); } } spin_unlock_irq(&current->sighand->siglock); return 0; } int do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) { stack_t oss; int error; oss.ss_sp = (void __user *) current->sas_ss_sp; oss.ss_size = current->sas_ss_size; oss.ss_flags = sas_ss_flags(sp); if (uss) { void __user *ss_sp; size_t ss_size; int ss_flags; error = -EFAULT; if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) goto out; error = __get_user(ss_sp, &uss->ss_sp) | __get_user(ss_flags, &uss->ss_flags) | __get_user(ss_size, &uss->ss_size); if (error) goto out; error = -EPERM; if (on_sig_stack(sp)) goto out; error = -EINVAL; /* * Note - this code used to test ss_flags incorrectly: * old code may have been written using ss_flags==0 * to mean ss_flags==SS_ONSTACK (as this was the only * way that worked) - this fix preserves that older * mechanism. */ if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) goto out; if (ss_flags == SS_DISABLE) { ss_size = 0; ss_sp = NULL; } else { error = -ENOMEM; if (ss_size < MINSIGSTKSZ) goto out; } current->sas_ss_sp = (unsigned long) ss_sp; current->sas_ss_size = ss_size; } error = 0; if (uoss) { error = -EFAULT; if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) goto out; error = __put_user(oss.ss_sp, &uoss->ss_sp) | __put_user(oss.ss_size, &uoss->ss_size) | __put_user(oss.ss_flags, &uoss->ss_flags); } out: return error; } #ifdef CONFIG_GENERIC_SIGALTSTACK SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) { return do_sigaltstack(uss, uoss, current_user_stack_pointer()); } #endif int restore_altstack(const stack_t __user *uss) { int err = do_sigaltstack(uss, NULL, current_user_stack_pointer()); /* squash all but EFAULT for now */ return err == -EFAULT ? 
err : 0; } int __save_altstack(stack_t __user *uss, unsigned long sp) { struct task_struct *t = current; return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | __put_user(sas_ss_flags(sp), &uss->ss_flags) | __put_user(t->sas_ss_size, &uss->ss_size); } #ifdef CONFIG_COMPAT #ifdef CONFIG_GENERIC_SIGALTSTACK COMPAT_SYSCALL_DEFINE2(sigaltstack, const compat_stack_t __user *, uss_ptr, compat_stack_t __user *, uoss_ptr) { stack_t uss, uoss; int ret; mm_segment_t seg; if (uss_ptr) { compat_stack_t uss32; memset(&uss, 0, sizeof(stack_t)); if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) return -EFAULT; uss.ss_sp = compat_ptr(uss32.ss_sp); uss.ss_flags = uss32.ss_flags; uss.ss_size = uss32.ss_size; } seg = get_fs(); set_fs(KERNEL_DS); ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL), (stack_t __force __user *) &uoss, compat_user_stack_pointer()); set_fs(seg); if (ret >= 0 && uoss_ptr) { if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) || __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || __put_user(uoss.ss_size, &uoss_ptr->ss_size)) ret = -EFAULT; } return ret; } int compat_restore_altstack(const compat_stack_t __user *uss) { int err = compat_sys_sigaltstack(uss, NULL); /* squash all but -EFAULT for now */ return err == -EFAULT ? err : 0; } int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) { struct task_struct *t = current; return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) | __put_user(sas_ss_flags(sp), &uss->ss_flags) | __put_user(t->sas_ss_size, &uss->ss_size); } #endif #endif #ifdef __ARCH_WANT_SYS_SIGPENDING /** * sys_sigpending - examine pending signals * @set: where mask of pending signal is returned */ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) { return do_sigpending(set, sizeof(*set)); } #endif #ifdef __ARCH_WANT_SYS_SIGPROCMASK /** * sys_sigprocmask - examine and change blocked signals * @how: whether to add, remove, or set signals * @nset: signals to add or remove (if non-null) * @oset: previous value of signal mask if non-null * * Some platforms have their own version with special arguments; * others support only sys_rt_sigprocmask. */ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, old_sigset_t __user *, oset) { old_sigset_t old_set, new_set; sigset_t new_blocked; old_set = current->blocked.sig[0]; if (nset) { if (copy_from_user(&new_set, nset, sizeof(*nset))) return -EFAULT; new_blocked = current->blocked; switch (how) { case SIG_BLOCK: sigaddsetmask(&new_blocked, new_set); break; case SIG_UNBLOCK: sigdelsetmask(&new_blocked, new_set); break; case SIG_SETMASK: new_blocked.sig[0] = new_set; break; default: return -EINVAL; } set_current_blocked(&new_blocked); } if (oset) { if (copy_to_user(oset, &old_set, sizeof(*oset))) return -EFAULT; } return 0; } #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ #ifdef __ARCH_WANT_SYS_RT_SIGACTION /** * sys_rt_sigaction - alter an action taken by a process * @sig: signal to be sent * @act: new sigaction * @oact: used to save the previous sigaction * @sigsetsize: size of sigset_t type */ SYSCALL_DEFINE4(rt_sigaction, int, sig, const struct sigaction __user *, act, struct sigaction __user *, oact, size_t, sigsetsize) { struct k_sigaction new_sa, old_sa; int ret = -EINVAL; /* XXX: Don't preclude handling different sized sigset_t's. 
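 *
 * Userspace counterpart, as a hedged sketch (glibc's sigaction()
 * marshals struct sigaction into this syscall; the handler name below
 * is made up):
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = on_sigterm;		// assumed handler
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGINT);		// also blocked in handler
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGTERM, &sa, NULL);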
*/ if (sigsetsize != sizeof(sigset_t)) goto out; if (act) { if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) return -EFAULT; } ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); if (!ret && oact) { if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) return -EFAULT; } out: return ret; } #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ #ifdef __ARCH_WANT_SYS_SGETMASK /* * For backwards compatibility. Functionality superseded by sigprocmask. */ SYSCALL_DEFINE0(sgetmask) { /* SMP safe */ return current->blocked.sig[0]; } SYSCALL_DEFINE1(ssetmask, int, newmask) { int old = current->blocked.sig[0]; sigset_t newset; siginitset(&newset, newmask); set_current_blocked(&newset); return old; } #endif /* __ARCH_WANT_SGETMASK */ #ifdef __ARCH_WANT_SYS_SIGNAL /* * For backwards compatibility. Functionality superseded by sigaction. */ SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) { struct k_sigaction new_sa, old_sa; int ret; new_sa.sa.sa_handler = handler; new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; sigemptyset(&new_sa.sa.sa_mask); ret = do_sigaction(sig, &new_sa, &old_sa); return ret ? ret : (unsigned long)old_sa.sa.sa_handler; } #endif /* __ARCH_WANT_SYS_SIGNAL */ #ifdef __ARCH_WANT_SYS_PAUSE SYSCALL_DEFINE0(pause) { while (!signal_pending(current)) { current->state = TASK_INTERRUPTIBLE; schedule(); } return -ERESTARTNOHAND; } #endif int sigsuspend(sigset_t *set) { current->saved_sigmask = current->blocked; set_current_blocked(set); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND /** * sys_rt_sigsuspend - replace the signal mask for a value with the * @unewset value until a signal is received * @unewset: new signal mask value * @sigsetsize: size of sigset_t type */ SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) { sigset_t newset; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (copy_from_user(&newset, unewset, sizeof(newset))) return -EFAULT; return sigsuspend(&newset); } #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) { return NULL; } void __init signals_init(void) { sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); } #ifdef CONFIG_KGDB_KDB #include <linux/kdb.h> /* * kdb_send_sig_info - Allows kdb to send signals without exposing * signal internals. This function checks if the required locks are * available before calling the main signal code, to avoid kdb * deadlocks. */ void kdb_send_sig_info(struct task_struct *t, struct siginfo *info) { static struct task_struct *kdb_prev_t; int sig, new_t; if (!spin_trylock(&t->sighand->siglock)) { kdb_printf("Can't do kill command now.\n" "The sigmask lock is held somewhere else in " "kernel, try again later\n"); return; } spin_unlock(&t->sighand->siglock); new_t = kdb_prev_t != t; kdb_prev_t = t; if (t->state != TASK_RUNNING && new_t) { kdb_printf("Process is not RUNNING, sending a signal from " "kdb risks deadlock\n" "on the run queue locks. " "The signal has _not_ been sent.\n" "Reissue the kill command if you want to risk " "the deadlock.\n"); return; } sig = info->si_signo; if (send_sig_info(sig, info, t)) kdb_printf("Fail to deliver Signal %d to process %d.\n", sig, t->pid); else kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); } #endif /* CONFIG_KGDB_KDB */
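/*
 * Closing usage note, added for illustration (the pattern is POSIX,
 * not taken from this file): sigsuspend() above exists to close the
 * race between testing a flag set by a signal handler and sleeping
 * for the signal. The canonical pattern blocks the signal, tests,
 * then atomically unblocks-and-waits:
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!child_exited)			// flag set by the handler
 *		sigsuspend(&old);		// atomically unblock + sleep
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */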
/* * fs/f2fs/node.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/mpage.h> #include <linux/backing-dev.h> #include <linux/blkdev.h> #include <linux/pagevec.h> #include <linux/swap.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include "trace.h" #include <trace/events/f2fs.h> #define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock) static struct kmem_cache *nat_entry_slab; static struct kmem_cache *free_nid_slab; static struct kmem_cache *nat_entry_set_slab; bool available_free_memory(struct f2fs_sb_info *sbi, int type) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct sysinfo val; unsigned long avail_ram; unsigned long mem_size = 0; bool res = false; si_meminfo(&val); /* only uses low memory */ avail_ram = val.totalram - val.totalhigh; /* * give 25%, 25%, 50%, 50%, 50% memory for each component, respectively */ if (type == FREE_NIDS) { mem_size = (nm_i->nid_cnt[FREE_NID_LIST] * sizeof(struct free_nid)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == NAT_ENTRIES) { mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); if (excess_cached_nats(sbi)) res = false; } else if (type == DIRTY_DENTS) { if (sbi->sb->s_bdi->wb.dirty_exceeded) return false; mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else if (type == INO_ENTRIES) { int i; for (i = 0; i <= UPDATE_INO; i++) mem_size += sbi->im[i].ino_num * sizeof(struct ino_entry); mem_size >>= PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else if (type == EXTENT_CACHE) { mem_size = (atomic_read(&sbi->total_ext_tree) * sizeof(struct extent_tree) + atomic_read(&sbi->total_ext_node) * sizeof(struct extent_node)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else { if (!sbi->sb->s_bdi->wb.dirty_exceeded) return true; } return res; } static void clear_node_page_dirty(struct page *page) { struct address_space *mapping = page->mapping; unsigned long flags; if (PageDirty(page)) { spin_lock_irqsave(&mapping->tree_lock, flags); radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); spin_unlock_irqrestore(&mapping->tree_lock, flags); clear_page_dirty_for_io(page); dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES); } ClearPageUptodate(page); } static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) { pgoff_t index = current_nat_addr(sbi, nid); return get_meta_page(sbi, index); } static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) { struct page *src_page; struct page *dst_page; pgoff_t src_off; pgoff_t dst_off; void *src_addr; void *dst_addr; struct f2fs_nm_info *nm_i = NM_I(sbi); src_off = current_nat_addr(sbi, nid); dst_off = next_nat_addr(sbi, src_off); /* get current nat block page with lock */ src_page = get_meta_page(sbi, src_off); dst_page = grab_meta_page(sbi, dst_off); f2fs_bug_on(sbi, PageDirty(src_page)); src_addr = page_address(src_page); dst_addr = page_address(dst_page); memcpy(dst_addr, src_addr, PAGE_SIZE); set_page_dirty(dst_page); f2fs_put_page(src_page, 1); set_to_next_nat(nm_i, nid); return dst_page; } static struct
nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) { return radix_tree_lookup(&nm_i->nat_root, n); } static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t start, unsigned int nr, struct nat_entry **ep) { return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr); } static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) { list_del(&e->list); radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); nm_i->nat_cnt--; kmem_cache_free(nat_entry_slab, e); } static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, struct nat_entry *ne) { nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid); struct nat_entry_set *head; if (get_nat_flag(ne, IS_DIRTY)) return; head = radix_tree_lookup(&nm_i->nat_set_root, set); if (!head) { head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS); INIT_LIST_HEAD(&head->entry_list); INIT_LIST_HEAD(&head->set_list); head->set = set; head->entry_cnt = 0; f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head); } list_move_tail(&ne->list, &head->entry_list); nm_i->dirty_nat_cnt++; head->entry_cnt++; set_nat_flag(ne, IS_DIRTY, true); } static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, struct nat_entry_set *set, struct nat_entry *ne) { list_move_tail(&ne->list, &nm_i->nat_entries); set_nat_flag(ne, IS_DIRTY, false); set->entry_cnt--; nm_i->dirty_nat_cnt--; } static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i, nid_t start, unsigned int nr, struct nat_entry_set **ep) { return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep, start, nr); } int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; bool need = false; down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (e) { if (!get_nat_flag(e, IS_CHECKPOINTED) && !get_nat_flag(e, HAS_FSYNCED_INODE)) need = true; } up_read(&nm_i->nat_tree_lock); return need; } bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; bool is_cp = true; down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (e && !get_nat_flag(e, IS_CHECKPOINTED)) is_cp = false; up_read(&nm_i->nat_tree_lock); return is_cp; } bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; bool need_update = true; down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, ino); if (e && get_nat_flag(e, HAS_LAST_FSYNC) && (get_nat_flag(e, IS_CHECKPOINTED) || get_nat_flag(e, HAS_FSYNCED_INODE))) need_update = false; up_read(&nm_i->nat_tree_lock); return need_update; } static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid, bool no_fail) { struct nat_entry *new; if (no_fail) { new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS); f2fs_radix_tree_insert(&nm_i->nat_root, nid, new); } else { new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS); if (!new) return NULL; if (radix_tree_insert(&nm_i->nat_root, nid, new)) { kmem_cache_free(nat_entry_slab, new); return NULL; } } memset(new, 0, sizeof(struct nat_entry)); nat_set_nid(new, nid); nat_reset_flag(new); list_add_tail(&new->list, &nm_i->nat_entries); nm_i->nat_cnt++; return new; } static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, struct f2fs_nat_entry *ne) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; e = __lookup_nat_cache(nm_i, nid); if (!e) { e = grab_nat_entry(nm_i, nid, false); if (e) node_info_from_raw_nat(&e->ni, ne); } else { 
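/*
 * Already cached: the in-memory entry must agree with the raw
 * on-disk NAT entry, otherwise cache and NAT area have diverged,
 * so assert on any field mismatch.
 */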
f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || nat_get_blkaddr(e) != le32_to_cpu(ne->block_addr) || nat_get_version(e) != ne->version); } } static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, block_t new_blkaddr, bool fsync_done) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *e; down_write(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, ni->nid); if (!e) { e = grab_nat_entry(nm_i, ni->nid, true); copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); } else if (new_blkaddr == NEW_ADDR) { /* * when nid is reallocated, * previous nat entry can be remained in nat cache. * So, reinitialize it with new information. */ copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); } /* sanity check */ f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr); f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR && new_blkaddr == NULL_ADDR); f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR && new_blkaddr == NEW_ADDR); f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR && nat_get_blkaddr(e) != NULL_ADDR && new_blkaddr == NEW_ADDR); /* increment version no as node is removed */ if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { unsigned char version = nat_get_version(e); nat_set_version(e, inc_node_version(version)); /* in order to reuse the nid */ if (nm_i->next_scan_nid > ni->nid) nm_i->next_scan_nid = ni->nid; } /* change address */ nat_set_blkaddr(e, new_blkaddr); if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR) set_nat_flag(e, IS_CHECKPOINTED, false); __set_nat_cache_dirty(nm_i, e); /* update fsync_mark if its inode nat entry is still alive */ if (ni->nid != ni->ino) e = __lookup_nat_cache(nm_i, ni->ino); if (e) { if (fsync_done && ni->nid == ni->ino) set_nat_flag(e, HAS_FSYNCED_INODE, true); set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); } up_write(&nm_i->nat_tree_lock); } int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) { struct f2fs_nm_info *nm_i = NM_I(sbi); int nr = nr_shrink; if (!down_write_trylock(&nm_i->nat_tree_lock)) return 0; while (nr_shrink && !list_empty(&nm_i->nat_entries)) { struct nat_entry *ne; ne = list_first_entry(&nm_i->nat_entries, struct nat_entry, list); __del_from_nat_cache(nm_i, ne); nr_shrink--; } up_write(&nm_i->nat_tree_lock); return nr - nr_shrink; } /* * This function always returns success */ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; nid_t start_nid = START_NID(nid); struct f2fs_nat_block *nat_blk; struct page *page = NULL; struct f2fs_nat_entry ne; struct nat_entry *e; int i; ni->nid = nid; /* Check nat cache */ down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (e) { ni->ino = nat_get_ino(e); ni->blk_addr = nat_get_blkaddr(e); ni->version = nat_get_version(e); up_read(&nm_i->nat_tree_lock); return; } memset(&ne, 0, sizeof(struct f2fs_nat_entry)); /* Check current segment summary */ down_read(&curseg->journal_rwsem); i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0); if (i >= 0) { ne = nat_in_journal(journal, i); node_info_from_raw_nat(ni, &ne); } up_read(&curseg->journal_rwsem); if (i >= 0) goto cache; /* Fill node_info from nat page */ page = get_current_nat_page(sbi, start_nid); nat_blk = (struct f2fs_nat_block *)page_address(page); ne = nat_blk->entries[nid - start_nid]; node_info_from_raw_nat(ni, &ne); f2fs_put_page(page, 1); cache: 
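/*
 * An rwsem cannot be upgraded in place, so drop the read lock
 * before taking the write lock to insert the entry. The window in
 * between is benign: cache_nat_entry() re-checks the cache under
 * the write lock before inserting.
 */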
up_read(&nm_i->nat_tree_lock); /* cache nat entry */ down_write(&nm_i->nat_tree_lock); cache_nat_entry(sbi, nid, &ne); up_write(&nm_i->nat_tree_lock); } /* * readahead MAX_RA_NODE number of node pages. */ static void ra_node_pages(struct page *parent, int start, int n) { struct f2fs_sb_info *sbi = F2FS_P_SB(parent); struct blk_plug plug; int i, end; nid_t nid; blk_start_plug(&plug); /* Then, try readahead for siblings of the desired node */ end = start + n; end = min(end, NIDS_PER_BLOCK); for (i = start; i < end; i++) { nid = get_nid(parent, i, false); ra_node_page(sbi, nid); } blk_finish_plug(&plug); } pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) { const long direct_index = ADDRS_PER_INODE(dn->inode); const long direct_blks = ADDRS_PER_BLOCK; const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK; unsigned int skipped_unit = ADDRS_PER_BLOCK; int cur_level = dn->cur_level; int max_level = dn->max_level; pgoff_t base = 0; if (!dn->max_level) return pgofs + 1; while (max_level-- > cur_level) skipped_unit *= NIDS_PER_BLOCK; switch (dn->max_level) { case 3: base += 2 * indirect_blks; case 2: base += 2 * direct_blks; case 1: base += direct_index; break; default: f2fs_bug_on(F2FS_I_SB(dn->inode), 1); } return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base; } /* * The maximum depth is four. * Offset[0] will have raw inode offset. */ static int get_node_path(struct inode *inode, long block, int offset[4], unsigned int noffset[4]) { const long direct_index = ADDRS_PER_INODE(inode); const long direct_blks = ADDRS_PER_BLOCK; const long dptrs_per_blk = NIDS_PER_BLOCK; const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK; const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK; int n = 0; int level = 0; noffset[0] = 0; if (block < direct_index) { offset[n] = block; goto got; } block -= direct_index; if (block < direct_blks) { offset[n++] = NODE_DIR1_BLOCK; noffset[n] = 1; offset[n] = block; level = 1; goto got; } block -= direct_blks; if (block < direct_blks) { offset[n++] = NODE_DIR2_BLOCK; noffset[n] = 2; offset[n] = block; level = 1; goto got; } block -= direct_blks; if (block < indirect_blks) { offset[n++] = NODE_IND1_BLOCK; noffset[n] = 3; offset[n++] = block / direct_blks; noffset[n] = 4 + offset[n - 1]; offset[n] = block % direct_blks; level = 2; goto got; } block -= indirect_blks; if (block < indirect_blks) { offset[n++] = NODE_IND2_BLOCK; noffset[n] = 4 + dptrs_per_blk; offset[n++] = block / direct_blks; noffset[n] = 5 + dptrs_per_blk + offset[n - 1]; offset[n] = block % direct_blks; level = 2; goto got; } block -= indirect_blks; if (block < dindirect_blks) { offset[n++] = NODE_DIND_BLOCK; noffset[n] = 5 + (dptrs_per_blk * 2); offset[n++] = block / indirect_blks; noffset[n] = 6 + (dptrs_per_blk * 2) + offset[n - 1] * (dptrs_per_blk + 1); offset[n++] = (block / direct_blks) % dptrs_per_blk; noffset[n] = 7 + (dptrs_per_blk * 2) + offset[n - 2] * (dptrs_per_blk + 1) + offset[n - 1]; offset[n] = block % direct_blks; level = 3; goto got; } else { BUG(); } got: return level; } /* * Caller should call f2fs_put_dnode(dn). * Also, it should grab and release a rwsem by calling f2fs_lock_op() and * f2fs_unlock_op() only if ro is not set RDONLY_NODE. * In the case of RDONLY_NODE, we don't need to care about mutex. 
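 *
 * mode selects the behaviour: ALLOC_NODE allocates missing node
 * pages on the way down, LOOKUP_NODE only looks them up, and
 * LOOKUP_NODE_RA additionally issues readahead for sibling node
 * pages at the last level.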
*/ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct page *npage[4]; struct page *parent = NULL; int offset[4]; unsigned int noffset[4]; nid_t nids[4]; int level, i = 0; int err = 0; level = get_node_path(dn->inode, index, offset, noffset); nids[0] = dn->inode->i_ino; npage[0] = dn->inode_page; if (!npage[0]) { npage[0] = get_node_page(sbi, nids[0]); if (IS_ERR(npage[0])) return PTR_ERR(npage[0]); } /* if inline_data is set, should not report any block indices */ if (f2fs_has_inline_data(dn->inode) && index) { err = -ENOENT; f2fs_put_page(npage[0], 1); goto release_out; } parent = npage[0]; if (level != 0) nids[1] = get_nid(parent, offset[0], true); dn->inode_page = npage[0]; dn->inode_page_locked = true; /* get indirect or direct nodes */ for (i = 1; i <= level; i++) { bool done = false; if (!nids[i] && mode == ALLOC_NODE) { /* alloc new node */ if (!alloc_nid(sbi, &(nids[i]))) { err = -ENOSPC; goto release_pages; } dn->nid = nids[i]; npage[i] = new_node_page(dn, noffset[i], NULL); if (IS_ERR(npage[i])) { alloc_nid_failed(sbi, nids[i]); err = PTR_ERR(npage[i]); goto release_pages; } set_nid(parent, offset[i - 1], nids[i], i == 1); alloc_nid_done(sbi, nids[i]); done = true; } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) { npage[i] = get_node_page_ra(parent, offset[i - 1]); if (IS_ERR(npage[i])) { err = PTR_ERR(npage[i]); goto release_pages; } done = true; } if (i == 1) { dn->inode_page_locked = false; unlock_page(parent); } else { f2fs_put_page(parent, 1); } if (!done) { npage[i] = get_node_page(sbi, nids[i]); if (IS_ERR(npage[i])) { err = PTR_ERR(npage[i]); f2fs_put_page(npage[0], 0); goto release_out; } } if (i < level) { parent = npage[i]; nids[i + 1] = get_nid(parent, offset[i], false); } } dn->nid = nids[level]; dn->ofs_in_node = offset[level]; dn->node_page = npage[level]; dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node); return 0; release_pages: f2fs_put_page(parent, 1); if (i > 1) f2fs_put_page(npage[0], 0); release_out: dn->inode_page = NULL; dn->node_page = NULL; if (err == -ENOENT) { dn->cur_level = i; dn->max_level = level; dn->ofs_in_node = offset[level]; } return err; } static void truncate_node(struct dnode_of_data *dn) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct node_info ni; get_node_info(sbi, dn->nid, &ni); if (dn->inode->i_blocks == 0) { f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR); goto invalidate; } f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR); /* Deallocate node address */ invalidate_blocks(sbi, ni.blk_addr); dec_valid_node_count(sbi, dn->inode); set_node_addr(sbi, &ni, NULL_ADDR, false); if (dn->nid == dn->inode->i_ino) { remove_orphan_inode(sbi, dn->nid); dec_valid_inode_count(sbi); f2fs_inode_synced(dn->inode); } invalidate: clear_node_page_dirty(dn->node_page); set_sbi_flag(sbi, SBI_IS_DIRTY); f2fs_put_page(dn->node_page, 1); invalidate_mapping_pages(NODE_MAPPING(sbi), dn->node_page->index, dn->node_page->index); dn->node_page = NULL; trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr); } static int truncate_dnode(struct dnode_of_data *dn) { struct page *page; if (dn->nid == 0) return 1; /* get direct node */ page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); if (IS_ERR(page) && PTR_ERR(page) == -ENOENT) return 1; else if (IS_ERR(page)) return PTR_ERR(page); /* Make dnode_of_data for parameter */ dn->node_page = page; dn->ofs_in_node = 0; truncate_data_blocks(dn); truncate_node(dn); return 1; } static int truncate_nodes(struct 
dnode_of_data *dn, unsigned int nofs, int ofs, int depth) { struct dnode_of_data rdn = *dn; struct page *page; struct f2fs_node *rn; nid_t child_nid; unsigned int child_nofs; int freed = 0; int i, ret; if (dn->nid == 0) return NIDS_PER_BLOCK + 1; trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr); page = get_node_page(F2FS_I_SB(dn->inode), dn->nid); if (IS_ERR(page)) { trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page)); return PTR_ERR(page); } ra_node_pages(page, ofs, NIDS_PER_BLOCK); rn = F2FS_NODE(page); if (depth < 3) { for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) { child_nid = le32_to_cpu(rn->in.nid[i]); if (child_nid == 0) continue; rdn.nid = child_nid; ret = truncate_dnode(&rdn); if (ret < 0) goto out_err; if (set_nid(page, i, 0, false)) dn->node_changed = true; } } else { child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1; for (i = ofs; i < NIDS_PER_BLOCK; i++) { child_nid = le32_to_cpu(rn->in.nid[i]); if (child_nid == 0) { child_nofs += NIDS_PER_BLOCK + 1; continue; } rdn.nid = child_nid; ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1); if (ret == (NIDS_PER_BLOCK + 1)) { if (set_nid(page, i, 0, false)) dn->node_changed = true; child_nofs += ret; } else if (ret < 0 && ret != -ENOENT) { goto out_err; } } freed = child_nofs; } if (!ofs) { /* remove current indirect node */ dn->node_page = page; truncate_node(dn); freed++; } else { f2fs_put_page(page, 1); } trace_f2fs_truncate_nodes_exit(dn->inode, freed); return freed; out_err: f2fs_put_page(page, 1); trace_f2fs_truncate_nodes_exit(dn->inode, ret); return ret; } static int truncate_partial_nodes(struct dnode_of_data *dn, struct f2fs_inode *ri, int *offset, int depth) { struct page *pages[2]; nid_t nid[3]; nid_t child_nid; int err = 0; int i; int idx = depth - 2; nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); if (!nid[0]) return 0; /* get indirect nodes in the path */ for (i = 0; i < idx + 1; i++) { /* reference count'll be increased */ pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]); if (IS_ERR(pages[i])) { err = PTR_ERR(pages[i]); idx = i - 1; goto fail; } nid[i + 1] = get_nid(pages[i], offset[i + 1], false); } ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK); /* free direct nodes linked to a partial indirect node */ for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { child_nid = get_nid(pages[idx], i, false); if (!child_nid) continue; dn->nid = child_nid; err = truncate_dnode(dn); if (err < 0) goto fail; if (set_nid(pages[idx], i, 0, false)) dn->node_changed = true; } if (offset[idx + 1] == 0) { dn->node_page = pages[idx]; dn->nid = nid[idx]; truncate_node(dn); } else { f2fs_put_page(pages[idx], 1); } offset[idx]++; offset[idx + 1] = 0; idx--; fail: for (i = idx; i >= 0; i--) f2fs_put_page(pages[i], 1); trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err); return err; } /* * All the block addresses of data and nodes should be nullified. 
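 * The walk starts from the path computed by get_node_path() and
 * frees whole sub-trees through truncate_nodes() and
 * truncate_partial_nodes() before clearing the i_nid slots.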
*/ int truncate_inode_blocks(struct inode *inode, pgoff_t from) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int err = 0, cont = 1; int level, offset[4], noffset[4]; unsigned int nofs = 0; struct f2fs_inode *ri; struct dnode_of_data dn; struct page *page; trace_f2fs_truncate_inode_blocks_enter(inode, from); level = get_node_path(inode, from, offset, noffset); page = get_node_page(sbi, inode->i_ino); if (IS_ERR(page)) { trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); return PTR_ERR(page); } set_new_dnode(&dn, inode, page, NULL, 0); unlock_page(page); ri = F2FS_INODE(page); switch (level) { case 0: case 1: nofs = noffset[1]; break; case 2: nofs = noffset[1]; if (!offset[level - 1]) goto skip_partial; err = truncate_partial_nodes(&dn, ri, offset, level); if (err < 0 && err != -ENOENT) goto fail; nofs += 1 + NIDS_PER_BLOCK; break; case 3: nofs = 5 + 2 * NIDS_PER_BLOCK; if (!offset[level - 1]) goto skip_partial; err = truncate_partial_nodes(&dn, ri, offset, level); if (err < 0 && err != -ENOENT) goto fail; break; default: BUG(); } skip_partial: while (cont) { dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); switch (offset[0]) { case NODE_DIR1_BLOCK: case NODE_DIR2_BLOCK: err = truncate_dnode(&dn); break; case NODE_IND1_BLOCK: case NODE_IND2_BLOCK: err = truncate_nodes(&dn, nofs, offset[1], 2); break; case NODE_DIND_BLOCK: err = truncate_nodes(&dn, nofs, offset[1], 3); cont = 0; break; default: BUG(); } if (err < 0 && err != -ENOENT) goto fail; if (offset[1] == 0 && ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) { lock_page(page); BUG_ON(page->mapping != NODE_MAPPING(sbi)); f2fs_wait_on_page_writeback(page, NODE, true); ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0; set_page_dirty(page); unlock_page(page); } offset[1] = 0; offset[0]++; nofs += err; } fail: f2fs_put_page(page, 0); trace_f2fs_truncate_inode_blocks_exit(inode, err); return err > 0 ? 0 : err; } int truncate_xattr_node(struct inode *inode, struct page *page) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t nid = F2FS_I(inode)->i_xattr_nid; struct dnode_of_data dn; struct page *npage; if (!nid) return 0; npage = get_node_page(sbi, nid); if (IS_ERR(npage)) return PTR_ERR(npage); f2fs_i_xnid_write(inode, 0); set_new_dnode(&dn, inode, page, npage, nid); if (page) dn.inode_page_locked = true; truncate_node(&dn); return 0; } /* * Caller should grab and release a rwsem by calling f2fs_lock_op() and * f2fs_unlock_op(). 
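 * remove_inode_page() drops the xattr node, any remaining inline
 * data block, and finally the inode node page itself.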
*/ int remove_inode_page(struct inode *inode) { struct dnode_of_data dn; int err; set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); err = get_dnode_of_data(&dn, 0, LOOKUP_NODE); if (err) return err; err = truncate_xattr_node(inode, dn.inode_page); if (err) { f2fs_put_dnode(&dn); return err; } /* remove potential inline_data blocks */ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) truncate_data_blocks_range(&dn, 1); /* 0 is possible, after f2fs_new_inode() has failed */ f2fs_bug_on(F2FS_I_SB(inode), inode->i_blocks != 0 && inode->i_blocks != 1); /* will put inode & node pages */ truncate_node(&dn); return 0; } struct page *new_inode_page(struct inode *inode) { struct dnode_of_data dn; /* allocate inode page for new inode */ set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); /* caller should f2fs_put_page(page, 1); */ return new_node_page(&dn, 0, NULL); } struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs, struct page *ipage) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct node_info new_ni; struct page *page; int err; if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) return ERR_PTR(-EPERM); page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false); if (!page) return ERR_PTR(-ENOMEM); if (unlikely(!inc_valid_node_count(sbi, dn->inode))) { err = -ENOSPC; goto fail; } #ifdef CONFIG_F2FS_CHECK_FS get_node_info(sbi, dn->nid, &new_ni); f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR); #endif new_ni.nid = dn->nid; new_ni.ino = dn->inode->i_ino; new_ni.blk_addr = NULL_ADDR; new_ni.flag = 0; new_ni.version = 0; set_node_addr(sbi, &new_ni, NEW_ADDR, false); f2fs_wait_on_page_writeback(page, NODE, true); fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); set_cold_node(dn->inode, page); if (!PageUptodate(page)) SetPageUptodate(page); if (set_page_dirty(page)) dn->node_changed = true; if (f2fs_has_xattr_block(ofs)) f2fs_i_xnid_write(dn->inode, dn->nid); if (ofs == 0) inc_valid_inode_count(sbi); return page; fail: clear_node_page_dirty(page); f2fs_put_page(page, 1); return ERR_PTR(err); } /* * Caller should do after getting the following values. * 0: f2fs_put_page(page, 0) * LOCKED_PAGE or error: f2fs_put_page(page, 1) */ static int read_node_page(struct page *page, int op_flags) { struct f2fs_sb_info *sbi = F2FS_P_SB(page); struct node_info ni; struct f2fs_io_info fio = { .sbi = sbi, .type = NODE, .op = REQ_OP_READ, .op_flags = op_flags, .page = page, .encrypted_page = NULL, }; if (PageUptodate(page)) return LOCKED_PAGE; get_node_info(sbi, page->index, &ni); if (unlikely(ni.blk_addr == NULL_ADDR)) { ClearPageUptodate(page); return -ENOENT; } fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr; return f2fs_submit_page_bio(&fio); } /* * Readahead a node page */ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) { struct page *apage; int err; if (!nid) return; f2fs_bug_on(sbi, check_nid_range(sbi, nid)); rcu_read_lock(); apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid); rcu_read_unlock(); if (apage) return; apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); if (!apage) return; err = read_node_page(apage, REQ_RAHEAD); f2fs_put_page(apage, err ? 
1 : 0); } static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, struct page *parent, int start) { struct page *page; int err; if (!nid) return ERR_PTR(-ENOENT); f2fs_bug_on(sbi, check_nid_range(sbi, nid)); repeat: page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); if (!page) return ERR_PTR(-ENOMEM); err = read_node_page(page, 0); if (err < 0) { f2fs_put_page(page, 1); return ERR_PTR(err); } else if (err == LOCKED_PAGE) { goto page_hit; } if (parent) ra_node_pages(parent, start + 1, MAX_RA_NODE); lock_page(page); if (unlikely(page->mapping != NODE_MAPPING(sbi))) { f2fs_put_page(page, 1); goto repeat; } if (unlikely(!PageUptodate(page))) goto out_err; page_hit: if(unlikely(nid != nid_of_node(page))) { f2fs_bug_on(sbi, 1); ClearPageUptodate(page); out_err: f2fs_put_page(page, 1); return ERR_PTR(-EIO); } return page; } struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) { return __get_node_page(sbi, nid, NULL, 0); } struct page *get_node_page_ra(struct page *parent, int start) { struct f2fs_sb_info *sbi = F2FS_P_SB(parent); nid_t nid = get_nid(parent, start, false); return __get_node_page(sbi, nid, parent, start); } static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) { struct inode *inode; struct page *page; int ret; /* should flush inline_data before evict_inode */ inode = ilookup(sbi->sb, ino); if (!inode) return; page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0); if (!page) goto iput_out; if (!PageUptodate(page)) goto page_out; if (!PageDirty(page)) goto page_out; if (!clear_page_dirty_for_io(page)) goto page_out; ret = f2fs_write_inline_data(inode, page); inode_dec_dirty_pages(inode); remove_dirty_inode(inode); if (ret) set_page_dirty(page); page_out: f2fs_put_page(page, 1); iput_out: iput(inode); } void move_node_page(struct page *node_page, int gc_type) { if (gc_type == FG_GC) { struct f2fs_sb_info *sbi = F2FS_P_SB(node_page); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 1, .for_reclaim = 0, }; set_page_dirty(node_page); f2fs_wait_on_page_writeback(node_page, NODE, true); f2fs_bug_on(sbi, PageWriteback(node_page)); if (!clear_page_dirty_for_io(node_page)) goto out_page; if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc)) unlock_page(node_page); goto release_page; } else { /* set page dirty and write it */ if (!PageWriteback(node_page)) set_page_dirty(node_page); } out_page: unlock_page(node_page); release_page: f2fs_put_page(node_page, 0); } static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) { pgoff_t index, end; struct pagevec pvec; struct page *last_page = NULL; pagevec_init(&pvec, 0); index = 0; end = ULONG_MAX; while (index <= end) { int i, nr_pages; nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; if (unlikely(f2fs_cp_error(sbi))) { f2fs_put_page(last_page, 0); pagevec_release(&pvec); return ERR_PTR(-EIO); } if (!IS_DNODE(page) || !is_cold_node(page)) continue; if (ino_of_node(page) != ino) continue; lock_page(page); if (unlikely(page->mapping != NODE_MAPPING(sbi))) { continue_unlock: unlock_page(page); continue; } if (ino_of_node(page) != ino) goto continue_unlock; if (!PageDirty(page)) { /* someone wrote it for us */ goto continue_unlock; } if (last_page) f2fs_put_page(last_page, 0); get_page(page); last_page = page; unlock_page(page); } pagevec_release(&pvec); 
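/*
 * Yield between pagevec batches: the tagged radix-tree walk may
 * cover a large node mapping and nothing is held across batches.
 */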
cond_resched(); } return last_page; } static int __write_node_page(struct page *page, bool atomic, bool *submitted, struct writeback_control *wbc) { struct f2fs_sb_info *sbi = F2FS_P_SB(page); nid_t nid; struct node_info ni; struct f2fs_io_info fio = { .sbi = sbi, .type = NODE, .op = REQ_OP_WRITE, .op_flags = wbc_to_write_flags(wbc), .page = page, .encrypted_page = NULL, .submitted = false, }; trace_f2fs_writepage(page, NODE); if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; if (unlikely(f2fs_cp_error(sbi))) goto redirty_out; /* get old block addr of this node page */ nid = nid_of_node(page); f2fs_bug_on(sbi, page->index != nid); if (wbc->for_reclaim) { if (!down_read_trylock(&sbi->node_write)) goto redirty_out; } else { down_read(&sbi->node_write); } get_node_info(sbi, nid, &ni); /* This page is already truncated */ if (unlikely(ni.blk_addr == NULL_ADDR)) { ClearPageUptodate(page); dec_page_count(sbi, F2FS_DIRTY_NODES); up_read(&sbi->node_write); unlock_page(page); return 0; } if (atomic && !test_opt(sbi, NOBARRIER)) fio.op_flags |= REQ_PREFLUSH | REQ_FUA; set_page_writeback(page); fio.old_blkaddr = ni.blk_addr; write_node_page(nid, &fio); set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); dec_page_count(sbi, F2FS_DIRTY_NODES); up_read(&sbi->node_write); if (wbc->for_reclaim) { f2fs_submit_merged_bio_cond(sbi, page->mapping->host, 0, page->index, NODE, WRITE); submitted = NULL; } unlock_page(page); if (unlikely(f2fs_cp_error(sbi))) { f2fs_submit_merged_bio(sbi, NODE, WRITE); submitted = NULL; } if (submitted) *submitted = fio.submitted; return 0; redirty_out: redirty_page_for_writepage(wbc, page); return AOP_WRITEPAGE_ACTIVATE; } static int f2fs_write_node_page(struct page *page, struct writeback_control *wbc) { return __write_node_page(page, false, NULL, wbc); } int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, struct writeback_control *wbc, bool atomic) { pgoff_t index, end; pgoff_t last_idx = ULONG_MAX; struct pagevec pvec; int ret = 0; struct page *last_page = NULL; bool marked = false; nid_t ino = inode->i_ino; if (atomic) { last_page = last_fsync_dnode(sbi, ino); if (IS_ERR_OR_NULL(last_page)) return PTR_ERR_OR_ZERO(last_page); } retry: pagevec_init(&pvec, 0); index = 0; end = ULONG_MAX; while (index <= end) { int i, nr_pages; nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; bool submitted = false; if (unlikely(f2fs_cp_error(sbi))) { f2fs_put_page(last_page, 0); pagevec_release(&pvec); ret = -EIO; goto out; } if (!IS_DNODE(page) || !is_cold_node(page)) continue; if (ino_of_node(page) != ino) continue; lock_page(page); if (unlikely(page->mapping != NODE_MAPPING(sbi))) { continue_unlock: unlock_page(page); continue; } if (ino_of_node(page) != ino) goto continue_unlock; if (!PageDirty(page) && page != last_page) { /* someone wrote it for us */ goto continue_unlock; } f2fs_wait_on_page_writeback(page, NODE, true); BUG_ON(PageWriteback(page)); if (!atomic || page == last_page) { set_fsync_mark(page, 1); if (IS_INODE(page)) { if (is_inode_flag_set(inode, FI_DIRTY_INODE)) update_inode(inode, page); set_dentry_mark(page, need_dentry_mark(sbi, ino)); } /* may be written by other thread */ if (!PageDirty(page)) set_page_dirty(page); } if (!clear_page_dirty_for_io(page)) goto continue_unlock; ret = __write_node_page(page, atomic && page == last_page, &submitted, wbc); 
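/*
 * In atomic mode only last_page carries the fsync mark and is
 * written with REQ_PREFLUSH | REQ_FUA. On failure
 * __write_node_page() returns with the page still locked, so it
 * is unlocked below.
 */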
if (ret) { unlock_page(page); f2fs_put_page(last_page, 0); break; } else if (submitted) { last_idx = page->index; } if (page == last_page) { f2fs_put_page(page, 0); marked = true; break; } } pagevec_release(&pvec); cond_resched(); if (ret || marked) break; } if (!ret && atomic && !marked) { f2fs_msg(sbi->sb, KERN_DEBUG, "Retry to write fsync mark: ino=%u, idx=%lx", ino, last_page->index); lock_page(last_page); f2fs_wait_on_page_writeback(last_page, NODE, true); set_page_dirty(last_page); unlock_page(last_page); goto retry; } out: if (last_idx != ULONG_MAX) f2fs_submit_merged_bio_cond(sbi, NULL, ino, last_idx, NODE, WRITE); return ret ? -EIO: 0; } int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc) { pgoff_t index, end; struct pagevec pvec; int step = 0; int nwritten = 0; int ret = 0; pagevec_init(&pvec, 0); next_step: index = 0; end = ULONG_MAX; while (index <= end) { int i, nr_pages; nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; bool submitted = false; if (unlikely(f2fs_cp_error(sbi))) { pagevec_release(&pvec); ret = -EIO; goto out; } /* * flushing sequence with step: * 0. indirect nodes * 1. dentry dnodes * 2. file dnodes */ if (step == 0 && IS_DNODE(page)) continue; if (step == 1 && (!IS_DNODE(page) || is_cold_node(page))) continue; if (step == 2 && (!IS_DNODE(page) || !is_cold_node(page))) continue; lock_node: if (!trylock_page(page)) continue; if (unlikely(page->mapping != NODE_MAPPING(sbi))) { continue_unlock: unlock_page(page); continue; } if (!PageDirty(page)) { /* someone wrote it for us */ goto continue_unlock; } /* flush inline_data */ if (is_inline_node(page)) { clear_inline_node(page); unlock_page(page); flush_inline_data(sbi, ino_of_node(page)); goto lock_node; } f2fs_wait_on_page_writeback(page, NODE, true); BUG_ON(PageWriteback(page)); if (!clear_page_dirty_for_io(page)) goto continue_unlock; set_fsync_mark(page, 0); set_dentry_mark(page, 0); ret = __write_node_page(page, false, &submitted, wbc); if (ret) unlock_page(page); else if (submitted) nwritten++; if (--wbc->nr_to_write == 0) break; } pagevec_release(&pvec); cond_resched(); if (wbc->nr_to_write == 0) { step = 2; break; } } if (step < 2) { step++; goto next_step; } out: if (nwritten) f2fs_submit_merged_bio(sbi, NODE, WRITE); return ret; } int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino) { pgoff_t index = 0, end = ULONG_MAX; struct pagevec pvec; int ret2, ret = 0; pagevec_init(&pvec, 0); while (index <= end) { int i, nr_pages; nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, PAGECACHE_TAG_WRITEBACK, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; /* until radix tree lookup accepts end_index */ if (unlikely(page->index > end)) continue; if (ino && ino_of_node(page) == ino) { f2fs_wait_on_page_writeback(page, NODE, true); if (TestClearPageError(page)) ret = -EIO; } } pagevec_release(&pvec); cond_resched(); } ret2 = filemap_check_errors(NODE_MAPPING(sbi)); if (!ret) ret = ret2; return ret; } static int f2fs_write_node_pages(struct address_space *mapping, struct writeback_control *wbc) { struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); struct blk_plug plug; long diff; /* balancing f2fs's metadata in background */ f2fs_balance_fs_bg(sbi); /* collect a number of dirty node pages and write 
together */ if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE)) goto skip_write; trace_f2fs_writepages(mapping->host, wbc, NODE); diff = nr_pages_to_write(sbi, NODE, wbc); wbc->sync_mode = WB_SYNC_NONE; blk_start_plug(&plug); sync_node_pages(sbi, wbc); blk_finish_plug(&plug); wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); return 0; skip_write: wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES); trace_f2fs_writepages(mapping->host, wbc, NODE); return 0; } static int f2fs_set_node_page_dirty(struct page *page) { trace_f2fs_set_page_dirty(page, NODE); if (!PageUptodate(page)) SetPageUptodate(page); if (!PageDirty(page)) { f2fs_set_page_dirty_nobuffers(page); inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); SetPagePrivate(page); f2fs_trace_pid(page); return 1; } return 0; } /* * Structure of the f2fs node operations */ const struct address_space_operations f2fs_node_aops = { .writepage = f2fs_write_node_page, .writepages = f2fs_write_node_pages, .set_page_dirty = f2fs_set_node_page_dirty, .invalidatepage = f2fs_invalidate_page, .releasepage = f2fs_release_page, #ifdef CONFIG_MIGRATION .migratepage = f2fs_migrate_page, #endif }; static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, nid_t n) { return radix_tree_lookup(&nm_i->free_nid_root, n); } static int __insert_nid_to_list(struct f2fs_sb_info *sbi, struct free_nid *i, enum nid_list list, bool new) { struct f2fs_nm_info *nm_i = NM_I(sbi); if (new) { int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); if (err) return err; } f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW : i->state != NID_ALLOC); nm_i->nid_cnt[list]++; list_add_tail(&i->list, &nm_i->nid_list[list]); return 0; } static void __remove_nid_from_list(struct f2fs_sb_info *sbi, struct free_nid *i, enum nid_list list, bool reuse) { struct f2fs_nm_info *nm_i = NM_I(sbi); f2fs_bug_on(sbi, list == FREE_NID_LIST ? 
i->state != NID_NEW : i->state != NID_ALLOC); nm_i->nid_cnt[list]--; list_del(&i->list); if (!reuse) radix_tree_delete(&nm_i->free_nid_root, i->nid); } /* return if the nid is recognized as free */ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; struct nat_entry *ne; int err; /* 0 nid should not be used */ if (unlikely(nid == 0)) return false; if (build) { /* do not add allocated nids */ ne = __lookup_nat_cache(nm_i, nid); if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || nat_get_blkaddr(ne) != NULL_ADDR)) return false; } i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS); i->nid = nid; i->state = NID_NEW; if (radix_tree_preload(GFP_NOFS)) { kmem_cache_free(free_nid_slab, i); return true; } spin_lock(&nm_i->nid_list_lock); err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true); spin_unlock(&nm_i->nid_list_lock); radix_tree_preload_end(); if (err) { kmem_cache_free(free_nid_slab, i); return true; } return true; } static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; bool need_free = false; spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); if (i && i->state == NID_NEW) { __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); need_free = true; } spin_unlock(&nm_i->nid_list_lock); if (need_free) kmem_cache_free(free_nid_slab, i); } static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set, bool build) { struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); unsigned int nid_ofs = nid - START_NID(nid); if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) return; if (set) __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); else __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); if (set) nm_i->free_nid_count[nat_ofs]++; else if (!build) nm_i->free_nid_count[nat_ofs]--; } static void scan_nat_page(struct f2fs_sb_info *sbi, struct page *nat_page, nid_t start_nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct f2fs_nat_block *nat_blk = page_address(nat_page); block_t blk_addr; unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); int i; if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) return; __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); i = start_nid % NAT_ENTRY_PER_BLOCK; for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) { bool freed = false; if (unlikely(start_nid >= nm_i->max_nid)) break; blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); f2fs_bug_on(sbi, blk_addr == NEW_ADDR); if (blk_addr == NULL_ADDR) freed = add_free_nid(sbi, start_nid, true); spin_lock(&NM_I(sbi)->nid_list_lock); update_free_nid_bitmap(sbi, start_nid, freed, true); spin_unlock(&NM_I(sbi)->nid_list_lock); } } static void scan_free_nid_bits(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; unsigned int i, idx; down_read(&nm_i->nat_tree_lock); for (i = 0; i < nm_i->nat_blocks; i++) { if (!test_bit_le(i, nm_i->nat_block_bitmap)) continue; if (!nm_i->free_nid_count[i]) continue; for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { nid_t nid; if (!test_bit_le(idx, nm_i->free_nid_bitmap[i])) continue; nid = i * NAT_ENTRY_PER_BLOCK + idx; add_free_nid(sbi, nid, true); if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) goto out; } } out: down_read(&curseg->journal_rwsem); for (i = 0; i < nats_in_cursum(journal); i++) { block_t addr; nid_t nid; addr = 
le32_to_cpu(nat_in_journal(journal, i).block_addr); nid = le32_to_cpu(nid_in_journal(journal, i)); if (addr == NULL_ADDR) add_free_nid(sbi, nid, true); else remove_free_nid(sbi, nid); } up_read(&curseg->journal_rwsem); up_read(&nm_i->nat_tree_lock); } static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; int i = 0; nid_t nid = nm_i->next_scan_nid; /* Enough entries */ if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK) return; if (!sync && !available_free_memory(sbi, FREE_NIDS)) return; if (!mount) { /* try to find free nids in free_nid_bitmap */ scan_free_nid_bits(sbi); if (nm_i->nid_cnt[FREE_NID_LIST]) return; } /* readahead nat pages to be scanned */ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT, true); down_read(&nm_i->nat_tree_lock); while (1) { struct page *page = get_current_nat_page(sbi, nid); scan_nat_page(sbi, page, nid); f2fs_put_page(page, 1); nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); if (unlikely(nid >= nm_i->max_nid)) nid = 0; if (++i >= FREE_NID_PAGES) break; } /* go to the next free nat pages to find free nids abundantly */ nm_i->next_scan_nid = nid; /* find free nids from current sum_pages */ down_read(&curseg->journal_rwsem); for (i = 0; i < nats_in_cursum(journal); i++) { block_t addr; addr = le32_to_cpu(nat_in_journal(journal, i).block_addr); nid = le32_to_cpu(nid_in_journal(journal, i)); if (addr == NULL_ADDR) add_free_nid(sbi, nid, true); else remove_free_nid(sbi, nid); } up_read(&curseg->journal_rwsem); up_read(&nm_i->nat_tree_lock); ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), nm_i->ra_nid_pages, META_NAT, false); } void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) { mutex_lock(&NM_I(sbi)->build_lock); __build_free_nids(sbi, sync, mount); mutex_unlock(&NM_I(sbi)->build_lock); } /* * If this function returns success, caller can obtain a new nid * from second parameter of this function. * The returned nid could be used ino as well as nid when inode is created. */ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i = NULL; retry: #ifdef CONFIG_F2FS_FAULT_INJECTION if (time_to_inject(sbi, FAULT_ALLOC_NID)) { f2fs_show_injection_info(FAULT_ALLOC_NID); return false; } #endif spin_lock(&nm_i->nid_list_lock); if (unlikely(nm_i->available_nids == 0)) { spin_unlock(&nm_i->nid_list_lock); return false; } /* We should not use stale free nids created by build_free_nids */ if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) { f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST])); i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST], struct free_nid, list); *nid = i->nid; __remove_nid_from_list(sbi, i, FREE_NID_LIST, true); i->state = NID_ALLOC; __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); nm_i->available_nids--; update_free_nid_bitmap(sbi, *nid, false, false); spin_unlock(&nm_i->nid_list_lock); return true; } spin_unlock(&nm_i->nid_list_lock); /* Let's scan nat pages and its caches to get free nids */ build_free_nids(sbi, true, false); goto retry; } /* * alloc_nid() should be called prior to this function. 
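 * It commits the allocation: the nid leaves the ALLOC list and its
 * free_nid tracking structure is freed.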
*/ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); f2fs_bug_on(sbi, !i); __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false); spin_unlock(&nm_i->nid_list_lock); kmem_cache_free(free_nid_slab, i); } /* * alloc_nid() should be called prior to this function. */ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; bool need_free = false; if (!nid) return; spin_lock(&nm_i->nid_list_lock); i = __lookup_free_nid_list(nm_i, nid); f2fs_bug_on(sbi, !i); if (!available_free_memory(sbi, FREE_NIDS)) { __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false); need_free = true; } else { __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true); i->state = NID_NEW; __insert_nid_to_list(sbi, i, FREE_NID_LIST, false); } nm_i->available_nids++; update_free_nid_bitmap(sbi, nid, true, false); spin_unlock(&nm_i->nid_list_lock); if (need_free) kmem_cache_free(free_nid_slab, i); } int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i, *next; int nr = nr_shrink; if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS) return 0; if (!mutex_trylock(&nm_i->build_lock)) return 0; spin_lock(&nm_i->nid_list_lock); list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST], list) { if (nr_shrink <= 0 || nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS) break; __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); kmem_cache_free(free_nid_slab, i); nr_shrink--; } spin_unlock(&nm_i->nid_list_lock); mutex_unlock(&nm_i->build_lock); return nr - nr_shrink; } void recover_inline_xattr(struct inode *inode, struct page *page) { void *src_addr, *dst_addr; size_t inline_size; struct page *ipage; struct f2fs_inode *ri; ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino); f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage)); ri = F2FS_INODE(page); if (!(ri->i_inline & F2FS_INLINE_XATTR)) { clear_inode_flag(inode, FI_INLINE_XATTR); goto update_inode; } dst_addr = inline_xattr_addr(ipage); src_addr = inline_xattr_addr(page); inline_size = inline_xattr_size(inode); f2fs_wait_on_page_writeback(ipage, NODE, true); memcpy(dst_addr, src_addr, inline_size); update_inode: update_inode(inode, ipage); f2fs_put_page(ipage, 1); } int recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; nid_t new_xnid = nid_of_node(page); struct node_info ni; struct page *xpage; if (!prev_xnid) goto recover_xnid; /* 1: invalidate the previous xattr nid */ get_node_info(sbi, prev_xnid, &ni); f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR); invalidate_blocks(sbi, ni.blk_addr); dec_valid_node_count(sbi, inode); set_node_addr(sbi, &ni, NULL_ADDR, false); recover_xnid: /* 2: update xattr nid in inode */ remove_free_nid(sbi, new_xnid); f2fs_i_xnid_write(inode, new_xnid); if (unlikely(!inc_valid_node_count(sbi, inode))) f2fs_bug_on(sbi, 1); update_inode_page(inode); /* 3: update and set xattr node page dirty */ xpage = grab_cache_page(NODE_MAPPING(sbi), new_xnid); if (!xpage) return -ENOMEM; memcpy(F2FS_NODE(xpage), F2FS_NODE(page), PAGE_SIZE); get_node_info(sbi, new_xnid, &ni); ni.ino = inode->i_ino; set_node_addr(sbi, &ni, NEW_ADDR, false); set_page_dirty(xpage); f2fs_put_page(xpage, 1); return 0; } int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) { struct f2fs_inode *src, *dst; nid_t 
ino = ino_of_node(page); struct node_info old_ni, new_ni; struct page *ipage; get_node_info(sbi, ino, &old_ni); if (unlikely(old_ni.blk_addr != NULL_ADDR)) return -EINVAL; retry: ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false); if (!ipage) { congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; } /* Should not use this inode from free nid list */ remove_free_nid(sbi, ino); if (!PageUptodate(ipage)) SetPageUptodate(ipage); fill_node_footer(ipage, ino, ino, 0, true); src = F2FS_INODE(page); dst = F2FS_INODE(ipage); memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src); dst->i_size = 0; dst->i_blocks = cpu_to_le64(1); dst->i_links = cpu_to_le32(1); dst->i_xattr_nid = 0; dst->i_inline = src->i_inline & F2FS_INLINE_XATTR; new_ni = old_ni; new_ni.ino = ino; if (unlikely(!inc_valid_node_count(sbi, NULL))) WARN_ON(1); set_node_addr(sbi, &new_ni, NEW_ADDR, false); inc_valid_inode_count(sbi); set_page_dirty(ipage); f2fs_put_page(ipage, 1); return 0; } int restore_node_summary(struct f2fs_sb_info *sbi, unsigned int segno, struct f2fs_summary_block *sum) { struct f2fs_node *rn; struct f2fs_summary *sum_entry; block_t addr; int i, idx, last_offset, nrpages; /* scan the node segment */ last_offset = sbi->blocks_per_seg; addr = START_BLOCK(sbi, segno); sum_entry = &sum->entries[0]; for (i = 0; i < last_offset; i += nrpages, addr += nrpages) { nrpages = min(last_offset - i, BIO_MAX_PAGES); /* readahead node pages */ ra_meta_pages(sbi, addr, nrpages, META_POR, true); for (idx = addr; idx < addr + nrpages; idx++) { struct page *page = get_tmp_page(sbi, idx); rn = F2FS_NODE(page); sum_entry->nid = rn->footer.nid; sum_entry->version = 0; sum_entry->ofs_in_node = 0; sum_entry++; f2fs_put_page(page, 1); } invalidate_mapping_pages(META_MAPPING(sbi), addr, addr + nrpages); } return 0; } static void remove_nats_in_journal(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; int i; down_write(&curseg->journal_rwsem); for (i = 0; i < nats_in_cursum(journal); i++) { struct nat_entry *ne; struct f2fs_nat_entry raw_ne; nid_t nid = le32_to_cpu(nid_in_journal(journal, i)); raw_ne = nat_in_journal(journal, i); ne = __lookup_nat_cache(nm_i, nid); if (!ne) { ne = grab_nat_entry(nm_i, nid, true); node_info_from_raw_nat(&ne->ni, &raw_ne); } /* * if a free nat in journal has not been used after last * checkpoint, we should remove it from available nids, * since later we will add it again. 
*/ if (!get_nat_flag(ne, IS_DIRTY) && le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) { spin_lock(&nm_i->nid_list_lock); nm_i->available_nids--; spin_unlock(&nm_i->nid_list_lock); } __set_nat_cache_dirty(nm_i, ne); } update_nats_in_cursum(journal, -i); up_write(&curseg->journal_rwsem); } static void __adjust_nat_entry_set(struct nat_entry_set *nes, struct list_head *head, int max) { struct nat_entry_set *cur; if (nes->entry_cnt >= max) goto add_out; list_for_each_entry(cur, head, set_list) { if (cur->entry_cnt >= nes->entry_cnt) { list_add(&nes->set_list, cur->set_list.prev); return; } } add_out: list_add_tail(&nes->set_list, head); } static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, struct page *page) { struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK; struct f2fs_nat_block *nat_blk = page_address(page); int valid = 0; int i; if (!enabled_nat_bits(sbi, NULL)) return; for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) { if (start_nid == 0 && i == 0) valid++; if (nat_blk->entries[i].block_addr) valid++; } if (valid == 0) { __set_bit_le(nat_index, nm_i->empty_nat_bits); __clear_bit_le(nat_index, nm_i->full_nat_bits); return; } __clear_bit_le(nat_index, nm_i->empty_nat_bits); if (valid == NAT_ENTRY_PER_BLOCK) __set_bit_le(nat_index, nm_i->full_nat_bits); else __clear_bit_le(nat_index, nm_i->full_nat_bits); } static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, struct nat_entry_set *set, struct cp_control *cpc) { struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK; bool to_journal = true; struct f2fs_nat_block *nat_blk; struct nat_entry *ne, *cur; struct page *page = NULL; /* * there are two steps to flush nat entries: * #1, flush nat entries to journal in current hot data summary block. * #2, flush nat entries to nat page. */ if (enabled_nat_bits(sbi, cpc) || !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL)) to_journal = false; if (to_journal) { down_write(&curseg->journal_rwsem); } else { page = get_next_nat_page(sbi, start_nid); nat_blk = page_address(page); f2fs_bug_on(sbi, !nat_blk); } /* flush dirty nats in nat entry set */ list_for_each_entry_safe(ne, cur, &set->entry_list, list) { struct f2fs_nat_entry *raw_ne; nid_t nid = nat_get_nid(ne); int offset; if (nat_get_blkaddr(ne) == NEW_ADDR) continue; if (to_journal) { offset = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 1); f2fs_bug_on(sbi, offset < 0); raw_ne = &nat_in_journal(journal, offset); nid_in_journal(journal, offset) = cpu_to_le32(nid); } else { raw_ne = &nat_blk->entries[nid - start_nid]; } raw_nat_from_node_info(raw_ne, &ne->ni); nat_reset_flag(ne); __clear_nat_cache_dirty(NM_I(sbi), set, ne); if (nat_get_blkaddr(ne) == NULL_ADDR) { add_free_nid(sbi, nid, false); spin_lock(&NM_I(sbi)->nid_list_lock); NM_I(sbi)->available_nids++; update_free_nid_bitmap(sbi, nid, true, false); spin_unlock(&NM_I(sbi)->nid_list_lock); } else { spin_lock(&NM_I(sbi)->nid_list_lock); update_free_nid_bitmap(sbi, nid, false, false); spin_unlock(&NM_I(sbi)->nid_list_lock); } } if (to_journal) { up_write(&curseg->journal_rwsem); } else { __update_nat_bits(sbi, start_nid, page); f2fs_put_page(page, 1); } f2fs_bug_on(sbi, set->entry_cnt); radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); kmem_cache_free(nat_entry_set_slab, set); } /* * This function is called during the checkpointing process. 
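 * Dirty NAT entries are gathered per NAT block into sets; sets
 * small enough for the current hot-data journal are flushed there,
 * the rest go straight to their NAT pages.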
*/ void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_journal *journal = curseg->journal; struct nat_entry_set *setvec[SETVEC_SIZE]; struct nat_entry_set *set, *tmp; unsigned int found; nid_t set_idx = 0; LIST_HEAD(sets); if (!nm_i->dirty_nat_cnt) return; down_write(&nm_i->nat_tree_lock); /* * if there are no enough space in journal to store dirty nat * entries, remove all entries from journal and merge them * into nat entry set. */ if (enabled_nat_bits(sbi, cpc) || !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL)) remove_nats_in_journal(sbi); while ((found = __gang_lookup_nat_set(nm_i, set_idx, SETVEC_SIZE, setvec))) { unsigned idx; set_idx = setvec[found - 1]->set + 1; for (idx = 0; idx < found; idx++) __adjust_nat_entry_set(setvec[idx], &sets, MAX_NAT_JENTRIES(journal)); } /* flush dirty nats in nat entry set */ list_for_each_entry_safe(set, tmp, &sets, set_list) __flush_nat_entry_set(sbi, set, cpc); up_write(&nm_i->nat_tree_lock); f2fs_bug_on(sbi, nm_i->dirty_nat_cnt); } static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE; unsigned int i; __u64 cp_ver = cur_cp_version(ckpt); block_t nat_bits_addr; if (!enabled_nat_bits(sbi, NULL)) return 0; nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 + F2FS_BLKSIZE - 1); nm_i->nat_bits = kzalloc(nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); if (!nm_i->nat_bits) return -ENOMEM; nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg - nm_i->nat_bits_blocks; for (i = 0; i < nm_i->nat_bits_blocks; i++) { struct page *page = get_meta_page(sbi, nat_bits_addr++); memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS), page_address(page), F2FS_BLKSIZE); f2fs_put_page(page, 1); } cp_ver |= (cur_cp_crc(ckpt) << 32); if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { disable_nat_bits(sbi, true); return 0; } nm_i->full_nat_bits = nm_i->nat_bits + 8; nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint"); return 0; } inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned int i = 0; nid_t nid, last_nid; if (!enabled_nat_bits(sbi, NULL)) return; for (i = 0; i < nm_i->nat_blocks; i++) { i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); if (i >= nm_i->nat_blocks) break; __set_bit_le(i, nm_i->nat_block_bitmap); nid = i * NAT_ENTRY_PER_BLOCK; last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK; spin_lock(&NM_I(sbi)->nid_list_lock); for (; nid < last_nid; nid++) update_free_nid_bitmap(sbi, nid, true, true); spin_unlock(&NM_I(sbi)->nid_list_lock); } for (i = 0; i < nm_i->nat_blocks; i++) { i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); if (i >= nm_i->nat_blocks) break; __set_bit_le(i, nm_i->nat_block_bitmap); } } static int init_node_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned char *version_bitmap; unsigned int nat_segs; int err; nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); /* segment_count_nat includes pair segment so divide to 2. 
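 * Each NAT segment has a paired shadow segment, so only half of
 * segment_count_nat holds live NAT blocks:
 *   nat_blocks = (segment_count_nat / 2) << log_blocks_per_seg
 *   max_nid    = NAT_ENTRY_PER_BLOCK * nat_blocks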
*/ nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks; /* not used nids: 0, node, meta, (and root counted as valid node) */ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - F2FS_RESERVED_NODE_NUM; nm_i->nid_cnt[FREE_NID_LIST] = 0; nm_i->nid_cnt[ALLOC_NID_LIST] = 0; nm_i->nat_cnt = 0; nm_i->ram_thresh = DEF_RAM_THRESHOLD; nm_i->ra_nid_pages = DEF_RA_NID_PAGES; nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]); INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]); INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); INIT_LIST_HEAD(&nm_i->nat_entries); mutex_init(&nm_i->build_lock); spin_lock_init(&nm_i->nid_list_lock); init_rwsem(&nm_i->nat_tree_lock); nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); if (!version_bitmap) return -EFAULT; nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, GFP_KERNEL); if (!nm_i->nat_bitmap) return -ENOMEM; err = __get_nat_bitmaps(sbi); if (err) return err; #ifdef CONFIG_F2FS_CHECK_FS nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size, GFP_KERNEL); if (!nm_i->nat_bitmap_mir) return -ENOMEM; #endif return 0; } static int init_free_nid_cache(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks * NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL); if (!nm_i->free_nid_bitmap) return -ENOMEM; nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8, GFP_KERNEL); if (!nm_i->nat_block_bitmap) return -ENOMEM; nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks * sizeof(unsigned short), GFP_KERNEL); if (!nm_i->free_nid_count) return -ENOMEM; return 0; } int build_node_manager(struct f2fs_sb_info *sbi) { int err; sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL); if (!sbi->nm_info) return -ENOMEM; err = init_node_manager(sbi); if (err) return err; err = init_free_nid_cache(sbi); if (err) return err; /* load free nid status from nat_bits table */ load_free_nid_bitmap(sbi); build_free_nids(sbi, true, true); return 0; } void destroy_node_manager(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i, *next_i; struct nat_entry *natvec[NATVEC_SIZE]; struct nat_entry_set *setvec[SETVEC_SIZE]; nid_t nid = 0; unsigned int found; if (!nm_i) return; /* destroy free nid list */ spin_lock(&nm_i->nid_list_lock); list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST], list) { __remove_nid_from_list(sbi, i, FREE_NID_LIST, false); spin_unlock(&nm_i->nid_list_lock); kmem_cache_free(free_nid_slab, i); spin_lock(&nm_i->nid_list_lock); } f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]); f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]); f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST])); spin_unlock(&nm_i->nid_list_lock); /* destroy nat cache */ down_write(&nm_i->nat_tree_lock); while ((found = __gang_lookup_nat_cache(nm_i, nid, NATVEC_SIZE, natvec))) { unsigned idx; nid = nat_get_nid(natvec[found - 1]) + 1; for (idx = 0; idx < found; idx++) __del_from_nat_cache(nm_i, natvec[idx]); } f2fs_bug_on(sbi, nm_i->nat_cnt); /* destroy nat set cache */ nid = 0; while ((found = __gang_lookup_nat_set(nm_i, nid, SETVEC_SIZE, setvec))) { unsigned idx; nid = 
setvec[found - 1]->set + 1; for (idx = 0; idx < found; idx++) { /* entry_cnt is not zero, when cp_error was occurred */ f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set); kmem_cache_free(nat_entry_set_slab, setvec[idx]); } } up_write(&nm_i->nat_tree_lock); kvfree(nm_i->nat_block_bitmap); kvfree(nm_i->free_nid_bitmap); kvfree(nm_i->free_nid_count); kfree(nm_i->nat_bitmap); kfree(nm_i->nat_bits); #ifdef CONFIG_F2FS_CHECK_FS kfree(nm_i->nat_bitmap_mir); #endif sbi->nm_info = NULL; kfree(nm_i); } int __init create_node_manager_caches(void) { nat_entry_slab = f2fs_kmem_cache_create("nat_entry", sizeof(struct nat_entry)); if (!nat_entry_slab) goto fail; free_nid_slab = f2fs_kmem_cache_create("free_nid", sizeof(struct free_nid)); if (!free_nid_slab) goto destroy_nat_entry; nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set", sizeof(struct nat_entry_set)); if (!nat_entry_set_slab) goto destroy_free_nid; return 0; destroy_free_nid: kmem_cache_destroy(free_nid_slab); destroy_nat_entry: kmem_cache_destroy(nat_entry_slab); fail: return -ENOMEM; } void destroy_node_manager_caches(void) { kmem_cache_destroy(nat_entry_set_slab); kmem_cache_destroy(free_nid_slab); kmem_cache_destroy(nat_entry_slab); }
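/*
 * Editor's illustration (not part of the node manager above, and not kernel
 * code): __get_nat_bitmaps() reads a packed "nat_bits" area from the tail of
 * the checkpoint segment, laid out as an 8-byte cp_ver|crc header followed by
 * the full_nat_bits bitmap and then the empty_nat_bits bitmap. The standalone
 * userspace sketch below reproduces only the size/offset arithmetic; the
 * geometry (nat_blocks = 16384) and the 4 KiB block size are assumed example
 * values, not read from any real filesystem. Compile with: cc natbits.c
 */
#include <stdio.h>

#define EX_BLKSIZE		4096u
#define EX_BITS_PER_BYTE	8u

/* Ceiling division to whole blocks; the kernel gets the same effect from
 * F2FS_BYTES_TO_BLK() by adding F2FS_BLKSIZE - 1 before shifting. */
static unsigned ex_bytes_to_blocks(unsigned bytes)
{
	return (bytes + EX_BLKSIZE - 1) / EX_BLKSIZE;
}

int main(void)
{
	unsigned nat_blocks = 16384;	/* hypothetical NAT geometry */
	unsigned nat_bits_bytes = nat_blocks / EX_BITS_PER_BYTE;
	/* two bitmaps (full + empty) plus the 8-byte cp_ver|crc header */
	unsigned nat_bits_blocks = ex_bytes_to_blocks((nat_bits_bytes << 1) + 8);

	printf("nat_bits_bytes  = %u\n", nat_bits_bytes);
	printf("nat_bits_blocks = %u\n", nat_bits_blocks);
	printf("full_nat_bits at byte 8, empty_nat_bits at byte %u\n",
	       8 + nat_bits_bytes);
	return 0;
}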
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <asm/byteorder.h> #include <linux/swap.h> #include <linux/pipe_fs_i.h> #include <linux/mpage.h> #include <linux/quotaops.h> #include <linux/blkdev.h> #include <linux/uio.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "aops.h" #include "dlmglue.h" #include "extent_map.h" #include "file.h" #include "inode.h" #include "journal.h" #include "suballoc.h" #include "super.h" #include "symlink.h" #include "refcounttree.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" #include "dir.h" #include "namei.h" #include "sysfile.h" static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int err = -EIO; int status; struct ocfs2_dinode *fe = NULL; struct buffer_head *bh = NULL; struct buffer_head *buffer_cache_bh = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); void *kaddr; trace_ocfs2_symlink_get_block( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)iblock, bh_result, create); BUG_ON(ocfs2_inode_is_fast_symlink(inode)); if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) { mlog(ML_ERROR, "block offset > PATH_MAX: %llu", (unsigned long long)iblock); goto bail; } status = ocfs2_read_inode_block(inode, &bh); if (status < 0) { mlog_errno(status); goto bail; } fe = (struct ocfs2_dinode *) bh->b_data; if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb, le32_to_cpu(fe->i_clusters))) { err = -ENOMEM; mlog(ML_ERROR, "block offset is outside the allocated size: " "%llu\n", (unsigned long long)iblock); goto bail; } /* We don't use the page cache to create symlink data, so if * need be, copy it over from the buffer cache. */ if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) { u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock; buffer_cache_bh = sb_getblk(osb->sb, blkno); if (!buffer_cache_bh) { err = -ENOMEM; mlog(ML_ERROR, "couldn't getblock for symlink!\n"); goto bail; } /* we haven't locked out transactions, so a commit * could've happened. Since we've got a reference on * the bh, even if it commits while we're doing the * copy, the data is still good. 
*/ if (buffer_jbd(buffer_cache_bh) && ocfs2_inode_is_new(inode)) { kaddr = kmap_atomic(bh_result->b_page); if (!kaddr) { mlog(ML_ERROR, "couldn't kmap!\n"); goto bail; } memcpy(kaddr + (bh_result->b_size * iblock), buffer_cache_bh->b_data, bh_result->b_size); kunmap_atomic(kaddr); set_buffer_uptodate(bh_result); } brelse(buffer_cache_bh); } map_bh(bh_result, inode->i_sb, le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock); err = 0; bail: brelse(bh); return err; } static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int ret = 0; struct ocfs2_inode_info *oi = OCFS2_I(inode); down_read(&oi->ip_alloc_sem); ret = ocfs2_get_block(inode, iblock, bh_result, create); up_read(&oi->ip_alloc_sem); return ret; } int ocfs2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int err = 0; unsigned int ext_flags; u64 max_blocks = bh_result->b_size >> inode->i_blkbits; u64 p_blkno, count, past_eof; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)iblock, bh_result, create); if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n", inode, inode->i_ino); if (S_ISLNK(inode->i_mode)) { /* this always does I/O for some reason. */ err = ocfs2_symlink_get_block(inode, iblock, bh_result, create); goto bail; } err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count, &ext_flags); if (err) { mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, " "%llu, NULL)\n", err, inode, (unsigned long long)iblock, (unsigned long long)p_blkno); goto bail; } if (max_blocks < count) count = max_blocks; /* * ocfs2 never allocates in this function - the only time we * need to use BH_New is when we're extending i_size on a file * system which doesn't support holes, in which case BH_New * allows __block_write_begin() to zero. * * If we see this on a sparse file system, then a truncate has * raced us and removed the cluster. In this case, we clear * the buffers dirty and uptodate bits and let the buffer code * ignore it as a hole. */ if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) { clear_buffer_dirty(bh_result); clear_buffer_uptodate(bh_result); goto bail; } /* Treat the unwritten extent as a hole for zeroing purposes. 
*/ if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN)) map_bh(bh_result, inode->i_sb, p_blkno); bh_result->b_size = count << inode->i_blkbits; if (!ocfs2_sparse_alloc(osb)) { if (p_blkno == 0) { err = -EIO; mlog(ML_ERROR, "iblock = %llu p_blkno = %llu blkno=(%llu)\n", (unsigned long long)iblock, (unsigned long long)p_blkno, (unsigned long long)OCFS2_I(inode)->ip_blkno); mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters); dump_stack(); goto bail; } } past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)past_eof); if (create && (iblock >= past_eof)) set_buffer_new(bh_result); bail: if (err < 0) err = -EIO; return err; } int ocfs2_read_inline_data(struct inode *inode, struct page *page, struct buffer_head *di_bh) { void *kaddr; loff_t size; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) { ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); return -EROFS; } size = i_size_read(inode); if (size > PAGE_SIZE || size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { ocfs2_error(inode->i_sb, "Inode %llu with inline data has a bad size: %Lu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)size); return -EROFS; } kaddr = kmap_atomic(page); if (size) memcpy(kaddr, di->id2.i_data.id_data, size); /* Clear the remaining part of the page */ memset(kaddr + size, 0, PAGE_SIZE - size); flush_dcache_page(page); kunmap_atomic(kaddr); SetPageUptodate(page); return 0; } static int ocfs2_readpage_inline(struct inode *inode, struct page *page) { int ret; struct buffer_head *di_bh = NULL; BUG_ON(!PageLocked(page)); BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_read_inline_data(inode, page, di_bh); out: unlock_page(page); brelse(di_bh); return ret; } static int ocfs2_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); loff_t start = (loff_t)page->index << PAGE_SHIFT; int ret, unlock = 1; trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, (page ? page->index : 0)); ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page); if (ret != 0) { if (ret == AOP_TRUNCATED_PAGE) unlock = 0; mlog_errno(ret); goto out; } if (down_read_trylock(&oi->ip_alloc_sem) == 0) { /* * Unlock the page and cycle ip_alloc_sem so that we don't * busyloop waiting for ip_alloc_sem to unlock */ ret = AOP_TRUNCATED_PAGE; unlock_page(page); unlock = 0; down_read(&oi->ip_alloc_sem); up_read(&oi->ip_alloc_sem); goto out_inode_unlock; } /* * i_size might have just been updated as we grabbed the meta lock. We * might now be discovering a truncate that hit on another node. * block_read_full_page->get_block freaks out if it is asked to read * beyond the end of a file, so we check here. Callers * (generic_file_read, vm_ops->fault) are clever enough to check i_size * and notice that the page they just read isn't needed. * * XXX sys_readahead() seems to get that wrong? 
*/ if (start >= i_size_read(inode)) { zero_user(page, 0, PAGE_SIZE); SetPageUptodate(page); ret = 0; goto out_alloc; } if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) ret = ocfs2_readpage_inline(inode, page); else ret = block_read_full_page(page, ocfs2_get_block); unlock = 0; out_alloc: up_read(&OCFS2_I(inode)->ip_alloc_sem); out_inode_unlock: ocfs2_inode_unlock(inode, 0); out: if (unlock) unlock_page(page); return ret; } /* * This is used only for read-ahead. Failures or difficult to handle * situations are safe to ignore. * * Right now, we don't bother with BH_Boundary - in-inode extent lists * are quite large (243 extents on 4k blocks), so most inodes don't * grow out to a tree. If need be, detecting boundary extents could * trivially be added in a future version of ocfs2_get_block(). */ static int ocfs2_readpages(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { int ret, err = -EIO; struct inode *inode = mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); loff_t start; struct page *last; /* * Use the nonblocking flag for the dlm code to avoid page * lock inversion, but don't bother with retrying. */ ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK); if (ret) return err; if (down_read_trylock(&oi->ip_alloc_sem) == 0) { ocfs2_inode_unlock(inode, 0); return err; } /* * Don't bother with inline-data. There isn't anything * to read-ahead in that case anyway... */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) goto out_unlock; /* * Check whether a remote node truncated this file - we just * drop out in that case as it's not worth handling here. */ last = list_entry(pages->prev, struct page, lru); start = (loff_t)last->index << PAGE_SHIFT; if (start >= i_size_read(inode)) goto out_unlock; err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block); out_unlock: up_read(&oi->ip_alloc_sem); ocfs2_inode_unlock(inode, 0); return err; } /* Note: Because we don't support holes, our allocation has * already happened (allocation writes zeros to the file data) * so we don't have to worry about ordered writes in * ocfs2_writepage. * * ->writepage is called during the process of invalidating the page cache * during blocked lock processing. It can't block on any cluster locks * to during block mapping. It's relying on the fact that the block * mapping can't have disappeared under the dirty pages that it is * being asked to write back. */ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc) { trace_ocfs2_writepage( (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno, page->index); return block_write_full_page(page, ocfs2_get_block, wbc); } /* Taken from ext3. 
We don't necessarily need the full blown * functionality yet, but IMHO it's better to cut and paste the whole * thing so we can avoid introducing our own bugs (and easily pick up * their fixes when they happen) --Mark */ int walk_page_buffers( handle_t *handle, struct buffer_head *head, unsigned from, unsigned to, int *partial, int (*fn)( handle_t *handle, struct buffer_head *bh)) { struct buffer_head *bh; unsigned block_start, block_end; unsigned blocksize = head->b_size; int err, ret = 0; struct buffer_head *next; for ( bh = head, block_start = 0; ret == 0 && (bh != head || !block_start); block_start = block_end, bh = next) { next = bh->b_this_page; block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (partial && !buffer_uptodate(bh)) *partial = 1; continue; } err = (*fn)(handle, bh); if (!ret) ret = err; } return ret; } static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) { sector_t status; u64 p_blkno = 0; int err = 0; struct inode *inode = mapping->host; trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)block); /* * The swap code (ab-)uses ->bmap to get a block mapping and then * bypasses the file system for actual I/O. We really can't allow * that on refcounted inodes, so we have to skip out here. And yes, * 0 is the magic code for a bmap error.. */ if (ocfs2_is_refcount_inode(inode)) return 0; /* We don't need to lock journal system files, since they aren't * accessed concurrently from multiple nodes. */ if (!INODE_JOURNAL(inode)) { err = ocfs2_inode_lock(inode, NULL, 0); if (err) { if (err != -ENOENT) mlog_errno(err); goto bail; } down_read(&OCFS2_I(inode)->ip_alloc_sem); } if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, NULL); if (!INODE_JOURNAL(inode)) { up_read(&OCFS2_I(inode)->ip_alloc_sem); ocfs2_inode_unlock(inode, 0); } if (err) { mlog(ML_ERROR, "get_blocks() failed, block = %llu\n", (unsigned long long)block); mlog_errno(err); goto bail; } bail: status = err ? 0 : p_blkno; return status; } static int ocfs2_releasepage(struct page *page, gfp_t wait) { if (!page_has_buffers(page)) return 0; return try_to_free_buffers(page); } static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, u32 cpos, unsigned int *start, unsigned int *end) { unsigned int cluster_start = 0, cluster_end = PAGE_SIZE; if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) { unsigned int cpp; cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits); cluster_start = cpos % cpp; cluster_start = cluster_start << osb->s_clustersize_bits; cluster_end = cluster_start + osb->s_clustersize; } BUG_ON(cluster_start > PAGE_SIZE); BUG_ON(cluster_end > PAGE_SIZE); if (start) *start = cluster_start; if (end) *end = cluster_end; } /* * 'from' and 'to' are the region in the page to avoid zeroing. * * If pagesize > clustersize, this function will avoid zeroing outside * of the cluster boundary. 
* * from == to == 0 is code for "zero the entire cluster region" */ static void ocfs2_clear_page_regions(struct page *page, struct ocfs2_super *osb, u32 cpos, unsigned from, unsigned to) { void *kaddr; unsigned int cluster_start, cluster_end; ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end); kaddr = kmap_atomic(page); if (from || to) { if (from > cluster_start) memset(kaddr + cluster_start, 0, from - cluster_start); if (to < cluster_end) memset(kaddr + to, 0, cluster_end - to); } else { memset(kaddr + cluster_start, 0, cluster_end - cluster_start); } kunmap_atomic(kaddr); } /* * Nonsparse file systems fully allocate before we get to the write * code. This prevents ocfs2_write() from tagging the write as an * allocating one, which means ocfs2_map_page_blocks() might try to * read-in the blocks at the tail of our file. Avoid reading them by * testing i_size against each block offset. */ static int ocfs2_should_read_blk(struct inode *inode, struct page *page, unsigned int block_start) { u64 offset = page_offset(page) + block_start; if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) return 1; if (i_size_read(inode) > offset) return 1; return 0; } /* * Some of this taken from __block_write_begin(). We already have our * mapping by now though, and the entire write will be allocating or * it won't, so not much need to use BH_New. * * This will also skip zeroing, which is handled externally. */ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new) { int ret = 0; struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; unsigned int block_end, block_start; unsigned int bsize = i_blocksize(inode); if (!page_has_buffers(page)) create_empty_buffers(page, bsize, 0); head = page_buffers(page); for (bh = head, block_start = 0; bh != head || !block_start; bh = bh->b_this_page, block_start += bsize) { block_end = block_start + bsize; clear_buffer_new(bh); /* * Ignore blocks outside of our i/o range - * they may belong to unallocated clusters. */ if (block_start >= to || block_end <= from) { if (PageUptodate(page)) set_buffer_uptodate(bh); continue; } /* * For an allocating write with cluster size >= page * size, we always write the entire page. */ if (new) set_buffer_new(bh); if (!buffer_mapped(bh)) { map_bh(bh, inode->i_sb, *p_blkno); clean_bdev_bh_alias(bh); } if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } else if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_new(bh) && ocfs2_should_read_blk(inode, page, block_start) && (block_start < from || block_end > to)) { ll_rw_block(REQ_OP_READ, 0, 1, &bh); *wait_bh++=bh; } *p_blkno = *p_blkno + 1; } /* * If we issued read requests - let them complete. */ while(wait_bh > wait) { wait_on_buffer(*--wait_bh); if (!buffer_uptodate(*wait_bh)) ret = -EIO; } if (ret == 0 || !new) return ret; /* * If we get -EIO above, zero out any newly allocated blocks * to avoid exposing stale data. 
*/ bh = head; block_start = 0; do { block_end = block_start + bsize; if (block_end <= from) goto next_bh; if (block_start >= to) break; zero_user(page, block_start, bh->b_size); set_buffer_uptodate(bh); mark_buffer_dirty(bh); next_bh: block_start = block_end; bh = bh->b_this_page; } while (bh != head); return ret; } #if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE) #define OCFS2_MAX_CTXT_PAGES 1 #else #define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE) #endif #define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE) struct ocfs2_unwritten_extent { struct list_head ue_node; struct list_head ue_ip_node; u32 ue_cpos; u32 ue_phys; }; /* * Describe the state of a single cluster to be written to. */ struct ocfs2_write_cluster_desc { u32 c_cpos; u32 c_phys; /* * Give this a unique field because c_phys eventually gets * filled. */ unsigned c_new; unsigned c_clear_unwritten; unsigned c_needs_zero; }; struct ocfs2_write_ctxt { /* Logical cluster position / len of write */ u32 w_cpos; u32 w_clen; /* First cluster allocated in a nonsparse extend */ u32 w_first_new_cpos; /* Type of caller. Must be one of buffer, mmap, direct. */ ocfs2_write_type_t w_type; struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; /* * This is true if page_size > cluster_size. * * It triggers a set of special cases during write which might * have to deal with allocating writes to partial pages. */ unsigned int w_large_pages; /* * Pages involved in this write. * * w_target_page is the page being written to by the user. * * w_pages is an array of pages which always contains * w_target_page, and in the case of an allocating write with * page_size < cluster size, it will contain zero'd and mapped * pages adjacent to w_target_page which need to be written * out in so that future reads from that region will get * zero's. */ unsigned int w_num_pages; struct page *w_pages[OCFS2_MAX_CTXT_PAGES]; struct page *w_target_page; /* * w_target_locked is used for page_mkwrite path indicating no unlocking * against w_target_page in ocfs2_write_end_nolock. */ unsigned int w_target_locked:1; /* * ocfs2_write_end() uses this to know what the real range to * write in the target should be. */ unsigned int w_target_from; unsigned int w_target_to; /* * We could use journal_current_handle() but this is cleaner, * IMHO -Mark */ handle_t *w_handle; struct buffer_head *w_di_bh; struct ocfs2_cached_dealloc_ctxt w_dealloc; struct list_head w_unwritten_list; }; void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) { int i; for(i = 0; i < num_pages; i++) { if (pages[i]) { unlock_page(pages[i]); mark_page_accessed(pages[i]); put_page(pages[i]); } } } static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) { int i; /* * w_target_locked is only set to true in the page_mkwrite() case. * The intent is to allow us to lock the target page from write_begin() * to write_end(). The caller must hold a ref on w_target_page. 
*/ if (wc->w_target_locked) { BUG_ON(!wc->w_target_page); for (i = 0; i < wc->w_num_pages; i++) { if (wc->w_target_page == wc->w_pages[i]) { wc->w_pages[i] = NULL; break; } } mark_page_accessed(wc->w_target_page); put_page(wc->w_target_page); } ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); } static void ocfs2_free_unwritten_list(struct inode *inode, struct list_head *head) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL; list_for_each_entry_safe(ue, tmp, head, ue_node) { list_del(&ue->ue_node); spin_lock(&oi->ip_lock); list_del(&ue->ue_ip_node); spin_unlock(&oi->ip_lock); kfree(ue); } } static void ocfs2_free_write_ctxt(struct inode *inode, struct ocfs2_write_ctxt *wc) { ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list); ocfs2_unlock_pages(wc); brelse(wc->w_di_bh); kfree(wc); } static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, struct ocfs2_super *osb, loff_t pos, unsigned len, ocfs2_write_type_t type, struct buffer_head *di_bh) { u32 cend; struct ocfs2_write_ctxt *wc; wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS); if (!wc) return -ENOMEM; wc->w_cpos = pos >> osb->s_clustersize_bits; wc->w_first_new_cpos = UINT_MAX; cend = (pos + len - 1) >> osb->s_clustersize_bits; wc->w_clen = cend - wc->w_cpos + 1; get_bh(di_bh); wc->w_di_bh = di_bh; wc->w_type = type; if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) wc->w_large_pages = 1; else wc->w_large_pages = 0; ocfs2_init_dealloc_ctxt(&wc->w_dealloc); INIT_LIST_HEAD(&wc->w_unwritten_list); *wcp = wc; return 0; } /* * If a page has any new buffers, zero them out here, and mark them uptodate * and dirty so they'll be written out (in order to prevent uninitialised * block data from leaking). And clear the new bit. */ static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to) { unsigned int block_start, block_end; struct buffer_head *head, *bh; BUG_ON(!PageLocked(page)); if (!page_has_buffers(page)) return; bh = head = page_buffers(page); block_start = 0; do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { if (!PageUptodate(page)) { unsigned start, end; start = max(from, block_start); end = min(to, block_end); zero_user_segment(page, start, end); set_buffer_uptodate(bh); } clear_buffer_new(bh); mark_buffer_dirty(bh); } } block_start = block_end; bh = bh->b_this_page; } while (bh != head); } /* * Only called when we have a failure during allocating write to write * zero's to the newly allocated region. 
*/ static void ocfs2_write_failure(struct inode *inode, struct ocfs2_write_ctxt *wc, loff_t user_pos, unsigned user_len) { int i; unsigned from = user_pos & (PAGE_SIZE - 1), to = user_pos + user_len; struct page *tmppage; if (wc->w_target_page) ocfs2_zero_new_buffers(wc->w_target_page, from, to); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; if (tmppage && page_has_buffers(tmppage)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(wc->w_handle, inode); block_commit_write(tmppage, from, to); } } } static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, struct ocfs2_write_ctxt *wc, struct page *page, u32 cpos, loff_t user_pos, unsigned user_len, int new) { int ret; unsigned int map_from = 0, map_to = 0; unsigned int cluster_start, cluster_end; unsigned int user_data_from = 0, user_data_to = 0; ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, &cluster_start, &cluster_end); /* treat the write as new if the a hole/lseek spanned across * the page boundary. */ new = new | ((i_size_read(inode) <= page_offset(page)) && (page_offset(page) <= user_pos)); if (page == wc->w_target_page) { map_from = user_pos & (PAGE_SIZE - 1); map_to = map_from + user_len; if (new) ret = ocfs2_map_page_blocks(page, p_blkno, inode, cluster_start, cluster_end, new); else ret = ocfs2_map_page_blocks(page, p_blkno, inode, map_from, map_to, new); if (ret) { mlog_errno(ret); goto out; } user_data_from = map_from; user_data_to = map_to; if (new) { map_from = cluster_start; map_to = cluster_end; } } else { /* * If we haven't allocated the new page yet, we * shouldn't be writing it out without copying user * data. This is likely a math error from the caller. */ BUG_ON(!new); map_from = cluster_start; map_to = cluster_end; ret = ocfs2_map_page_blocks(page, p_blkno, inode, cluster_start, cluster_end, new); if (ret) { mlog_errno(ret); goto out; } } /* * Parts of newly allocated pages need to be zero'd. * * Above, we have also rewritten 'to' and 'from' - as far as * the rest of the function is concerned, the entire cluster * range inside of a page needs to be written. * * We can skip this if the page is up to date - it's already * been zero'd from being read in as a hole. */ if (new && !PageUptodate(page)) ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb), cpos, user_data_from, user_data_to); flush_dcache_page(page); out: return ret; } /* * This function will only grab one clusters worth of pages. */ static int ocfs2_grab_pages_for_write(struct address_space *mapping, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len, int new, struct page *mmap_page) { int ret = 0, i; unsigned long start, target_index, end_index, index; struct inode *inode = mapping->host; loff_t last_byte; target_index = user_pos >> PAGE_SHIFT; /* * Figure out how many pages we'll be manipulating here. For * non allocating write, we just change the one * page. Otherwise, we'll need a whole clusters worth. If we're * writing past i_size, we only need enough pages to cover the * last page of the write. */ if (new) { wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb); start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); /* * We need the index *past* the last page we could possibly * touch. This is the page past the end of the write or * i_size, whichever is greater. 
*/ last_byte = max(user_pos + user_len, i_size_read(inode)); BUG_ON(last_byte < 1); end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1; if ((start + wc->w_num_pages) > end_index) wc->w_num_pages = end_index - start; } else { wc->w_num_pages = 1; start = target_index; } end_index = (user_pos + user_len - 1) >> PAGE_SHIFT; for(i = 0; i < wc->w_num_pages; i++) { index = start + i; if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_MMAP) { /* * ocfs2_pagemkwrite() is a little different * and wants us to directly use the page * passed in. */ lock_page(mmap_page); /* Exit and let the caller retry */ if (mmap_page->mapping != mapping) { WARN_ON(mmap_page->mapping); unlock_page(mmap_page); ret = -EAGAIN; goto out; } get_page(mmap_page); wc->w_pages[i] = mmap_page; wc->w_target_locked = true; } else if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_DIRECT) { /* Direct write has no mapping page. */ wc->w_pages[i] = NULL; continue; } else { wc->w_pages[i] = find_or_create_page(mapping, index, GFP_NOFS); if (!wc->w_pages[i]) { ret = -ENOMEM; mlog_errno(ret); goto out; } } wait_for_stable_page(wc->w_pages[i]); if (index == target_index) wc->w_target_page = wc->w_pages[i]; } out: if (ret) wc->w_target_locked = false; return ret; } /* * Prepare a single cluster for write one cluster into the file. */ static int ocfs2_write_cluster(struct address_space *mapping, u32 *phys, unsigned int new, unsigned int clear_unwritten, unsigned int should_zero, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len) { int ret, i; u64 p_blkno; struct inode *inode = mapping->host; struct ocfs2_extent_tree et; int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); if (new) { u32 tmp_pos; /* * This is safe to call with the page locks - it won't take * any additional semaphores or cluster locks. */ tmp_pos = cpos; ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode, &tmp_pos, 1, !clear_unwritten, wc->w_di_bh, wc->w_handle, data_ac, meta_ac, NULL); /* * This shouldn't happen because we must have already * calculated the correct meta data allocation required. The * internal tree allocation code should know how to increase * transaction credits itself. * * If need be, we could handle -EAGAIN for a * RESTART_TRANS here. */ mlog_bug_on_msg(ret == -EAGAIN, "Inode %llu: EAGAIN return during allocation.\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); if (ret < 0) { mlog_errno(ret); goto out; } } else if (clear_unwritten) { ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), wc->w_di_bh); ret = ocfs2_mark_extent_written(inode, &et, wc->w_handle, cpos, 1, *phys, meta_ac, &wc->w_dealloc); if (ret < 0) { mlog_errno(ret); goto out; } } /* * The only reason this should fail is due to an inability to * find the extent added. */ ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL); if (ret < 0) { mlog(ML_ERROR, "Get physical blkno failed for inode %llu, " "at logical cluster %u", (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); goto out; } BUG_ON(*phys == 0); p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys); if (!should_zero) p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1); for(i = 0; i < wc->w_num_pages; i++) { int tmpret; /* This is the direct io target page. 
*/ if (wc->w_pages[i] == NULL) { p_blkno++; continue; } tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc, wc->w_pages[i], cpos, user_pos, user_len, should_zero); if (tmpret) { mlog_errno(tmpret); if (ret == 0) ret = tmpret; } } /* * We only have cleanup to do in case of allocating write. */ if (ret && new) ocfs2_write_failure(inode, wc, user_pos, user_len); out: return ret; } static int ocfs2_write_cluster_by_desc(struct address_space *mapping, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct ocfs2_write_ctxt *wc, loff_t pos, unsigned len) { int ret, i; loff_t cluster_off; unsigned int local_len = len; struct ocfs2_write_cluster_desc *desc; struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb); for (i = 0; i < wc->w_clen; i++) { desc = &wc->w_desc[i]; /* * We have to make sure that the total write passed in * doesn't extend past a single cluster. */ local_len = len; cluster_off = pos & (osb->s_clustersize - 1); if ((cluster_off + local_len) > osb->s_clustersize) local_len = osb->s_clustersize - cluster_off; ret = ocfs2_write_cluster(mapping, &desc->c_phys, desc->c_new, desc->c_clear_unwritten, desc->c_needs_zero, data_ac, meta_ac, wc, desc->c_cpos, pos, local_len); if (ret) { mlog_errno(ret); goto out; } len -= local_len; pos += local_len; } ret = 0; out: return ret; } /* * ocfs2_write_end() wants to know which parts of the target page it * should complete the write on. It's easiest to compute them ahead of * time when a more complete view of the write is available. */ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb, struct ocfs2_write_ctxt *wc, loff_t pos, unsigned len, int alloc) { struct ocfs2_write_cluster_desc *desc; wc->w_target_from = pos & (PAGE_SIZE - 1); wc->w_target_to = wc->w_target_from + len; if (alloc == 0) return; /* * Allocating write - we may have different boundaries based * on page size and cluster size. * * NOTE: We can no longer compute one value from the other as * the actual write length and user provided length may be * different. */ if (wc->w_large_pages) { /* * We only care about the 1st and last cluster within * our range and whether they should be zero'd or not. Either * value may be extended out to the start/end of a * newly allocated cluster. */ desc = &wc->w_desc[0]; if (desc->c_needs_zero) ocfs2_figure_cluster_boundaries(osb, desc->c_cpos, &wc->w_target_from, NULL); desc = &wc->w_desc[wc->w_clen - 1]; if (desc->c_needs_zero) ocfs2_figure_cluster_boundaries(osb, desc->c_cpos, NULL, &wc->w_target_to); } else { wc->w_target_from = 0; wc->w_target_to = PAGE_SIZE; } } /* * Check if this extent is marked UNWRITTEN by direct io. If so, we don't need * to do the zeroing work, and we must not clear UNWRITTEN since it will be * cleared by the direct io procedure. * If this is a new extent allocated by direct io, we should mark it in * the ip_unwritten_list. */ static int ocfs2_unwritten_check(struct inode *inode, struct ocfs2_write_ctxt *wc, struct ocfs2_write_cluster_desc *desc) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_unwritten_extent *ue = NULL, *new = NULL; int ret = 0; if (!desc->c_needs_zero) return 0; retry: spin_lock(&oi->ip_lock); /* No need to zero, no matter whether the io is buffered or direct: * whoever marked the cluster for zeroing is doing the zeroing, and * will clear the unwritten flag after all cluster io has finished. 
*/ list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) { if (desc->c_cpos == ue->ue_cpos) { BUG_ON(desc->c_new); desc->c_needs_zero = 0; desc->c_clear_unwritten = 0; goto unlock; } } if (wc->w_type != OCFS2_WRITE_DIRECT) goto unlock; if (new == NULL) { spin_unlock(&oi->ip_lock); new = kmalloc(sizeof(struct ocfs2_unwritten_extent), GFP_NOFS); if (new == NULL) { ret = -ENOMEM; goto out; } goto retry; } /* This direct write will doing zero. */ new->ue_cpos = desc->c_cpos; new->ue_phys = desc->c_phys; desc->c_clear_unwritten = 0; list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list); list_add_tail(&new->ue_node, &wc->w_unwritten_list); new = NULL; unlock: spin_unlock(&oi->ip_lock); out: if (new) kfree(new); return ret; } /* * Populate each single-cluster write descriptor in the write context * with information about the i/o to be done. * * Returns the number of clusters that will have to be allocated, as * well as a worst case estimate of the number of extent records that * would have to be created during a write to an unwritten region. */ static int ocfs2_populate_write_desc(struct inode *inode, struct ocfs2_write_ctxt *wc, unsigned int *clusters_to_alloc, unsigned int *extents_to_split) { int ret; struct ocfs2_write_cluster_desc *desc; unsigned int num_clusters = 0; unsigned int ext_flags = 0; u32 phys = 0; int i; *clusters_to_alloc = 0; *extents_to_split = 0; for (i = 0; i < wc->w_clen; i++) { desc = &wc->w_desc[i]; desc->c_cpos = wc->w_cpos + i; if (num_clusters == 0) { /* * Need to look up the next extent record. */ ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys, &num_clusters, &ext_flags); if (ret) { mlog_errno(ret); goto out; } /* We should already CoW the refcountd extent. */ BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); /* * Assume worst case - that we're writing in * the middle of the extent. * * We can assume that the write proceeds from * left to right, in which case the extent * insert code is smart enough to coalesce the * next splits into the previous records created. */ if (ext_flags & OCFS2_EXT_UNWRITTEN) *extents_to_split = *extents_to_split + 2; } else if (phys) { /* * Only increment phys if it doesn't describe * a hole. */ phys++; } /* * If w_first_new_cpos is < UINT_MAX, we have a non-sparse * file that got extended. w_first_new_cpos tells us * where the newly allocated clusters are so we can * zero them. */ if (desc->c_cpos >= wc->w_first_new_cpos) { BUG_ON(phys == 0); desc->c_needs_zero = 1; } desc->c_phys = phys; if (phys == 0) { desc->c_new = 1; desc->c_needs_zero = 1; desc->c_clear_unwritten = 1; *clusters_to_alloc = *clusters_to_alloc + 1; } if (ext_flags & OCFS2_EXT_UNWRITTEN) { desc->c_clear_unwritten = 1; desc->c_needs_zero = 1; } ret = ocfs2_unwritten_check(inode, wc, desc); if (ret) { mlog_errno(ret); goto out; } num_clusters--; } ret = 0; out: return ret; } static int ocfs2_write_begin_inline(struct address_space *mapping, struct inode *inode, struct ocfs2_write_ctxt *wc) { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct page *page; handle_t *handle; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } page = find_or_create_page(mapping, 0, GFP_NOFS); if (!page) { ocfs2_commit_trans(osb, handle); ret = -ENOMEM; mlog_errno(ret); goto out; } /* * If we don't set w_num_pages then this page won't get unlocked * and freed on cleanup of the write context. 
*/ wc->w_pages[0] = wc->w_target_page = page; wc->w_num_pages = 1; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { ocfs2_commit_trans(osb, handle); mlog_errno(ret); goto out; } if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) ocfs2_set_inode_data_inline(inode, di); if (!PageUptodate(page)) { ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh); if (ret) { ocfs2_commit_trans(osb, handle); goto out; } } wc->w_handle = handle; out: return ret; } int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size) { struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; if (new_size <= le16_to_cpu(di->id2.i_data.id_count)) return 1; return 0; } static int ocfs2_try_to_write_inline_data(struct address_space *mapping, struct inode *inode, loff_t pos, unsigned len, struct page *mmap_page, struct ocfs2_write_ctxt *wc) { int ret, written = 0; loff_t end = pos + len; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = NULL; trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno, len, (unsigned long long)pos, oi->ip_dyn_features); /* * Handle inodes which already have inline data 1st. */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { if (mmap_page == NULL && ocfs2_size_fits_inline_data(wc->w_di_bh, end)) goto do_inline_write; /* * The write won't fit - we have to give this inode an * inline extent list now. */ ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh); if (ret) mlog_errno(ret); goto out; } /* * Check whether the inode can accept inline data. */ if (oi->ip_clusters != 0 || i_size_read(inode) != 0) return 0; /* * Check whether the write can fit. */ di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; if (mmap_page || end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) return 0; do_inline_write: ret = ocfs2_write_begin_inline(mapping, inode, wc); if (ret) { mlog_errno(ret); goto out; } /* * This signals to the caller that the data can be written * inline. */ written = 1; out: return written ? written : ret; } /* * This function only does anything for file systems which can't * handle sparse files. * * What we want to do here is fill in any hole between the current end * of allocation and the end of our write. That way the rest of the * write path can treat it as an non-allocating write, which has no * special case code for sparse/nonsparse files. */ static int ocfs2_expand_nonsparse_inode(struct inode *inode, struct buffer_head *di_bh, loff_t pos, unsigned len, struct ocfs2_write_ctxt *wc) { int ret; loff_t newsize = pos + len; BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); if (newsize <= i_size_read(inode)) return 0; ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos); if (ret) mlog_errno(ret); /* There is no wc if this is call from direct. 
*/ if (wc) wc->w_first_new_cpos = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)); return ret; } static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, loff_t pos) { int ret = 0; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); if (pos > i_size_read(inode)) ret = ocfs2_zero_extend(inode, di_bh, pos); return ret; } int ocfs2_write_begin_nolock(struct address_space *mapping, loff_t pos, unsigned len, ocfs2_write_type_t type, struct page **pagep, void **fsdata, struct buffer_head *di_bh, struct page *mmap_page) { int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0; struct ocfs2_write_ctxt *wc; struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle; struct ocfs2_extent_tree et; int try_free = 1, ret1; try_again: ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh); if (ret) { mlog_errno(ret); return ret; } if (ocfs2_supports_inline_data(osb)) { ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len, mmap_page, wc); if (ret == 1) { ret = 0; goto success; } if (ret < 0) { mlog_errno(ret); goto out; } } /* Direct io change i_size late, should not zero tail here. */ if (type != OCFS2_WRITE_DIRECT) { if (ocfs2_sparse_alloc(osb)) ret = ocfs2_zero_tail(inode, di_bh, pos); else ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len, wc); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_check_range_for_refcount(inode, pos, len); if (ret < 0) { mlog_errno(ret); goto out; } else if (ret == 1) { clusters_need = wc->w_clen; ret = ocfs2_refcount_cow(inode, di_bh, wc->w_cpos, wc->w_clen, UINT_MAX); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc, &extents_to_split); if (ret) { mlog_errno(ret); goto out; } clusters_need += clusters_to_alloc; di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; trace_ocfs2_write_begin_nolock( (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), pos, len, type, mmap_page, clusters_to_alloc, extents_to_split); /* * We set w_target_from, w_target_to here so that * ocfs2_write_end() knows which range in the target page to * write out. An allocation requires that we write the entire * cluster range. */ if (clusters_to_alloc || extents_to_split) { /* * XXX: We are stretching the limits of * ocfs2_lock_allocators(). It greatly over-estimates * the work to be done. */ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), wc->w_di_bh); ret = ocfs2_lock_allocators(inode, &et, clusters_to_alloc, extents_to_split, &data_ac, &meta_ac); if (ret) { mlog_errno(ret); goto out; } if (data_ac) data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list); } else if (type == OCFS2_WRITE_DIRECT) /* direct write needs not to start trans if no extents alloc. */ goto success; /* * We have to zero sparse allocated clusters, unwritten extent clusters, * and non-sparse clusters we just extended. For non-sparse writes, * we know zeros will only be needed in the first and/or last cluster. 
*/ if (wc->w_clen && (wc->w_desc[0].c_needs_zero || wc->w_desc[wc->w_clen - 1].c_needs_zero)) cluster_of_pages = 1; else cluster_of_pages = 0; ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } wc->w_handle = handle; if (clusters_to_alloc) { ret = dquot_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); if (ret) goto out_commit; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_quota; } /* * Fill our page array first. That way we've grabbed enough so * that we can zero and flush if we error after adding the * extent. */ ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, cluster_of_pages, mmap_page); if (ret && ret != -EAGAIN) { mlog_errno(ret); goto out_quota; } /* * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock * the target page. In this case, we exit with no error and no target * page. This will trigger the caller, page_mkwrite(), to re-try * the operation. */ if (ret == -EAGAIN) { BUG_ON(wc->w_target_page); ret = 0; goto out_quota; } ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, len); if (ret) { mlog_errno(ret); goto out_quota; } if (data_ac) ocfs2_free_alloc_context(data_ac); if (meta_ac) ocfs2_free_alloc_context(meta_ac); success: if (pagep) *pagep = wc->w_target_page; *fsdata = wc; return 0; out_quota: if (clusters_to_alloc) dquot_free_space(inode, ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); out_commit: ocfs2_commit_trans(osb, handle); out: /* * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(), * even in case of error here like ENOSPC and ENOMEM. So, we need * to unlock the target page manually to prevent deadlocks when * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED * to VM code. */ if (wc->w_target_locked) unlock_page(mmap_page); ocfs2_free_write_ctxt(inode, wc); if (data_ac) { ocfs2_free_alloc_context(data_ac); data_ac = NULL; } if (meta_ac) { ocfs2_free_alloc_context(meta_ac); meta_ac = NULL; } if (ret == -ENOSPC && try_free) { /* * Try to free some truncate log so that we can have enough * clusters to allocate. */ try_free = 0; ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need); if (ret1 == 1) goto try_again; if (ret1 < 0) mlog_errno(ret1); } return ret; } static int ocfs2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; struct buffer_head *di_bh = NULL; struct inode *inode = mapping->host; ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret) { mlog_errno(ret); return ret; } /* * Take alloc sem here to prevent concurrent lookups. That way * the mapping, zeroing and tree manipulation within * ocfs2_write() will be safe against ->readpage(). This * should also serve to lock out allocation from a shared * writeable region. 
*/ down_write(&OCFS2_I(inode)->ip_alloc_sem); ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER, pagep, fsdata, di_bh, NULL); if (ret) { mlog_errno(ret); goto out_fail; } brelse(di_bh); return 0; out_fail: up_write(&OCFS2_I(inode)->ip_alloc_sem); brelse(di_bh); ocfs2_inode_unlock(inode, 1); return ret; } static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, unsigned len, unsigned *copied, struct ocfs2_dinode *di, struct ocfs2_write_ctxt *wc) { void *kaddr; if (unlikely(*copied < len)) { if (!PageUptodate(wc->w_target_page)) { *copied = 0; return; } } kaddr = kmap_atomic(wc->w_target_page); memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); kunmap_atomic(kaddr); trace_ocfs2_write_end_inline( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)pos, *copied, le16_to_cpu(di->id2.i_data.id_count), le16_to_cpu(di->i_dyn_features)); } int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, void *fsdata) { int i, ret; unsigned from, to, start = pos & (PAGE_SIZE - 1); struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_write_ctxt *wc = fsdata; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle_t *handle = wc->w_handle; struct page *tmppage; BUG_ON(!list_empty(&wc->w_unwritten_list)); if (handle) { ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { copied = ret; mlog_errno(ret); goto out; } } if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { ocfs2_write_end_inline(inode, pos, len, &copied, di, wc); goto out_write_size; } if (unlikely(copied < len) && wc->w_target_page) { if (!PageUptodate(wc->w_target_page)) copied = 0; ocfs2_zero_new_buffers(wc->w_target_page, start+copied, start+len); } if (wc->w_target_page) flush_dcache_page(wc->w_target_page); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; /* This is the direct io target page. */ if (tmppage == NULL) continue; if (tmppage == wc->w_target_page) { from = wc->w_target_from; to = wc->w_target_to; BUG_ON(from > PAGE_SIZE || to > PAGE_SIZE || to < from); } else { /* * Pages adjacent to the target (if any) imply * a hole-filling write in which case we want * to flush their entire range. */ from = 0; to = PAGE_SIZE; } if (page_has_buffers(tmppage)) { if (handle && ocfs2_should_order_data(inode)) ocfs2_jbd2_file_inode(handle, inode); block_commit_write(tmppage, from, to); } } out_write_size: /* Direct io do not update i_size here. */ if (wc->w_type != OCFS2_WRITE_DIRECT) { pos += copied; if (pos > i_size_read(inode)) { i_size_write(inode, pos); mark_inode_dirty(inode); } inode->i_blocks = ocfs2_inode_sector_count(inode); di->i_size = cpu_to_le64((u64)i_size_read(inode)); inode->i_mtime = inode->i_ctime = current_time(inode); di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); ocfs2_update_inode_fsync_trans(handle, inode, 1); } if (handle) ocfs2_journal_dirty(handle, wc->w_di_bh); out: /* unlock pages before dealloc since it needs acquiring j_trans_barrier * lock, or it will cause a deadlock since journal commit threads holds * this lock and will ask for the page lock when flushing the data. * put it here to preserve the unlock order. 
*/ ocfs2_unlock_pages(wc); if (handle) ocfs2_commit_trans(osb, handle); ocfs2_run_deallocs(osb, &wc->w_dealloc); brelse(wc->w_di_bh); kfree(wc); return copied; } static int ocfs2_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int ret; struct inode *inode = mapping->host; ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata); up_write(&OCFS2_I(inode)->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); return ret; } struct ocfs2_dio_write_ctxt { struct list_head dw_zero_list; unsigned dw_zero_count; int dw_orphaned; pid_t dw_writer_pid; }; static struct ocfs2_dio_write_ctxt * ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc) { struct ocfs2_dio_write_ctxt *dwc = NULL; if (bh->b_private) return bh->b_private; dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS); if (dwc == NULL) return NULL; INIT_LIST_HEAD(&dwc->dw_zero_list); dwc->dw_zero_count = 0; dwc->dw_orphaned = 0; dwc->dw_writer_pid = task_pid_nr(current); bh->b_private = dwc; *alloc = 1; return dwc; } static void ocfs2_dio_free_write_ctx(struct inode *inode, struct ocfs2_dio_write_ctxt *dwc) { ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list); kfree(dwc); } /* * TODO: Make this into a generic get_blocks function. * * From do_direct_io in direct-io.c: * "So what we do is to permit the ->get_blocks function to populate * bh.b_size with the size of IO which is permitted at this offset and * this i_blkbits." * * This function is called directly from get_more_blocks in direct-io.c. * * called like this: dio->get_blocks(dio->inode, fs_startblk, * fs_count, map_bh, dio->rw == WRITE); */ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_write_ctxt *wc; struct ocfs2_write_cluster_desc *desc = NULL; struct ocfs2_dio_write_ctxt *dwc = NULL; struct buffer_head *di_bh = NULL; u64 p_blkno; loff_t pos = iblock << inode->i_sb->s_blocksize_bits; unsigned len, total_len = bh_result->b_size; int ret = 0, first_get_block = 0; len = osb->s_clustersize - (pos & (osb->s_clustersize - 1)); len = min(total_len, len); mlog(0, "get block of %lu at %llu:%u req %u\n", inode->i_ino, pos, len, total_len); /* * Because we need to change file size in ocfs2_dio_end_io_write(), or * we may need to add it to orphan dir. So can not fall to fast path * while file size will be changed. */ if (pos + total_len <= i_size_read(inode)) { /* This is the fast path for re-write. */ ret = ocfs2_lock_get_block(inode, iblock, bh_result, create); if (buffer_mapped(bh_result) && !buffer_new(bh_result) && ret == 0) goto out; /* Clear state set by ocfs2_get_block. */ bh_result->b_state = 0; } dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block); if (unlikely(dwc == NULL)) { ret = -ENOMEM; mlog_errno(ret); goto out; } if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) > ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) && !dwc->dw_orphaned) { /* * when we are going to alloc extents beyond file size, add the * inode to orphan dir, so we can recall those spaces when * system crashed during write. 
*/ ret = ocfs2_add_inode_to_orphan(osb, inode); if (ret < 0) { mlog_errno(ret); goto out; } dwc->dw_orphaned = 1; } ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret) { mlog_errno(ret); goto out; } down_write(&oi->ip_alloc_sem); if (first_get_block) { if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) ret = ocfs2_zero_tail(inode, di_bh, pos); else ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, total_len, NULL); if (ret < 0) { mlog_errno(ret); goto unlock; } } ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len, OCFS2_WRITE_DIRECT, NULL, (void **)&wc, di_bh, NULL); if (ret) { mlog_errno(ret); goto unlock; } desc = &wc->w_desc[0]; p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys); BUG_ON(p_blkno == 0); p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1); map_bh(bh_result, inode->i_sb, p_blkno); bh_result->b_size = len; if (desc->c_needs_zero) set_buffer_new(bh_result); /* May sleep in end_io. It should not happen in a irq context. So defer * it to dio work queue. */ set_buffer_defer_completion(bh_result); if (!list_empty(&wc->w_unwritten_list)) { struct ocfs2_unwritten_extent *ue = NULL; ue = list_first_entry(&wc->w_unwritten_list, struct ocfs2_unwritten_extent, ue_node); BUG_ON(ue->ue_cpos != desc->c_cpos); /* The physical address may be 0, fill it. */ ue->ue_phys = desc->c_phys; list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list); dwc->dw_zero_count++; } ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc); BUG_ON(ret != len); ret = 0; unlock: up_write(&oi->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); brelse(di_bh); out: if (ret < 0) ret = -EIO; return ret; } static int ocfs2_dio_end_io_write(struct inode *inode, struct ocfs2_dio_write_ctxt *dwc, loff_t offset, ssize_t bytes) { struct ocfs2_cached_dealloc_ctxt dealloc; struct ocfs2_extent_tree et; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_unwritten_extent *ue = NULL; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle = NULL; loff_t end = offset + bytes; int ret = 0, credits = 0, locked = 0; ocfs2_init_dealloc_ctxt(&dealloc); /* We do clear unwritten, delete orphan, change i_size here. If neither * of these happen, we can skip all this. */ if (list_empty(&dwc->dw_zero_list) && end <= i_size_read(inode) && !dwc->dw_orphaned) goto out; /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we * are in that context. */ if (dwc->dw_writer_pid != task_pid_nr(current)) { inode_lock(inode); locked = 1; } ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret < 0) { mlog_errno(ret); goto out; } down_write(&oi->ip_alloc_sem); /* Delete orphan before acquire i_mutex. */ if (dwc->dw_orphaned) { BUG_ON(dwc->dw_writer_pid != task_pid_nr(current)); end = end > i_size_read(inode) ? 
end : 0; ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, !!end, end); if (ret < 0) mlog_errno(ret); } di = (struct ocfs2_dinode *)di_bh->b_data; ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2, &data_ac, &meta_ac); if (ret) { mlog_errno(ret); goto unlock; } credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto unlock; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto commit; } list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) { ret = ocfs2_mark_extent_written(inode, &et, handle, ue->ue_cpos, 1, ue->ue_phys, meta_ac, &dealloc); if (ret < 0) { mlog_errno(ret); break; } } if (end > i_size_read(inode)) { ret = ocfs2_set_inode_size(handle, inode, di_bh, end); if (ret < 0) mlog_errno(ret); } commit: ocfs2_commit_trans(osb, handle); unlock: up_write(&oi->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); brelse(di_bh); out: if (data_ac) ocfs2_free_alloc_context(data_ac); if (meta_ac) ocfs2_free_alloc_context(meta_ac); ocfs2_run_deallocs(osb, &dealloc); if (locked) inode_unlock(inode); ocfs2_dio_free_write_ctx(inode, dwc); return ret; } /* * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're * particularly interested in the aio/dio case. We use the rw_lock DLM lock * to protect io on one node from truncation on another. */ static int ocfs2_dio_end_io(struct kiocb *iocb, loff_t offset, ssize_t bytes, void *private) { struct inode *inode = file_inode(iocb->ki_filp); int level; int ret = 0; /* this io's submitter should not have unlocked this before we could */ BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); if (bytes > 0 && private) ret = ocfs2_dio_end_io_write(inode, private, offset, bytes); ocfs2_iocb_clear_rw_locked(iocb); level = ocfs2_iocb_rw_locked_level(iocb); ocfs2_rw_unlock(inode, level); return ret; } static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); get_block_t *get_block; /* * Fallback to buffered I/O if we see an inode without * extents. */ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return 0; /* Fallback to buffered I/O if we do not support append dio. */ if (iocb->ki_pos + iter->count > i_size_read(inode) && !ocfs2_supports_append_dio(osb)) return 0; if (iov_iter_rw(iter) == READ) get_block = ocfs2_lock_get_block; else get_block = ocfs2_dio_wr_get_block; return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, get_block, ocfs2_dio_end_io, NULL, 0); } const struct address_space_operations ocfs2_aops = { .readpage = ocfs2_readpage, .readpages = ocfs2_readpages, .writepage = ocfs2_writepage, .write_begin = ocfs2_write_begin, .write_end = ocfs2_write_end, .bmap = ocfs2_bmap, .direct_IO = ocfs2_direct_IO, .invalidatepage = block_invalidatepage, .releasepage = ocfs2_releasepage, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, };
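/*
 * Editor's illustration (not part of the file above): when the page size
 * exceeds the cluster size, ocfs2_figure_cluster_boundaries() maps a logical
 * cluster number to the byte range that cluster occupies within its page.
 * The standalone userspace sketch below mirrors that arithmetic; the
 * 64 KiB page / 4 KiB cluster geometry is an assumed example, not an ocfs2
 * requirement. Compile with: cc bounds.c
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT		16u	/* 64 KiB pages (example) */
#define EX_PAGE_SIZE		(1u << EX_PAGE_SHIFT)
#define EX_CLUSTERSIZE_BITS	12u	/* 4 KiB clusters (example) */
#define EX_CLUSTERSIZE		(1u << EX_CLUSTERSIZE_BITS)

/* Same arithmetic as ocfs2_figure_cluster_boundaries() above. */
static void ex_cluster_bounds(uint32_t cpos, unsigned *start, unsigned *end)
{
	unsigned cluster_start = 0, cluster_end = EX_PAGE_SIZE;

	if (EX_PAGE_SHIFT > EX_CLUSTERSIZE_BITS) {
		/* clusters per page */
		unsigned cpp = 1u << (EX_PAGE_SHIFT - EX_CLUSTERSIZE_BITS);

		cluster_start = (cpos % cpp) << EX_CLUSTERSIZE_BITS;
		cluster_end = cluster_start + EX_CLUSTERSIZE;
	}
	*start = cluster_start;
	*end = cluster_end;
}

int main(void)
{
	uint32_t cpos;

	for (cpos = 0; cpos < 3; cpos++) {
		unsigned start, end;

		ex_cluster_bounds(cpos, &start, &end);
		printf("cluster %u -> bytes [%u, %u) of its page\n",
		       cpos, start, end);
	}
	return 0;
}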
/****************************************************************************** * emulate.c * * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. * * Copyright (c) 2005 Keir Fraser * * Linux coding style, mod r/m decoder, segment base fixes, real-mode * privileged instructions: * * Copyright (C) 2006 Qumranet * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 */ #include <linux/kvm_host.h> #include "kvm_cache_regs.h" #include <linux/module.h> #include <asm/kvm_emulate.h> #include <linux/stringify.h> #include "x86.h" #include "tss.h" /* * Operand types */ #define OpNone 0ull #define OpImplicit 1ull /* No generic decode */ #define OpReg 2ull /* Register */ #define OpMem 3ull /* Memory */ #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */ #define OpDI 5ull /* ES:DI/EDI/RDI */ #define OpMem64 6ull /* Memory, 64-bit */ #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */ #define OpDX 8ull /* DX register */ #define OpCL 9ull /* CL register (for shifts) */ #define OpImmByte 10ull /* 8-bit sign extended immediate */ #define OpOne 11ull /* Implied 1 */ #define OpImm 12ull /* Sign extended up to 32-bit immediate */ #define OpMem16 13ull /* Memory operand (16-bit). */ #define OpMem32 14ull /* Memory operand (32-bit). */ #define OpImmU 15ull /* Immediate operand, zero extended */ #define OpSI 16ull /* SI/ESI/RSI */ #define OpImmFAddr 17ull /* Immediate far address */ #define OpMemFAddr 18ull /* Far address in memory */ #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */ #define OpES 20ull /* ES */ #define OpCS 21ull /* CS */ #define OpSS 22ull /* SS */ #define OpDS 23ull /* DS */ #define OpFS 24ull /* FS */ #define OpGS 25ull /* GS */ #define OpMem8 26ull /* 8-bit zero extended memory operand */ #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */ #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */ #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */ #define OpBits 5 /* Width of operand field */ #define OpMask ((1ull << OpBits) - 1) /* * Opcode effective-address decode tables. * Note that we only emulate instructions that have at least one memory * operand (excluding implicit stack references). We assume that stack * references and instruction fetches will never occur in special memory * areas that require emulation. So, for example, 'mov <imm>,<reg>' need * not be handled. */ /* Operand sizes: 8-bit operands or specified/overridden size. */ #define ByteOp (1<<0) /* 8-bit operands. */ /* Destination operand type. */ #define DstShift 1 #define ImplicitOps (OpImplicit << DstShift) #define DstReg (OpReg << DstShift) #define DstMem (OpMem << DstShift) #define DstAcc (OpAcc << DstShift) #define DstDI (OpDI << DstShift) #define DstMem64 (OpMem64 << DstShift) #define DstImmUByte (OpImmUByte << DstShift) #define DstDX (OpDX << DstShift) #define DstAccLo (OpAccLo << DstShift) #define DstMask (OpMask << DstShift) /* Source operand type. 
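 * Like the destination field above, this holds one of the 5-bit Op* values;
 * with SrcShift == 6 and OpBits == 5 it occupies bits 6..10 of the flags
 * word, just below BitOp at bit 11.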
*/ #define SrcShift 6 #define SrcNone (OpNone << SrcShift) #define SrcReg (OpReg << SrcShift) #define SrcMem (OpMem << SrcShift) #define SrcMem16 (OpMem16 << SrcShift) #define SrcMem32 (OpMem32 << SrcShift) #define SrcImm (OpImm << SrcShift) #define SrcImmByte (OpImmByte << SrcShift) #define SrcOne (OpOne << SrcShift) #define SrcImmUByte (OpImmUByte << SrcShift) #define SrcImmU (OpImmU << SrcShift) #define SrcSI (OpSI << SrcShift) #define SrcXLat (OpXLat << SrcShift) #define SrcImmFAddr (OpImmFAddr << SrcShift) #define SrcMemFAddr (OpMemFAddr << SrcShift) #define SrcAcc (OpAcc << SrcShift) #define SrcImmU16 (OpImmU16 << SrcShift) #define SrcImm64 (OpImm64 << SrcShift) #define SrcDX (OpDX << SrcShift) #define SrcMem8 (OpMem8 << SrcShift) #define SrcAccHi (OpAccHi << SrcShift) #define SrcMask (OpMask << SrcShift) #define BitOp (1<<11) #define MemAbs (1<<12) /* Memory operand is absolute displacement */ #define String (1<<13) /* String instruction (rep capable) */ #define Stack (1<<14) /* Stack instruction (push/pop) */ #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ #define Escape (5<<15) /* Escape to coprocessor instruction */ #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */ #define Sse (1<<18) /* SSE Vector instruction */ /* Generic ModRM decode. */ #define ModRM (1<<19) /* Destination is only written; never read. */ #define Mov (1<<20) /* Misc flags */ #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */ #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ #define Undefined (1<<25) /* No Such Instruction */ #define Lock (1<<26) /* lock prefix is allowed for the instruction */ #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ #define No64 (1<<28) #define PageTable (1 << 29) /* instruction used to write page table */ #define NotImpl (1 << 30) /* instruction is not implemented */ /* Source 2 operand type */ #define Src2Shift (31) #define Src2None (OpNone << Src2Shift) #define Src2Mem (OpMem << Src2Shift) #define Src2CL (OpCL << Src2Shift) #define Src2ImmByte (OpImmByte << Src2Shift) #define Src2One (OpOne << Src2Shift) #define Src2Imm (OpImm << Src2Shift) #define Src2ES (OpES << Src2Shift) #define Src2CS (OpCS << Src2Shift) #define Src2SS (OpSS << Src2Shift) #define Src2DS (OpDS << Src2Shift) #define Src2FS (OpFS << Src2Shift) #define Src2GS (OpGS << Src2Shift) #define Src2Mask (OpMask << Src2Shift) #define Mmx ((u64)1 << 40) /* MMX Vector instruction */ #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */ #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. 
MOVDQU) */ #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */ #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ #define NoWrite ((u64)1 << 45) /* No writeback */ #define SrcWrite ((u64)1 << 46) /* Write back src operand */ #define NoMod ((u64)1 << 47) /* Mod field is ignored */ #define Intercept ((u64)1 << 48) /* Has valid intercept field */ #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */ #define NoBigReal ((u64)1 << 50) /* No big real mode */ #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */ #define NearBranch ((u64)1 << 52) /* Near branches */ #define No16 ((u64)1 << 53) /* No 16 bit operand */ #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) #define X2(x...) x, x #define X3(x...) X2(x), x #define X4(x...) X2(x), X2(x) #define X5(x...) X4(x), x #define X6(x...) X4(x), X2(x) #define X7(x...) X4(x), X3(x) #define X8(x...) X4(x), X4(x) #define X16(x...) X8(x), X8(x) #define NR_FASTOP (ilog2(sizeof(ulong)) + 1) #define FASTOP_SIZE 8 /* * fastop functions have a special calling convention: * * dst: rax (in/out) * src: rdx (in/out) * src2: rcx (in) * flags: rflags (in/out) * ex: rsi (in:fastop pointer, out:zero if exception) * * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for * different operand sizes can be reached by calculation, rather than a jump * table (which would be bigger than the code). * * fastop functions are declared as taking a never-defined fastop parameter, * so they can't be called from C directly. */ struct fastop; struct opcode { u64 flags : 56; u64 intercept : 8; union { int (*execute)(struct x86_emulate_ctxt *ctxt); const struct opcode *group; const struct group_dual *gdual; const struct gprefix *gprefix; const struct escape *esc; const struct instr_dual *idual; void (*fastop)(struct fastop *fake); } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); }; struct group_dual { struct opcode mod012[8]; struct opcode mod3[8]; }; struct gprefix { struct opcode pfx_no; struct opcode pfx_66; struct opcode pfx_f2; struct opcode pfx_f3; }; struct escape { struct opcode op[8]; struct opcode high[64]; }; struct instr_dual { struct opcode mod012; struct opcode mod3; }; /* EFLAGS bit definitions. 
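 * Bit positions follow the architectural EFLAGS layout: CF at bit 0, the
 * always-one reserved bit at bit 1 (EFLG_RESERVED_ONE_MASK below), up to
 * ID at bit 21.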
*/ #define EFLG_ID (1<<21) #define EFLG_VIP (1<<20) #define EFLG_VIF (1<<19) #define EFLG_AC (1<<18) #define EFLG_VM (1<<17) #define EFLG_RF (1<<16) #define EFLG_IOPL (3<<12) #define EFLG_NT (1<<14) #define EFLG_OF (1<<11) #define EFLG_DF (1<<10) #define EFLG_IF (1<<9) #define EFLG_TF (1<<8) #define EFLG_SF (1<<7) #define EFLG_ZF (1<<6) #define EFLG_AF (1<<4) #define EFLG_PF (1<<2) #define EFLG_CF (1<<0) #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a #define EFLG_RESERVED_ONE_MASK 2 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) { if (!(ctxt->regs_valid & (1 << nr))) { ctxt->regs_valid |= 1 << nr; ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); } return ctxt->_regs[nr]; } static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) { ctxt->regs_valid |= 1 << nr; ctxt->regs_dirty |= 1 << nr; return &ctxt->_regs[nr]; } static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr) { reg_read(ctxt, nr); return reg_write(ctxt, nr); } static void writeback_registers(struct x86_emulate_ctxt *ctxt) { unsigned reg; for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16) ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]); } static void invalidate_registers(struct x86_emulate_ctxt *ctxt) { ctxt->regs_dirty = 0; ctxt->regs_valid = 0; } /* * These EFLAGS bits are restored from saved value during emulation, and * any changes are written back to the saved value after emulation. */ #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) #ifdef CONFIG_X86_64 #define ON64(x) x #else #define ON64(x) #endif static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t" #define FOP_RET "ret \n\t" #define FOP_START(op) \ extern void em_##op(struct fastop *fake); \ asm(".pushsection .text, \"ax\" \n\t" \ ".global em_" #op " \n\t" \ FOP_ALIGN \ "em_" #op ": \n\t" #define FOP_END \ ".popsection") #define FOPNOP() FOP_ALIGN FOP_RET #define FOP1E(op, dst) \ FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET #define FOP1EEX(op, dst) \ FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception) #define FASTOP1(op) \ FOP_START(op) \ FOP1E(op##b, al) \ FOP1E(op##w, ax) \ FOP1E(op##l, eax) \ ON64(FOP1E(op##q, rax)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m) */ #define FASTOP1SRC2(op, name) \ FOP_START(name) \ FOP1E(op, cl) \ FOP1E(op, cx) \ FOP1E(op, ecx) \ ON64(FOP1E(op, rcx)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */ #define FASTOP1SRC2EX(op, name) \ FOP_START(name) \ FOP1EEX(op, cl) \ FOP1EEX(op, cx) \ FOP1EEX(op, ecx) \ ON64(FOP1EEX(op, rcx)) \ FOP_END #define FOP2E(op, dst, src) \ FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET #define FASTOP2(op) \ FOP_START(op) \ FOP2E(op##b, al, dl) \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, word only */ #define FASTOP2W(op) \ FOP_START(op) \ FOPNOP() \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, src is CL */ #define FASTOP2CL(op) \ FOP_START(op) \ FOP2E(op##b, al, cl) \ FOP2E(op##w, ax, cl) \ FOP2E(op##l, eax, cl) \ ON64(FOP2E(op##q, rax, cl)) \ FOP_END /* 2 operand, src and dest are reversed */ #define FASTOP2R(op, name) \ FOP_START(name) \ FOP2E(op##b, dl, al) \ FOP2E(op##w, dx, ax) \ FOP2E(op##l, edx, eax) \ ON64(FOP2E(op##q, rdx, rax)) \ FOP_END #define FOP3E(op, dst, src, src2) \ FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET /* 3-operand, word-only, src2=cl */ #define FASTOP3WCL(op) \ 
FOP_START(op) \ FOPNOP() \ FOP3E(op##w, ax, dx, cl) \ FOP3E(op##l, eax, edx, cl) \ ON64(FOP3E(op##q, rax, rdx, cl)) \ FOP_END /* Special case for SETcc - 1 instruction per cc */ #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t" asm(".global kvm_fastop_exception \n" "kvm_fastop_exception: xor %esi, %esi; ret"); FOP_START(setcc) FOP_SETCC(seto) FOP_SETCC(setno) FOP_SETCC(setc) FOP_SETCC(setnc) FOP_SETCC(setz) FOP_SETCC(setnz) FOP_SETCC(setbe) FOP_SETCC(setnbe) FOP_SETCC(sets) FOP_SETCC(setns) FOP_SETCC(setp) FOP_SETCC(setnp) FOP_SETCC(setl) FOP_SETCC(setnl) FOP_SETCC(setle) FOP_SETCC(setnle) FOP_END; FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET FOP_END; static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, enum x86_intercept intercept, enum x86_intercept_stage stage) { struct x86_instruction_info info = { .intercept = intercept, .rep_prefix = ctxt->rep_prefix, .modrm_mod = ctxt->modrm_mod, .modrm_reg = ctxt->modrm_reg, .modrm_rm = ctxt->modrm_rm, .src_val = ctxt->src.val64, .dst_val = ctxt->dst.val64, .src_bytes = ctxt->src.bytes, .dst_bytes = ctxt->dst.bytes, .ad_bytes = ctxt->ad_bytes, .next_rip = ctxt->eip, }; return ctxt->ops->intercept(ctxt, &info, stage); } static void assign_masked(ulong *dest, ulong src, ulong mask) { *dest = (*dest & ~mask) | (src & mask); } static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) { return (1UL << (ctxt->ad_bytes << 3)) - 1; } static ulong stack_mask(struct x86_emulate_ctxt *ctxt) { u16 sel; struct desc_struct ss; if (ctxt->mode == X86EMUL_MODE_PROT64) return ~0UL; ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS); return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ } static int stack_size(struct x86_emulate_ctxt *ctxt) { return (__fls(stack_mask(ctxt)) + 1) >> 3; } /* Access/update address held in a register, based on addressing mode. */ static inline unsigned long address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) { if (ctxt->ad_bytes == sizeof(unsigned long)) return reg; else return reg & ad_mask(ctxt); } static inline unsigned long register_address(struct x86_emulate_ctxt *ctxt, int reg) { return address_mask(ctxt, reg_read(ctxt, reg)); } static void masked_increment(ulong *reg, ulong mask, int inc) { assign_masked(reg, *reg + inc, mask); } static inline void register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc) { ulong mask; if (ctxt->ad_bytes == sizeof(unsigned long)) mask = ~0UL; else mask = ad_mask(ctxt); masked_increment(reg_rmw(ctxt, reg), mask, inc); } static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) { masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); } static u32 desc_limit_scaled(struct desc_struct *desc) { u32 limit = get_desc_limit(desc); return desc->g ? 
(limit << 12) | 0xfff : limit; } static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) { if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) return 0; return ctxt->ops->get_cached_segment_base(ctxt, seg); } static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, u32 error, bool valid) { WARN_ON(vec > 0x1f); ctxt->exception.vector = vec; ctxt->exception.error_code = error; ctxt->exception.error_code_valid = valid; return X86EMUL_PROPAGATE_FAULT; } static int emulate_db(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DB_VECTOR, 0, false); } static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, GP_VECTOR, err, true); } static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, SS_VECTOR, err, true); } static int emulate_ud(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, UD_VECTOR, 0, false); } static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, TS_VECTOR, err, true); } static int emulate_de(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DE_VECTOR, 0, false); } static int emulate_nm(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, NM_VECTOR, 0, false); } static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) { u16 selector; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); return selector; } static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, unsigned seg) { u16 dummy; u32 base3; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); } /* * x86 defines three classes of vector instructions: explicitly * aligned, explicitly unaligned, and the rest, which change behaviour * depending on whether they're AVX encoded or not. * * Also included is CMPXCHG16B which is not a vector instruction, yet it is * subject to the same check. */ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) { if (likely(size < 16)) return false; if (ctxt->d & Aligned) return true; else if (ctxt->d & Unaligned) return false; else if (ctxt->d & Avx) return false; else return true; } static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned *max_size, unsigned size, bool write, bool fetch, enum x86emul_mode mode, ulong *linear) { struct desc_struct desc; bool usable; ulong la; u32 lim; u16 sel; la = seg_base(ctxt, addr.seg) + addr.ea; *max_size = 0; switch (mode) { case X86EMUL_MODE_PROT64: if (is_noncanonical_address(la)) goto bad; *max_size = min_t(u64, ~0u, (1ull << 48) - la); if (size > *max_size) goto bad; break; default: usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, addr.seg); if (!usable) goto bad; /* code segment in protected mode or read-only data segment */ if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) && write) goto bad; /* unreadable code segment */ if (!fetch && (desc.type & 8) && !(desc.type & 2)) goto bad; lim = desc_limit_scaled(&desc); if (!(desc.type & 8) && (desc.type & 4)) { /* expand-down segment */ if (addr.ea <= lim) goto bad; lim = desc.d ? 
0xffffffff : 0xffff; } if (addr.ea > lim) goto bad; *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea); if (size > *max_size) goto bad; la &= (u32)-1; break; } if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) return emulate_gp(ctxt, 0); *linear = la; return X86EMUL_CONTINUE; bad: if (addr.seg == VCPU_SREG_SS) return emulate_ss(ctxt, 0); else return emulate_gp(ctxt, 0); } static int linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned size, bool write, ulong *linear) { unsigned max_size; return __linearize(ctxt, addr, &max_size, size, write, false, ctxt->mode, linear); } static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, enum x86emul_mode mode) { ulong linear; int rc; unsigned max_size; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = dst }; if (ctxt->op_bytes != sizeof(unsigned long)) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear); if (rc == X86EMUL_CONTINUE) ctxt->_eip = addr.ea; return rc; } static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) { return assign_eip(ctxt, dst, ctxt->mode); } static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, const struct desc_struct *cs_desc) { enum x86emul_mode mode = ctxt->mode; #ifdef CONFIG_X86_64 if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) mode = X86EMUL_MODE_PROT64; } #endif if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32) mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; return assign_eip(ctxt, dst, mode); } static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) { return assign_eip_near(ctxt, ctxt->_eip + rel); } static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); } /* * Prefetch the remaining bytes of the instruction without crossing page * boundary if they are not in fetch_cache yet. */ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) { int rc; unsigned size, max_size; unsigned long linear; int cur_size = ctxt->fetch.end - ctxt->fetch.data; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = ctxt->eip + cur_size }; /* * We do not know exactly how many bytes will be needed, and * __linearize is expensive, so fetch as much as possible. We * just have to avoid going beyond the 15 byte limit, the end * of the segment, or the end of the page. * * __linearize is called with size 0 so that it does not do any * boundary check itself. Instead, we use max_size to check * against op_size. */ rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode, &linear); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; size = min_t(unsigned, 15UL ^ cur_size, max_size); size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); /* * One instruction can only straddle two pages, * and one has been loaded at the beginning of * x86_decode_insn. So, if not enough bytes * still, we must have hit the 15-byte boundary. 
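 * (Since cur_size never exceeds 15 here, "15UL ^ cur_size" is simply
 * 15 - cur_size, i.e. the number of bytes still allowed under the 15-byte
 * instruction-length limit.)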
*/ if (unlikely(size < op_size)) return emulate_gp(ctxt, 0); rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, size, &ctxt->exception); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; ctxt->fetch.end += size; return X86EMUL_CONTINUE; } static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, unsigned size) { unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr; if (unlikely(done_size < size)) return __do_insn_fetch_bytes(ctxt, size - done_size); else return X86EMUL_CONTINUE; } /* Fetch next part of the instruction being emulated. */ #define insn_fetch(_type, _ctxt) \ ({ _type _x; \ \ rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += sizeof(_type); \ _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \ ctxt->fetch.ptr += sizeof(_type); \ _x; \ }) #define insn_fetch_arr(_arr, _size, _ctxt) \ ({ \ rc = do_insn_fetch_bytes(_ctxt, _size); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += (_size); \ memcpy(_arr, ctxt->fetch.ptr, _size); \ ctxt->fetch.ptr += (_size); \ }) /* * Given the 'reg' portion of a ModRM byte, and a register block, return a * pointer into the block that addresses the relevant register. * @highbyte_regs specifies whether to decode AH,CH,DH,BH. */ static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, int byteop) { void *p; int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; else p = reg_rmw(ctxt, modrm_reg); return p; } static int read_descriptor(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, u16 *size, unsigned long *address, int op_bytes) { int rc; if (op_bytes == 2) op_bytes = 3; *address = 0; rc = segmented_read_std(ctxt, addr, size, 2); if (rc != X86EMUL_CONTINUE) return rc; addr.ea += 2; rc = segmented_read_std(ctxt, addr, address, op_bytes); return rc; } FASTOP2(add); FASTOP2(or); FASTOP2(adc); FASTOP2(sbb); FASTOP2(and); FASTOP2(sub); FASTOP2(xor); FASTOP2(cmp); FASTOP2(test); FASTOP1SRC2(mul, mul_ex); FASTOP1SRC2(imul, imul_ex); FASTOP1SRC2EX(div, div_ex); FASTOP1SRC2EX(idiv, idiv_ex); FASTOP3WCL(shld); FASTOP3WCL(shrd); FASTOP2W(imul); FASTOP1(not); FASTOP1(neg); FASTOP1(inc); FASTOP1(dec); FASTOP2CL(rol); FASTOP2CL(ror); FASTOP2CL(rcl); FASTOP2CL(rcr); FASTOP2CL(shl); FASTOP2CL(shr); FASTOP2CL(sar); FASTOP2W(bsf); FASTOP2W(bsr); FASTOP2W(bt); FASTOP2W(bts); FASTOP2W(btr); FASTOP2W(btc); FASTOP2(xadd); FASTOP2R(cmp, cmp_r); static u8 test_cc(unsigned int condition, unsigned long flags) { u8 rc; void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; asm("push %[flags]; popf; call *%[fastop]" : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); return rc; } static void fetch_register_operand(struct operand *op) { switch (op->bytes) { case 1: op->val = *(u8 *)op->addr.reg; break; case 2: op->val = *(u16 *)op->addr.reg; break; case 4: op->val = *(u32 *)op->addr.reg; break; case 8: op->val = *(u64 *)op->addr.reg; break; } } static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break; case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break; case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break; case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break; case 6: asm("movdqa 
%%xmm6, %0" : "=m"(*data)); break; case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break; case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break; case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break; case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break; case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break; case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break; case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break; case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break; case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break; case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break; case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break; case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break; case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break; case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break; case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break; case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break; case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break; case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break; case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break; case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; case 2: asm("movq %%mm2, %0" : "=m"(*data)); break; case 3: asm("movq %%mm3, %0" : "=m"(*data)); break; case 4: asm("movq %%mm4, %0" : "=m"(*data)); break; case 5: asm("movq %%mm5, %0" : "=m"(*data)); break; case 6: asm("movq %%mm6, %0" : "=m"(*data)); break; case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; case 2: asm("movq %0, %%mm2" : : "m"(*data)); break; case 3: asm("movq %0, %%mm3" : : "m"(*data)); break; case 4: asm("movq %0, %%mm4" : : "m"(*data)); break; case 5: asm("movq %0, %%mm5" : : "m"(*data)); break; case 6: asm("movq %0, %%mm6" : : "m"(*data)); break; case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static int em_fninit(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fninit"); ctxt->ops->put_fpu(ctxt); return X86EMUL_CONTINUE; } static int em_fnstcw(struct x86_emulate_ctxt *ctxt) { u16 fcw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstcw %0": "+m"(fcw)); ctxt->ops->put_fpu(ctxt); /* force 2 byte destination */ ctxt->dst.bytes = 2; ctxt->dst.val = fcw; return X86EMUL_CONTINUE; } static int em_fnstsw(struct x86_emulate_ctxt *ctxt) { u16 fsw; if 
(ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstsw %0": "+m"(fsw)); ctxt->ops->put_fpu(ctxt); /* force 2 byte destination */ ctxt->dst.bytes = 2; ctxt->dst.val = fsw; return X86EMUL_CONTINUE; } static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { unsigned reg = ctxt->modrm_reg; if (!(ctxt->d & ModRM)) reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = reg; read_sse_reg(ctxt, &op->vec_val, reg); return; } if (ctxt->d & Mmx) { reg &= 7; op->type = OP_MM; op->bytes = 8; op->addr.mm = reg; return; } op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp); fetch_register_operand(op); op->orig_val = op->val; } static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg) { if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP) ctxt->modrm_seg = VCPU_SREG_SS; } static int decode_modrm(struct x86_emulate_ctxt *ctxt, struct operand *op) { u8 sib; int index_reg, base_reg, scale; int rc = X86EMUL_CONTINUE; ulong modrm_ea = 0; ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); ctxt->modrm_seg = VCPU_SREG_DS; if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = ctxt->modrm_rm; read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); return rc; } if (ctxt->d & Mmx) { op->type = OP_MM; op->bytes = 8; op->addr.mm = ctxt->modrm_rm & 7; return rc; } fetch_register_operand(op); return rc; } op->type = OP_MEM; if (ctxt->ad_bytes == 2) { unsigned bx = reg_read(ctxt, VCPU_REGS_RBX); unsigned bp = reg_read(ctxt, VCPU_REGS_RBP); unsigned si = reg_read(ctxt, VCPU_REGS_RSI); unsigned di = reg_read(ctxt, VCPU_REGS_RDI); /* 16-bit ModR/M decode. */ switch (ctxt->modrm_mod) { case 0: if (ctxt->modrm_rm == 6) modrm_ea += insn_fetch(u16, ctxt); break; case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(u16, ctxt); break; } switch (ctxt->modrm_rm) { case 0: modrm_ea += bx + si; break; case 1: modrm_ea += bx + di; break; case 2: modrm_ea += bp + si; break; case 3: modrm_ea += bp + di; break; case 4: modrm_ea += si; break; case 5: modrm_ea += di; break; case 6: if (ctxt->modrm_mod != 0) modrm_ea += bp; break; case 7: modrm_ea += bx; break; } if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) ctxt->modrm_seg = VCPU_SREG_SS; modrm_ea = (u16)modrm_ea; } else { /* 32/64-bit ModR/M decode. 
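 * An r/m field of 4 means a SIB byte follows: scale in bits 7:6, index in
 * bits 5:3 and base in bits 2:0, which is what the first branch below
 * unpacks.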
*/ if ((ctxt->modrm_rm & 7) == 4) { sib = insn_fetch(u8, ctxt); index_reg |= (sib >> 3) & 7; base_reg |= sib & 7; scale = sib >> 6; if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) modrm_ea += insn_fetch(s32, ctxt); else { modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } if (index_reg != 4) modrm_ea += reg_read(ctxt, index_reg) << scale; } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { modrm_ea += insn_fetch(s32, ctxt); if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->rip_relative = 1; } else { base_reg = ctxt->modrm_rm; modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } switch (ctxt->modrm_mod) { case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(s32, ctxt); break; } } op->addr.mem.ea = modrm_ea; if (ctxt->ad_bytes != 8) ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; done: return rc; } static int decode_abs(struct x86_emulate_ctxt *ctxt, struct operand *op) { int rc = X86EMUL_CONTINUE; op->type = OP_MEM; switch (ctxt->ad_bytes) { case 2: op->addr.mem.ea = insn_fetch(u16, ctxt); break; case 4: op->addr.mem.ea = insn_fetch(u32, ctxt); break; case 8: op->addr.mem.ea = insn_fetch(u64, ctxt); break; } done: return rc; } static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) { long sv = 0, mask; if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { mask = ~((long)ctxt->dst.bytes * 8 - 1); if (ctxt->src.bytes == 2) sv = (s16)ctxt->src.val & (s16)mask; else if (ctxt->src.bytes == 4) sv = (s32)ctxt->src.val & (s32)mask; else sv = (s64)ctxt->src.val & (s64)mask; ctxt->dst.addr.mem.ea = address_mask(ctxt, ctxt->dst.addr.mem.ea + (sv >> 3)); } /* only subword offset */ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; } static int read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *dest, unsigned size) { int rc; struct read_cache *mc = &ctxt->mem_read; if (mc->pos < mc->end) goto read_cached; WARN_ON((mc->end + size) >= sizeof(mc->data)); rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; mc->end += size; read_cached: memcpy(dest, mc->data + mc->pos, size); mc->pos += size; return X86EMUL_CONTINUE; } static int segmented_read(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return read_emulated(ctxt, linear, data, size); } static int segmented_write(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->write_emulated(ctxt, linear, data, size, &ctxt->exception); } static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *orig_data, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, size, &ctxt->exception); } static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, unsigned int size, unsigned short port, void *dest) { struct read_cache *rc = &ctxt->io_read; if (rc->pos == rc->end) { /* refill pio read ahead */ unsigned int in_page, n; unsigned int count = ctxt->rep_prefix ? address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; in_page = (ctxt->eflags & EFLG_DF) ? 
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); if (n == 0) n = 1; rc->pos = rc->end = 0; if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) return 0; rc->end = n * size; } if (ctxt->rep_prefix && (ctxt->d & String) && !(ctxt->eflags & EFLG_DF)) { ctxt->dst.data = rc->data + rc->pos; ctxt->dst.type = OP_MEM_STR; ctxt->dst.count = (rc->end - rc->pos) / size; rc->pos = rc->end; } else { memcpy(dest, rc->data + rc->pos, size); rc->pos += size; } return 1; } static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, u16 index, struct desc_struct *desc) { struct desc_ptr dt; ulong addr; ctxt->ops->get_idt(ctxt, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_ptr *dt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 base3 = 0; if (selector & 1 << 2) { struct desc_struct desc; u16 sel; memset (dt, 0, sizeof *dt); if (!ops->get_segment(ctxt, &sel, &desc, &base3, VCPU_SREG_LDTR)) return; dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ dt->address = get_desc_base(&desc) | ((u64)base3 << 32); } else ops->get_gdt(ctxt, dt); } /* allowed just for 8 bytes segments */ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, ulong *desc_addr_p) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); *desc_addr_p = addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } /* allowed just for 8 bytes segments */ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); addr = dt.address + index * 8; return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } /* Does not support long mode */ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, bool in_task_switch, struct desc_struct *desc) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ ulong desc_addr; int ret; u16 dummy; u32 base3 = 0; memset(&seg_desc, 0, sizeof seg_desc); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. 
for * unreal mode) */ ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); set_desc_base(&seg_desc, selector << 4); goto load; } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { /* VM86 needs a clean new segment descriptor */ set_desc_base(&seg_desc, selector << 4); set_desc_limit(&seg_desc, 0xffff); seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = 3; goto load; } rpl = selector & 3; /* NULL selector is not valid for TR, CS and SS (except for long mode) */ if ((seg == VCPU_SREG_CS || (seg == VCPU_SREG_SS && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) || seg == VCPU_SREG_TR) && null_selector) goto exception; /* TR should be in GDT only */ if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; if (null_selector) /* for NULL selector skip all following checks */ goto load; ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; err_code = selector & 0xfffc; err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR; /* can't load system descriptor into segment selector */ if (seg <= VCPU_SREG_GS && !seg_desc.s) goto exception; if (!seg_desc.p) { err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; goto exception; } dpl = seg_desc.dpl; switch (seg) { case VCPU_SREG_SS: /* * segment is not a writable data segment or segment * selector's RPL != CPL or segment selector's RPL != CPL */ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) goto exception; break; case VCPU_SREG_CS: if (!(seg_desc.type & 8)) goto exception; if (seg_desc.type & 4) { /* conforming */ if (dpl > cpl) goto exception; } else { /* nonconforming */ if (rpl > cpl || dpl != cpl) goto exception; } /* in long-mode d/b must be clear if l is set */ if (seg_desc.d && seg_desc.l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) goto exception; } /* CS(RPL) <- CPL */ selector = (selector & 0xfffc) | cpl; break; case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, sizeof(seg_desc), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) goto exception; break; default: /* DS, ES, FS, or GS */ /* * segment is not a data or readable code segment or * ((segment is a data or nonconforming code segment) * and (both RPL and CPL > DPL)) */ if ((seg_desc.type & 0xa) == 0x8 || (((seg_desc.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl))) goto exception; break; } if (seg_desc.s) { /* mark segment as accessed */ seg_desc.type |= 1; ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } else if (ctxt->mode == X86EMUL_MODE_PROT64) { ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, sizeof(base3), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; if (is_noncanonical_address(get_desc_base(&seg_desc) | ((u64)base3 << 32))) return emulate_gp(ctxt, 0); } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); if (desc) *desc = seg_desc; return X86EMUL_CONTINUE; exception: return emulate_exception(ctxt, err_vec, err_code, true); } static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg) { u8 cpl = ctxt->ops->cpl(ctxt); return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL); } static void write_register_operand(struct operand *op) { /* The 4-byte case *is* 
correct: in 64-bit mode we zero-extend. */ switch (op->bytes) { case 1: *(u8 *)op->addr.reg = (u8)op->val; break; case 2: *(u16 *)op->addr.reg = (u16)op->val; break; case 4: *op->addr.reg = (u32)op->val; break; /* 64b: zero-extend */ case 8: *op->addr.reg = op->val; break; } } static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) { switch (op->type) { case OP_REG: write_register_operand(op); break; case OP_MEM: if (ctxt->lock_prefix) return segmented_cmpxchg(ctxt, op->addr.mem, &op->orig_val, &op->val, op->bytes); else return segmented_write(ctxt, op->addr.mem, &op->val, op->bytes); break; case OP_MEM_STR: return segmented_write(ctxt, op->addr.mem, op->data, op->bytes * op->count); break; case OP_XMM: write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); break; case OP_MM: write_mmx_reg(ctxt, &op->mm_val, op->addr.mm); break; case OP_NONE: /* no writeback */ break; default: break; } return X86EMUL_CONTINUE; } static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) { struct segmented_address addr; rsp_increment(ctxt, -bytes); addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; return segmented_write(ctxt, addr, data, bytes); } static int em_push(struct x86_emulate_ctxt *ctxt) { /* Disable writeback. */ ctxt->dst.type = OP_NONE; return push(ctxt, &ctxt->src.val, ctxt->op_bytes); } static int emulate_pop(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; struct segmented_address addr; addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; rc = segmented_read(ctxt, addr, dest, len); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, len); return rc; } static int em_pop(struct x86_emulate_ctxt *ctxt) { return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int emulate_popf(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; unsigned long val, change_mask; int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &val, len); if (rc != X86EMUL_CONTINUE) return rc; change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID; switch(ctxt->mode) { case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT16: if (cpl == 0) change_mask |= EFLG_IOPL; if (cpl <= iopl) change_mask |= EFLG_IF; break; case X86EMUL_MODE_VM86: if (iopl < 3) return emulate_gp(ctxt, 0); change_mask |= EFLG_IF; break; default: /* real mode */ change_mask |= (EFLG_IOPL | EFLG_IF); break; } *(unsigned long *)dest = (ctxt->eflags & ~change_mask) | (val & change_mask); return rc; } static int em_popf(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.addr.reg = &ctxt->eflags; ctxt->dst.bytes = ctxt->op_bytes; return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int em_enter(struct x86_emulate_ctxt *ctxt) { int rc; unsigned frame_size = ctxt->src.val; unsigned nesting_level = ctxt->src2.val & 31; ulong rbp; if (nesting_level) return X86EMUL_UNHANDLEABLE; rbp = reg_read(ctxt, VCPU_REGS_RBP); rc = push(ctxt, &rbp, stack_size(ctxt)); if (rc != X86EMUL_CONTINUE) return rc; assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP), stack_mask(ctxt)); assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RSP) - frame_size, stack_mask(ctxt)); return X86EMUL_CONTINUE; } static int em_leave(struct x86_emulate_ctxt *ctxt) { assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP), stack_mask(ctxt)); 
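	/* LEAVE is architecturally "RSP := RBP (masked to the stack width),
	 * then pop into RBP": the masked assign above is the first half, the
	 * pop below is the second. */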
return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes); } static int em_push_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; ctxt->src.val = get_segment_selector(ctxt, seg); if (ctxt->op_bytes == 4) { rsp_increment(ctxt, -2); ctxt->op_bytes = 2; } return em_push(ctxt); } static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned long selector; int rc; rc = emulate_pop(ctxt, &selector, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; rc = load_segment_descriptor(ctxt, (u16)selector, seg); return rc; } static int em_pusha(struct x86_emulate_ctxt *ctxt) { unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP); int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RAX; while (reg <= VCPU_REGS_RDI) { (reg == VCPU_REGS_RSP) ? (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg)); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ++reg; } return rc; } static int em_pushf(struct x86_emulate_ctxt *ctxt) { ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM; return em_push(ctxt); } static int em_popa(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RDI; while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { rsp_increment(ctxt, ctxt->op_bytes); --reg; } rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) break; --reg; } return rc; } static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { const struct x86_emulate_ops *ops = ctxt->ops; int rc; struct desc_ptr dt; gva_t cs_addr; gva_t eip_addr; u16 cs, eip; /* TODO: Add limit checks */ ctxt->src.val = ctxt->eflags; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = ctxt->_eip; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ops->get_idt(ctxt, &dt); eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = eip; return rc; } int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { int rc; invalidate_registers(ctxt); rc = __emulate_int_real(ctxt, irq); if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return rc; } static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return __emulate_int_real(ctxt, irq); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* Protected mode interrupts unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; unsigned long temp_eip = 0; unsigned long temp_eflags = 0; unsigned long cs = 0; unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF | EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */ unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP; /* TODO: Add stack limit check */ rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); if (rc != 
X86EMUL_CONTINUE) return rc; if (temp_eip & ~0xffff) return emulate_gp(ctxt, 0); rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = temp_eip; if (ctxt->op_bytes == 4) ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); else if (ctxt->op_bytes == 2) { ctxt->eflags &= ~0xffff; ctxt->eflags |= temp_eflags; } ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ ctxt->eflags |= EFLG_RESERVED_ONE_MASK; return rc; } static int em_iret(struct x86_emulate_ctxt *ctxt) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return emulate_iret_real(ctxt); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* iret from protected mode unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned short sel, old_sel; struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; u8 cpl = ctxt->ops->cpl(ctxt); /* Assignment of RIP may only fail in 64-bit mode */ if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_sel, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); if (rc != X86EMUL_CONTINUE) { WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); /* assigning eip failed; restore the old cs */ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); return rc; } return rc; } static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) { return assign_eip_near(ctxt, ctxt->src.val); } static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) { int rc; long int old_eip; old_eip = ctxt->_eip; rc = assign_eip_near(ctxt, ctxt->src.val); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = old_eip; rc = em_push(ctxt); return rc; } static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) { u64 old = ctxt->dst.orig_val64; if (ctxt->dst.bytes == 16) return X86EMUL_UNHANDLEABLE; if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); ctxt->eflags &= ~EFLG_ZF; } else { ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | (u32) reg_read(ctxt, VCPU_REGS_RBX); ctxt->eflags |= EFLG_ZF; } return X86EMUL_CONTINUE; } static int em_ret(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; return assign_eip_near(ctxt, eip); } static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip, cs; u16 old_cs; int cpl = ctxt->ops->cpl(ctxt); struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; /* Outer-privilege level return is not implemented */ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) return X86EMUL_UNHANDLEABLE; rc = 
__load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, eip, &new_desc); if (rc != X86EMUL_CONTINUE) { WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); } return rc; } static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) { int rc; rc = em_ret_far(ctxt); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) { /* Save real source value, then compare EAX against destination. */ ctxt->dst.orig_val = ctxt->dst.val; ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); ctxt->src.orig_val = ctxt->src.val; ctxt->src.val = ctxt->dst.orig_val; fastop(ctxt, em_cmp); if (ctxt->eflags & EFLG_ZF) { /* Success: write back to memory. */ ctxt->dst.val = ctxt->src.orig_val; } else { /* Failure: write the value we saw to EAX. */ ctxt->dst.type = OP_REG; ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); ctxt->dst.val = ctxt->dst.orig_val; } return X86EMUL_CONTINUE; } static int em_lseg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned short sel; int rc; memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = load_segment_descriptor(ctxt, sel, seg); if (rc != X86EMUL_CONTINUE) return rc; ctxt->dst.val = ctxt->src.val; return rc; } static void setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, struct desc_struct *cs, struct desc_struct *ss) { cs->l = 0; /* will be adjusted later */ set_desc_base(cs, 0); /* flat segment */ cs->g = 1; /* 4kb granularity */ set_desc_limit(cs, 0xfffff); /* 4GB limit */ cs->type = 0x0b; /* Read, Execute, Accessed */ cs->s = 1; cs->dpl = 0; /* will be adjusted later */ cs->p = 1; cs->d = 1; cs->avl = 0; set_desc_base(ss, 0); /* flat segment */ set_desc_limit(ss, 0xfffff); /* 4GB limit */ ss->g = 1; /* 4kb granularity */ ss->s = 1; ss->type = 0x03; /* Read/Write, Accessed */ ss->d = 1; /* 32bit stack segment */ ss->dpl = 0; ss->p = 1; ss->l = 0; ss->avl = 0; } static bool vendor_intel(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; eax = ecx = 0; ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; } static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 eax, ebx, ecx, edx; /* * syscall should always be enabled in longmode - so only become * vendor specific (cpuid) if other modes are active... */ if (ctxt->mode == X86EMUL_MODE_PROT64) return true; eax = 0x00000000; ecx = 0x00000000; ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); /* * Intel ("GenuineIntel") * remark: Intel CPUs only support "syscall" in 64bit * longmode. Also an 64bit guest with a * 32bit compat-app running will #UD !! While this * behaviour can be fixed (by emulating) into AMD * response - CPUs of AMD can't behave like Intel. 
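 * Hence the vendor checks below: return false for Intel outside long mode
 * (and let the #UD happen), true for the known AMD vendor strings, and
 * fall back to Intel's stricter behaviour for unknown vendors.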
*/ if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) return false; /* AMD ("AuthenticAMD") */ if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) return true; /* AMD ("AMDisbetter!") */ if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) return true; /* default: (not Intel, not AMD), apply Intel's stricter rules... */ return false; } static int em_syscall(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; /* syscall is not available in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_ud(ctxt); if (!(em_syscall_is_enabled(ctxt))) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_EFER, &efer); setup_syscalls_segments(ctxt, &cs, &ss); if (!(efer & EFER_SCE)) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); ss_sel = (u16)(msr_data + 8); if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; if (efer & EFER_LMA) { #ifdef CONFIG_X86_64 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; ops->get_msr(ctxt, ctxt->mode == X86EMUL_MODE_PROT64 ? MSR_LSTAR : MSR_CSTAR, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt->eflags &= ~msr_data; ctxt->eflags |= EFLG_RESERVED_ONE_MASK; #endif } else { /* legacy mode */ ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt->_eip = (u32)msr_data; ctxt->eflags &= ~(EFLG_VM | EFLG_IF); } return X86EMUL_CONTINUE; } static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); /* * Not recognized on AMD in compat mode (but is recognized in legacy * mode). */ if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); /* sysenter/sysexit have not been tested in 64bit mode. 
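 * Returning X86EMUL_UNHANDLEABLE below punts that case back to the caller
 * rather than risking incorrect emulation.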
*/ if (ctxt->mode == X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (ctxt->mode) { case X86EMUL_MODE_PROT32: if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); break; case X86EMUL_MODE_PROT64: if (msr_data == 0x0) return emulate_gp(ctxt, 0); break; default: break; } ctxt->eflags &= ~(EFLG_VM | EFLG_IF); cs_sel = (u16)msr_data; cs_sel &= ~SELECTOR_RPL_MASK; ss_sel = cs_sel + 8; ss_sel &= ~SELECTOR_RPL_MASK; if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; return X86EMUL_CONTINUE; } static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data, rcx, rdx; int usermode; u16 cs_sel = 0, ss_sel = 0; /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); setup_syscalls_segments(ctxt, &cs, &ss); if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; rcx = reg_read(ctxt, VCPU_REGS_RCX); rdx = reg_read(ctxt, VCPU_REGS_RDX); cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); rcx = (u32)rcx; rdx = (u32)rdx; break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); if (msr_data == 0x0) return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; if (is_noncanonical_address(rcx) || is_noncanonical_address(rdx)) return emulate_gp(ctxt, 0); break; } cs_sel |= SELECTOR_RPL_MASK; ss_sel |= SELECTOR_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = rdx; *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; } static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) { int iopl; if (ctxt->mode == X86EMUL_MODE_REAL) return false; if (ctxt->mode == X86EMUL_MODE_VM86) return true; iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; return ctxt->ops->cpl(ctxt) > iopl; } static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct tr_seg; u32 base3; int r; u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; unsigned mask = (1 << len) - 1; unsigned long base; ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); if (!tr_seg.p) return false; if (desc_limit_scaled(&tr_seg) < 103) return false; base = get_desc_base(&tr_seg); #ifdef CONFIG_X86_64 base |= ((u64)base3) << 32; #endif r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) return false; r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if ((perm >> bit_idx) & mask) return false; return true; } static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { if (ctxt->perm_ok) return true; if (emulator_bad_iopl(ctxt)) if 
(!emulator_io_port_access_allowed(ctxt, port, len)) return false; ctxt->perm_ok = true; return true; } static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { tss->ip = ctxt->_eip; tss->flag = ctxt->eflags; tss->ax = reg_read(ctxt, VCPU_REGS_RAX); tss->cx = reg_read(ctxt, VCPU_REGS_RCX); tss->dx = reg_read(ctxt, VCPU_REGS_RDX); tss->bx = reg_read(ctxt, VCPU_REGS_RBX); tss->sp = reg_read(ctxt, VCPU_REGS_RSP); tss->bp = reg_read(ctxt, VCPU_REGS_RBP); tss->si = reg_read(ctxt, VCPU_REGS_RSI); tss->di = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); } static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { int ret; u8 cpl; ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; /* * SDM says that segment selectors are loaded before segment * descriptors. */ set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); cpl = tss->cs & 3; /* * Now load segment descriptors. If a fault happens at this stage * it is handled in the context of the new task. */ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; } static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss16(ctxt, &tss_seg); ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = ops->write_std(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; } return load_state_from_tss16(ctxt, &tss_seg); } static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { /* CR3 and ldt selector are not saved intentionally */
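/*
 * This matches hardware behaviour: on a task switch the CPU only
 * writes the dynamic fields (general purpose registers, segment
 * selectors, EIP and EFLAGS) back to the outgoing TSS. Static
 * fields such as CR3 and the LDT selector are read from the
 * incoming TSS but never stored, so they are skipped here as well.
 */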
tss->eip = ctxt->_eip; tss->eflags = ctxt->eflags; tss->eax = reg_read(ctxt, VCPU_REGS_RAX); tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); tss->edx = reg_read(ctxt, VCPU_REGS_RDX); tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); tss->esp = reg_read(ctxt, VCPU_REGS_RSP); tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); tss->esi = reg_read(ctxt, VCPU_REGS_RSI); tss->edi = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); } static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { int ret; u8 cpl; if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) return emulate_gp(ctxt, 0); ctxt->_eip = tss->eip; ctxt->eflags = tss->eflags | 2; /* General purpose registers */ *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; /* * SDM says that segment selectors are loaded before segment * descriptors. This is important because CPL checks will * use CS.RPL. */ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); /* * If we're switching between Protected Mode and VM86, we need to make * sure to update the mode before loading the segment descriptors so * that the selectors are interpreted correctly. */ if (ctxt->eflags & X86_EFLAGS_VM) { ctxt->mode = X86EMUL_MODE_VM86; cpl = 3; } else { ctxt->mode = X86EMUL_MODE_PROT32; cpl = tss->cs & 3; } /* * Now load segment descriptors. 
If a fault happens at this stage * it is handled in the context of the new task. */ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; } static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_32 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); u32 eip_offset = offsetof(struct tss_segment_32, eip); u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; save_state_to_tss32(ctxt, &tss_seg); /* Only GP registers and segment selectors are saved */ ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = ops->write_std(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, &ctxt->exception); if (ret != X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; } return load_state_from_tss32(ctxt, &tss_seg); } static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct curr_tss_desc, next_tss_desc; int ret; u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); ulong old_tss_base = ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); u32 desc_limit; ulong desc_addr; /* FIXME: old_tss_base == ~0 ? */ ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; /* FIXME: check that next_tss_desc is tss */ /* * Check privileges. The three cases are task switch caused by... * * 1. jmp/call/int to task gate: Check against DPL of the task gate * 2. Exception/IRQ/iret: No check is performed * 3. jmp/call to TSS/task-gate: No check is performed since the * hardware checks it before exiting.
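 *
 * Only case 1 (a task gate reached through the IDT) therefore needs
 * an explicit DPL check below; the other cases fall straight through.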
*/ if (reason == TASK_SWITCH_GATE) { if (idt_index != -1) { /* Software interrupts */ struct desc_struct task_gate_desc; int dpl; ret = read_interrupt_descriptor(ctxt, idt_index, &task_gate_desc); if (ret != X86EMUL_CONTINUE) return ret; dpl = task_gate_desc.dpl; if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) return emulate_gp(ctxt, (idt_index << 3) | 0x2); } } desc_limit = desc_limit_scaled(&next_tss_desc); if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) { return emulate_ts(ctxt, tss_selector & 0xfffc); } if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); } if (reason == TASK_SWITCH_IRET) ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; /* Set the back link to the previous task only if the NT bit is set in eflags. Note that old_tss_sel is not used after this point. */ if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) old_tss_sel = 0xffff; if (next_tss_desc.type & 8) ret = task_switch_32(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); else ret = task_switch_16(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); if (ret != X86EMUL_CONTINUE) return ret; if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; if (reason != TASK_SWITCH_IRET) { next_tss_desc.type |= (1 << 1); /* set busy flag */ write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); } ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); if (has_error_code) { ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; ctxt->lock_prefix = 0; ctxt->src.val = (unsigned long) error_code; ret = em_push(ctxt); } return ret; } int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { int rc; invalidate_registers(ctxt); ctxt->_eip = ctxt->eip; ctxt->dst.type = OP_NONE; rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (rc == X86EMUL_CONTINUE) { ctxt->eip = ctxt->_eip; writeback_registers(ctxt); } return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; } static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, struct operand *op) { int df = (ctxt->eflags & EFLG_DF) ?
-op->count : op->count; register_address_increment(ctxt, reg, df * op->bytes); op->addr.mem.ea = register_address(ctxt, reg); } static int em_das(struct x86_emulate_ctxt *ctxt) { u8 al, old_al; bool af, cf, old_cf; cf = ctxt->eflags & X86_EFLAGS_CF; al = ctxt->dst.val; old_al = al; old_cf = cf; cf = false; af = ctxt->eflags & X86_EFLAGS_AF; if ((al & 0x0f) > 9 || af) { al -= 6; cf = old_cf | (al >= 250); af = true; } else { af = false; } if (old_al > 0x99 || old_cf) { al -= 0x60; cf = true; } ctxt->dst.val = al; /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); if (cf) ctxt->eflags |= X86_EFLAGS_CF; if (af) ctxt->eflags |= X86_EFLAGS_AF; return X86EMUL_CONTINUE; } static int em_aam(struct x86_emulate_ctxt *ctxt) { u8 al, ah; if (ctxt->src.val == 0) return emulate_de(ctxt); al = ctxt->dst.val & 0xff; ah = al / ctxt->src.val; al %= ctxt->src.val; ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); return X86EMUL_CONTINUE; } static int em_aad(struct x86_emulate_ctxt *ctxt) { u8 al = ctxt->dst.val & 0xff; u8 ah = (ctxt->dst.val >> 8) & 0xff; al = (al + (ah * ctxt->src.val)) & 0xff; ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); return X86EMUL_CONTINUE; } static int em_call(struct x86_emulate_ctxt *ctxt) { int rc; long rel = ctxt->src.val; ctxt->src.val = (unsigned long)ctxt->_eip; rc = jmp_rel(ctxt, rel); if (rc != X86EMUL_CONTINUE) return rc; return em_push(ctxt); } static int em_call_far(struct x86_emulate_ctxt *ctxt) { u16 sel, old_cs; ulong old_eip; int rc; struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; int cpl = ctxt->ops->cpl(ctxt); old_eip = ctxt->_eip; ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; /* propagate any fault from the CS load */ rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); if (rc != X86EMUL_CONTINUE) goto fail; ctxt->src.val = old_cs; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) goto fail; ctxt->src.val = old_eip; rc = em_push(ctxt); /* If we failed, we tainted the memory, but at the very least we should restore cs */ if (rc != X86EMUL_CONTINUE) goto fail; return rc; fail: ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); return rc; } static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_near(ctxt, eip); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } static int em_xchg(struct x86_emulate_ctxt *ctxt) { /* Write back the register source. */ ctxt->src.val = ctxt->dst.val; write_register_operand(&ctxt->src); /* Write back the memory destination with implicit LOCK prefix.
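 * xchg with a memory operand is locked on real hardware even without
 * an explicit LOCK prefix, which is why lock_prefix is forced below.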
*/ ctxt->dst.val = ctxt->src.orig_val; ctxt->lock_prefix = 1; return X86EMUL_CONTINUE; } static int em_imul_3op(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = ctxt->src2.val; return fastop(ctxt, em_imul); } static int em_cwd(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.bytes = ctxt->src.bytes; ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); return X86EMUL_CONTINUE; } static int em_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 tsc = 0; ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; return X86EMUL_CONTINUE; } static int em_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 pmc; if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; return X86EMUL_CONTINUE; } static int em_mov(struct x86_emulate_ctxt *ctxt) { memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); return X86EMUL_CONTINUE; } #define FFL(x) bit(X86_FEATURE_##x) static int em_movbe(struct x86_emulate_ctxt *ctxt) { u32 ebx, ecx, edx, eax = 1; u16 tmp; /* * Check MOVBE is set in the guest-visible CPUID leaf. */ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); if (!(ecx & FFL(MOVBE))) return emulate_ud(ctxt); switch (ctxt->op_bytes) { case 2: /* * From MOVBE definition: "...When the operand size is 16 bits, * the upper word of the destination register remains unchanged * ..." * * Both casting ->valptr and ->val to u16 breaks strict aliasing * rules so we have to do the operation almost by hand. */ tmp = (u16)ctxt->src.val; ctxt->dst.val &= ~0xffffUL; ctxt->dst.val |= (unsigned long)swab16(tmp); break; case 4: ctxt->dst.val = swab32((u32)ctxt->src.val); break; case 8: ctxt->dst.val = swab64(ctxt->src.val); break; default: BUG(); } return X86EMUL_CONTINUE; } static int em_cr_write(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) return emulate_gp(ctxt, 0); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_dr_write(struct x86_emulate_ctxt *ctxt) { unsigned long val; if (ctxt->mode == X86EMUL_MODE_PROT64) val = ctxt->src.val & ~0ULL; else val = ctxt->src.val & ~0U; /* #UD condition is already handled. */ if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) return emulate_gp(ctxt, 0); /* Disable writeback.
*/ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_wrmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int em_rdmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; return X86EMUL_CONTINUE; } static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) { if (ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; return X86EMUL_CONTINUE; } static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); } static int em_lldt(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); } static int em_ltr(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); } static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; ulong linear; rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_clts(struct x86_emulate_ctxt *ctxt) { ulong cr0; cr0 = ctxt->ops->get_cr(ctxt, 0); cr0 &= ~X86_CR0_TS; ctxt->ops->set_cr(ctxt, 0, cr0); return X86EMUL_CONTINUE; } static int em_vmcall(struct x86_emulate_ctxt *ctxt) { int rc = ctxt->ops->fix_hypercall(ctxt); if (rc != X86EMUL_CONTINUE) return rc; /* Let the processor re-execute the fixed hypercall */ ctxt->_eip = ctxt->eip; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, void (*get)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *ptr)) { struct desc_ptr desc_ptr; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; get(ctxt, &desc_ptr); if (ctxt->op_bytes == 2) { ctxt->op_bytes = 4; desc_ptr.address &= 0x00ffffff; } /* Disable writeback. 
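 * The descriptor table pointer is written out explicitly via
 * segmented_write() below rather than through the common writeback
 * path.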
*/ ctxt->dst.type = OP_NONE; return segmented_write(ctxt, ctxt->dst.addr.mem, &desc_ptr, 2 + ctxt->op_bytes); } static int em_sgdt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); } static int em_sidt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); } static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) { struct desc_ptr desc_ptr; int rc; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->mode == X86EMUL_MODE_PROT64 && is_noncanonical_address(desc_ptr.address)) return emulate_gp(ctxt, 0); if (lgdt) ctxt->ops->set_gdt(ctxt, &desc_ptr); else ctxt->ops->set_idt(ctxt, &desc_ptr); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_lgdt(struct x86_emulate_ctxt *ctxt) { return em_lgdt_lidt(ctxt, true); } static int em_vmmcall(struct x86_emulate_ctxt *ctxt) { int rc; rc = ctxt->ops->fix_hypercall(ctxt); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return rc; } static int em_lidt(struct x86_emulate_ctxt *ctxt) { return em_lgdt_lidt(ctxt, false); } static int em_smsw(struct x86_emulate_ctxt *ctxt) { if (ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); return X86EMUL_CONTINUE; } static int em_lmsw(struct x86_emulate_ctxt *ctxt) { ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) | (ctxt->src.val & 0x0f)); ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_loop(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; register_address_increment(ctxt, VCPU_REGS_RCX, -1); if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_jcxz(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_in(struct x86_emulate_ctxt *ctxt) { if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, &ctxt->dst.val)) return X86EMUL_IO_NEEDED; return X86EMUL_CONTINUE; } static int em_out(struct x86_emulate_ctxt *ctxt) { ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, &ctxt->src.val, 1); /* Disable writeback. 
*/ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_cli(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->eflags &= ~X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_sti(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; ctxt->eflags |= X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_cpuid(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; eax = reg_read(ctxt, VCPU_REGS_RAX); ecx = reg_read(ctxt, VCPU_REGS_RCX); ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); *reg_write(ctxt, VCPU_REGS_RAX) = eax; *reg_write(ctxt, VCPU_REGS_RBX) = ebx; *reg_write(ctxt, VCPU_REGS_RCX) = ecx; *reg_write(ctxt, VCPU_REGS_RDX) = edx; return X86EMUL_CONTINUE; } static int em_sahf(struct x86_emulate_ctxt *ctxt) { u32 flags; flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF; flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; ctxt->eflags &= ~0xffUL; ctxt->eflags |= flags | X86_EFLAGS_FIXED; return X86EMUL_CONTINUE; } static int em_lahf(struct x86_emulate_ctxt *ctxt) { *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; return X86EMUL_CONTINUE; } static int em_bswap(struct x86_emulate_ctxt *ctxt) { switch (ctxt->op_bytes) { #ifdef CONFIG_X86_64 case 8: asm("bswap %0" : "+r"(ctxt->dst.val)); break; #endif default: asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); break; } return X86EMUL_CONTINUE; } static int em_clflush(struct x86_emulate_ctxt *ctxt) { /* emulating clflush regardless of cpuid */ return X86EMUL_CONTINUE; } static bool valid_cr(int nr) { switch (nr) { case 0: case 2 ... 4: case 8: return true; default: return false; } } static int check_cr_read(struct x86_emulate_ctxt *ctxt) { if (!valid_cr(ctxt->modrm_reg)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_cr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int cr = ctxt->modrm_reg; u64 efer = 0; static u64 cr_reserved_bits[] = { 0xffffffff00000000ULL, 0, 0, 0, /* CR3 checked later */ CR4_RESERVED_BITS, 0, 0, 0, CR8_RESERVED_BITS, }; if (!valid_cr(cr)) return emulate_ud(ctxt); if (new_val & cr_reserved_bits[cr]) return emulate_gp(ctxt, 0); switch (cr) { case 0: { u64 cr4; if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) return emulate_gp(ctxt, 0); cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && !(cr4 & X86_CR4_PAE)) return emulate_gp(ctxt, 0); break; } case 3: { u64 rsvd = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD; if (new_val & rsvd) return emulate_gp(ctxt, 0); break; } case 4: { ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) return emulate_gp(ctxt, 0); break; } } return X86EMUL_CONTINUE; } static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) { unsigned long dr7; ctxt->ops->get_dr(ctxt, 7, &dr7); /* Check if DR7.Global_Enable is set */ return dr7 & (1 << 13); } static int check_dr_read(struct x86_emulate_ctxt *ctxt) { int dr = ctxt->modrm_reg; u64 cr4; if (dr > 7) return emulate_ud(ctxt); cr4 = ctxt->ops->get_cr(ctxt, 4); if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) return emulate_ud(ctxt); if (check_dr7_gd(ctxt)) { ulong dr6; ctxt->ops->get_dr(ctxt, 6, &dr6); dr6 &= ~15; dr6 |= DR6_BD | DR6_RTM; ctxt->ops->set_dr(ctxt, 6, 
dr6); return emulate_db(ctxt); } return X86EMUL_CONTINUE; } static int check_dr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int dr = ctxt->modrm_reg; if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) return emulate_gp(ctxt, 0); return check_dr_read(ctxt); } static int check_svme(struct x86_emulate_ctxt *ctxt) { u64 efer; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (!(efer & EFER_SVME)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_svme_pa(struct x86_emulate_ctxt *ctxt) { u64 rax = reg_read(ctxt, VCPU_REGS_RAX); /* Valid physical address? */ if (rax & 0xffff000000000000ULL) return emulate_gp(ctxt, 0); return check_svme(ctxt); } static int check_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || ctxt->ops->check_pmc(ctxt, rcx)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_in(struct x86_emulate_ctxt *ctxt) { ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_out(struct x86_emulate_ctxt *ctxt) { ctxt->src.bytes = min(ctxt->src.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } #define D(_y) { .flags = (_y) } #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define N D(NotImpl) #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) } #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } #define II(_f, _e, _i) \ { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } #define IIP(_f, _e, _i, _p) \ { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } #define D2bv(_f) D((_f) | ByteOp), D(_f) #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) #define I2bvIP(_f, _e, _i, _p) \ IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) static const struct opcode group7_rm0[] = { N, I(SrcNone | Priv | EmulateOnUD, em_vmcall), N, N, N, N, N, N, }; static const struct opcode group7_rm1[] = { DI(SrcNone | Priv, monitor), DI(SrcNone | Priv, mwait), N, N, N, N, N, N, }; static const struct opcode group7_rm3[] = { DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), 
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), DIP(SrcNone | Prot | Priv, stgi, check_svme), DIP(SrcNone | Prot | Priv, clgi, check_svme), DIP(SrcNone | Prot | Priv, skinit, check_svme), DIP(SrcNone | Prot | Priv, invlpga, check_svme), }; static const struct opcode group7_rm7[] = { N, DIP(SrcNone, rdtscp, check_rdtsc), N, N, N, N, N, N, }; static const struct opcode group1[] = { F(Lock, em_add), F(Lock | PageTable, em_or), F(Lock, em_adc), F(Lock, em_sbb), F(Lock | PageTable, em_and), F(Lock, em_sub), F(Lock, em_xor), F(NoWrite, em_cmp), }; static const struct opcode group1A[] = { I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N, }; static const struct opcode group2[] = { F(DstMem | ModRM, em_rol), F(DstMem | ModRM, em_ror), F(DstMem | ModRM, em_rcl), F(DstMem | ModRM, em_rcr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_shr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_sar), }; static const struct opcode group3[] = { F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcNone | Lock, em_not), F(DstMem | SrcNone | Lock, em_neg), F(DstXacc | Src2Mem, em_mul_ex), F(DstXacc | Src2Mem, em_imul_ex), F(DstXacc | Src2Mem, em_div_ex), F(DstXacc | Src2Mem, em_idiv_ex), }; static const struct opcode group4[] = { F(ByteOp | DstMem | SrcNone | Lock, em_inc), F(ByteOp | DstMem | SrcNone | Lock, em_dec), N, N, N, N, N, N, }; static const struct opcode group5[] = { F(DstMem | SrcNone | Lock, em_inc), F(DstMem | SrcNone | Lock, em_dec), I(SrcMem | NearBranch, em_call_near_abs), I(SrcMemFAddr | ImplicitOps | Stack, em_call_far), I(SrcMem | NearBranch, em_jmp_abs), I(SrcMemFAddr | ImplicitOps, em_jmp_far), I(SrcMem | Stack, em_push), D(Undefined), }; static const struct opcode group6[] = { DI(Prot | DstMem, sldt), DI(Prot | DstMem, str), II(Prot | Priv | SrcMem16, em_lldt, lldt), II(Prot | Priv | SrcMem16, em_ltr, ltr), N, N, N, N, }; static const struct group_dual group7 = { { II(Mov | DstMem, em_sgdt, sgdt), II(Mov | DstMem, em_sidt, sidt), II(SrcMem | Priv, em_lgdt, lgdt), II(SrcMem | Priv, em_lidt, lidt), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg), }, { EXT(0, group7_rm0), EXT(0, group7_rm1), N, EXT(0, group7_rm3), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7), } }; static const struct opcode group8[] = { N, N, N, N, F(DstMem | SrcImmByte | NoWrite, em_bt), F(DstMem | SrcImmByte | Lock | PageTable, em_bts), F(DstMem | SrcImmByte | Lock, em_btr), F(DstMem | SrcImmByte | Lock | PageTable, em_btc), }; static const struct group_dual group9 = { { N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, }, { N, N, N, N, N, N, N, N, } }; static const struct opcode group11[] = { I(DstMem | SrcImm | Mov | PageTable, em_mov), X7(D(Undefined)), }; static const struct gprefix pfx_0f_ae_7 = { I(SrcMem | ByteOp, em_clflush), N, N, N, }; static const struct group_dual group15 = { { N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7), }, { N, N, N, N, N, N, N, N, } }; static const struct gprefix pfx_0f_6f_0f_7f = { I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), }; static const struct instr_dual instr_dual_0f_2b = { I(0, em_mov), N }; static const struct gprefix pfx_0f_2b = { ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N, }; static const struct gprefix pfx_0f_28_0f_29 = { I(Aligned, em_mov), I(Aligned, em_mov), N, N, }; static const struct gprefix pfx_0f_e7 = { 
N, I(Sse, em_mov), N, N, }; static const struct escape escape_d9 = { { N, N, N, N, N, N, N, I(DstMem, em_fnstcw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xD7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_db = { { N, N, N, N, N, N, N, N, }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xD7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, I(ImplicitOps, em_fninit), N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_dd = { { N, N, N, N, N, N, N, I(DstMem, em_fnstsw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xD7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct instr_dual instr_dual_0f_c3 = { I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N }; static const struct opcode opcode_table[256] = { /* 0x00 - 0x07 */ F6ALU(Lock, em_add), I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg), /* 0x08 - 0x0F */ F6ALU(Lock | PageTable, em_or), I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), N, /* 0x10 - 0x17 */ F6ALU(Lock, em_adc), I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), /* 0x18 - 0x1F */ F6ALU(Lock, em_sbb), I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), /* 0x20 - 0x27 */ F6ALU(Lock | PageTable, em_and), N, N, /* 0x28 - 0x2F */ F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), /* 0x30 - 0x37 */ F6ALU(Lock, em_xor), N, N, /* 0x38 - 0x3F */ F6ALU(NoWrite, em_cmp), N, N, /* 0x40 - 0x4F */ X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), /* 0x50 - 0x57 */ X8(I(SrcReg | Stack, em_push)), /* 0x58 - 0x5F */ X8(I(DstReg | Stack, em_pop)), /* 0x60 - 0x67 */ I(ImplicitOps | Stack | No64, em_pusha), I(ImplicitOps | Stack | No64, em_popa), N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ , N, N, N, N, /* 0x68 - 0x6F */ I(SrcImm | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), I(SrcImmByte | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ /* 0x70 - 0x7F */ X16(D(SrcImmByte | NearBranch)), /* 0x80 - 0x87 */ G(ByteOp | DstMem | SrcImm, group1), G(DstMem | SrcImm, group1), G(ByteOp | DstMem | SrcImm | No64, group1), G(DstMem | SrcImmByte, group1), F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), /* 0x88 - 0x8F */ I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), D(ModRM | SrcMem | NoAccess | DstReg), I(ImplicitOps | SrcMem16 | ModRM,
em_mov_sreg_rm), G(0, group1A), /* 0x90 - 0x97 */ DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), /* 0x98 - 0x9F */ D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), I(SrcImmFAddr | No64, em_call_far), N, II(ImplicitOps | Stack, em_pushf, pushf), II(ImplicitOps | Stack, em_popf, popf), I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), /* 0xA0 - 0xA7 */ I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), I2bv(SrcSI | DstDI | Mov | String, em_mov), F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r), /* 0xA8 - 0xAF */ F2bv(DstAcc | SrcImm | NoWrite, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstAcc | Mov | String, em_mov), F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r), /* 0xB0 - 0xB7 */ X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), /* 0xB8 - 0xBF */ X8(I(DstReg | SrcImm64 | Mov, em_mov)), /* 0xC0 - 0xC7 */ G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm), I(ImplicitOps | NearBranch, em_ret), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm), I(ImplicitOps | Stack, em_ret_far), D(ImplicitOps), DI(SrcImmByte, intn), D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ G(Src2One | ByteOp, group2), G(Src2One, group2), G(Src2CL | ByteOp, group2), G(Src2CL, group2), I(DstAcc | SrcImmUByte | No64, em_aam), I(DstAcc | SrcImmUByte | No64, em_aad), F(DstAcc | ByteOp | No64, em_salc), I(DstAcc | SrcXLat | ByteOp, em_mov), /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ X3(I(SrcImmByte | NearBranch, em_loop)), I(SrcImmByte | NearBranch, em_jcxz), I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), /* 0xE8 - 0xEF */ I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch), I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps | NearBranch), I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), /* 0xF0 - 0xF7 */ N, DI(ImplicitOps, icebp), N, N, DI(ImplicitOps | Priv, hlt), D(ImplicitOps), G(ByteOp, group3), G(0, group3), /* 0xF8 - 0xFF */ D(ImplicitOps), D(ImplicitOps), I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), }; static const struct opcode twobyte_table[256] = { /* 0x00 - 0x0F */ G(0, group6), GD(0, &group7), N, N, N, I(ImplicitOps | EmulateOnUD, em_syscall), II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, /* 0x10 - 0x1F */ N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 0x20 - 0x2F */ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, check_cr_write), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, check_dr_write), N, N, N, N, GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29), N, GP(ModRM | DstMem | SrcReg | Mov | Sse, 
&pfx_0f_2b), N, N, N, N, /* 0x30 - 0x3F */ II(ImplicitOps | Priv, em_wrmsr, wrmsr), IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), II(ImplicitOps | Priv, em_rdmsr, rdmsr), IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), I(ImplicitOps | EmulateOnUD, em_sysenter), I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), N, N, N, N, N, N, N, N, N, N, /* 0x40 - 0x4F */ X16(D(DstReg | SrcMem | ModRM)), /* 0x50 - 0x5F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0x60 - 0x6F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x70 - 0x7F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x80 - 0x8F */ X16(D(SrcImm | NearBranch)), /* 0x90 - 0x9F */ X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), /* 0xA0 - 0xA7 */ I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), II(ImplicitOps, em_cpuid, cpuid), F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, /* 0xA8 - 0xAF */ I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), DI(ImplicitOps, rsm), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xB8 - 0xBF */ N, N, G(BitOp, group8), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xC0 - 0xC7 */ F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), N, ID(0, &instr_dual_0f_c3), N, N, N, GD(0, &group9), /* 0xC8 - 0xCF */ X8(I(DstReg, em_bswap)), /* 0xD0 - 0xDF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0xE0 - 0xEF */ N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7), N, N, N, N, N, N, N, N, /* 0xF0 - 0xFF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N }; static const struct instr_dual instr_dual_0f_38_f0 = { I(DstReg | SrcMem | Mov, em_movbe), N }; static const struct instr_dual instr_dual_0f_38_f1 = { I(DstMem | SrcReg | Mov, em_movbe), N }; static const struct gprefix three_byte_0f_38_f0 = { ID(0, &instr_dual_0f_38_f0), N, N, N }; static const struct gprefix three_byte_0f_38_f1 = { ID(0, &instr_dual_0f_38_f1), N, N, N }; /* * Insns below are selected by the prefix which indexed by the third opcode * byte. */ static const struct opcode opcode_map_0f_38[256] = { /* 0x00 - 0x7f */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0x80 - 0xef */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0xf0 - 0xf1 */ GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0), GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1), /* 0xf2 - 0xff */ N, N, X4(N), X8(N) }; #undef D #undef N #undef G #undef GD #undef I #undef GP #undef EXT #undef D2bv #undef D2bvIP #undef I2bv #undef I2bvIP #undef I6ALU static unsigned imm_size(struct x86_emulate_ctxt *ctxt) { unsigned size; size = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; if (size == 8) size = 4; return size; } static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned size, bool sign_extension) { int rc = X86EMUL_CONTINUE; op->type = OP_IMM; op->bytes = size; op->addr.mem.ea = ctxt->_eip; /* NB. Immediates are sign-extended as necessary. */ switch (op->bytes) { case 1: op->val = insn_fetch(s8, ctxt); break; case 2: op->val = insn_fetch(s16, ctxt); break; case 4: op->val = insn_fetch(s32, ctxt); break; case 8: op->val = insn_fetch(s64, ctxt); break; } if (!sign_extension) { switch (op->bytes) { case 1: op->val &= 0xff; break; case 2: op->val &= 0xffff; break; case 4: op->val &= 0xffffffff; break; } } done: return rc; } static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned d) { int rc = X86EMUL_CONTINUE; switch (d) { case OpReg: decode_register_operand(ctxt, op); break; case OpImmUByte: rc = decode_imm(ctxt, op, 1, false); break; case OpMem: ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; mem_common: *op = ctxt->memop; ctxt->memopp = op; if (ctxt->d & BitOp) fetch_bit_operand(ctxt); op->orig_val = op->val; break; case OpMem64: ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; goto mem_common; case OpAcc: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccLo: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccHi: if (ctxt->d & ByteOp) { op->type = OP_NONE; break; } op->type = OP_REG; op->bytes = ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); op->orig_val = op->val; break; case OpDI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RDI); op->addr.mem.seg = VCPU_SREG_ES; op->val = 0; op->count = 1; break; case OpDX: op->type = OP_REG; op->bytes = 2; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); break; case OpCL: op->type = OP_IMM; op->bytes = 1; op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; break; case OpImmByte: rc = decode_imm(ctxt, op, 1, true); break; case OpOne: op->type = OP_IMM; op->bytes = 1; op->val = 1; break; case OpImm: rc = decode_imm(ctxt, op, imm_size(ctxt), true); break; case OpImm64: rc = decode_imm(ctxt, op, ctxt->op_bytes, true); break; case OpMem8: ctxt->memop.bytes = 1; if (ctxt->memop.type == OP_REG) { ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, true); fetch_register_operand(&ctxt->memop); } goto mem_common; case OpMem16: ctxt->memop.bytes = 2; goto mem_common; case OpMem32: ctxt->memop.bytes = 4; goto mem_common; case OpImmU16: rc = decode_imm(ctxt, op, 2, false); break; case OpImmU: rc = decode_imm(ctxt, op, imm_size(ctxt), false); break; case OpSI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RSI); op->addr.mem.seg = ctxt->seg_override; op->val = 0; op->count = 1; break; case OpXLat: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; op->addr.mem.ea = address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RBX) + (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); op->addr.mem.seg = ctxt->seg_override; op->val = 0; break; case OpImmFAddr: op->type = OP_IMM; op->addr.mem.ea = ctxt->_eip; op->bytes = ctxt->op_bytes + 2; insn_fetch_arr(op->valptr, op->bytes, ctxt); break; case OpMemFAddr: ctxt->memop.bytes = ctxt->op_bytes + 2; goto mem_common; case OpES: op->type = OP_IMM; op->val = VCPU_SREG_ES; break; case OpCS: op->type = OP_IMM; op->val = VCPU_SREG_CS; break; case OpSS: op->type = OP_IMM; op->val = VCPU_SREG_SS; break; case OpDS: op->type = OP_IMM; op->val = VCPU_SREG_DS; break; case OpFS: op->type = OP_IMM; op->val = VCPU_SREG_FS; break; case OpGS: op->type = OP_IMM; op->val = VCPU_SREG_GS; break; case OpImplicit: /* Special instructions do their own operand decoding. */ default: op->type = OP_NONE; /* Disable writeback. */ break; } done: return rc; } int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; ctxt->_eip = ctxt->eip; ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { rc = __do_insn_fetch_bytes(ctxt, 1); if (rc != X86EMUL_CONTINUE) return rc; } switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; case X86EMUL_MODE_PROT32: def_op_bytes = def_ad_bytes = 4; break; #ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: def_op_bytes = 4; def_ad_bytes = 8; break; #endif default: return EMULATION_FAILED; } ctxt->op_bytes = def_op_bytes; ctxt->ad_bytes = def_ad_bytes; /* Legacy prefixes. */ for (;;) { switch (ctxt->b = insn_fetch(u8, ctxt)) { case 0x66: /* operand-size override */ op_prefix = true; /* switch between 2/4 bytes */ ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) /* switch between 4/8 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 12; else /* switch between 2/4 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ case 0x2e: /* CS override */ case 0x36: /* SS override */ case 0x3e: /* DS override */ has_seg_override = true; ctxt->seg_override = (ctxt->b >> 3) & 3; break; case 0x64: /* FS override */ case 0x65: /* GS override */ has_seg_override = true; ctxt->seg_override = ctxt->b & 7; break; case 0x40 ... 0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; ctxt->rex_prefix = ctxt->b; continue; case 0xf0: /* LOCK */ ctxt->lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; } /* Any legacy prefix after a REX prefix nullifies its effect. */ ctxt->rex_prefix = 0; } done_prefixes: /* REX prefix. */ if (ctxt->rex_prefix & 8) ctxt->op_bytes = 8; /* REX.W */ /* Opcode byte(s). */ opcode = opcode_table[ctxt->b]; /* Two-byte opcode? 
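 * A leading 0x0f byte escapes into the two-byte map, and 0x0f 0x38
 * escapes again into the three-byte map, mirroring the hardware
 * instruction encoding.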
*/ if (ctxt->b == 0x0f) { ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; /* 0F_38 opcode map */ if (ctxt->b == 0x38) { ctxt->opcode_len = 3; ctxt->b = insn_fetch(u8, ctxt); opcode = opcode_map_0f_38[ctxt->b]; } } ctxt->d = opcode.flags; if (ctxt->d & ModRM) ctxt->modrm = insn_fetch(u8, ctxt); /* vex-prefix instructions are not implemented */ if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) { ctxt->d = NotImpl; } while (ctxt->d & GroupMask) { switch (ctxt->d & GroupMask) { case Group: goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: goffset = (ctxt->modrm >> 3) & 7; if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: if (ctxt->rep_prefix && op_prefix) return EMULATION_FAILED; simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; case Escape: if (ctxt->modrm > 0xbf) opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; else opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; break; case InstrDual: if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.idual->mod3; else opcode = opcode.u.idual->mod012; break; default: return EMULATION_FAILED; } ctxt->d &= ~(u64)GroupMask; ctxt->d |= opcode.flags; } /* Unrecognised? */ if (ctxt->d == 0) return EMULATION_FAILED; ctxt->execute = opcode.u.execute; if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch| No16))) { /* * These are copied unconditionally here, and checked unconditionally * in x86_emulate_insn. */ ctxt->check_perm = opcode.check_perm; ctxt->intercept = opcode.intercept; if (ctxt->d & NotImpl) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64) { if (ctxt->op_bytes == 4 && (ctxt->d & Stack)) ctxt->op_bytes = 8; else if (ctxt->d & NearBranch) ctxt->op_bytes = 8; } if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; else ctxt->op_bytes = 4; } if ((ctxt->d & No16) && ctxt->op_bytes == 2) ctxt->op_bytes = 4; if (ctxt->d & Sse) ctxt->op_bytes = 16; else if (ctxt->d & Mmx) ctxt->op_bytes = 8; } /* ModRM and SIB bytes. */ if (ctxt->d & ModRM) { rc = decode_modrm(ctxt, &ctxt->memop); if (!has_seg_override) { has_seg_override = true; ctxt->seg_override = ctxt->modrm_seg; } } else if (ctxt->d & MemAbs) rc = decode_abs(ctxt, &ctxt->memop); if (rc != X86EMUL_CONTINUE) goto done; if (!has_seg_override) ctxt->seg_override = VCPU_SREG_DS; ctxt->memop.addr.mem.seg = ctxt->seg_override; /* * Decode and fetch the source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* * Decode and fetch the second source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* Decode and fetch the destination operand: register or memory. 
*/ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); if (ctxt->rip_relative) ctxt->memopp->addr.mem.ea = address_mask(ctxt, ctxt->memopp->addr.mem.ea + ctxt->_eip); done: return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; } bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) { return ctxt->d & PageTable; } static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) { /* The second termination condition only applies for REPE * and REPNE. Test if the repeat string operation prefix is * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the * corresponding termination condition according to: * - if REPE/REPZ and ZF = 0 then done * - if REPNE/REPNZ and ZF = 1 then done */ if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || (ctxt->b == 0xae) || (ctxt->b == 0xaf)) && (((ctxt->rep_prefix == REPE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == 0)) || ((ctxt->rep_prefix == REPNE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)))) return true; return false; } static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) { bool fault = false; ctxt->ops->get_fpu(ctxt); asm volatile("1: fwait \n\t" "2: \n\t" ".pushsection .fixup,\"ax\" \n\t" "3: \n\t" "movb $1, %[fault] \n\t" "jmp 2b \n\t" ".popsection \n\t" _ASM_EXTABLE(1b, 3b) : [fault]"+qm"(fault)); ctxt->ops->put_fpu(ctxt); if (unlikely(fault)) return emulate_exception(ctxt, MF_VECTOR, 0, false); return X86EMUL_CONTINUE; } static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { if (op->type == OP_MM) read_mmx_reg(ctxt, &op->mm_val, op->addr.mm); } static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) { ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), [fastop]"+S"(fop) : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); if (!fop) /* exception is returned in fop variable */ return emulate_de(ctxt); return X86EMUL_CONTINUE; } void init_decode_cache(struct x86_emulate_ctxt *ctxt) { memset(&ctxt->rip_relative, 0, (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); ctxt->io_read.pos = 0; ctxt->io_read.end = 0; ctxt->mem_read.end = 0; } int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; ctxt->mem_read.pos = 0; /* LOCK prefix is allowed only with some instructions */ if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { rc = emulate_ud(ctxt); goto done; } if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || (ctxt->d & Undefined)) { rc = emulate_ud(ctxt); goto done; } if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { rc = emulate_nm(ctxt); goto done; } if (ctxt->d & Mmx) { rc = flush_pending_x87_faults(ctxt); if (rc != X86EMUL_CONTINUE) goto done; /* * Now that we know the fpu is exception safe, we can fetch * operands from it. 
*/ fetch_possible_mmx_operand(ctxt, &ctxt->src); fetch_possible_mmx_operand(ctxt, &ctxt->src2); if (!(ctxt->d & Mov)) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } /* Instruction can only be executed in protected mode */ if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { rc = emulate_ud(ctxt); goto done; } /* Privileged instruction can be executed only in CPL=0 */ if ((ctxt->d & Priv) && ops->cpl(ctxt)) { if (ctxt->d & PrivUD) rc = emulate_ud(ctxt); else rc = emulate_gp(ctxt, 0); goto done; } /* Do instruction specific permission checks */ if (ctxt->d & CheckPerm) { rc = ctxt->check_perm(ctxt); if (rc != X86EMUL_CONTINUE) goto done; } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) { /* All REP prefixes have the same first termination condition */ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { ctxt->eip = ctxt->_eip; ctxt->eflags &= ~EFLG_RF; goto done; } } } if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { rc = segmented_read(ctxt, ctxt->src.addr.mem, ctxt->src.valptr, ctxt->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; ctxt->src.orig_val64 = ctxt->src.val64; } if (ctxt->src2.type == OP_MEM) { rc = segmented_read(ctxt, ctxt->src2.addr.mem, &ctxt->src2.val, ctxt->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; } if ((ctxt->d & DstMask) == ImplicitOps) goto special_insn; if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { /* optimisation - avoid slow emulated read if Mov */ rc = segmented_read(ctxt, ctxt->dst.addr.mem, &ctxt->dst.val, ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) goto done; } ctxt->dst.orig_val = ctxt->dst.val; special_insn: if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) ctxt->eflags |= EFLG_RF; else ctxt->eflags &= ~EFLG_RF; if (ctxt->execute) { if (ctxt->d & Fastop) { void (*fop)(struct fastop *) = (void *)ctxt->execute; rc = fastop(ctxt, fop); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } if (ctxt->opcode_len == 2) goto twobyte_insn; else if (ctxt->opcode_len == 3) goto threebyte_insn; switch (ctxt->b) { case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) goto cannot_emulate; ctxt->dst.val = (s32) ctxt->src.val; break; case 0x70 ... 0x7f: /* jcc (short) */ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ ctxt->dst.val = ctxt->src.addr.mem.ea; break; case 0x90 ... 
0x97: /* nop / xchg reg, rax */ if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) ctxt->dst.type = OP_NONE; else rc = em_xchg(ctxt); break; case 0x98: /* cbw/cwde/cdqe */ switch (ctxt->op_bytes) { case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; case 0xcc: /* int3 */ rc = emulate_int(ctxt, 3); break; case 0xcd: /* int n */ rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: /* into */ if (ctxt->eflags & EFLG_OF) rc = emulate_int(ctxt, 4); break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xf4: /* hlt */ ctxt->ops->halt(ctxt); break; case 0xf5: /* cmc */ /* complement carry flag from eflags reg */ ctxt->eflags ^= EFLG_CF; break; case 0xf8: /* clc */ ctxt->eflags &= ~EFLG_CF; break; case 0xf9: /* stc */ ctxt->eflags |= EFLG_CF; break; case 0xfc: /* cld */ ctxt->eflags &= ~EFLG_DF; break; case 0xfd: /* std */ ctxt->eflags |= EFLG_DF; break; default: goto cannot_emulate; } if (rc != X86EMUL_CONTINUE) goto done; writeback: if (ctxt->d & SrcWrite) { BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); rc = writeback(ctxt, &ctxt->src); if (rc != X86EMUL_CONTINUE) goto done; } if (!(ctxt->d & NoWrite)) { rc = writeback(ctxt, &ctxt->dst); if (rc != X86EMUL_CONTINUE) goto done; } /* * restore dst type in case the decoding will be reused * (happens for string instruction ) */ ctxt->dst.type = saved_dst_type; if ((ctxt->d & SrcMask) == SrcSI) string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); if ((ctxt->d & DstMask) == DstDI) string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); if (ctxt->rep_prefix && (ctxt->d & String)) { unsigned int count; struct read_cache *r = &ctxt->io_read; if ((ctxt->d & SrcMask) == SrcSI) count = ctxt->src.count; else count = ctxt->dst.count; register_address_increment(ctxt, VCPU_REGS_RCX, -count); if (!string_insn_completed(ctxt)) { /* * Re-enter guest when pio read ahead buffer is empty * or, if it is not used, after each 1024 iteration. */ if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && (r->end == 0 || r->end != r->pos)) { /* * Reset read cache. Usually happens before * decode, but since instruction is restarted * we have to do it here. */ ctxt->mem_read.end = 0; writeback_registers(ctxt); return EMULATION_RESTART; } goto done; /* skip rip writeback */ } ctxt->eflags &= ~EFLG_RF; } ctxt->eip = ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) { WARN_ON(ctxt->exception.vector > 0x1f); ctxt->have_exception = true; } if (rc == X86EMUL_INTERCEPTED) return EMULATION_INTERCEPTED; if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; twobyte_insn: switch (ctxt->b) { case 0x09: /* wbinvd */ (ctxt->ops->wbinvd)(ctxt); break; case 0x08: /* invd */ case 0x0d: /* GrpP (prefetch) */ case 0x18: /* Grp16 (prefetch/nop) */ case 0x1f: /* nop */ break; case 0x20: /* mov cr, reg */ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: /* mov from dr to reg */ ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); break; case 0x40 ... 0x4f: /* cmov */ if (test_cc(ctxt->b, ctxt->eflags)) ctxt->dst.val = ctxt->src.val; else if (ctxt->mode != X86EMUL_MODE_PROT64 || ctxt->op_bytes != 4) ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x80 ... 
0x8f: /* jnz rel, etc*/ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; case 0xb6 ... 0xb7: /* movzx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val : (u16) ctxt->src.val; break; case 0xbe ... 0xbf: /* movsx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : (s16) ctxt->src.val; break; default: goto cannot_emulate; } threebyte_insn: if (rc != X86EMUL_CONTINUE) goto done; goto writeback; cannot_emulate: return EMULATION_FAILED; } void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) { invalidate_registers(ctxt); } void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) { writeback_registers(ctxt); }
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1450_0
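/*
 * Editor's note -- a hedged, user-space sketch, not part of the CrossVul
 * sample above. It models only the legacy-prefix scan performed near the
 * top of x86_decode_insn(): the prefix byte values and the rule that any
 * legacy prefix appearing after a REX prefix nullifies the REX are taken
 * from the sample; the struct, helper name, and test bytes below are
 * hypothetical scaffolding. Compile with: cc -o prefixes prefixes.c
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct prefix_state {
	int op_size_override;	/* 0x66 seen */
	int ad_size_override;	/* 0x67 seen */
	int lock;		/* 0xf0 seen */
	uint8_t rep;		/* 0xf2/0xf3, or 0 if none */
	uint8_t rex;		/* 0x40..0x4f (64-bit mode only), or 0 */
};

/* Return the offset of the first non-prefix byte (the opcode). */
static size_t scan_prefixes(const uint8_t *insn, size_t len, int mode64,
			    struct prefix_state *st)
{
	size_t i;

	for (i = 0; i < len; i++) {
		uint8_t b = insn[i];

		switch (b) {
		case 0x66:
			st->op_size_override = 1;
			break;
		case 0x67:
			st->ad_size_override = 1;
			break;
		case 0x26: case 0x2e: case 0x36: case 0x3e: /* seg overrides */
		case 0x64: case 0x65:
			break;
		case 0xf0:
			st->lock = 1;
			break;
		case 0xf2: case 0xf3:
			st->rep = b;
			break;
		default:
			if (mode64 && b >= 0x40 && b <= 0x4f) {
				st->rex = b;
				continue;	/* skip the REX reset below */
			}
			return i;		/* opcode byte reached */
		}
		/* Any legacy prefix after a REX prefix nullifies its effect. */
		st->rex = 0;
	}
	return i;
}

int main(void)
{
	/* REX.W first, then 0x66 and 0xf3: the later prefixes cancel REX. */
	const uint8_t bytes[] = { 0x48, 0x66, 0xf3, 0xa5 };
	struct prefix_state st = { 0 };
	size_t off = scan_prefixes(bytes, sizeof(bytes), 1, &st);

	printf("opcode at offset %zu, rep=%#x, rex=%#x, opsize=%d\n",
	       off, (unsigned)st.rep, (unsigned)st.rex, st.op_size_override);
	return 0;
}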
crossvul-cpp_data_bad_829_0
/* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/cdev.h> #include <linux/anon_inodes.h> #include <linux/slab.h> #include <linux/sched/mm.h> #include <linux/uaccess.h> #include <rdma/ib.h> #include <rdma/uverbs_std_types.h> #include "uverbs.h" #include "core_priv.h" #include "rdma_core.h" MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace verbs access"); MODULE_LICENSE("Dual BSD/GPL"); enum { IB_UVERBS_MAJOR = 231, IB_UVERBS_BASE_MINOR = 192, IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS, IB_UVERBS_NUM_FIXED_MINOR = 32, IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR, }; #define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR) static dev_t dynamic_uverbs_dev; static struct class *uverbs_class; static DEFINE_IDA(uverbs_ida); static void ib_uverbs_add_one(struct ib_device *device); static void ib_uverbs_remove_one(struct ib_device *device, void *client_data); /* * Must be called with the ufile->device->disassociate_srcu held, and the lock * must be held until use of the ucontext is finished. */ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile) { /* * We do not hold the hw_destroy_rwsem lock for this flow, instead * srcu is used. It does not matter if someone races this with * get_context, we get NULL or valid ucontext. 
*/ struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext); if (!srcu_dereference(ufile->device->ib_dev, &ufile->device->disassociate_srcu)) return ERR_PTR(-EIO); if (!ucontext) return ERR_PTR(-EINVAL); return ucontext; } EXPORT_SYMBOL(ib_uverbs_get_ucontext_file); int uverbs_dealloc_mw(struct ib_mw *mw) { struct ib_pd *pd = mw->pd; int ret; ret = mw->device->ops.dealloc_mw(mw); if (!ret) atomic_dec(&pd->usecnt); return ret; } static void ib_uverbs_release_dev(struct device *device) { struct ib_uverbs_device *dev = container_of(device, struct ib_uverbs_device, dev); uverbs_destroy_api(dev->uapi); cleanup_srcu_struct(&dev->disassociate_srcu); kfree(dev); } static void ib_uverbs_release_async_event_file(struct kref *ref) { struct ib_uverbs_async_event_file *file = container_of(ref, struct ib_uverbs_async_event_file, ref); kfree(file); } void ib_uverbs_release_ucq(struct ib_uverbs_file *file, struct ib_uverbs_completion_event_file *ev_file, struct ib_ucq_object *uobj) { struct ib_uverbs_event *evt, *tmp; if (ev_file) { spin_lock_irq(&ev_file->ev_queue.lock); list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) { list_del(&evt->list); kfree(evt); } spin_unlock_irq(&ev_file->ev_queue.lock); uverbs_uobject_put(&ev_file->uobj); } spin_lock_irq(&file->async_file->ev_queue.lock); list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) { list_del(&evt->list); kfree(evt); } spin_unlock_irq(&file->async_file->ev_queue.lock); } void ib_uverbs_release_uevent(struct ib_uverbs_file *file, struct ib_uevent_object *uobj) { struct ib_uverbs_event *evt, *tmp; spin_lock_irq(&file->async_file->ev_queue.lock); list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { list_del(&evt->list); kfree(evt); } spin_unlock_irq(&file->async_file->ev_queue.lock); } void ib_uverbs_detach_umcast(struct ib_qp *qp, struct ib_uqp_object *uobj) { struct ib_uverbs_mcast_entry *mcast, *tmp; list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) { ib_detach_mcast(qp, &mcast->gid, mcast->lid); list_del(&mcast->list); kfree(mcast); } } static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev) { complete(&dev->comp); } void ib_uverbs_release_file(struct kref *ref) { struct ib_uverbs_file *file = container_of(ref, struct ib_uverbs_file, ref); struct ib_device *ib_dev; int srcu_key; release_ufile_idr_uobject(file); srcu_key = srcu_read_lock(&file->device->disassociate_srcu); ib_dev = srcu_dereference(file->device->ib_dev, &file->device->disassociate_srcu); if (ib_dev && !ib_dev->ops.disassociate_ucontext) module_put(ib_dev->owner); srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); if (atomic_dec_and_test(&file->device->refcount)) ib_uverbs_comp_dev(file->device); if (file->async_file) kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file); put_device(&file->device->dev); kfree(file); } static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue, struct ib_uverbs_file *uverbs_file, struct file *filp, char __user *buf, size_t count, loff_t *pos, size_t eventsz) { struct ib_uverbs_event *event; int ret = 0; spin_lock_irq(&ev_queue->lock); while (list_empty(&ev_queue->event_list)) { spin_unlock_irq(&ev_queue->lock); if (filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(ev_queue->poll_wait, (!list_empty(&ev_queue->event_list) || /* The barriers built into wait_event_interruptible() * and wake_up() guarentee this will see the null set * without using RCU */ !uverbs_file->device->ib_dev))) return -ERESTARTSYS; /* If device was 
disassociated and no event exists set an error */ if (list_empty(&ev_queue->event_list) && !uverbs_file->device->ib_dev) return -EIO; spin_lock_irq(&ev_queue->lock); } event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list); if (eventsz > count) { ret = -EINVAL; event = NULL; } else { list_del(ev_queue->event_list.next); if (event->counter) { ++(*event->counter); list_del(&event->obj_list); } } spin_unlock_irq(&ev_queue->lock); if (event) { if (copy_to_user(buf, event, eventsz)) ret = -EFAULT; else ret = eventsz; } kfree(event); return ret; } static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_async_event_file *file = filp->private_data; return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp, buf, count, pos, sizeof(struct ib_uverbs_async_event_desc)); } static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_completion_event_file *comp_ev_file = filp->private_data; return ib_uverbs_event_read(&comp_ev_file->ev_queue, comp_ev_file->uobj.ufile, filp, buf, count, pos, sizeof(struct ib_uverbs_comp_event_desc)); } static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue, struct file *filp, struct poll_table_struct *wait) { __poll_t pollflags = 0; poll_wait(filp, &ev_queue->poll_wait, wait); spin_lock_irq(&ev_queue->lock); if (!list_empty(&ev_queue->event_list)) pollflags = EPOLLIN | EPOLLRDNORM; spin_unlock_irq(&ev_queue->lock); return pollflags; } static __poll_t ib_uverbs_async_event_poll(struct file *filp, struct poll_table_struct *wait) { return ib_uverbs_event_poll(filp->private_data, filp, wait); } static __poll_t ib_uverbs_comp_event_poll(struct file *filp, struct poll_table_struct *wait) { struct ib_uverbs_completion_event_file *comp_ev_file = filp->private_data; return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait); } static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on) { struct ib_uverbs_event_queue *ev_queue = filp->private_data; return fasync_helper(fd, filp, on, &ev_queue->async_queue); } static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on) { struct ib_uverbs_completion_event_file *comp_ev_file = filp->private_data; return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue); } static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp) { struct ib_uverbs_async_event_file *file = filp->private_data; struct ib_uverbs_file *uverbs_file = file->uverbs_file; struct ib_uverbs_event *entry, *tmp; int closed_already = 0; mutex_lock(&uverbs_file->device->lists_mutex); spin_lock_irq(&file->ev_queue.lock); closed_already = file->ev_queue.is_closed; file->ev_queue.is_closed = 1; list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) { if (entry->counter) list_del(&entry->obj_list); kfree(entry); } spin_unlock_irq(&file->ev_queue.lock); if (!closed_already) { list_del(&file->list); ib_unregister_event_handler(&uverbs_file->event_handler); } mutex_unlock(&uverbs_file->device->lists_mutex); kref_put(&uverbs_file->ref, ib_uverbs_release_file); kref_put(&file->ref, ib_uverbs_release_async_event_file); return 0; } static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp) { struct ib_uobject *uobj = filp->private_data; struct ib_uverbs_completion_event_file *file = container_of( uobj, struct ib_uverbs_completion_event_file, uobj); struct ib_uverbs_event *entry, *tmp; 
spin_lock_irq(&file->ev_queue.lock); list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) { if (entry->counter) list_del(&entry->obj_list); kfree(entry); } file->ev_queue.is_closed = 1; spin_unlock_irq(&file->ev_queue.lock); uverbs_close_fd(filp); return 0; } const struct file_operations uverbs_event_fops = { .owner = THIS_MODULE, .read = ib_uverbs_comp_event_read, .poll = ib_uverbs_comp_event_poll, .release = ib_uverbs_comp_event_close, .fasync = ib_uverbs_comp_event_fasync, .llseek = no_llseek, }; static const struct file_operations uverbs_async_event_fops = { .owner = THIS_MODULE, .read = ib_uverbs_async_event_read, .poll = ib_uverbs_async_event_poll, .release = ib_uverbs_async_event_close, .fasync = ib_uverbs_async_event_fasync, .llseek = no_llseek, }; void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) { struct ib_uverbs_event_queue *ev_queue = cq_context; struct ib_ucq_object *uobj; struct ib_uverbs_event *entry; unsigned long flags; if (!ev_queue) return; spin_lock_irqsave(&ev_queue->lock, flags); if (ev_queue->is_closed) { spin_unlock_irqrestore(&ev_queue->lock, flags); return; } entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { spin_unlock_irqrestore(&ev_queue->lock, flags); return; } uobj = container_of(cq->uobject, struct ib_ucq_object, uobject); entry->desc.comp.cq_handle = cq->uobject->user_handle; entry->counter = &uobj->comp_events_reported; list_add_tail(&entry->list, &ev_queue->event_list); list_add_tail(&entry->obj_list, &uobj->comp_list); spin_unlock_irqrestore(&ev_queue->lock, flags); wake_up_interruptible(&ev_queue->poll_wait); kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN); } static void ib_uverbs_async_handler(struct ib_uverbs_file *file, __u64 element, __u64 event, struct list_head *obj_list, u32 *counter) { struct ib_uverbs_event *entry; unsigned long flags; spin_lock_irqsave(&file->async_file->ev_queue.lock, flags); if (file->async_file->ev_queue.is_closed) { spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags); return; } entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags); return; } entry->desc.async.element = element; entry->desc.async.event_type = event; entry->desc.async.reserved = 0; entry->counter = counter; list_add_tail(&entry->list, &file->async_file->ev_queue.event_list); if (obj_list) list_add_tail(&entry->obj_list, obj_list); spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags); wake_up_interruptible(&file->async_file->ev_queue.poll_wait); kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN); } void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr) { struct ib_ucq_object *uobj = container_of(event->element.cq->uobject, struct ib_ucq_object, uobject); ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle, event->event, &uobj->async_list, &uobj->async_events_reported); } void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) { struct ib_uevent_object *uobj; /* for XRC target qp's, check that qp is live */ if (!event->element.qp->uobject) return; uobj = container_of(event->element.qp->uobject, struct ib_uevent_object, uobject); ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, event->event, &uobj->event_list, &uobj->events_reported); } void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr) { struct ib_uevent_object *uobj = container_of(event->element.wq->uobject, struct ib_uevent_object, uobject); 
ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, event->event, &uobj->event_list, &uobj->events_reported); } void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) { struct ib_uevent_object *uobj; uobj = container_of(event->element.srq->uobject, struct ib_uevent_object, uobject); ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, event->event, &uobj->event_list, &uobj->events_reported); } void ib_uverbs_event_handler(struct ib_event_handler *handler, struct ib_event *event) { struct ib_uverbs_file *file = container_of(handler, struct ib_uverbs_file, event_handler); ib_uverbs_async_handler(file, event->element.port_num, event->event, NULL, NULL); } void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file) { kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file); file->async_file = NULL; } void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue) { spin_lock_init(&ev_queue->lock); INIT_LIST_HEAD(&ev_queue->event_list); init_waitqueue_head(&ev_queue->poll_wait); ev_queue->is_closed = 0; ev_queue->async_queue = NULL; } struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file, struct ib_device *ib_dev) { struct ib_uverbs_async_event_file *ev_file; struct file *filp; ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL); if (!ev_file) return ERR_PTR(-ENOMEM); ib_uverbs_init_event_queue(&ev_file->ev_queue); ev_file->uverbs_file = uverbs_file; kref_get(&ev_file->uverbs_file->ref); kref_init(&ev_file->ref); filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops, ev_file, O_RDONLY); if (IS_ERR(filp)) goto err_put_refs; mutex_lock(&uverbs_file->device->lists_mutex); list_add_tail(&ev_file->list, &uverbs_file->device->uverbs_events_file_list); mutex_unlock(&uverbs_file->device->lists_mutex); WARN_ON(uverbs_file->async_file); uverbs_file->async_file = ev_file; kref_get(&uverbs_file->async_file->ref); INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler, ib_dev, ib_uverbs_event_handler); ib_register_event_handler(&uverbs_file->event_handler); /* At that point async file stuff was fully set */ return filp; err_put_refs: kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file); kref_put(&ev_file->ref, ib_uverbs_release_async_event_file); return filp; } static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count, const struct uverbs_api_write_method *method_elm) { if (method_elm->is_ex) { count -= sizeof(*hdr) + sizeof(*ex_hdr); if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count) return -EINVAL; if (hdr->in_words * 8 < method_elm->req_size) return -ENOSPC; if (ex_hdr->cmd_hdr_reserved) return -EINVAL; if (ex_hdr->response) { if (!hdr->out_words && !ex_hdr->provider_out_words) return -EINVAL; if (hdr->out_words * 8 < method_elm->resp_size) return -ENOSPC; if (!access_ok(u64_to_user_ptr(ex_hdr->response), (hdr->out_words + ex_hdr->provider_out_words) * 8)) return -EFAULT; } else { if (hdr->out_words || ex_hdr->provider_out_words) return -EINVAL; } return 0; } /* not extended command */ if (hdr->in_words * 4 != count) return -EINVAL; if (count < method_elm->req_size + sizeof(hdr)) { /* * rdma-core v18 and v19 have a bug where they send DESTROY_CQ * with a 16 byte write instead of 24. Old kernels didn't * check the size so they allowed this. Now that the size is * checked provide a compatibility work around to not break * those userspaces. 
*/ if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ && count == 16) { hdr->in_words = 6; return 0; } return -ENOSPC; } if (hdr->out_words * 4 < method_elm->resp_size) return -ENOSPC; return 0; } static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_file *file = filp->private_data; const struct uverbs_api_write_method *method_elm; struct uverbs_api *uapi = file->device->uapi; struct ib_uverbs_ex_cmd_hdr ex_hdr; struct ib_uverbs_cmd_hdr hdr; struct uverbs_attr_bundle bundle; int srcu_key; ssize_t ret; if (!ib_safe_file_access(filp)) { pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", task_tgid_vnr(current), current->comm); return -EACCES; } if (count < sizeof(hdr)) return -EINVAL; if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; method_elm = uapi_get_method(uapi, hdr.command); if (IS_ERR(method_elm)) return PTR_ERR(method_elm); if (method_elm->is_ex) { if (count < (sizeof(hdr) + sizeof(ex_hdr))) return -EINVAL; if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) return -EFAULT; } ret = verify_hdr(&hdr, &ex_hdr, count, method_elm); if (ret) return ret; srcu_key = srcu_read_lock(&file->device->disassociate_srcu); buf += sizeof(hdr); memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); bundle.ufile = file; bundle.context = NULL; /* only valid if bundle has uobject */ if (!method_elm->is_ex) { size_t in_len = hdr.in_words * 4 - sizeof(hdr); size_t out_len = hdr.out_words * 4; u64 response = 0; if (method_elm->has_udata) { bundle.driver_udata.inlen = in_len - method_elm->req_size; in_len = method_elm->req_size; if (bundle.driver_udata.inlen) bundle.driver_udata.inbuf = buf + in_len; else bundle.driver_udata.inbuf = NULL; } else { memset(&bundle.driver_udata, 0, sizeof(bundle.driver_udata)); } if (method_elm->has_resp) { /* * The macros check that if has_resp is set * then the command request structure starts * with a '__aligned u64 response' member. */ ret = get_user(response, (const u64 *)buf); if (ret) goto out_unlock; if (method_elm->has_udata) { bundle.driver_udata.outlen = out_len - method_elm->resp_size; out_len = method_elm->resp_size; if (bundle.driver_udata.outlen) bundle.driver_udata.outbuf = u64_to_user_ptr(response + out_len); else bundle.driver_udata.outbuf = NULL; } } else { bundle.driver_udata.outlen = 0; bundle.driver_udata.outbuf = NULL; } ib_uverbs_init_udata_buf_or_null( &bundle.ucore, buf, u64_to_user_ptr(response), in_len, out_len); } else { buf += sizeof(ex_hdr); ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf, u64_to_user_ptr(ex_hdr.response), hdr.in_words * 8, hdr.out_words * 8); ib_uverbs_init_udata_buf_or_null( &bundle.driver_udata, buf + bundle.ucore.inlen, u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen, ex_hdr.provider_in_words * 8, ex_hdr.provider_out_words * 8); } ret = method_elm->handler(&bundle); out_unlock: srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); return (ret) ? 
: count; } static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) { struct ib_uverbs_file *file = filp->private_data; struct ib_ucontext *ucontext; int ret = 0; int srcu_key; srcu_key = srcu_read_lock(&file->device->disassociate_srcu); ucontext = ib_uverbs_get_ucontext_file(file); if (IS_ERR(ucontext)) { ret = PTR_ERR(ucontext); goto out; } ret = ucontext->device->ops.mmap(ucontext, vma); out: srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); return ret; } /* * Each time we map IO memory into user space this keeps track of the mapping. * When the device is hot-unplugged we 'zap' the mmaps in user space to point * to the zero page and allow the hot unplug to proceed. * * This is necessary for cases like PCI physical hot unplug as the actual BAR * memory may vanish after this and access to it from userspace could MCE. * * RDMA drivers supporting disassociation must have their user space designed * to cope in some way with their IO pages going to the zero page. */ struct rdma_umap_priv { struct vm_area_struct *vma; struct list_head list; }; static const struct vm_operations_struct rdma_umap_ops; static void rdma_umap_priv_init(struct rdma_umap_priv *priv, struct vm_area_struct *vma) { struct ib_uverbs_file *ufile = vma->vm_file->private_data; priv->vma = vma; vma->vm_private_data = priv; vma->vm_ops = &rdma_umap_ops; mutex_lock(&ufile->umap_lock); list_add(&priv->list, &ufile->umaps); mutex_unlock(&ufile->umap_lock); } /* * The VMA has been dup'd, initialize the vm_private_data with a new tracking * struct */ static void rdma_umap_open(struct vm_area_struct *vma) { struct ib_uverbs_file *ufile = vma->vm_file->private_data; struct rdma_umap_priv *opriv = vma->vm_private_data; struct rdma_umap_priv *priv; if (!opriv) return; /* We are racing with disassociation */ if (!down_read_trylock(&ufile->hw_destroy_rwsem)) goto out_zap; /* * Disassociation already completed, the VMA should already be zapped. */ if (!ufile->ucontext) goto out_unlock; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) goto out_unlock; rdma_umap_priv_init(priv, vma); up_read(&ufile->hw_destroy_rwsem); return; out_unlock: up_read(&ufile->hw_destroy_rwsem); out_zap: /* * We can't allow the VMA to be created with the actual IO pages, that * would break our API contract, and it can't be stopped at this * point, so zap it. */ vma->vm_private_data = NULL; zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); } static void rdma_umap_close(struct vm_area_struct *vma) { struct ib_uverbs_file *ufile = vma->vm_file->private_data; struct rdma_umap_priv *priv = vma->vm_private_data; if (!priv) return; /* * The vma holds a reference on the struct file that created it, which * in turn means that the ib_uverbs_file is guaranteed to exist at * this point. 
*/ mutex_lock(&ufile->umap_lock); list_del(&priv->list); mutex_unlock(&ufile->umap_lock); kfree(priv); } static const struct vm_operations_struct rdma_umap_ops = { .open = rdma_umap_open, .close = rdma_umap_close, }; static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext, struct vm_area_struct *vma, unsigned long size) { struct ib_uverbs_file *ufile = ucontext->ufile; struct rdma_umap_priv *priv; if (vma->vm_end - vma->vm_start != size) return ERR_PTR(-EINVAL); /* Driver is using this wrong, must be called by ib_uverbs_mmap */ if (WARN_ON(!vma->vm_file || vma->vm_file->private_data != ufile)) return ERR_PTR(-EINVAL); lockdep_assert_held(&ufile->device->disassociate_srcu); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return ERR_PTR(-ENOMEM); return priv; } /* * Map IO memory into a process. This is to be called by drivers as part of * their mmap() functions if they wish to send something like PCI-E BAR memory * to userspace. */ int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, unsigned long pfn, unsigned long size, pgprot_t prot) { struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size); if (IS_ERR(priv)) return PTR_ERR(priv); vma->vm_page_prot = prot; if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) { kfree(priv); return -EAGAIN; } rdma_umap_priv_init(priv, vma); return 0; } EXPORT_SYMBOL(rdma_user_mmap_io); /* * The page case is here for a slightly different reason, the driver expects * to be able to free the page it is sharing to user space when it destroys * its ucontext, which means we need to zap the user space references. * * We could handle this differently by providing an API to allocate a shared * page and then only freeing the shared page when the last ufile is * destroyed. */ int rdma_user_mmap_page(struct ib_ucontext *ucontext, struct vm_area_struct *vma, struct page *page, unsigned long size) { struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size); if (IS_ERR(priv)) return PTR_ERR(priv); if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size, vma->vm_page_prot)) { kfree(priv); return -EAGAIN; } rdma_umap_priv_init(priv, vma); return 0; } EXPORT_SYMBOL(rdma_user_mmap_page); void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) { struct rdma_umap_priv *priv, *next_priv; lockdep_assert_held(&ufile->hw_destroy_rwsem); while (1) { struct mm_struct *mm = NULL; /* Get an arbitrary mm pointer that hasn't been cleaned yet */ mutex_lock(&ufile->umap_lock); while (!list_empty(&ufile->umaps)) { int ret; priv = list_first_entry(&ufile->umaps, struct rdma_umap_priv, list); mm = priv->vma->vm_mm; ret = mmget_not_zero(mm); if (!ret) { list_del_init(&priv->list); mm = NULL; continue; } break; } mutex_unlock(&ufile->umap_lock); if (!mm) return; /* * The umap_lock is nested under mmap_sem since it used within * the vma_ops callbacks, so we have to clean the list one mm * at a time to get the lock ordering right. Typically there * will only be one mm, so no big deal. 
*/ down_write(&mm->mmap_sem); mutex_lock(&ufile->umap_lock); list_for_each_entry_safe (priv, next_priv, &ufile->umaps, list) { struct vm_area_struct *vma = priv->vma; if (vma->vm_mm != mm) continue; list_del_init(&priv->list); zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE); } mutex_unlock(&ufile->umap_lock); up_write(&mm->mmap_sem); mmput(mm); } } /* * ib_uverbs_open() does not need the BKL: * * - the ib_uverbs_device structures are properly reference counted and * everything else is purely local to the file being created, so * races against other open calls are not a problem; * - there is no ioctl method to race against; * - the open method will either immediately run -ENXIO, or all * required initialization will be done. */ static int ib_uverbs_open(struct inode *inode, struct file *filp) { struct ib_uverbs_device *dev; struct ib_uverbs_file *file; struct ib_device *ib_dev; int ret; int module_dependent; int srcu_key; dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); if (!atomic_inc_not_zero(&dev->refcount)) return -ENXIO; get_device(&dev->dev); srcu_key = srcu_read_lock(&dev->disassociate_srcu); mutex_lock(&dev->lists_mutex); ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); if (!ib_dev) { ret = -EIO; goto err; } /* In case IB device supports disassociate ucontext, there is no hard * dependency between uverbs device and its low level device. */ module_dependent = !(ib_dev->ops.disassociate_ucontext); if (module_dependent) { if (!try_module_get(ib_dev->owner)) { ret = -ENODEV; goto err; } } file = kzalloc(sizeof(*file), GFP_KERNEL); if (!file) { ret = -ENOMEM; if (module_dependent) goto err_module; goto err; } file->device = dev; kref_init(&file->ref); mutex_init(&file->ucontext_lock); spin_lock_init(&file->uobjects_lock); INIT_LIST_HEAD(&file->uobjects); init_rwsem(&file->hw_destroy_rwsem); mutex_init(&file->umap_lock); INIT_LIST_HEAD(&file->umaps); filp->private_data = file; list_add_tail(&file->list, &dev->uverbs_file_list); mutex_unlock(&dev->lists_mutex); srcu_read_unlock(&dev->disassociate_srcu, srcu_key); setup_ufile_idr_uobject(file); return nonseekable_open(inode, filp); err_module: module_put(ib_dev->owner); err: mutex_unlock(&dev->lists_mutex); srcu_read_unlock(&dev->disassociate_srcu, srcu_key); if (atomic_dec_and_test(&dev->refcount)) ib_uverbs_comp_dev(dev); put_device(&dev->dev); return ret; } static int ib_uverbs_close(struct inode *inode, struct file *filp) { struct ib_uverbs_file *file = filp->private_data; uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE); mutex_lock(&file->device->lists_mutex); list_del_init(&file->list); mutex_unlock(&file->device->lists_mutex); kref_put(&file->ref, ib_uverbs_release_file); return 0; } static const struct file_operations uverbs_fops = { .owner = THIS_MODULE, .write = ib_uverbs_write, .open = ib_uverbs_open, .release = ib_uverbs_close, .llseek = no_llseek, .unlocked_ioctl = ib_uverbs_ioctl, .compat_ioctl = ib_uverbs_ioctl, }; static const struct file_operations uverbs_mmap_fops = { .owner = THIS_MODULE, .write = ib_uverbs_write, .mmap = ib_uverbs_mmap, .open = ib_uverbs_open, .release = ib_uverbs_close, .llseek = no_llseek, .unlocked_ioctl = ib_uverbs_ioctl, .compat_ioctl = ib_uverbs_ioctl, }; static struct ib_client uverbs_client = { .name = "uverbs", .no_kverbs_req = true, .add = ib_uverbs_add_one, .remove = ib_uverbs_remove_one }; static ssize_t ibdev_show(struct device *device, struct device_attribute *attr, char *buf) { struct 
ib_uverbs_device *dev = container_of(device, struct ib_uverbs_device, dev); int ret = -ENODEV; int srcu_key; struct ib_device *ib_dev; srcu_key = srcu_read_lock(&dev->disassociate_srcu); ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); if (ib_dev) ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev)); srcu_read_unlock(&dev->disassociate_srcu, srcu_key); return ret; } static DEVICE_ATTR_RO(ibdev); static ssize_t abi_version_show(struct device *device, struct device_attribute *attr, char *buf) { struct ib_uverbs_device *dev = container_of(device, struct ib_uverbs_device, dev); int ret = -ENODEV; int srcu_key; struct ib_device *ib_dev; srcu_key = srcu_read_lock(&dev->disassociate_srcu); ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu); if (ib_dev) ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver); srcu_read_unlock(&dev->disassociate_srcu, srcu_key); return ret; } static DEVICE_ATTR_RO(abi_version); static struct attribute *ib_dev_attrs[] = { &dev_attr_abi_version.attr, &dev_attr_ibdev.attr, NULL, }; static const struct attribute_group dev_attr_group = { .attrs = ib_dev_attrs, }; static CLASS_ATTR_STRING(abi_version, S_IRUGO, __stringify(IB_USER_VERBS_ABI_VERSION)); static int ib_uverbs_create_uapi(struct ib_device *device, struct ib_uverbs_device *uverbs_dev) { struct uverbs_api *uapi; uapi = uverbs_alloc_api(device); if (IS_ERR(uapi)) return PTR_ERR(uapi); uverbs_dev->uapi = uapi; return 0; } static void ib_uverbs_add_one(struct ib_device *device) { int devnum; dev_t base; struct ib_uverbs_device *uverbs_dev; int ret; if (!device->ops.alloc_ucontext) return; uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL); if (!uverbs_dev) return; ret = init_srcu_struct(&uverbs_dev->disassociate_srcu); if (ret) { kfree(uverbs_dev); return; } device_initialize(&uverbs_dev->dev); uverbs_dev->dev.class = uverbs_class; uverbs_dev->dev.parent = device->dev.parent; uverbs_dev->dev.release = ib_uverbs_release_dev; uverbs_dev->groups[0] = &dev_attr_group; uverbs_dev->dev.groups = uverbs_dev->groups; atomic_set(&uverbs_dev->refcount, 1); init_completion(&uverbs_dev->comp); uverbs_dev->xrcd_tree = RB_ROOT; mutex_init(&uverbs_dev->xrcd_tree_mutex); mutex_init(&uverbs_dev->lists_mutex); INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list); INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list); rcu_assign_pointer(uverbs_dev->ib_dev, device); uverbs_dev->num_comp_vectors = device->num_comp_vectors; devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1, GFP_KERNEL); if (devnum < 0) goto err; uverbs_dev->devnum = devnum; if (devnum >= IB_UVERBS_NUM_FIXED_MINOR) base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR; else base = IB_UVERBS_BASE_DEV + devnum; if (ib_uverbs_create_uapi(device, uverbs_dev)) goto err_uapi; uverbs_dev->dev.devt = base; dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum); cdev_init(&uverbs_dev->cdev, device->ops.mmap ? 
&uverbs_mmap_fops : &uverbs_fops); uverbs_dev->cdev.owner = THIS_MODULE; ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev); if (ret) goto err_uapi; ib_set_client_data(device, &uverbs_client, uverbs_dev); return; err_uapi: ida_free(&uverbs_ida, devnum); err: if (atomic_dec_and_test(&uverbs_dev->refcount)) ib_uverbs_comp_dev(uverbs_dev); wait_for_completion(&uverbs_dev->comp); put_device(&uverbs_dev->dev); return; } static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, struct ib_device *ib_dev) { struct ib_uverbs_file *file; struct ib_uverbs_async_event_file *event_file; struct ib_event event; /* Pending running commands to terminate */ uverbs_disassociate_api_pre(uverbs_dev); event.event = IB_EVENT_DEVICE_FATAL; event.element.port_num = 0; event.device = ib_dev; mutex_lock(&uverbs_dev->lists_mutex); while (!list_empty(&uverbs_dev->uverbs_file_list)) { file = list_first_entry(&uverbs_dev->uverbs_file_list, struct ib_uverbs_file, list); list_del_init(&file->list); kref_get(&file->ref); /* We must release the mutex before going ahead and calling * uverbs_cleanup_ufile, as it might end up indirectly calling * uverbs_close, for example due to freeing the resources (e.g * mmput). */ mutex_unlock(&uverbs_dev->lists_mutex); ib_uverbs_event_handler(&file->event_handler, &event); uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE); kref_put(&file->ref, ib_uverbs_release_file); mutex_lock(&uverbs_dev->lists_mutex); } while (!list_empty(&uverbs_dev->uverbs_events_file_list)) { event_file = list_first_entry(&uverbs_dev-> uverbs_events_file_list, struct ib_uverbs_async_event_file, list); spin_lock_irq(&event_file->ev_queue.lock); event_file->ev_queue.is_closed = 1; spin_unlock_irq(&event_file->ev_queue.lock); list_del(&event_file->list); ib_unregister_event_handler( &event_file->uverbs_file->event_handler); event_file->uverbs_file->event_handler.device = NULL; wake_up_interruptible(&event_file->ev_queue.poll_wait); kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN); } mutex_unlock(&uverbs_dev->lists_mutex); uverbs_disassociate_api(uverbs_dev->uapi); } static void ib_uverbs_remove_one(struct ib_device *device, void *client_data) { struct ib_uverbs_device *uverbs_dev = client_data; int wait_clients = 1; if (!uverbs_dev) return; cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev); ida_free(&uverbs_ida, uverbs_dev->devnum); if (device->ops.disassociate_ucontext) { /* We disassociate HW resources and immediately return. * Userspace will see a EIO errno for all future access. * Upon returning, ib_device may be freed internally and is not * valid any more. * uverbs_device is still available until all clients close * their files, then the uverbs device ref count will be zero * and its resources will be freed. * Note: At this point no more files can be opened since the * cdev was deleted, however active clients can still issue * commands and close their open files. 
*/ ib_uverbs_free_hw_resources(uverbs_dev, device); wait_clients = 0; } if (atomic_dec_and_test(&uverbs_dev->refcount)) ib_uverbs_comp_dev(uverbs_dev); if (wait_clients) wait_for_completion(&uverbs_dev->comp); put_device(&uverbs_dev->dev); } static char *uverbs_devnode(struct device *dev, umode_t *mode) { if (mode) *mode = 0666; return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); } static int __init ib_uverbs_init(void) { int ret; ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_NUM_FIXED_MINOR, "infiniband_verbs"); if (ret) { pr_err("user_verbs: couldn't register device number\n"); goto out; } ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0, IB_UVERBS_NUM_DYNAMIC_MINOR, "infiniband_verbs"); if (ret) { pr_err("couldn't register dynamic device number\n"); goto out_alloc; } uverbs_class = class_create(THIS_MODULE, "infiniband_verbs"); if (IS_ERR(uverbs_class)) { ret = PTR_ERR(uverbs_class); pr_err("user_verbs: couldn't create class infiniband_verbs\n"); goto out_chrdev; } uverbs_class->devnode = uverbs_devnode; ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); if (ret) { pr_err("user_verbs: couldn't create abi_version attribute\n"); goto out_class; } ret = ib_register_client(&uverbs_client); if (ret) { pr_err("user_verbs: couldn't register client\n"); goto out_class; } return 0; out_class: class_destroy(uverbs_class); out_chrdev: unregister_chrdev_region(dynamic_uverbs_dev, IB_UVERBS_NUM_DYNAMIC_MINOR); out_alloc: unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_NUM_FIXED_MINOR); out: return ret; } static void __exit ib_uverbs_cleanup(void) { ib_unregister_client(&uverbs_client); class_destroy(uverbs_class); unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_NUM_FIXED_MINOR); unregister_chrdev_region(dynamic_uverbs_dev, IB_UVERBS_NUM_DYNAMIC_MINOR); } module_init(ib_uverbs_init); module_exit(ib_uverbs_cleanup);
./CrossVul/dataset_final_sorted/CWE-362/c/bad_829_0
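/*
 * Editor's note -- a hedged, user-space analogue of the event-queue
 * pattern in ib_uverbs_comp_handler() and ib_uverbs_event_read() above:
 * producers append to a list under the queue lock and wake the waiter;
 * the reader sleeps until the list is non-empty, then dequeues while
 * holding the lock. The kernel sample uses a spinlock plus
 * wait_event_interruptible(); this sketch swaps in a pthread mutex and
 * condition variable, so every name and type here is illustrative, not
 * the uverbs API. Compile with: cc -o evq evq.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	int payload;
	struct event *next;
};

struct event_queue {
	pthread_mutex_t lock;
	pthread_cond_t nonempty;
	struct event *head, *tail;
};

/* Producer side: roughly the list_add_tail + wake in the comp handler. */
static void queue_post(struct event_queue *q, int payload)
{
	struct event *e = calloc(1, sizeof(*e));

	if (!e)
		abort();
	e->payload = payload;
	pthread_mutex_lock(&q->lock);
	if (q->tail)
		q->tail->next = e;
	else
		q->head = e;
	q->tail = e;
	pthread_mutex_unlock(&q->lock);
	pthread_cond_signal(&q->nonempty);	/* wake_up_interruptible() analogue */
}

/* Consumer side: roughly ib_uverbs_event_read()'s wait-then-dequeue. */
static int queue_read(struct event_queue *q)
{
	struct event *e;
	int v;

	pthread_mutex_lock(&q->lock);
	while (!q->head)			/* wait_event_interruptible() analogue */
		pthread_cond_wait(&q->nonempty, &q->lock);
	e = q->head;
	q->head = e->next;
	if (!q->head)
		q->tail = NULL;
	pthread_mutex_unlock(&q->lock);

	v = e->payload;
	free(e);
	return v;
}

static void *producer(void *arg)
{
	queue_post(arg, 42);
	return NULL;
}

int main(void)
{
	struct event_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.nonempty = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, producer, &q);
	printf("got event %d\n", queue_read(&q));
	pthread_join(t, NULL);
	return 0;
}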
crossvul-cpp_data_good_1819_3
/* * linux/fs/ext4/inode.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) * * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000 */ #include <linux/fs.h> #include <linux/time.h> #include <linux/highuid.h> #include <linux/pagemap.h> #include <linux/dax.h> #include <linux/quotaops.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include <linux/mpage.h> #include <linux/namei.h> #include <linux/uio.h> #include <linux/bio.h> #include <linux/workqueue.h> #include <linux/kernel.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/bitops.h> #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include "truncate.h" #include <trace/events/ext4.h> #define MPAGE_DA_EXTENT_TAIL 0x01 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, struct ext4_inode_info *ei) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u16 csum_lo; __u16 csum_hi = 0; __u32 csum; csum_lo = le16_to_cpu(raw->i_checksum_lo); raw->i_checksum_lo = 0; if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { csum_hi = le16_to_cpu(raw->i_checksum_hi); raw->i_checksum_hi = 0; } csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, EXT4_INODE_SIZE(inode->i_sb)); raw->i_checksum_lo = cpu_to_le16(csum_lo); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) raw->i_checksum_hi = cpu_to_le16(csum_hi); return csum; } static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw, struct ext4_inode_info *ei) { __u32 provided, calculated; if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != cpu_to_le32(EXT4_OS_LINUX) || !ext4_has_metadata_csum(inode->i_sb)) return 1; provided = le16_to_cpu(raw->i_checksum_lo); calculated = ext4_inode_csum(inode, raw, ei); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16; else calculated &= 0xFFFF; return provided == calculated; } static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, struct ext4_inode_info *ei) { __u32 csum; if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != cpu_to_le32(EXT4_OS_LINUX) || !ext4_has_metadata_csum(inode->i_sb)) return; csum = ext4_inode_csum(inode, raw, ei); raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) raw->i_checksum_hi = cpu_to_le16(csum >> 16); } static inline int ext4_begin_ordered_truncate(struct inode *inode, loff_t new_size) { trace_ext4_begin_ordered_truncate(inode, new_size); /* * If jinode is zero, then we never opened the file for * writing, so there's no need to call * jbd2_journal_begin_ordered_truncate() since there's no * outstanding writes we need to flush. 
*/ if (!EXT4_I(inode)->jinode) return 0; return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode, new_size); } static void ext4_invalidatepage(struct page *page, unsigned int offset, unsigned int length); static int __ext4_journalled_writepage(struct page *page, unsigned int len); static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, int pextents); /* * Test whether an inode is a fast symlink. */ int ext4_inode_is_fast_symlink(struct inode *inode) { int ea_blocks = EXT4_I(inode)->i_file_acl ? EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0; if (ext4_has_inline_data(inode)) return 0; return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); } /* * Restart the transaction associated with *handle. This does a commit, * so before we call here everything must be consistently dirtied against * this transaction. */ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, int nblocks) { int ret; /* * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this * moment, get_block can be called only for blocks inside i_size since * page cache has been already dropped and writes are blocked by * i_mutex. So we can safely drop the i_data_sem here. */ BUG_ON(EXT4_JOURNAL(inode) == NULL); jbd_debug(2, "restarting handle %p\n", handle); up_write(&EXT4_I(inode)->i_data_sem); ret = ext4_journal_restart(handle, nblocks); down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); return ret; } /* * Called at the last iput() if i_nlink is zero. */ void ext4_evict_inode(struct inode *inode) { handle_t *handle; int err; trace_ext4_evict_inode(inode); if (inode->i_nlink) { /* * When journalling data dirty buffers are tracked only in the * journal. So although mm thinks everything is clean and * ready for reaping the inode might still have some pages to * write in the running transaction or waiting to be * checkpointed. Thus calling jbd2_journal_invalidatepage() * (via truncate_inode_pages()) to discard these buffers can * cause data loss. Also even if we did not discard these * buffers, we would have no way to find them after the inode * is reaped and thus user could see stale data if he tries to * read them before the transaction is checkpointed. So be * careful and force everything to disk here... We use * ei->i_datasync_tid to store the newest transaction * containing inode's data. * * Note that directories do not have this problem because they * don't use page cache. 
*/ if (ext4_should_journal_data(inode) && (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && inode->i_ino != EXT4_JOURNAL_INO) { journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; jbd2_complete_transaction(journal, commit_tid); filemap_write_and_wait(&inode->i_data); } truncate_inode_pages_final(&inode->i_data); WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); goto no_delete; } if (is_bad_inode(inode)) goto no_delete; dquot_initialize(inode); if (ext4_should_order_data(inode)) ext4_begin_ordered_truncate(inode, 0); truncate_inode_pages_final(&inode->i_data); WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); /* * Protect us against freezing - iput() caller didn't have to have any * protection against it */ sb_start_intwrite(inode->i_sb); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, ext4_blocks_for_truncate(inode)+3); if (IS_ERR(handle)) { ext4_std_error(inode->i_sb, PTR_ERR(handle)); /* * If we're going to skip the normal cleanup, we still need to * make sure that the in-core orphan linked list is properly * cleaned up. */ ext4_orphan_del(NULL, inode); sb_end_intwrite(inode->i_sb); goto no_delete; } if (IS_SYNC(inode)) ext4_handle_sync(handle); inode->i_size = 0; err = ext4_mark_inode_dirty(handle, inode); if (err) { ext4_warning(inode->i_sb, "couldn't mark inode dirty (err %d)", err); goto stop_handle; } if (inode->i_blocks) ext4_truncate(inode); /* * ext4_ext_truncate() doesn't reserve any slop when it * restarts journal transactions; therefore there may not be * enough credits left in the handle to remove the inode from * the orphan list and set the dtime field. */ if (!ext4_handle_has_enough_credits(handle, 3)) { err = ext4_journal_extend(handle, 3); if (err > 0) err = ext4_journal_restart(handle, 3); if (err != 0) { ext4_warning(inode->i_sb, "couldn't extend journal (err %d)", err); stop_handle: ext4_journal_stop(handle); ext4_orphan_del(NULL, inode); sb_end_intwrite(inode->i_sb); goto no_delete; } } /* * Kill off the orphan record which ext4_truncate created. * AKPM: I think this can be inside the above `if'. * Note that ext4_orphan_del() has to be able to cope with the * deletion of a non-existent orphan - this is because we don't * know if ext4_truncate() actually created an orphan record. * (Well, we could do this if we need to, but heck - it works) */ ext4_orphan_del(handle, inode); EXT4_I(inode)->i_dtime = get_seconds(); /* * One subtle ordering requirement: if anything has gone wrong * (transaction abort, IO errors, whatever), then we can still * do these next steps (the fs will already have been marked as * having errors), but we can't free the inode if the mark_dirty * fails. */ if (ext4_mark_inode_dirty(handle, inode)) /* If that failed, just do the required in-core inode clear. */ ext4_clear_inode(inode); else ext4_free_inode(handle, inode); ext4_journal_stop(handle); sb_end_intwrite(inode->i_sb); return; no_delete: ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ } #ifdef CONFIG_QUOTA qsize_t *ext4_get_reserved_space(struct inode *inode) { return &EXT4_I(inode)->i_reserved_quota; } #endif /* * Called with i_data_sem down, which is important since we can call * ext4_discard_preallocations() from here. 
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same.
	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem.  So in the meantime the unwritten
	 * extent could be converted.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because extents will be collapsed in the
	 * status tree.  So the m_len fields might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns them if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extents based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and unwritten, the
 * result buffer head is unmapped. If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			retval = 0;
		} else {
			BUG_ON(1);
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns, for create == 0,
	 * with the buffer head unmapped.
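	 *
	 * (When the caller passed EXT4_GET_BLOCKS_CONVERT_UNWRITTEN, a
	 * successful lookup still falls through below so that the
	 * conversion can be done under the write lock.)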
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * may result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4_INODE_EXTENTS here because a migration
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non-extent files. So we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * the extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
			if (ext4_es_is_written(&es))
				goto has_zeroout;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}

has_zeroout:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
		/* Direct IO write...
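		 * e.g. a 16 MiB O_DIRECT write on a 4 KiB-block file
		 * system (illustrative figures) is mapped in chunks of
		 * at most DIO_MAX_BLOCKS (4096) blocks, each chunk under
		 * its own handle sized by ext4_chunk_trans_blocks().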
*/ if (map.m_len > DIO_MAX_BLOCKS) map.m_len = DIO_MAX_BLOCKS; dio_credits = ext4_chunk_trans_blocks(inode, map.m_len); handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); return ret; } started = 1; } ret = ext4_map_blocks(handle, inode, &map, flags); if (ret > 0) { ext4_io_end_t *io_end = ext4_inode_aio(inode); map_bh(bh, inode->i_sb, map.m_pblk); bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; if (IS_DAX(inode) && buffer_unwritten(bh)) { /* * dgc: I suspect unwritten conversion on ext4+DAX is * fundamentally broken here when there are concurrent * read/write in progress on this inode. */ WARN_ON_ONCE(io_end); bh->b_assoc_map = inode->i_mapping; bh->b_private = (void *)(unsigned long)iblock; } if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN) set_buffer_defer_completion(bh); bh->b_size = inode->i_sb->s_blocksize * map.m_len; ret = 0; } if (started) ext4_journal_stop(handle); return ret; } int ext4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create) { return _ext4_get_block(inode, iblock, bh, create ? EXT4_GET_BLOCKS_CREATE : 0); } /* * `handle' can be NULL if create is zero */ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, ext4_lblk_t block, int map_flags) { struct ext4_map_blocks map; struct buffer_head *bh; int create = map_flags & EXT4_GET_BLOCKS_CREATE; int err; J_ASSERT(handle != NULL || create == 0); map.m_lblk = block; map.m_len = 1; err = ext4_map_blocks(handle, inode, &map, map_flags); if (err == 0) return create ? ERR_PTR(-ENOSPC) : NULL; if (err < 0) return ERR_PTR(err); bh = sb_getblk(inode->i_sb, map.m_pblk); if (unlikely(!bh)) return ERR_PTR(-ENOMEM); if (map.m_flags & EXT4_MAP_NEW) { J_ASSERT(create != 0); J_ASSERT(handle != NULL); /* * Now that we do not always journal data, we should * keep in mind whether this should always journal the * new buffer as metadata. For now, regular file * writes use ext4_get_block instead, so it's not a * problem. 
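		 * (Directory code, for example, allocates metadata
		 * blocks through this path, so the zeroed block below is
		 * journalled via ext4_handle_dirty_metadata().)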
*/ lock_buffer(bh); BUFFER_TRACE(bh, "call get_create_access"); err = ext4_journal_get_create_access(handle, bh); if (unlikely(err)) { unlock_buffer(bh); goto errout; } if (!buffer_uptodate(bh)) { memset(bh->b_data, 0, inode->i_sb->s_blocksize); set_buffer_uptodate(bh); } unlock_buffer(bh); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); err = ext4_handle_dirty_metadata(handle, inode, bh); if (unlikely(err)) goto errout; } else BUFFER_TRACE(bh, "not a new buffer"); return bh; errout: brelse(bh); return ERR_PTR(err); } struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, ext4_lblk_t block, int map_flags) { struct buffer_head *bh; bh = ext4_getblk(handle, inode, block, map_flags); if (IS_ERR(bh)) return bh; if (!bh || buffer_uptodate(bh)) return bh; ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); wait_on_buffer(bh); if (buffer_uptodate(bh)) return bh; put_bh(bh); return ERR_PTR(-EIO); } int ext4_walk_page_buffers(handle_t *handle, struct buffer_head *head, unsigned from, unsigned to, int *partial, int (*fn)(handle_t *handle, struct buffer_head *bh)) { struct buffer_head *bh; unsigned block_start, block_end; unsigned blocksize = head->b_size; int err, ret = 0; struct buffer_head *next; for (bh = head, block_start = 0; ret == 0 && (bh != head || !block_start); block_start = block_end, bh = next) { next = bh->b_this_page; block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (partial && !buffer_uptodate(bh)) *partial = 1; continue; } err = (*fn)(handle, bh); if (!ret) ret = err; } return ret; } /* * To preserve ordering, it is essential that the hole instantiation and * the data write be encapsulated in a single transaction. We cannot * close off a transaction and start a new one between the ext4_get_block() * and the commit_write(). So doing the jbd2_journal_start at the start of * prepare_write() is the right place. * * Also, this function can nest inside ext4_writepage(). In that case, we * *know* that ext4_writepage() has generated enough buffer credits to do the * whole page. So we won't block on the journal in that case, which is good, * because the caller may be PF_MEMALLOC. * * By accident, ext4 can be reentered when a transaction is open via * quota file writes. If we were to commit the transaction while thus * reentered, there can be a deadlock - we would be holding a quota * lock, and the commit would never complete if another thread had a * transaction open and was blocking on the quota lock - a ranking * violation. * * So what we do is to rely on the fact that jbd2_journal_stop/journal_start * will _not_ run commit under these circumstances because handle->h_ref * is elevated. We'll still have enough credits for the tiny quotafile * write. */ int do_journal_get_write_access(handle_t *handle, struct buffer_head *bh) { int dirty = buffer_dirty(bh); int ret; if (!buffer_mapped(bh) || buffer_freed(bh)) return 0; /* * __block_write_begin() could have dirtied some buffers. Clean * the dirty bit as jbd2_journal_get_write_access() could complain * otherwise about fs integrity issues. Setting of the dirty bit * by __block_write_begin() isn't a real problem here as we clear * the bit before releasing a page lock and thus writeback cannot * ever write the buffer. 
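	 *
	 * So the order below is: clear the dirty bit, get journal write
	 * access, and then (if the buffer was dirty) redirty it through
	 * the journal with ext4_handle_dirty_metadata().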
*/ if (dirty) clear_buffer_dirty(bh); BUFFER_TRACE(bh, "get write access"); ret = ext4_journal_get_write_access(handle, bh); if (!ret && dirty) ret = ext4_handle_dirty_metadata(handle, NULL, bh); return ret; } static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); #ifdef CONFIG_EXT4_FS_ENCRYPTION static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block) { unsigned from = pos & (PAGE_CACHE_SIZE - 1); unsigned to = from + len; struct inode *inode = page->mapping->host; unsigned block_start, block_end; sector_t block; int err = 0; unsigned blocksize = inode->i_sb->s_blocksize; unsigned bbits; struct buffer_head *bh, *head, *wait[2], **wait_bh = wait; bool decrypt = false; BUG_ON(!PageLocked(page)); BUG_ON(from > PAGE_CACHE_SIZE); BUG_ON(to > PAGE_CACHE_SIZE); BUG_ON(from > to); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); head = page_buffers(page); bbits = ilog2(blocksize); block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); for (bh = head, block_start = 0; bh != head || !block_start; block++, block_start = block_end, bh = bh->b_this_page) { block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } continue; } if (buffer_new(bh)) clear_buffer_new(bh); if (!buffer_mapped(bh)) { WARN_ON(bh->b_size != blocksize); err = get_block(inode, block, bh, 1); if (err) break; if (buffer_new(bh)) { unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr); if (PageUptodate(page)) { clear_buffer_new(bh); set_buffer_uptodate(bh); mark_buffer_dirty(bh); continue; } if (block_end > to || block_start < from) zero_user_segments(page, to, block_end, block_start, from); continue; } } if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); continue; } if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh) && (block_start < from || block_end > to)) { ll_rw_block(READ, 1, &bh); *wait_bh++ = bh; decrypt = ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode); } } /* * If we issued read requests, let them complete. */ while (wait_bh > wait) { wait_on_buffer(*--wait_bh); if (!buffer_uptodate(*wait_bh)) err = -EIO; } if (unlikely(err)) page_zero_new_buffers(page, from, to); else if (decrypt) err = ext4_decrypt(page); return err; } #endif static int ext4_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; int ret, needed_blocks; handle_t *handle; int retries = 0; struct page *page; pgoff_t index; unsigned from, to; trace_ext4_write_begin(inode, pos, len, flags); /* * Reserve one block more for addition to orphan list in case * we allocate blocks but write fails for some reason */ needed_blocks = ext4_writepage_trans_blocks(inode) + 1; index = pos >> PAGE_CACHE_SHIFT; from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { ret = ext4_try_to_write_inline_data(mapping, inode, pos, len, flags, pagep); if (ret < 0) return ret; if (ret == 1) return 0; } /* * grab_cache_page_write_begin() can take a long time if the * system is thrashing due to memory pressure, or if the page * is being written back. So grab it first before we start * the transaction handle. This also allows us to allocate * the page (if needed) without using GFP_NOFS. 
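	 *
	 * The ordering (page first, then handle) also means the page can
	 * be truncated while it is unlocked; if that happens we drop the
	 * handle and the page and start over at retry_grab below.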
*/ retry_grab: page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; unlock_page(page); retry_journal: handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks); if (IS_ERR(handle)) { page_cache_release(page); return PTR_ERR(handle); } lock_page(page); if (page->mapping != mapping) { /* The page got truncated from under us */ unlock_page(page); page_cache_release(page); ext4_journal_stop(handle); goto retry_grab; } /* In case writeback began while the page was unlocked */ wait_for_stable_page(page); #ifdef CONFIG_EXT4_FS_ENCRYPTION if (ext4_should_dioread_nolock(inode)) ret = ext4_block_write_begin(page, pos, len, ext4_get_block_write); else ret = ext4_block_write_begin(page, pos, len, ext4_get_block); #else if (ext4_should_dioread_nolock(inode)) ret = __block_write_begin(page, pos, len, ext4_get_block_write); else ret = __block_write_begin(page, pos, len, ext4_get_block); #endif if (!ret && ext4_should_journal_data(inode)) { ret = ext4_walk_page_buffers(handle, page_buffers(page), from, to, NULL, do_journal_get_write_access); } if (ret) { unlock_page(page); /* * __block_write_begin may have instantiated a few blocks * outside i_size. Trim these off again. Don't need * i_size_read because we hold i_mutex. * * Add inode to orphan list in case we crash before * truncate finishes */ if (pos + len > inode->i_size && ext4_can_truncate(inode)) ext4_orphan_add(handle, inode); ext4_journal_stop(handle); if (pos + len > inode->i_size) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might * still be on the orphan list; we need to * make sure the inode is removed from the * orphan list in that case. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_journal; page_cache_release(page); return ret; } *pagep = page; return ret; } /* For write_end() in data=journal mode */ static int write_end_fn(handle_t *handle, struct buffer_head *bh) { int ret; if (!buffer_mapped(bh) || buffer_freed(bh)) return 0; set_buffer_uptodate(bh); ret = ext4_handle_dirty_metadata(handle, NULL, bh); clear_buffer_meta(bh); clear_buffer_prio(bh); return ret; } /* * We need to pick up the new inode size which generic_commit_write gave us * `file' can be NULL - eg, when called from page_symlink(). * * ext4 never places buffers on inode->i_mapping->private_list. metadata * buffers are managed internally. */ static int ext4_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; loff_t old_size = inode->i_size; int ret = 0, ret2; int i_size_changed = 0; trace_ext4_write_end(inode, pos, len, copied); if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) { ret = ext4_jbd2_file_inode(handle, inode); if (ret) { unlock_page(page); page_cache_release(page); goto errout; } } if (ext4_has_inline_data(inode)) { ret = ext4_write_inline_data_end(inode, pos, len, copied, page); if (ret < 0) goto errout; copied = ret; } else copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); /* * it's important to update i_size while still holding page lock: * page writeout could otherwise come in and zero beyond i_size. 
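	 *
	 * For example, a concurrent writepage of this page could observe
	 * the old i_size and zero out the tail of the data we have just
	 * copied; updating i_size before dropping the page lock closes
	 * that window.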
*/ i_size_changed = ext4_update_inode_size(inode, pos + copied); unlock_page(page); page_cache_release(page); if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); /* * Don't mark the inode dirty under page lock. First, it unnecessarily * makes the holding time of page lock longer. Second, it forces lock * ordering of page lock and transaction start for journaling * filesystems. */ if (i_size_changed) ext4_mark_inode_dirty(handle, inode); if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them */ ext4_orphan_add(handle, inode); errout: ret2 = ext4_journal_stop(handle); if (!ret) ret = ret2; if (pos + len > inode->i_size) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. */ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } return ret ? ret : copied; } /* * This is a private version of page_zero_new_buffers() which doesn't * set the buffer to be dirty, since in data=journalled mode we need * to call ext4_handle_dirty_metadata() instead. */ static void zero_new_buffers(struct page *page, unsigned from, unsigned to) { unsigned int block_start = 0, block_end; struct buffer_head *head, *bh; bh = head = page_buffers(page); do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { if (!PageUptodate(page)) { unsigned start, size; start = max(from, block_start); size = min(to, block_end) - start; zero_user(page, start, size); set_buffer_uptodate(bh); } clear_buffer_new(bh); } } block_start = block_end; bh = bh->b_this_page; } while (bh != head); } static int ext4_journalled_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; loff_t old_size = inode->i_size; int ret = 0, ret2; int partial = 0; unsigned from, to; int size_changed = 0; trace_ext4_journalled_write_end(inode, pos, len, copied); from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; BUG_ON(!ext4_handle_valid(handle)); if (ext4_has_inline_data(inode)) copied = ext4_write_inline_data_end(inode, pos, len, copied, page); else { if (copied < len) { if (!PageUptodate(page)) copied = 0; zero_new_buffers(page, from+copied, to); } ret = ext4_walk_page_buffers(handle, page_buffers(page), from, to, &partial, write_end_fn); if (!partial) SetPageUptodate(page); } size_changed = ext4_update_inode_size(inode, pos + copied); ext4_set_inode_state(inode, EXT4_STATE_JDATA); EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; unlock_page(page); page_cache_release(page); if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); if (size_changed) { ret2 = ext4_mark_inode_dirty(handle, inode); if (!ret) ret = ret2; } if (pos + len > inode->i_size && ext4_can_truncate(inode)) /* if we have allocated more blocks and copied * less. We will have blocks allocated outside * inode->i_size. So truncate them */ ext4_orphan_add(handle, inode); ret2 = ext4_journal_stop(handle); if (!ret) ret = ret2; if (pos + len > inode->i_size) { ext4_truncate_failed_write(inode); /* * If truncate failed early the inode might still be * on the orphan list; we need to make sure the inode * is removed from the orphan list in that case. 
*/ if (inode->i_nlink) ext4_orphan_del(NULL, inode); } return ret ? ret : copied; } /* * Reserve space for a single cluster */ static int ext4_da_reserve_space(struct inode *inode) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); int ret; /* * We will charge metadata quota at writeout time; this saves * us from metadata over-estimation, though we may go over by * a small amount in the end. Here we just reserve for data. */ ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); if (ret) return ret; spin_lock(&ei->i_block_reservation_lock); if (ext4_claim_free_clusters(sbi, 1, 0)) { spin_unlock(&ei->i_block_reservation_lock); dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); return -ENOSPC; } ei->i_reserved_data_blocks++; trace_ext4_da_reserve_space(inode); spin_unlock(&ei->i_block_reservation_lock); return 0; /* success */ } static void ext4_da_release_space(struct inode *inode, int to_free) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); if (!to_free) return; /* Nothing to release, exit */ spin_lock(&EXT4_I(inode)->i_block_reservation_lock); trace_ext4_da_release_space(inode, to_free); if (unlikely(to_free > ei->i_reserved_data_blocks)) { /* * if there aren't enough reserved blocks, then the * counter is messed up somewhere. Since this * function is called from invalidate page, it's * harmless to return without any action. */ ext4_warning(inode->i_sb, "ext4_da_release_space: " "ino %lu, to_free %d with only %d reserved " "data blocks", inode->i_ino, to_free, ei->i_reserved_data_blocks); WARN_ON(1); to_free = ei->i_reserved_data_blocks; } ei->i_reserved_data_blocks -= to_free; /* update fs dirty data blocks counter */ percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); } static void ext4_da_page_release_reservation(struct page *page, unsigned int offset, unsigned int length) { int to_release = 0, contiguous_blks = 0; struct buffer_head *head, *bh; unsigned int curr_off = 0; struct inode *inode = page->mapping->host; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); unsigned int stop = offset + length; int num_clusters; ext4_fsblk_t lblk; BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); head = page_buffers(page); bh = head; do { unsigned int next_off = curr_off + bh->b_size; if (next_off > stop) break; if ((offset <= curr_off) && (buffer_delay(bh))) { to_release++; contiguous_blks++; clear_buffer_delay(bh); } else if (contiguous_blks) { lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; ext4_es_remove_extent(inode, lblk, contiguous_blks); contiguous_blks = 0; } curr_off = next_off; } while ((bh = bh->b_this_page) != head); if (contiguous_blks) { lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; ext4_es_remove_extent(inode, lblk, contiguous_blks); } /* If we have released all the blocks belonging to a cluster, then we * need to release the reserved space for that cluster. 
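	 *
	 * Example (bigalloc, illustrative): with s_cluster_ratio == 16,
	 * releasing 32 delayed blocks covers EXT4_NUM_B2C() == 2 clusters;
	 * a cluster's reservation is given back below only once no delalloc
	 * extent still references any block in that cluster.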
	 */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct writeback_control *wbc;

	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	index = mpd->first_page;
	end   = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, start, last - start + 1);
	}

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			if (invalidate) {
				block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
				ClearPageUptodate(page);
			}
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
	       EXT4_C2B(EXT4_SB(inode->i_sb),
			ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
	       (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * This function grabs code from the very beginning of ext4_map_blocks,
 * but assumes that the caller is coming from the delayed-write path.
 * It looks up the requested blocks and sets the buffer delay bit under
 * the protection of i_data_sem.
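 *
 * On a miss we insert an EXTENT_STATUS_DELAYED entry into the extent
 * status tree and map the bh to the sentinel 'invalid_block' so that
 * later lookups and the writeback path recognize the block as delayed
 * rather than allocated.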
*/ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, struct ext4_map_blocks *map, struct buffer_head *bh) { struct extent_status es; int retval; sector_t invalid_block = ~((sector_t) 0xffff); #ifdef ES_AGGRESSIVE_TEST struct ext4_map_blocks orig_map; memcpy(&orig_map, map, sizeof(*map)); #endif if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) invalid_block = ~0; map->m_flags = 0; ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," "logical block %lu\n", inode->i_ino, map->m_len, (unsigned long) map->m_lblk); /* Lookup extent status tree firstly */ if (ext4_es_lookup_extent(inode, iblock, &es)) { if (ext4_es_is_hole(&es)) { retval = 0; down_read(&EXT4_I(inode)->i_data_sem); goto add_delayed; } /* * Delayed extent could be allocated by fallocate. * So we need to check it. */ if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) { map_bh(bh, inode->i_sb, invalid_block); set_buffer_new(bh); set_buffer_delay(bh); return 0; } map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk; retval = es.es_len - (iblock - es.es_lblk); if (retval > map->m_len) retval = map->m_len; map->m_len = retval; if (ext4_es_is_written(&es)) map->m_flags |= EXT4_MAP_MAPPED; else if (ext4_es_is_unwritten(&es)) map->m_flags |= EXT4_MAP_UNWRITTEN; else BUG_ON(1); #ifdef ES_AGGRESSIVE_TEST ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); #endif return retval; } /* * Try to see if we can get the block without requesting a new * file system block. */ down_read(&EXT4_I(inode)->i_data_sem); if (ext4_has_inline_data(inode)) retval = 0; else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) retval = ext4_ext_map_blocks(NULL, inode, map, 0); else retval = ext4_ind_map_blocks(NULL, inode, map, 0); add_delayed: if (retval == 0) { int ret; /* * XXX: __block_prepare_write() unmaps passed block, * is it OK? */ /* * If the block was allocated from previously allocated cluster, * then we don't need to reserve it again. However we still need * to reserve metadata for every block we're going to write. */ if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 || !ext4_find_delalloc_cluster(inode, map->m_lblk)) { ret = ext4_da_reserve_space(inode); if (ret) { /* not enough space to reserve */ retval = ret; goto out_unlock; } } ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, ~0, EXTENT_STATUS_DELAYED); if (ret) { retval = ret; goto out_unlock; } map_bh(bh, inode->i_sb, invalid_block); set_buffer_new(bh); set_buffer_delay(bh); } else if (retval > 0) { int ret; unsigned int status; if (unlikely(retval != map->m_len)) { ext4_warning(inode->i_sb, "ES len assertion failed for inode " "%lu: retval %d != map->m_len %d", inode->i_ino, retval, map->m_len); WARN_ON(1); } status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, map->m_pblk, status); if (ret != 0) retval = ret; } out_unlock: up_read((&EXT4_I(inode)->i_data_sem)); return retval; } /* * This is a special get_block_t callback which is used by * ext4_da_write_begin(). It will either return mapped block or * reserve space for a single block. * * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. * We also have b_blocknr = -1 and b_bdev initialized properly * * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev * initialized properly. 
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is already allocated;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;

	if (buffer_unwritten(bh)) {
		/* A delayed write to an unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset, and new ensures that we do proper zero out
		 * for partial writes.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs = NULL;
	handle_t *handle = NULL;
	int ret = 0, err = 0;
	int inline_data = ext4_has_inline_data(inode);
	struct buffer_head *inode_bh = NULL;

	ClearPageChecked(page);

	if (inline_data) {
		BUG_ON(page->index != 0);
		BUG_ON(len > ext4_get_max_inline_size(inode));
		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
		if (inode_bh == NULL)
			goto out;
	} else {
		page_bufs = page_buffers(page);
		if (!page_bufs) {
			BUG();
			goto out;
		}
		ext4_walk_page_buffers(handle, page_bufs, 0, len,
				       NULL, bget_one);
	}
	/*
	 * We need to release the page lock before we start the
	 * journal, so grab a reference so the page won't disappear
	 * out from under us.
	 */
	get_page(page);
	unlock_page(page);

	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		put_page(page);
		goto out_no_pagelock;
	}
	BUG_ON(!ext4_handle_valid(handle));

	lock_page(page);
	put_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		ext4_journal_stop(handle);
		ret = 0;
		goto out;
	}

	if (inline_data) {
		BUFFER_TRACE(inode_bh, "get write access");
		ret = ext4_journal_get_write_access(handle, inode_bh);

		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);

	} else {
		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     do_journal_get_write_access);

		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     write_end_fn);
	}
	if (ret == 0)
		ret = err;
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	if (!ext4_has_inline_data(inode))
		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
				       NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	unlock_page(page);
out_no_pagelock:
	brelse(inode_bh);
	return ret;
}

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We don't even
 * need to attach the inode to the transaction's list in ordered mode because
 * if we are writing back data added by write(), the inode is already there
 * and if we are writing back data modified via mmap(), no one guarantees in
 * which transaction the data will hit the disk.
 *
 * In case we are journaling data, we cannot start a transaction directly
 * because a transaction start ranks above the page lock, so we have to do
 * some magic.
 *
 * This function can get called via...
 *   - ext4_writepages after taking page lock (have journal handle)
 *   - journal_submit_inode_data_buffers (no journal handle)
 *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
 *   - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function. If we have a page with
 * multiple blocks we need to write those buffer_heads that are mapped. This
 * is important for mmap-based writes. So if we do, with blocksize 1K:
 *	truncate(f, 1024);
 *	a = mmap(f, 0, 4096);
 *	a[0] = 'a';
 *	truncate(f, 4096);
 * then in the page the first buffer_head is mapped via the page_mkwrite
 * callback, but the other buffer_heads are unmapped yet dirty (dirtied via
 * do_wp_page). So writepage should write the first block. If we modify
 * the mmap area beyond 1024 we will again get a page fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if it has any buffer_heads that are either delayed
 * or unwritten.
 *
 * We can get recursively called as shown below.
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has its dirty flag cleared, so we don't take a recursive
 * page_lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;
	struct ext4_io_submit io_submit;
	bool keep_towrite = false;

	trace_ext4_writepage(page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;

	page_bufs = page_buffers(page);
	/*
	 * We cannot do block allocation or other extent handling in this
	 * function. If there are buffers needing that, we have to redirty
	 * the page. But we may reach here when we do a journal commit via
	 * journal_submit_inode_data_buffers() and in that case we must write
	 * allocated buffers to achieve data=ordered mode guarantees.
	 *
	 * Also, if there is only one buffer per page (the fs block
	 * size == the page size), if one buffer needs block
	 * allocation or needs to modify the extent tree to clear the
	 * unwritten flag, we know that the page can't be written at
	 * all, so we might as well refuse the write immediately.
	 * Unfortunately if the block size != page size, we can't as
	 * easily detect this case using ext4_walk_page_buffers(), but
	 * for the extremely common case, this is an optimization that
	 * skips a useless round trip through ext4_bio_write_page().
	 */
	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
				   ext4_bh_delay_or_unwritten)) {
		redirty_page_for_writepage(wbc, page);
		if ((current->flags & PF_MEMALLOC) ||
		    (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) {
			/*
			 * For memory cleaning there's no point in writing only
			 * some buffers. So just bail out. Warn if we came here
			 * from direct reclaim.
			 */
			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
							== PF_MEMALLOC);
			unlock_page(page);
			return 0;
		}
		keep_towrite = true;
	}

	if (PageChecked(page) && ext4_should_journal_data(inode))
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
*/ return __ext4_journalled_writepage(page, len); ext4_io_submit_init(&io_submit, wbc); io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); if (!io_submit.io_end) { redirty_page_for_writepage(wbc, page); unlock_page(page); return -ENOMEM; } ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite); ext4_io_submit(&io_submit); /* Drop io_end reference we got from init */ ext4_put_io_end_defer(io_submit.io_end); return ret; } static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) { int len; loff_t size = i_size_read(mpd->inode); int err; BUG_ON(page->index != mpd->first_page); if (page->index == size >> PAGE_CACHE_SHIFT) len = size & ~PAGE_CACHE_MASK; else len = PAGE_CACHE_SIZE; clear_page_dirty_for_io(page); err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); if (!err) mpd->wbc->nr_to_write--; mpd->first_page++; return err; } #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) /* * mballoc gives us at most this number of blocks... * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). * The rest of mballoc seems to handle chunks up to full group size. */ #define MAX_WRITEPAGES_EXTENT_LEN 2048 /* * mpage_add_bh_to_extent - try to add bh to extent of blocks to map * * @mpd - extent of blocks * @lblk - logical number of the block in the file * @bh - buffer head we want to add to the extent * * The function is used to collect contig. blocks in the same state. If the * buffer doesn't require mapping for writeback and we haven't started the * extent of buffers to map yet, the function returns 'true' immediately - the * caller can write the buffer right away. Otherwise the function returns true * if the block has been added to the extent, false if the block couldn't be * added. */ static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, struct buffer_head *bh) { struct ext4_map_blocks *map = &mpd->map; /* Buffer that doesn't need mapping for writeback? */ if (!buffer_dirty(bh) || !buffer_mapped(bh) || (!buffer_delay(bh) && !buffer_unwritten(bh))) { /* So far no extent to map => we write the buffer right away */ if (map->m_len == 0) return true; return false; } /* First block in the extent? */ if (map->m_len == 0) { map->m_lblk = lblk; map->m_len = 1; map->m_flags = bh->b_state & BH_FLAGS; return true; } /* Don't go larger than mballoc is willing to allocate */ if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) return false; /* Can we merge the block to our big extent? */ if (lblk == map->m_lblk + map->m_len && (bh->b_state & BH_FLAGS) == map->m_flags) { map->m_len++; return true; } return false; } /* * mpage_process_page_bufs - submit page buffers for IO or add them to extent * * @mpd - extent of blocks for mapping * @head - the first buffer in the page * @bh - buffer we should start processing from * @lblk - logical number of the block in the file corresponding to @bh * * Walk through page buffers from @bh upto @head (exclusive) and either submit * the page for IO if all buffers in this page were mapped and there's no * accumulated extent of buffers to map or add buffers in the page to the * extent of buffers to map. The function returns 1 if the caller can continue * by processing the next page, 0 if it should stop adding buffers to the * extent to map because we cannot extend it anymore. It can also return value * < 0 in case of error during IO submission. 
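 *
 * (The final 'return lblk < blocks' encodes the EOF case: once every
 * block up to i_size has been processed there is nothing further to
 * scan, so the caller can stop.)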
*/ static int mpage_process_page_bufs(struct mpage_da_data *mpd, struct buffer_head *head, struct buffer_head *bh, ext4_lblk_t lblk) { struct inode *inode = mpd->inode; int err; ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) >> inode->i_blkbits; do { BUG_ON(buffer_locked(bh)); if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { /* Found extent to map? */ if (mpd->map.m_len) return 0; /* Everything mapped so far and we hit EOF */ break; } } while (lblk++, (bh = bh->b_this_page) != head); /* So far everything mapped? Submit the page for IO. */ if (mpd->map.m_len == 0) { err = mpage_submit_page(mpd, head->b_page); if (err < 0) return err; } return lblk < blocks; } /* * mpage_map_buffers - update buffers corresponding to changed extent and * submit fully mapped pages for IO * * @mpd - description of extent to map, on return next extent to map * * Scan buffers corresponding to changed extent (we expect corresponding pages * to be already locked) and update buffer state according to new extent state. * We map delalloc buffers to their physical location, clear unwritten bits, * and mark buffers as uninit when we perform writes to unwritten extents * and do extent conversion after IO is finished. If the last page is not fully * mapped, we update @map to the next extent in the last page that needs * mapping. Otherwise we submit the page for IO. */ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) { struct pagevec pvec; int nr_pages, i; struct inode *inode = mpd->inode; struct buffer_head *head, *bh; int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; pgoff_t start, end; ext4_lblk_t lblk; sector_t pblock; int err; start = mpd->map.m_lblk >> bpp_bits; end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; lblk = start << bpp_bits; pblock = mpd->map.m_pblk; pagevec_init(&pvec, 0); while (start <= end) { nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start, PAGEVEC_SIZE); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; if (page->index > end) break; /* Up to 'end' pages must be contiguous */ BUG_ON(page->index != start); bh = head = page_buffers(page); do { if (lblk < mpd->map.m_lblk) continue; if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { /* * Buffer after end of mapped extent. * Find next buffer in the page to map. */ mpd->map.m_len = 0; mpd->map.m_flags = 0; /* * FIXME: If dioread_nolock supports * blocksize < pagesize, we need to make * sure we add size mapped so far to * io_end->size as the following call * can submit the page for IO. */ err = mpage_process_page_bufs(mpd, head, bh, lblk); pagevec_release(&pvec); if (err > 0) err = 0; return err; } if (buffer_delay(bh)) { clear_buffer_delay(bh); bh->b_blocknr = pblock++; } clear_buffer_unwritten(bh); } while (lblk++, (bh = bh->b_this_page) != head); /* * FIXME: This is going to break if dioread_nolock * supports blocksize < pagesize as we will try to * convert potentially unmapped parts of inode. */ mpd->io_submit.io_end->size += PAGE_CACHE_SIZE; /* Page fully mapped - let IO run! */ err = mpage_submit_page(mpd, page); if (err < 0) { pagevec_release(&pvec); return err; } start++; } pagevec_release(&pvec); } /* Extent fully mapped and matches with page boundary. We are done. 
	 */
	mpd->map.m_len = 0;
	mpd->map.m_flags = 0;
	return 0;
}

static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
{
	struct inode *inode = mpd->inode;
	struct ext4_map_blocks *map = &mpd->map;
	int get_blocks_flags;
	int err, dioread_nolock;

	trace_ext4_da_write_pages_extent(inode, map);
	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
	 * to convert an unwritten extent to be initialized (in the case
	 * where we have written into one or more preallocated blocks). It is
	 * possible that we're going to need more metadata blocks than
	 * previously reserved. However we must not fail because we're in
	 * writeback and there is nothing we can do about it so it might result
	 * in data loss.  So use reserved blocks to allocate metadata if
	 * possible.
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
	 * the blocks in question are delalloc blocks.  This indicates
	 * that the blocks and quotas have already been checked when
	 * the data was copied into the page cache.
	 */
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
			   EXT4_GET_BLOCKS_METADATA_NOFAIL;
	dioread_nolock = ext4_should_dioread_nolock(inode);
	if (dioread_nolock)
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (map->m_flags & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
	if (err < 0)
		return err;
	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
		if (!mpd->io_submit.io_end->handle &&
		    ext4_handle_valid(handle)) {
			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
			handle->h_rsv_handle = NULL;
		}
		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
	}

	BUG_ON(map->m_len == 0);
	if (map->m_flags & EXT4_MAP_NEW) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		int i;

		for (i = 0; i < map->m_len; i++)
			unmap_underlying_metadata(bdev, map->m_pblk + i);
	}
	return 0;
}

/*
 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
 *				 mpd->len and submit pages underlying it for IO
 *
 * @handle - handle for journal operations
 * @mpd - extent to map
 * @give_up_on_write - we set this to true iff there is a fatal error and there
 *                     is no hope of writing the data. The caller should discard
 *                     dirty pages to avoid infinite loops.
 *
 * The function maps extent starting at mpd->lblk of length mpd->len. If it is
 * delayed, blocks are allocated; if it is unwritten, we may need to convert
 * them to initialized or split the described range from a larger unwritten
 * extent. Note that we need not map all the described range since allocation
 * can return fewer blocks or the range may be covered by more unwritten
 * extents. We cannot map more because we are limited by reserved transaction
 * credits. On the other hand we always make sure that the last touched page
 * is fully mapped so that it can be written out (and thus forward progress is
 * guaranteed). After mapping we submit all mapped pages for IO.
 */
static int mpage_map_and_submit_extent(handle_t *handle,
				       struct mpage_da_data *mpd,
				       bool *give_up_on_write)
{
	struct inode *inode = mpd->inode;
	struct ext4_map_blocks *map = &mpd->map;
	int err;
	loff_t disksize;
	int progress = 0;

	mpd->io_submit.io_end->offset =
				((loff_t)map->m_lblk) << inode->i_blkbits;
	do {
		err = mpage_map_one_extent(handle, mpd);
		if (err < 0) {
			struct super_block *sb = inode->i_sb;

			if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
				goto invalidate_dirty_pages;
			/*
			 * Let the upper layers retry transient errors.
			 * In the case of ENOSPC, if ext4_count_free_clusters()
			 * is non-zero, a commit should free up blocks.
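			 *
			 * (Both errors are returned rather than treated
			 * as fatal: ENOMEM is transient by nature, and
			 * ENOSPC only while free clusters remain, since
			 * otherwise another attempt cannot succeed.)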
*/ if ((err == -ENOMEM) || (err == -ENOSPC && ext4_count_free_clusters(sb))) { if (progress) goto update_disksize; return err; } ext4_msg(sb, KERN_CRIT, "Delayed block allocation failed for " "inode %lu at logical offset %llu with" " max blocks %u with error %d", inode->i_ino, (unsigned long long)map->m_lblk, (unsigned)map->m_len, -err); ext4_msg(sb, KERN_CRIT, "This should not happen!! Data will " "be lost\n"); if (err == -ENOSPC) ext4_print_free_blocks(inode); invalidate_dirty_pages: *give_up_on_write = true; return err; } progress = 1; /* * Update buffer state, submit mapped pages, and get us new * extent to map */ err = mpage_map_and_submit_buffers(mpd); if (err < 0) goto update_disksize; } while (map->m_len); update_disksize: /* * Update on-disk size after IO is submitted. Races with * truncate are avoided by checking i_size under i_data_sem. */ disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT; if (disksize > EXT4_I(inode)->i_disksize) { int err2; loff_t i_size; down_write(&EXT4_I(inode)->i_data_sem); i_size = i_size_read(inode); if (disksize > i_size) disksize = i_size; if (disksize > EXT4_I(inode)->i_disksize) EXT4_I(inode)->i_disksize = disksize; err2 = ext4_mark_inode_dirty(handle, inode); up_write(&EXT4_I(inode)->i_data_sem); if (err2) ext4_error(inode->i_sb, "Failed to mark inode %lu dirty", inode->i_ino); if (!err) err = err2; } return err; } /* * Calculate the total number of credits to reserve for one writepages * iteration. This is called from ext4_writepages(). We map an extent of * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + * bpp - 1 blocks in bpp different extents. */ static int ext4_da_writepages_trans_blocks(struct inode *inode) { int bpp = ext4_journal_blocks_per_page(inode); return ext4_meta_trans_blocks(inode, MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); } /* * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages * and underlying extent to map * * @mpd - where to look for pages * * Walk dirty pages in the mapping. If they are fully mapped, submit them for * IO immediately. When we find a page which isn't mapped we start accumulating * extent of buffers underlying these pages that needs mapping (formed by * either delayed or unwritten buffers). We also lock the pages containing * these buffers. The extent found is returned in @mpd structure (starting at * mpd->lblk with length mpd->len blocks). * * Note that this function can attach bios to one io_end structure which are * neither logically nor physically contiguous. Although it may seem as an * unnecessary complication, it is actually inevitable in blocksize < pagesize * case as we need to track IO to all buffers underlying a page in one io_end. 
*/ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) { struct address_space *mapping = mpd->inode->i_mapping; struct pagevec pvec; unsigned int nr_pages; long left = mpd->wbc->nr_to_write; pgoff_t index = mpd->first_page; pgoff_t end = mpd->last_page; int tag; int i, err = 0; int blkbits = mpd->inode->i_blkbits; ext4_lblk_t lblk; struct buffer_head *head; if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; pagevec_init(&pvec, 0); mpd->map.m_len = 0; mpd->next_page = index; while (index <= end) { nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) goto out; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; /* * At this point, the page may be truncated or * invalidated (changing page->mapping to NULL), or * even swizzled back from swapper_space to tmpfs file * mapping. However, page->index will not change * because we have a reference on the page. */ if (page->index > end) goto out; /* * Accumulated enough dirty pages? This doesn't apply * to WB_SYNC_ALL mode. For integrity sync we have to * keep going because someone may be concurrently * dirtying pages, and we might have synced a lot of * newly appeared dirty pages, but have not synced all * of the old dirty pages. */ if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0) goto out; /* If we can't merge this page, we are done. */ if (mpd->map.m_len > 0 && mpd->next_page != page->index) goto out; lock_page(page); /* * If the page is no longer dirty, or its mapping no * longer corresponds to inode we are writing (which * means it has been truncated or invalidated), or the * page is already under writeback and we are not doing * a data integrity writeback, skip the page */ if (!PageDirty(page) || (PageWriteback(page) && (mpd->wbc->sync_mode == WB_SYNC_NONE)) || unlikely(page->mapping != mapping)) { unlock_page(page); continue; } wait_on_page_writeback(page); BUG_ON(PageWriteback(page)); if (mpd->map.m_len == 0) mpd->first_page = page->index; mpd->next_page = page->index + 1; /* Add all dirty buffers to mpd */ lblk = ((ext4_lblk_t)page->index) << (PAGE_CACHE_SHIFT - blkbits); head = page_buffers(page); err = mpage_process_page_bufs(mpd, head, head, lblk); if (err <= 0) goto out; err = 0; left--; } pagevec_release(&pvec); cond_resched(); } return 0; out: pagevec_release(&pvec); return err; } static int __writepage(struct page *page, struct writeback_control *wbc, void *data) { struct address_space *mapping = data; int ret = ext4_writepage(page, wbc); mapping_set_error(mapping, ret); return ret; } static int ext4_writepages(struct address_space *mapping, struct writeback_control *wbc) { pgoff_t writeback_index = 0; long nr_to_write = wbc->nr_to_write; int range_whole = 0; int cycled = 1; handle_t *handle = NULL; struct mpage_da_data mpd; struct inode *inode = mapping->host; int needed_blocks, rsv_blocks = 0, ret = 0; struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); bool done; struct blk_plug plug; bool give_up_on_write = false; trace_ext4_writepages(inode, wbc); /* * No pages to write? 
This is mainly a kludge to avoid starting * a transaction for special inodes like journal inode on last iput() * because that could violate lock ordering on umount */ if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) goto out_writepages; if (ext4_should_journal_data(inode)) { struct blk_plug plug; blk_start_plug(&plug); ret = write_cache_pages(mapping, wbc, __writepage, mapping); blk_finish_plug(&plug); goto out_writepages; } /* * If the filesystem has aborted, it is read-only, so return * right away instead of dumping stack traces later on that * will obscure the real source of the problem. We test * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because * the latter could be true if the filesystem is mounted * read-only, and in that case, ext4_writepages should * *never* be called, so if that ever happens, we would want * the stack trace. */ if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) { ret = -EROFS; goto out_writepages; } if (ext4_should_dioread_nolock(inode)) { /* * We may need to convert up to one extent per block in * the page and we may dirty the inode. */ rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits); } /* * If we have inline data and arrive here, it means that * we will soon create the block for the 1st page, so * we'd better clear the inline data here. */ if (ext4_has_inline_data(inode)) { /* Just inode will be modified... */ handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out_writepages; } BUG_ON(ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)); ext4_destroy_inline_data(handle, inode); ext4_journal_stop(handle); } if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; if (wbc->range_cyclic) { writeback_index = mapping->writeback_index; if (writeback_index) cycled = 0; mpd.first_page = writeback_index; mpd.last_page = -1; } else { mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT; mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT; } mpd.inode = inode; mpd.wbc = wbc; ext4_io_submit_init(&mpd.io_submit, wbc); retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); done = false; blk_start_plug(&plug); while (!done && mpd.first_page <= mpd.last_page) { /* For each extent of pages we use new io_end */ mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); if (!mpd.io_submit.io_end) { ret = -ENOMEM; break; } /* * We have two constraints: We find one extent to map and we * must always write out whole page (makes a difference when * blocksize < pagesize) so that we don't block on IO when we * try to write out the rest of the page. Journalled mode is * not supported by delalloc. 
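		 *
		 * (Note, added for clarity: this is why each transaction
		 * below reserves credits for mapping
		 * MAX_WRITEPAGES_EXTENT_LEN + blocks-per-page - 1 blocks,
		 * as computed by ext4_da_writepages_trans_blocks() above,
		 * plus rsv_blocks for the possible extent conversion with
		 * dioread_nolock.)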
		 */
		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		/* start a new transaction */
		handle = ext4_journal_start_with_reserve(inode,
				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
			       "%ld pages, ino %lu; err %d", __func__,
				wbc->nr_to_write, inode->i_ino, ret);
			/* Release allocated io_end */
			ext4_put_io_end(mpd.io_submit.io_end);
			break;
		}

		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
		ret = mpage_prepare_extent_to_map(&mpd);
		if (!ret) {
			if (mpd.map.m_len)
				ret = mpage_map_and_submit_extent(handle, &mpd,
					&give_up_on_write);
			else {
				/*
				 * We scanned the whole range (or exhausted
				 * nr_to_write), submitted what was mapped and
				 * didn't find anything needing mapping. We are
				 * done.
				 */
				done = true;
			}
		}
		ext4_journal_stop(handle);
		/* Submit prepared bio */
		ext4_io_submit(&mpd.io_submit);
		/* Unlock pages we didn't use */
		mpage_release_unused_pages(&mpd, give_up_on_write);
		/* Drop our io_end reference we got from init */
		ext4_put_io_end(mpd.io_submit.io_end);

		if (ret == -ENOSPC && sbi->s_journal) {
			/*
			 * Commit the transaction which would
			 * free blocks released in the transaction
			 * and try again
			 */
			jbd2_journal_force_commit_nested(sbi->s_journal);
			ret = 0;
			continue;
		}
		/* Fatal error - ENOMEM, EIO... */
		if (ret)
			break;
	}
	blk_finish_plug(&plug);
	if (!ret && !cycled && wbc->nr_to_write > 0) {
		cycled = 1;
		mpd.last_page = writeback_index - 1;
		mpd.first_page = 0;
		goto retry;
	}

	/* Update index */
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		/*
		 * Set the writeback_index so that range_cyclic
		 * mode will write it back later
		 */
		mapping->writeback_index = mpd.first_page;

out_writepages:
	trace_ext4_writepages_result(inode, wbc, ret,
				     nr_to_write - wbc->nr_to_write);
	return ret;
}

static int ext4_nonda_switch(struct super_block *sb)
{
	s64 free_clusters, dirty_clusters;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/*
	 * Switch to non-delalloc mode if we are running low on free blocks.
	 * The free block accounting via percpu counters can get slightly
	 * wrong with percpu_counter_batch getting accumulated on each CPU
	 * without updating global counters. Delalloc needs accurate free
	 * block accounting, so switch to non-delalloc when we are near the
	 * error range.
	 */
	free_clusters =
		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
	dirty_clusters =
		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
	/*
	 * Start pushing delalloc when 1/2 of free blocks are dirty.
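	 *
	 * Worked example (illustrative): with free_clusters == 1000 and
	 * dirty_clusters == 600 we kick writeback since 1000 < 2 * 600;
	 * but 2 * 1000 < 3 * 600 is false, so we only fall back to
	 * non-delalloc if 1000 < 600 + EXT4_FREECLUSTERS_WATERMARK.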
	 */
	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);

	if (2 * free_clusters < 3 * dirty_clusters ||
	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
		/*
		 * free block count is less than 150% of dirty blocks
		 * or free blocks are less than the watermark
		 */
		return 1;
	}
	return 0;
}

/* We always reserve for an inode update; the superblock could be there too */
static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
{
	if (likely(ext4_has_feature_large_file(inode->i_sb)))
		return 1;

	if (pos + len <= 0x7fffffffULL)
		return 1;

	/* We might need to update the superblock to set LARGE_FILE */
	return 2;
}

static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret, retries = 0;
	struct page *page;
	pgoff_t index;
	struct inode *inode = mapping->host;
	handle_t *handle;

	index = pos >> PAGE_CACHE_SHIFT;

	if (ext4_nonda_switch(inode->i_sb)) {
		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	}
	*fsdata = (void *)0;
	trace_ext4_da_write_begin(inode, pos, len, flags);

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_da_write_inline_data_begin(mapping, inode,
						      pos, len, flags,
						      pagep, fsdata);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation. But we still need to
	 * journal the i_disksize update if we write to the end of a
	 * file that has an already mapped buffer.
	 */
retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				ext4_da_write_credits(inode, pos, len));
	if (IS_ERR(handle)) {
		page_cache_release(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		page_cache_release(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_EXT4_FS_ENCRYPTION
	ret = ext4_block_write_begin(page, pos, len,
				     ext4_da_get_block_prep);
#else
	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
#endif
	if (ret < 0) {
		unlock_page(page);
		ext4_journal_stop(handle);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
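		 *
		 * (Illustrative case: a failed 2k write at offset 10k into
		 * an 8k file may have instantiated blocks in the 8k..12k
		 * range; ext4_truncate_failed_write() below trims them.)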
		 */
		if (pos + len > inode->i_size)
			ext4_truncate_failed_write(inode);

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;

		page_cache_release(page);
		return ret;
	}

	*pagep = page;
	return ret;
}

/*
 * Check if we should update i_disksize
 * when write to the end of file but not require block allocation
 */
static int ext4_da_should_update_i_disksize(struct page *page,
					    unsigned long offset)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	unsigned int idx;
	int i;

	bh = page_buffers(page);
	idx = offset >> inode->i_blkbits;

	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
		return 0;
	return 1;
}

static int ext4_da_write_end(struct file *file,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	handle_t *handle = ext4_journal_current_handle();
	loff_t new_i_size;
	unsigned long start, end;
	int write_mode = (int)(unsigned long)fsdata;

	if (write_mode == FALL_BACK_TO_NONDELALLOC)
		return ext4_write_end(file, mapping, pos,
				      len, copied, page, fsdata);

	trace_ext4_da_write_end(inode, pos, len, copied);
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + copied - 1;

	/*
	 * generic_write_end() will run mark_inode_dirty() if i_size
	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
	 * into that.
	 */
	new_i_size = pos + copied;
	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_has_inline_data(inode) ||
		    ext4_da_should_update_i_disksize(page, end)) {
			ext4_update_i_disksize(inode, new_i_size);
			/*
			 * We need to mark the inode dirty even if new_i_size
			 * is less than inode->i_size but greater than
			 * i_disksize. (hint: delalloc)
			 */
			ext4_mark_inode_dirty(handle, inode);
		}
	}

	if (write_mode != CONVERT_INLINE_DATA &&
	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
	    ext4_has_inline_data(inode))
		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
						     page);
	else
		ret2 = generic_write_end(file, mapping, pos, len, copied,
					 page, fsdata);

	copied = ret2;
	if (ret2 < 0)
		ret = ret2;
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	return ret ? ret : copied;
}

static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
				   unsigned int length)
{
	/*
	 * Drop reserved blocks
	 */
	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	ext4_da_page_release_reservation(page, offset, length);

out:
	ext4_invalidatepage(page, offset, length);

	return;
}

/*
 * Force all delayed allocation blocks to be allocated for a given inode.
 */
int ext4_alloc_da_blocks(struct inode *inode)
{
	trace_ext4_alloc_da_blocks(inode);

	if (!EXT4_I(inode)->i_reserved_data_blocks)
		return 0;

	/*
	 * We do something simple for now.  The filemap_flush() will
	 * also start triggering a write of the data blocks, which is
	 * not strictly speaking necessary (and for users of
	 * laptop_mode, not even desirable).  However, to do otherwise
	 * would require replicating code paths in:
	 *
	 * ext4_writepages() ->
	 *    write_cache_pages() ---> (via passed in callback function)
	 *       __mpage_da_writepage() -->
	 *          mpage_add_bh_to_extent()
	 *          mpage_da_map_blocks()
	 *
	 * The problem is that write_cache_pages(), located in
	 * mm/page-writeback.c, marks pages clean in preparation for
	 * doing I/O, which is not desirable if we're not planning on
	 * doing I/O at all.
	 *
	 * We could call write_cache_pages(), and then redirty all of
	 * the pages by calling redirty_page_for_writepage() but that
	 * would be ugly in the extreme.
	 * So instead we would need to replicate parts of the code
	 * in the above functions, simplifying them because we wouldn't
	 * actually intend to write out the pages, but rather only
	 * collect contiguous logical block extents, call the
	 * multi-block allocator, and then update the buffer heads with
	 * the block allocations.
	 *
	 * For now, though, we'll cheat by calling filemap_flush(),
	 * which will map the blocks, and start the I/O, but not
	 * actually wait for the I/O to complete.
	 */
	return filemap_flush(inode->i_mapping);
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext4 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	/*
	 * We can get here for an inline file via the FIBMAP ioctl
	 */
	if (ext4_has_inline_data(inode))
		return 0;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
			test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file
		 * so that we can make sure we allocate
		 * blocks for the file
		 */
		filemap_write_and_wait(mapping);
	}

	if (EXT4_JOURNAL(inode) &&
	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT4_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
		journal = EXT4_JOURNAL(inode);
		jbd2_journal_lock_updates(journal);
		err = jbd2_journal_flush(journal);
		jbd2_journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext4_get_block);
}

static int ext4_readpage(struct file *file, struct page *page)
{
	int ret = -EAGAIN;
	struct inode *inode = page->mapping->host;

	trace_ext4_readpage(page);

	if (ext4_has_inline_data(inode))
		ret = ext4_readpage_inline(inode, page);

	if (ret == -EAGAIN)
		return ext4_mpage_readpages(page->mapping, NULL, page, 1);

	return ret;
}

static int
ext4_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;

	/* If the file has inline data, no need to do readpages.
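	 * Its data lives in the inode body and is served by ->readpage via
	 * ext4_readpage_inline(), so there are no disk blocks to read ahead.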
	 */
	if (ext4_has_inline_data(inode))
		return 0;

	return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	trace_ext4_invalidatepage(page, offset, length);

	/* No journalling happens on data buffers when this function is used */
	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));

	block_invalidatepage(page, offset, length);
}

static int __ext4_journalled_invalidatepage(struct page *page,
					    unsigned int offset,
					    unsigned int length)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_journalled_invalidatepage(page, offset, length);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0 && length == PAGE_CACHE_SIZE)
		ClearPageChecked(page);

	return jbd2_journal_invalidatepage(journal, page, offset, length);
}

/* Wrapper for aops... */
static void ext4_journalled_invalidatepage(struct page *page,
					   unsigned int offset,
					   unsigned int length)
{
	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
}

static int ext4_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	trace_ext4_releasepage(page);

	/* Page has dirty journalled data -> cannot release */
	if (PageChecked(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page, wait);
	else
		return try_to_free_buffers(page);
}

/*
 * ext4_get_block is used when preparing for a DIO write or buffer write.
 * We allocate an uninitialized extent if blocks haven't been allocated.
 * The extent will be converted to initialized after the IO is complete.
 */
int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_NO_LOCK);
}

int ext4_get_block_dax(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create)
{
	int flags = EXT4_GET_BLOCKS_PRE_IO | EXT4_GET_BLOCKS_UNWRIT_EXT;

	if (create)
		flags |= EXT4_GET_BLOCKS_CREATE;
	ext4_debug("ext4_get_block_dax: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result, flags);
}

static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
			    ssize_t size, void *private)
{
	ext4_io_end_t *io_end = iocb->private;

	/* if not async direct IO just return */
	if (!io_end)
		return;

	ext_debug("ext4_end_io_dio(): io_end 0x%p "
		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
		  iocb->private, io_end->inode->i_ino, iocb, offset, size);

	iocb->private = NULL;
	io_end->offset = offset;
	io_end->size = size;
	ext4_put_io_end(io_end);
}

/*
 * For ext4 extent files, ext4 will do direct-io writes to holes,
 * preallocated extents, and writes that extend the file; there is no need
 * to fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as unwritten.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as unwritten.
 *
 * The unwritten extents will be converted to written when DIO is completed.
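 * (For instance, a buffered reader racing with a DIO write into a hole
 * sees zeroes for the still-unwritten extent rather than stale on-disk
 * data until that conversion happens.)
 *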
 * For async direct IO, since the IO may still be pending when we return, we
 * set up an end_io callback function, which will do the conversion
 * when the async direct IO is completed.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 */
static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
				  loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;
	size_t count = iov_iter_count(iter);
	int overwrite = 0;
	get_block_t *get_block_func = NULL;
	int dio_flags = 0;
	loff_t final_size = offset + count;
	ext4_io_end_t *io_end = NULL;

	/* Use the old path for reads and writes beyond i_size. */
	if (iov_iter_rw(iter) != WRITE || final_size > inode->i_size)
		return ext4_ind_direct_IO(iocb, iter, offset);

	BUG_ON(iocb->private == NULL);

	/*
	 * Make all waiters for direct IO properly wait also for extent
	 * conversion. This also disallows race between truncate() and
	 * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
	 */
	if (iov_iter_rw(iter) == WRITE)
		inode_dio_begin(inode);

	/* If we do an overwrite dio, i_mutex locking can be released */
	overwrite = *((int *)iocb->private);

	if (overwrite) {
		down_read(&EXT4_I(inode)->i_data_sem);
		mutex_unlock(&inode->i_mutex);
	}

	/*
	 * We can write directly to holes and fallocated extents.
	 *
	 * Allocated blocks to fill the hole are marked as
	 * unwritten to prevent a parallel buffered read from exposing
	 * the stale data before DIO completes the data IO.
	 *
	 * As to previously fallocated extents, ext4 get_block will
	 * simply mark the buffer mapped but still keep the
	 * extents unwritten.
	 *
	 * For the non-AIO case, we will convert those unwritten extents
	 * to written after returning from blockdev_direct_IO.
	 *
	 * For async DIO, the conversion needs to be deferred when the
	 * IO is completed. The ext4 end_io callback function will be
	 * called to take care of the conversion work.  Here for the async
	 * case, we allocate an io_end structure to hook to the iocb.
	 */
	iocb->private = NULL;
	ext4_inode_aio_set(inode, NULL);
	if (!is_sync_kiocb(iocb)) {
		io_end = ext4_init_io_end(inode, GFP_NOFS);
		if (!io_end) {
			ret = -ENOMEM;
			goto retake_lock;
		}
		/*
		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
		 */
		iocb->private = ext4_get_io_end(io_end);
		/*
		 * we save the io structure for current async direct
		 * IO, so that later ext4_map_blocks() could flag the
		 * io structure whether there is an unwritten extent
		 * that needs to be converted when IO is completed.
		 */
		ext4_inode_aio_set(inode, io_end);
	}

	if (overwrite) {
		get_block_func = ext4_get_block_write_nolock;
	} else {
		get_block_func = ext4_get_block_write;
		dio_flags = DIO_LOCKING;
	}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
	if (IS_DAX(inode))
		ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
				ext4_end_io_dio, dio_flags);
	else
		ret = __blockdev_direct_IO(iocb, inode,
					   inode->i_sb->s_bdev, iter, offset,
					   get_block_func,
					   ext4_end_io_dio, NULL, dio_flags);

	/*
	 * Put our reference to io_end. This can free the io_end structure e.g.
	 * in sync IO case or in case of error. It can even perform extent
	 * conversion if all bios we submitted finished before we got here.
	 * Note that in that case iocb->private can be already set to NULL
	 * here.
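	 * (Reference counting recap, added for clarity: ext4_init_io_end()
	 * gave us one reference and ext4_get_io_end() above took a second
	 * one for the iocb; both must be dropped before the io_end is freed.)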
	 */
	if (io_end) {
		ext4_inode_aio_set(inode, NULL);
		ext4_put_io_end(io_end);
		/*
		 * When no IO was submitted ext4_end_io_dio() was not
		 * called so we have to put iocb's reference.
		 */
		if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
			WARN_ON(iocb->private != io_end);
			WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
			ext4_put_io_end(io_end);
			iocb->private = NULL;
		}
	}
	if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
						EXT4_STATE_DIO_UNWRITTEN)) {
		int err;
		/*
		 * for the non-AIO case, since the IO is already
		 * completed, we can do the conversion right here
		 */
		err = ext4_convert_unwritten_extents(NULL, inode,
						     offset, ret);
		if (err < 0)
			ret = err;
		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
	}

retake_lock:
	if (iov_iter_rw(iter) == WRITE)
		inode_dio_end(inode);
	/* take i_mutex locking again if we did an overwrite dio */
	if (overwrite) {
		up_read(&EXT4_I(inode)->i_data_sem);
		mutex_lock(&inode->i_mutex);
	}

	return ret;
}

static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

#ifdef CONFIG_EXT4_FS_ENCRYPTION
	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;
#endif

	/*
	 * If we are doing data journalling we don't support O_DIRECT
	 */
	if (ext4_should_journal_data(inode))
		return 0;

	/* Let buffer I/O handle the inline data case. */
	if (ext4_has_inline_data(inode))
		return 0;

	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ret = ext4_ext_direct_IO(iocb, iter, offset);
	else
		ret = ext4_ind_direct_IO(iocb, iter, offset);
	trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
	return ret;
}

/*
 * Pages can be marked dirty completely asynchronously from ext4's journalling
 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
 * much here because ->set_page_dirty is called under VFS locks.  The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
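 *
 * (The "pending dirty" mark is the PageChecked flag set by
 * ext4_journalled_set_page_dirty() below and consumed at writepage time.)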
*/ static int ext4_journalled_set_page_dirty(struct page *page) { SetPageChecked(page); return __set_page_dirty_nobuffers(page); } static const struct address_space_operations ext4_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_write_end, .bmap = ext4_bmap, .invalidatepage = ext4_invalidatepage, .releasepage = ext4_releasepage, .direct_IO = ext4_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations ext4_journalled_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .writepages = ext4_writepages, .write_begin = ext4_write_begin, .write_end = ext4_journalled_write_end, .set_page_dirty = ext4_journalled_set_page_dirty, .bmap = ext4_bmap, .invalidatepage = ext4_journalled_invalidatepage, .releasepage = ext4_releasepage, .direct_IO = ext4_direct_IO, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; static const struct address_space_operations ext4_da_aops = { .readpage = ext4_readpage, .readpages = ext4_readpages, .writepage = ext4_writepage, .writepages = ext4_writepages, .write_begin = ext4_da_write_begin, .write_end = ext4_da_write_end, .bmap = ext4_bmap, .invalidatepage = ext4_da_invalidatepage, .releasepage = ext4_releasepage, .direct_IO = ext4_direct_IO, .migratepage = buffer_migrate_page, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_page = generic_error_remove_page, }; void ext4_set_aops(struct inode *inode) { switch (ext4_inode_journal_mode(inode)) { case EXT4_INODE_ORDERED_DATA_MODE: ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE); break; case EXT4_INODE_WRITEBACK_DATA_MODE: ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE); break; case EXT4_INODE_JOURNAL_DATA_MODE: inode->i_mapping->a_ops = &ext4_journalled_aops; return; default: BUG(); } if (test_opt(inode->i_sb, DELALLOC)) inode->i_mapping->a_ops = &ext4_da_aops; else inode->i_mapping->a_ops = &ext4_aops; } static int __ext4_block_zero_page_range(handle_t *handle, struct address_space *mapping, loff_t from, loff_t length) { ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); unsigned blocksize, pos; ext4_lblk_t iblock; struct inode *inode = mapping->host; struct buffer_head *bh; struct page *page; int err = 0; page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, mapping_gfp_constraint(mapping, ~__GFP_FS)); if (!page) return -ENOMEM; blocksize = inode->i_sb->s_blocksize; iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); /* Find the buffer that contains "offset" */ bh = page_buffers(page); pos = blocksize; while (offset >= pos) { bh = bh->b_this_page; iblock++; pos += blocksize; } if (buffer_freed(bh)) { BUFFER_TRACE(bh, "freed: skip"); goto unlock; } if (!buffer_mapped(bh)) { BUFFER_TRACE(bh, "unmapped"); ext4_get_block(inode, iblock, bh, 0); /* unmapped? It's a hole - nothing to do */ if (!buffer_mapped(bh)) { BUFFER_TRACE(bh, "still unmapped"); goto unlock; } } /* Ok, it's mapped. Make sure it's up-to-date */ if (PageUptodate(page)) set_buffer_uptodate(bh); if (!buffer_uptodate(bh)) { err = -EIO; ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); /* Uhhuh. Read error. Complain and punt. 
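		 * (err was set to -EIO before the read above, so falling
		 * through to the unlock label returns that error.)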
		 */
		if (!buffer_uptodate(bh))
			goto unlock;
		if (S_ISREG(inode->i_mode) &&
		    ext4_encrypted_inode(inode)) {
			/* We expect the key to be set. */
			BUG_ON(!ext4_has_encryption_key(inode));
			BUG_ON(blocksize != PAGE_CACHE_SIZE);
			WARN_ON_ONCE(ext4_decrypt(page));
		}
	}
	if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}
	zero_user(page, offset, length);
	BUFFER_TRACE(bh, "zeroed end of block");

	if (ext4_should_journal_data(inode)) {
		err = ext4_handle_dirty_metadata(handle, inode, bh);
	} else {
		err = 0;
		mark_buffer_dirty(bh);
		if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
			err = ext4_jbd2_file_inode(handle, inode);
	}

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

/*
 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
 * starting from file offset 'from'.  The range to be zeroed must
 * be contained within one block.  If the specified range exceeds
 * the end of the block it will be shortened to the end of the block
 * that corresponds to 'from'.
 */
static int ext4_block_zero_page_range(handle_t *handle,
		struct address_space *mapping, loff_t from, loff_t length)
{
	struct inode *inode = mapping->host;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned max = blocksize - (offset & (blocksize - 1));

	/*
	 * correct length if it does not fall between
	 * 'from' and the end of the block
	 */
	if (length > max || length < 0)
		length = max;

	if (IS_DAX(inode))
		return dax_zero_page_range(inode, from, length, ext4_get_block);
	return __ext4_block_zero_page_range(handle, mapping, from, length);
}

/*
 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate.  We need to physically zero the tail end
 * of that block so it doesn't yield old data if the file is later grown.
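 *
 * (Worked example, illustrative: truncating to 5000 bytes with a 4k
 * block size zeroes file offsets 5000..8191, the tail of the last block.)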
 */
static int ext4_block_truncate_page(handle_t *handle,
		struct address_space *mapping, loff_t from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned length;
	unsigned blocksize;
	struct inode *inode = mapping->host;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));

	return ext4_block_zero_page_range(handle, mapping, from, length);
}

int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
			     loff_t lstart, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	unsigned partial_start, partial_end;
	ext4_fsblk_t start, end;
	loff_t byte_end = (lstart + length - 1);
	int err = 0;

	partial_start = lstart & (sb->s_blocksize - 1);
	partial_end = byte_end & (sb->s_blocksize - 1);

	start = lstart >> sb->s_blocksize_bits;
	end = byte_end >> sb->s_blocksize_bits;

	/* Handle partial zero within the single block */
	if (start == end &&
	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
		err = ext4_block_zero_page_range(handle, mapping,
						 lstart, length);
		return err;
	}
	/* Handle partial zero out on the start of the range */
	if (partial_start) {
		err = ext4_block_zero_page_range(handle, mapping,
						 lstart, sb->s_blocksize);
		if (err)
			return err;
	}
	/* Handle partial zero out on the end of the range */
	if (partial_end != sb->s_blocksize - 1)
		err = ext4_block_zero_page_range(handle, mapping,
						 byte_end - partial_end,
						 partial_end + 1);
	return err;
}

int ext4_can_truncate(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return 1;
	if (S_ISDIR(inode->i_mode))
		return 1;
	if (S_ISLNK(inode->i_mode))
		return !ext4_inode_is_fast_symlink(inode);
	return 0;
}

/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode:  File inode
 * @offset: The offset where the hole will begin
 * @len:    The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */

int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	loff_t first_block_offset, last_block_offset;
	handle_t *handle;
	unsigned int credits;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	trace_ext4_punch_hole(inode, offset, length, 0);

	/*
	 * Write out all dirty pages to avoid race conditions,
	 * then release them.
	 */
	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		ret = filemap_write_and_wait_range(mapping, offset,
						   offset + length - 1);
		if (ret)
			return ret;
	}

	mutex_lock(&inode->i_mutex);

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
		   offset;
	}

	if (offset & (sb->s_blocksize - 1) ||
	    (offset + length) & (sb->s_blocksize - 1)) {
		/*
		 * Attach jinode to inode for jbd2 if we do any zeroing of
		 * partial block
		 */
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			goto out_mutex;

	}

	/* Wait for all existing dio workers; newcomers will block on i_mutex */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
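	 * Holding i_mmap_sem for write should keep the page-fault paths,
	 * which are expected to take it for read, out until the hole has
	 * been punched.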
*/ down_write(&EXT4_I(inode)->i_mmap_sem); first_block_offset = round_up(offset, sb->s_blocksize); last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; /* Now release the pages and zero block aligned part of pages*/ if (last_block_offset > first_block_offset) truncate_pagecache_range(inode, first_block_offset, last_block_offset); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) credits = ext4_writepage_trans_blocks(inode); else credits = ext4_blocks_for_truncate(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_std_error(sb, ret); goto out_dio; } ret = ext4_zero_partial_blocks(handle, inode, offset, length); if (ret) goto out_stop; first_block = (offset + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); /* If there are no blocks to remove, return now */ if (first_block >= stop_block) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); ret = ext4_es_remove_extent(inode, first_block, stop_block - first_block); if (ret) { up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) ret = ext4_ext_remove_space(inode, first_block, stop_block - 1); else ret = ext4_ind_remove_space(handle, inode, first_block, stop_block); up_write(&EXT4_I(inode)->i_data_sem); if (IS_SYNC(inode)) ext4_handle_sync(handle); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); out_stop: ext4_journal_stop(handle); out_dio: up_write(&EXT4_I(inode)->i_mmap_sem); ext4_inode_resume_unlocked_dio(inode); out_mutex: mutex_unlock(&inode->i_mutex); return ret; } int ext4_inode_attach_jinode(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct jbd2_inode *jinode; if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) return 0; jinode = jbd2_alloc_inode(GFP_KERNEL); spin_lock(&inode->i_lock); if (!ei->jinode) { if (!jinode) { spin_unlock(&inode->i_lock); return -ENOMEM; } ei->jinode = jinode; jbd2_journal_init_jbd_inode(ei->jinode, inode); jinode = NULL; } spin_unlock(&inode->i_lock); if (unlikely(jinode != NULL)) jbd2_free_inode(jinode); return 0; } /* * ext4_truncate() * * We block out ext4_get_block() block instantiations across the entire * transaction, and VFS/VM ensures that ext4_truncate() cannot run * simultaneously on behalf of the same inode. * * As we work through the truncate and commit bits of it to the journal there * is one core, guiding principle: the file's tree must always be consistent on * disk. We must be able to restart the truncate after a crash. * * The file's tree may be transiently inconsistent in memory (although it * probably isn't), but whenever we close off and commit a journal transaction, * the contents of (the filesystem + the journal) must be consistent and * restartable. It's pretty simple, really: bottom up, right to left (although * left-to-right works OK too). * * Note that at recovery time, journal replay occurs *before* the restart of * truncate against the orphan inode list. * * The committed inode has the new, desired i_size (which is the same as * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see * that this inode's truncate did not complete and it will again call * ext4_truncate() to have another go. So there will be instantiated blocks * to the right of the truncation point in a crashed ext4 filesystem. 
But * that's fine - as long as they are linked from the inode, the post-crash * ext4_truncate() run will find them and release them. */ void ext4_truncate(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); unsigned int credits; handle_t *handle; struct address_space *mapping = inode->i_mapping; /* * There is a possibility that we're either freeing the inode * or it's a completely new inode. In those cases we might not * have i_mutex locked because it's not necessary. */ if (!(inode->i_state & (I_NEW|I_FREEING))) WARN_ON(!mutex_is_locked(&inode->i_mutex)); trace_ext4_truncate_enter(inode); if (!ext4_can_truncate(inode)) return; ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); if (ext4_has_inline_data(inode)) { int has_inline = 1; ext4_inline_data_truncate(inode, &has_inline); if (has_inline) return; } /* If we zero-out tail of the page, we have to create jinode for jbd2 */ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { if (ext4_inode_attach_jinode(inode) < 0) return; } if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) credits = ext4_writepage_trans_blocks(inode); else credits = ext4_blocks_for_truncate(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); if (IS_ERR(handle)) { ext4_std_error(inode->i_sb, PTR_ERR(handle)); return; } if (inode->i_size & (inode->i_sb->s_blocksize - 1)) ext4_block_truncate_page(handle, mapping, inode->i_size); /* * We add the inode to the orphan list, so that if this * truncate spans multiple transactions, and we crash, we will * resume the truncate when the filesystem recovers. It also * marks the inode dirty, to catch the new size. * * Implication: the file must always be in a sane, consistent * truncatable state while each transaction commits. */ if (ext4_orphan_add(handle, inode)) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) ext4_ext_truncate(handle, inode); else ext4_ind_truncate(handle, inode); up_write(&ei->i_data_sem); if (IS_SYNC(inode)) ext4_handle_sync(handle); out_stop: /* * If this was a simple ftruncate() and the file will remain alive, * then we need to clear up the orphan record which we created above. * However, if this was a real unlink then we were called by * ext4_evict_inode(), and we allow that function to clean up the * orphan info for us. */ if (inode->i_nlink) ext4_orphan_del(handle, inode); inode->i_mtime = inode->i_ctime = ext4_current_time(inode); ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); trace_ext4_truncate_exit(inode); } /* * ext4_get_inode_loc returns with an extra refcount against the inode's * underlying buffer_head on success. If 'in_mem' is true, we have all * data in memory that is needed to recreate the on-disk version of this * inode. 
*/ static int __ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc, int in_mem) { struct ext4_group_desc *gdp; struct buffer_head *bh; struct super_block *sb = inode->i_sb; ext4_fsblk_t block; int inodes_per_block, inode_offset; iloc->bh = NULL; if (!ext4_valid_inum(sb, inode->i_ino)) return -EFSCORRUPTED; iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); if (!gdp) return -EIO; /* * Figure out the offset within the block group inode table */ inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; inode_offset = ((inode->i_ino - 1) % EXT4_INODES_PER_GROUP(sb)); block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); bh = sb_getblk(sb, block); if (unlikely(!bh)) return -ENOMEM; if (!buffer_uptodate(bh)) { lock_buffer(bh); /* * If the buffer has the write error flag, we have failed * to write out another inode in the same block. In this * case, we don't have to read the block because we may * read the old inode data successfully. */ if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) set_buffer_uptodate(bh); if (buffer_uptodate(bh)) { /* someone brought it uptodate while we waited */ unlock_buffer(bh); goto has_buffer; } /* * If we have all information of the inode in memory and this * is the only valid inode in the block, we need not read the * block. */ if (in_mem) { struct buffer_head *bitmap_bh; int i, start; start = inode_offset & ~(inodes_per_block - 1); /* Is the inode bitmap in cache? */ bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); if (unlikely(!bitmap_bh)) goto make_io; /* * If the inode bitmap isn't in cache then the * optimisation may end up performing two reads instead * of one, so skip it. */ if (!buffer_uptodate(bitmap_bh)) { brelse(bitmap_bh); goto make_io; } for (i = start; i < start + inodes_per_block; i++) { if (i == inode_offset) continue; if (ext4_test_bit(i, bitmap_bh->b_data)) break; } brelse(bitmap_bh); if (i == start + inodes_per_block) { /* all other inodes are free, so skip I/O */ memset(bh->b_data, 0, bh->b_size); set_buffer_uptodate(bh); unlock_buffer(bh); goto has_buffer; } } make_io: /* * If we need to do any I/O, try to pre-readahead extra * blocks from the inode table. */ if (EXT4_SB(sb)->s_inode_readahead_blks) { ext4_fsblk_t b, end, table; unsigned num; __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; table = ext4_inode_table(sb, gdp); /* s_inode_readahead_blks is always a power of 2 */ b = block & ~((ext4_fsblk_t) ra_blks - 1); if (table > b) b = table; end = b + ra_blks; num = EXT4_INODES_PER_GROUP(sb); if (ext4_has_group_desc_csum(sb)) num -= ext4_itable_unused_count(sb, gdp); table += num / inodes_per_block; if (end > table) end = table; while (b <= end) sb_breadahead(sb, b++); } /* * There are other valid inodes in the buffer, this inode * has in-inode xattrs, or we don't have this inode in memory. * Read the block from disk. */ trace_ext4_load_inode(inode); get_bh(bh); bh->b_end_io = end_buffer_read_sync; submit_bh(READ | REQ_META | REQ_PRIO, bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { EXT4_ERROR_INODE_BLOCK(inode, block, "unable to read itable block"); brelse(bh); return -EIO; } } has_buffer: iloc->bh = bh; return 0; } int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) { /* We have all inode data except xattrs in memory here. 
*/ return __ext4_get_inode_loc(inode, iloc, !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); } void ext4_set_inode_flags(struct inode *inode) { unsigned int flags = EXT4_I(inode)->i_flags; unsigned int new_fl = 0; if (flags & EXT4_SYNC_FL) new_fl |= S_SYNC; if (flags & EXT4_APPEND_FL) new_fl |= S_APPEND; if (flags & EXT4_IMMUTABLE_FL) new_fl |= S_IMMUTABLE; if (flags & EXT4_NOATIME_FL) new_fl |= S_NOATIME; if (flags & EXT4_DIRSYNC_FL) new_fl |= S_DIRSYNC; if (test_opt(inode->i_sb, DAX)) new_fl |= S_DAX; inode_set_flags(inode, new_fl, S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX); } /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ void ext4_get_inode_flags(struct ext4_inode_info *ei) { unsigned int vfs_fl; unsigned long old_fl, new_fl; do { vfs_fl = ei->vfs_inode.i_flags; old_fl = ei->i_flags; new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| EXT4_DIRSYNC_FL); if (vfs_fl & S_SYNC) new_fl |= EXT4_SYNC_FL; if (vfs_fl & S_APPEND) new_fl |= EXT4_APPEND_FL; if (vfs_fl & S_IMMUTABLE) new_fl |= EXT4_IMMUTABLE_FL; if (vfs_fl & S_NOATIME) new_fl |= EXT4_NOATIME_FL; if (vfs_fl & S_DIRSYNC) new_fl |= EXT4_DIRSYNC_FL; } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); } static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, struct ext4_inode_info *ei) { blkcnt_t i_blocks ; struct inode *inode = &(ei->vfs_inode); struct super_block *sb = inode->i_sb; if (ext4_has_feature_huge_file(sb)) { /* we are using combined 48 bit field */ i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | le32_to_cpu(raw_inode->i_blocks_lo); if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { /* i_blocks represent file system block size */ return i_blocks << (inode->i_blkbits - 9); } else { return i_blocks; } } else { return le32_to_cpu(raw_inode->i_blocks_lo); } } static inline void ext4_iget_extra_inode(struct inode *inode, struct ext4_inode *raw_inode, struct ext4_inode_info *ei) { __le32 *magic = (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { ext4_set_inode_state(inode, EXT4_STATE_XATTR); ext4_find_inline_data_nolock(inode); } else EXT4_I(inode)->i_inline_off = 0; } struct inode *ext4_iget(struct super_block *sb, unsigned long ino) { struct ext4_iloc iloc; struct ext4_inode *raw_inode; struct ext4_inode_info *ei; struct inode *inode; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; int block; uid_t i_uid; gid_t i_gid; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ei = EXT4_I(inode); iloc.bh = NULL; ret = __ext4_get_inode_loc(inode, &iloc, 0); if (ret < 0) goto bad_inode; raw_inode = ext4_raw_inode(&iloc); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > EXT4_INODE_SIZE(inode->i_sb)) { EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)", EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize, EXT4_INODE_SIZE(inode->i_sb)); ret = -EFSCORRUPTED; goto bad_inode; } } else ei->i_extra_isize = 0; /* Precompute checksum seed for inode metadata */ if (ext4_has_metadata_csum(sb)) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); __u32 csum; __le32 inum = cpu_to_le32(inode->i_ino); __le32 gen = raw_inode->i_generation; csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum)); ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen)); } if (!ext4_inode_csum_verify(inode, 
raw_inode, ei)) { EXT4_ERROR_INODE(inode, "checksum invalid"); ret = -EFSBADCRC; goto bad_inode; } inode->i_mode = le16_to_cpu(raw_inode->i_mode); i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); if (!(test_opt(inode->i_sb, NO_UID32))) { i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; } i_uid_write(inode, i_uid); i_gid_write(inode, i_gid); set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ei->i_inline_off = 0; ei->i_dir_start_lookup = 0; ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); /* We now have enough fields to check if the inode was active or not. * This is needed because nfsd might try to access dead inodes * the test is that same one that e2fsck uses * NeilBrown 1999oct15 */ if (inode->i_nlink == 0) { if ((inode->i_mode == 0 || !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && ino != EXT4_BOOT_LOADER_INO) { /* this inode is deleted */ ret = -ESTALE; goto bad_inode; } /* The only unlinked inodes we let through here have * valid i_mode and are being read by the orphan * recovery code: that's fine, we're about to complete * the process of deleting those. * OR it is the EXT4_BOOT_LOADER_INO which is * not initialized on a new filesystem. */ } ei->i_flags = le32_to_cpu(raw_inode->i_flags); inode->i_blocks = ext4_inode_blocks(raw_inode, ei); ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); if (ext4_has_feature_64bit(sb)) ei->i_file_acl |= ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; inode->i_size = ext4_isize(raw_inode); ei->i_disksize = inode->i_size; #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; #endif inode->i_generation = le32_to_cpu(raw_inode->i_generation); ei->i_block_group = iloc.block_group; ei->i_last_alloc_group = ~0; /* * NOTE! The in-memory inode i_data array is in little-endian order * even on big-endian machines: we do NOT byteswap the block numbers! */ for (block = 0; block < EXT4_N_BLOCKS; block++) ei->i_data[block] = raw_inode->i_block[block]; INIT_LIST_HEAD(&ei->i_orphan); /* * Set transaction id's of transactions that have to be committed * to finish f[data]sync. We set them to currently running transaction * as we cannot be sure that the inode or some of its metadata isn't * part of the transaction - the inode could have been reclaimed and * now it is reread from disk. */ if (journal) { transaction_t *transaction; tid_t tid; read_lock(&journal->j_state_lock); if (journal->j_running_transaction) transaction = journal->j_running_transaction; else transaction = journal->j_committing_transaction; if (transaction) tid = transaction->t_tid; else tid = journal->j_commit_sequence; read_unlock(&journal->j_state_lock); ei->i_sync_tid = tid; ei->i_datasync_tid = tid; } if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { if (ei->i_extra_isize == 0) { /* The extra space is currently unused. Use it. 
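			 * (e.g. with a 256-byte on-disk inode this sets
			 * i_extra_isize to sizeof(struct ext4_inode) - 128,
			 * EXT4_GOOD_OLD_INODE_SIZE being 128.)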
*/ ei->i_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; } else { ext4_iget_extra_inode(inode, raw_inode, ei); } } EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { inode->i_version = le32_to_cpu(raw_inode->i_disk_version); if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) inode->i_version |= (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; } } ret = 0; if (ei->i_file_acl && !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", ei->i_file_acl); ret = -EFSCORRUPTED; goto bad_inode; } else if (!ext4_has_inline_data(inode)) { if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || (S_ISLNK(inode->i_mode) && !ext4_inode_is_fast_symlink(inode)))) /* Validate extent which is part of inode */ ret = ext4_ext_check_inode(inode); } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || (S_ISLNK(inode->i_mode) && !ext4_inode_is_fast_symlink(inode))) { /* Validate block references which are part of inode */ ret = ext4_ind_check_inode(inode); } } if (ret) goto bad_inode; if (S_ISREG(inode->i_mode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; } else if (S_ISLNK(inode->i_mode)) { if (ext4_encrypted_inode(inode)) { inode->i_op = &ext4_encrypted_symlink_inode_operations; ext4_set_aops(inode); } else if (ext4_inode_is_fast_symlink(inode)) { inode->i_link = (char *)ei->i_data; inode->i_op = &ext4_fast_symlink_inode_operations; nd_terminate_link(ei->i_data, inode->i_size, sizeof(ei->i_data) - 1); } else { inode->i_op = &ext4_symlink_inode_operations; ext4_set_aops(inode); } } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { inode->i_op = &ext4_special_inode_operations; if (raw_inode->i_block[0]) init_special_inode(inode, inode->i_mode, old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); else init_special_inode(inode, inode->i_mode, new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); } else if (ino == EXT4_BOOT_LOADER_INO) { make_bad_inode(inode); } else { ret = -EFSCORRUPTED; EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); goto bad_inode; } brelse(iloc.bh); ext4_set_inode_flags(inode); unlock_new_inode(inode); return inode; bad_inode: brelse(iloc.bh); iget_failed(inode); return ERR_PTR(ret); } struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino) { if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) return ERR_PTR(-EFSCORRUPTED); return ext4_iget(sb, ino); } static int ext4_inode_blocks_set(handle_t *handle, struct ext4_inode *raw_inode, struct ext4_inode_info *ei) { struct inode *inode = &(ei->vfs_inode); u64 i_blocks = inode->i_blocks; struct super_block *sb = inode->i_sb; if (i_blocks <= ~0U) { /* * i_blocks can be represented in a 32 bit variable * as multiple of 512 bytes */ raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = 0; ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); return 0; } if (!ext4_has_feature_huge_file(sb)) return -EFBIG; if (i_blocks <= 0xffffffffffffULL) { /* * i_blocks can be 
represented in a 48 bit variable * as multiple of 512 bytes */ raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); } else { ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); /* i_block is stored in file system block size */ i_blocks = i_blocks >> (inode->i_blkbits - 9); raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); } return 0; } struct other_inode { unsigned long orig_ino; struct ext4_inode *raw_inode; }; static int other_inode_match(struct inode * inode, unsigned long ino, void *data) { struct other_inode *oi = (struct other_inode *) data; if ((inode->i_ino != ino) || (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | I_DIRTY_SYNC | I_DIRTY_DATASYNC)) || ((inode->i_state & I_DIRTY_TIME) == 0)) return 0; spin_lock(&inode->i_lock); if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | I_DIRTY_SYNC | I_DIRTY_DATASYNC)) == 0) && (inode->i_state & I_DIRTY_TIME)) { struct ext4_inode_info *ei = EXT4_I(inode); inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); spin_unlock(&inode->i_lock); spin_lock(&ei->i_raw_lock); EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); ext4_inode_csum_set(inode, oi->raw_inode, ei); spin_unlock(&ei->i_raw_lock); trace_ext4_other_inode_update_time(inode, oi->orig_ino); return -1; } spin_unlock(&inode->i_lock); return -1; } /* * Opportunistically update the other time fields for other inodes in * the same inode table block. */ static void ext4_update_other_inodes_time(struct super_block *sb, unsigned long orig_ino, char *buf) { struct other_inode oi; unsigned long ino; int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; int inode_size = EXT4_INODE_SIZE(sb); oi.orig_ino = orig_ino; /* * Calculate the first inode in the inode table block. Inode * numbers are one-based. That is, the first inode in a block * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). */ ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1; for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { if (ino == orig_ino) continue; oi.raw_inode = (struct ext4_inode *) buf; (void) find_inode_nowait(sb, ino, other_inode_match, &oi); } } /* * Post the struct inode info into an on-disk inode location in the * buffer-cache. This gobbles the caller's reference to the * buffer_head in the inode location struct. * * The caller must have write access to iloc->bh. */ static int ext4_do_update_inode(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc) { struct ext4_inode *raw_inode = ext4_raw_inode(iloc); struct ext4_inode_info *ei = EXT4_I(inode); struct buffer_head *bh = iloc->bh; struct super_block *sb = inode->i_sb; int err = 0, rc, block; int need_datasync = 0, set_large_file = 0; uid_t i_uid; gid_t i_gid; spin_lock(&ei->i_raw_lock); /* For fields not tracked in the in-memory inode, * initialise them to zero for new inodes. */ if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); ext4_get_inode_flags(ei); raw_inode->i_mode = cpu_to_le16(inode->i_mode); i_uid = i_uid_read(inode); i_gid = i_gid_read(inode); if (!(test_opt(inode->i_sb, NO_UID32))) { raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); /* * Fix up interoperability with old kernels. 
Otherwise, old inodes get * re-used with the upper 16 bits of the uid/gid intact */ if (!ei->i_dtime) { raw_inode->i_uid_high = cpu_to_le16(high_16_bits(i_uid)); raw_inode->i_gid_high = cpu_to_le16(high_16_bits(i_gid)); } else { raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } } else { raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); raw_inode->i_uid_high = 0; raw_inode->i_gid_high = 0; } raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); err = ext4_inode_blocks_set(handle, raw_inode, ei); if (err) { spin_unlock(&ei->i_raw_lock); goto out_brelse; } raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) raw_inode->i_file_acl_high = cpu_to_le16(ei->i_file_acl >> 32); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); if (ei->i_disksize != ext4_isize(raw_inode)) { ext4_isize_set(raw_inode, ei->i_disksize); need_datasync = 1; } if (ei->i_disksize > 0x7fffffffULL) { if (!ext4_has_feature_large_file(sb) || EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV)) set_large_file = 1; } raw_inode->i_generation = cpu_to_le32(inode->i_generation); if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { if (old_valid_dev(inode->i_rdev)) { raw_inode->i_block[0] = cpu_to_le32(old_encode_dev(inode->i_rdev)); raw_inode->i_block[1] = 0; } else { raw_inode->i_block[0] = 0; raw_inode->i_block[1] = cpu_to_le32(new_encode_dev(inode->i_rdev)); raw_inode->i_block[2] = 0; } } else if (!ext4_has_inline_data(inode)) { for (block = 0; block < EXT4_N_BLOCKS; block++) raw_inode->i_block[block] = ei->i_data[block]; } if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) { raw_inode->i_disk_version = cpu_to_le32(inode->i_version); if (ei->i_extra_isize) { if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) raw_inode->i_version_hi = cpu_to_le32(inode->i_version >> 32); raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); } } ext4_inode_csum_set(inode, raw_inode, ei); spin_unlock(&ei->i_raw_lock); if (inode->i_sb->s_flags & MS_LAZYTIME) ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, bh->b_data); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); rc = ext4_handle_dirty_metadata(handle, NULL, bh); if (!err) err = rc; ext4_clear_inode_state(inode, EXT4_STATE_NEW); if (set_large_file) { BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); if (err) goto out_brelse; ext4_update_dynamic_rev(sb); ext4_set_feature_large_file(sb); ext4_handle_sync(handle); err = ext4_handle_dirty_super(handle, sb); } ext4_update_inode_fsync_trans(handle, inode, need_datasync); out_brelse: brelse(bh); ext4_std_error(inode->i_sb, err); return err; } /* * ext4_write_inode() * * We are called from a few places: * * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files. * Here, there will be no transaction running. We wait for any running * transaction to commit. * * - Within flush work (sys_sync(), kupdate and such). * We wait on commit, if told to. * * - Within iput_final() -> write_inode_now() * We wait on commit, if told to. 
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL
 * writeback.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this. The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because write_inode() could occur while `stuff()' is running,
 * and the new i_size will be lost. Plus the inode will no longer be on the
 * superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		/*
		 * No need to force transaction in WB_SYNC_NONE mode. Also
		 * ext4_sync_fs() will force the commit after everything is
		 * written.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (err)
			return err;
		/*
		 * sync(2) will flush the whole buffer cache. No need to do
		 * it here separately for each inode.
		 */
		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
 * buffers that are attached to a page straddling i_size and are undergoing
 * commit. In that case we have to wait for commit to finish and try again.
 */
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
	struct page *page;
	unsigned offset;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	tid_t commit_tid = 0;
	int ret;

	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
	/*
	 * All buffers in the last page remain valid? Then there's nothing to
	 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
	 * blocksize case
	 */
	if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
		return;
	while (1) {
		page = find_lock_page(inode->i_mapping,
				      inode->i_size >> PAGE_CACHE_SHIFT);
		if (!page)
			return;
		ret = __ext4_journalled_invalidatepage(page, offset,
						PAGE_CACHE_SIZE - offset);
		unlock_page(page);
		page_cache_release(page);
		if (ret != -EBUSY)
			return;
		commit_tid = 0;
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
	}
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible. In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk. (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = dquot_transfer(inode, attr);
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		handle_t *handle;
		loff_t oldsize = inode->i_size;
		int shrink = (attr->ia_size <= inode->i_size);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes)
				return -EFBIG;
		}
		if (!S_ISREG(inode->i_mode))
			return -EINVAL;

		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
			inode_inc_iversion(inode);

		if (ext4_should_order_data(inode) &&
		    (attr->ia_size < inode->i_size)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error)
				goto err_out;
		}
		if (attr->ia_size != inode->i_size) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
			if (IS_ERR(handle)) {
				error = PTR_ERR(handle);
				goto err_out;
			}
			if (ext4_handle_valid(handle) && shrink) {
				error = ext4_orphan_add(handle, inode);
				orphan = 1;
			}
			/*
			 * Update c/mtime on truncate up, ext4_truncate() will
			 * update c/mtime in shrink case below
			 */
			if (!shrink) {
				inode->i_mtime = ext4_current_time(inode);
				inode->i_ctime = inode->i_mtime;
			}
			down_write(&EXT4_I(inode)->i_data_sem);
			EXT4_I(inode)->i_disksize = attr->ia_size;
			rc = ext4_mark_inode_dirty(handle, inode);
			if (!error)
				error = rc;
			/*
			 * We have to update i_size under i_data_sem together
			 * with i_disksize to avoid races with writeback code
			 * running ext4_wb_update_i_disksize().
			 */
			if (!error)
				i_size_write(inode, attr->ia_size);
			up_write(&EXT4_I(inode)->i_data_sem);
			ext4_journal_stop(handle);
			if (error) {
				if (orphan)
					ext4_orphan_del(NULL, inode);
				goto err_out;
			}
		}
		if (!shrink)
			pagecache_isize_extended(inode, oldsize, inode->i_size);

		/*
		 * Blocks are going to be removed from the inode. Wait
		 * for dio in flight. Temporarily disable
		 * dioread_nolock to prevent livelock.
		 */
		if (orphan) {
			if (!ext4_should_journal_data(inode)) {
				ext4_inode_block_unlocked_dio(inode);
				inode_dio_wait(inode);
				ext4_inode_resume_unlocked_dio(inode);
			} else
				ext4_wait_for_tail_page_commit(inode);
		}
		down_write(&EXT4_I(inode)->i_mmap_sem);
		/*
		 * Truncate pagecache after we've waited for commit
		 * in data=journal mode to make pages freeable.
		 */
		truncate_pagecache(inode, inode->i_size);
		if (shrink)
			ext4_truncate(inode);
		up_write(&EXT4_I(inode)->i_mmap_sem);
	}

	if (!rc) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(inode, inode->i_mode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long long delalloc_blocks;

	inode = d_inode(dentry);
	generic_fillattr(inode, stat);

	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync,
	 * and others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(ext4_has_inline_data(inode)))
		stat->blocks += (stat->size + 511) >> 9;

	/*
	 * We can't update i_blocks if the block allocation is delayed
	 * otherwise in the case of system crash before the real block
	 * allocation is done, we will have i_blocks inconsistent with
	 * on-disk file blocks.
	 * We always keep i_blocks updated together with real
	 * allocation. But to not confuse with user, stat
	 * will return the blocks that include the delayed allocation
	 * blocks for this file.
	 */
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				   EXT4_I(inode)->i_reserved_data_blocks);
	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
	return 0;
}

static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
				   int pextents)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, lblocks);
	return ext4_ext_index_trans_blocks(inode, pextents);
}

/*
 * Account for index blocks, block groups bitmaps and block group
 * descriptor blocks if we modify datablocks and index blocks.
 * In the worst case, the index blocks spread over different block groups.
 *
 * If datablocks are discontiguous, they could spread over
 * different block groups too. If they are contiguous, with flexbg,
 * they could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to map @lblocks logical
	 * blocks to @pextents physical extents?
	 */
	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors need
	 * to account
	 */
	groups = idxblocks + pextents;
	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, where we allocate
 * one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, bpp);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whatever calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 */
int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			     struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
		       new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O. This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating? Not really. Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	static unsigned int mnt_count;
	int err, ret;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (ext4_handle_valid(handle) &&
	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		/*
		 * We need extra buffer credits since we may write into EA block
		 * with this same handle. If journal_extend fails, then it will
		 * only result in a minor loss of functionality for that inode.
		 * If this is felt to be critical, then e2fsck should be run to
		 * force a large enough s_min_extra_isize.
		 */
		if ((jbd2_journal_extend(handle,
			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
			ret = ext4_expand_extra_isize(inode,
						      sbi->s_want_extra_isize,
						      iloc, handle);
			if (ret) {
				ext4_set_inode_state(inode,
						     EXT4_STATE_NO_EXPAND);
				if (mnt_count !=
					le16_to_cpu(sbi->s_es->s_mnt_count)) {
					ext4_warning(inode->i_sb,
					"Unable to expand inode %lu. Delete"
					" some EAs or run e2fsck.",
					inode->i_ino);
					mnt_count =
					  le16_to_cpu(sbi->s_es->s_mnt_count);
				}
			}
		}
	}
	if (!err)
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 *
 * If only the I_DIRTY_TIME flag is set, we can skip everything. If
 * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need
 * to copy into the on-disk inode structure are the timestamp fields.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	if (flags == I_DIRTY_TIME)
		return;
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early. Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 NULL,
								 iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous. If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	/* We have to allocate physical blocks for delalloc blocks
	 * before flushing the journal; otherwise delalloc blocks cannot
	 * be allocated any more. Even worse, truncate on delalloc blocks
	 * could trigger a BUG by flushing delalloc blocks in the journal.
	 * There is no delalloc block in non-journal data mode.
	 */
	if (val && test_opt(inode->i_sb, DELALLOC)) {
		err = ext4_alloc_da_blocks(inode);
		if (err < 0)
			return err;
	}

	/* Wait for all existing dio workers */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk. We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		err = jbd2_journal_flush(journal);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			ext4_inode_resume_unlocked_dio(inode);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);
	ext4_inode_resume_unlocked_dio(inode);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	down_read(&EXT4_I(inode)->i_mmap_sem);
	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			ret = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (ret == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us?
*/ if (page->mapping != mapping || page_offset(page) > size) { unlock_page(page); ret = VM_FAULT_NOPAGE; goto out; } if (page->index == size >> PAGE_CACHE_SHIFT) len = size & ~PAGE_CACHE_MASK; else len = PAGE_CACHE_SIZE; /* * Return if we have all the buffers mapped. This avoids the need to do * journal_start/journal_stop which can block and take a long time */ if (page_has_buffers(page)) { if (!ext4_walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, ext4_bh_unmapped)) { /* Wait so that we don't change page under IO */ wait_for_stable_page(page); ret = VM_FAULT_LOCKED; goto out; } } unlock_page(page); /* OK, we need to fill the hole... */ if (ext4_should_dioread_nolock(inode)) get_block = ext4_get_block_write; else get_block = ext4_get_block; retry_alloc: handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, ext4_writepage_trans_blocks(inode)); if (IS_ERR(handle)) { ret = VM_FAULT_SIGBUS; goto out; } ret = block_page_mkwrite(vma, vmf, get_block); if (!ret && ext4_should_journal_data(inode)) { if (ext4_walk_page_buffers(handle, page_buffers(page), 0, PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { unlock_page(page); ret = VM_FAULT_SIGBUS; ext4_journal_stop(handle); goto out; } ext4_set_inode_state(inode, EXT4_STATE_JDATA); } ext4_journal_stop(handle); if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_alloc; out_ret: ret = block_page_mkwrite_return(ret); out: up_read(&EXT4_I(inode)->i_mmap_sem); sb_end_pagefault(inode->i_sb); return ret; } int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct inode *inode = file_inode(vma->vm_file); int err; down_read(&EXT4_I(inode)->i_mmap_sem); err = filemap_fault(vma, vmf); up_read(&EXT4_I(inode)->i_mmap_sem); return err; }
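#if 0
/*
 * Editor's sketch (not part of the original file, hence the #if 0 guard, a
 * convention this file already uses for ext4_pin_inode above): a minimal
 * userspace analogue of the i_mmap_sem discipline used by ext4_setattr(),
 * ext4_page_mkwrite() and ext4_filemap_fault(). Fault paths take the lock
 * shared while truncate takes it exclusive, so a fault can never validate a
 * page past a size change in progress. All names below are hypothetical.
 */
#include <pthread.h>
#include <stddef.h>

struct demo_inode {
	pthread_rwlock_t mmap_sem;	/* plays the role of i_mmap_sem */
	size_t size;			/* plays the role of i_size */
};

/* Fault path: shared lock, mirroring ext4_filemap_fault(). */
static int demo_fault(struct demo_inode *di, size_t off)
{
	int valid;

	pthread_rwlock_rdlock(&di->mmap_sem);
	valid = off < di->size;		/* a page is only valid below size */
	pthread_rwlock_unlock(&di->mmap_sem);
	return valid;
}

/* Truncate path: exclusive lock, mirroring the shrink case in ext4_setattr(). */
static void demo_truncate(struct demo_inode *di, size_t new_size)
{
	pthread_rwlock_wrlock(&di->mmap_sem);
	di->size = new_size;		/* size changes atomically w.r.t. faults */
	pthread_rwlock_unlock(&di->mmap_sem);
}
#endif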
./CrossVul/dataset_final_sorted/CWE-362/c/good_1819_3
crossvul-cpp_data_bad_1496_4
/* ssl/ssl_sess.c */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ /* ==================================================================== * Copyright (c) 1998-2006 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * openssl-core@openssl.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ /* ==================================================================== * Copyright 2005 Nokia. All rights reserved. * * The portions of the attached software ("Contribution") is developed by * Nokia Corporation and is licensed pursuant to the OpenSSL open source * license. * * The Contribution, originally written by Mika Kousa and Pasi Eronen of * Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites * support (see RFC 4279) to OpenSSL. * * No patent licenses or other rights except those expressly stated in * the OpenSSL open source license shall be deemed granted or received * expressly, by implication, estoppel, or otherwise. * * No assurances are provided by Nokia that the Contribution does not * infringe the patent or other intellectual property rights of any third * party or that the license provides you with all the necessary rights * to make use of the Contribution. * * THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN * ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA * SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY * OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR * OTHERWISE. 
*/ #include <stdio.h> #include <openssl/lhash.h> #include <openssl/rand.h> #ifndef OPENSSL_NO_ENGINE # include <openssl/engine.h> #endif #include "ssl_locl.h" static void SSL_SESSION_list_remove(SSL_CTX *ctx, SSL_SESSION *s); static void SSL_SESSION_list_add(SSL_CTX *ctx, SSL_SESSION *s); static int remove_session_lock(SSL_CTX *ctx, SSL_SESSION *c, int lck); SSL_SESSION *SSL_get_session(const SSL *ssl) /* aka SSL_get0_session; gets 0 objects, just returns a copy of the pointer */ { return (ssl->session); } SSL_SESSION *SSL_get1_session(SSL *ssl) /* variant of SSL_get_session: caller really gets something */ { SSL_SESSION *sess; /* * Need to lock this all up rather than just use CRYPTO_add so that * somebody doesn't free ssl->session between when we check it's non-null * and when we up the reference count. */ CRYPTO_w_lock(CRYPTO_LOCK_SSL_SESSION); sess = ssl->session; if (sess) sess->references++; CRYPTO_w_unlock(CRYPTO_LOCK_SSL_SESSION); return (sess); } int SSL_SESSION_get_ex_new_index(long argl, void *argp, CRYPTO_EX_new *new_func, CRYPTO_EX_dup *dup_func, CRYPTO_EX_free *free_func) { return CRYPTO_get_ex_new_index(CRYPTO_EX_INDEX_SSL_SESSION, argl, argp, new_func, dup_func, free_func); } int SSL_SESSION_set_ex_data(SSL_SESSION *s, int idx, void *arg) { return (CRYPTO_set_ex_data(&s->ex_data, idx, arg)); } void *SSL_SESSION_get_ex_data(const SSL_SESSION *s, int idx) { return (CRYPTO_get_ex_data(&s->ex_data, idx)); } SSL_SESSION *SSL_SESSION_new(void) { SSL_SESSION *ss; ss = OPENSSL_malloc(sizeof(*ss)); if (ss == NULL) { SSLerr(SSL_F_SSL_SESSION_NEW, ERR_R_MALLOC_FAILURE); return (0); } memset(ss, 0, sizeof(*ss)); ss->verify_result = 1; /* avoid 0 (= X509_V_OK) just in case */ ss->references = 1; ss->timeout = 60 * 5 + 4; /* 5 minute timeout by default */ ss->time = (unsigned long)time(NULL); ss->prev = NULL; ss->next = NULL; ss->compress_meth = 0; ss->tlsext_hostname = NULL; #ifndef OPENSSL_NO_EC ss->tlsext_ecpointformatlist_length = 0; ss->tlsext_ecpointformatlist = NULL; ss->tlsext_ellipticcurvelist_length = 0; ss->tlsext_ellipticcurvelist = NULL; #endif CRYPTO_new_ex_data(CRYPTO_EX_INDEX_SSL_SESSION, ss, &ss->ex_data); #ifndef OPENSSL_NO_PSK ss->psk_identity_hint = NULL; ss->psk_identity = NULL; #endif #ifndef OPENSSL_NO_SRP ss->srp_username = NULL; #endif return (ss); } const unsigned char *SSL_SESSION_get_id(const SSL_SESSION *s, unsigned int *len) { if (len) *len = s->session_id_length; return s->session_id; } unsigned int SSL_SESSION_get_compress_id(const SSL_SESSION *s) { return s->compress_meth; } /* * SSLv3/TLSv1 has 32 bytes (256 bits) of session ID space. As such, filling * the ID with random junk repeatedly until we have no conflict is going to * complete in one iteration pretty much "most" of the time (btw: * understatement). So, if it takes us 10 iterations and we still can't avoid * a conflict - well that's a reasonable point to call it quits. Either the * RAND code is broken or someone is trying to open roughly very close to * 2^256 SSL sessions to our server. How you might store that many sessions * is perhaps a more interesting question ... 
 */
#define MAX_SESS_ID_ATTEMPTS 10
static int def_generate_session_id(const SSL *ssl, unsigned char *id,
                                   unsigned int *id_len)
{
    unsigned int retry = 0;
    do
        if (RAND_bytes(id, *id_len) <= 0)
            return 0;
    while (SSL_has_matching_session_id(ssl, id, *id_len) &&
           (++retry < MAX_SESS_ID_ATTEMPTS)) ;
    if (retry < MAX_SESS_ID_ATTEMPTS)
        return 1;
    /* else - woops a session_id match */
    /*
     * XXX We should also check the external cache -- but the probability of
     * a collision is negligible, and we could not prevent the concurrent
     * creation of sessions with identical IDs since we currently don't have
     * means to atomically check whether a session ID already exists and make
     * a reservation for it if it does not (this problem applies to the
     * internal cache as well).
     */
    return 0;
}

int ssl_get_new_session(SSL *s, int session)
{
    /* This gets used by clients and servers. */

    unsigned int tmp;
    SSL_SESSION *ss = NULL;
    GEN_SESSION_CB cb = def_generate_session_id;

    if ((ss = SSL_SESSION_new()) == NULL)
        return (0);

    /* If the context has a default timeout, use it */
    if (s->session_ctx->session_timeout == 0)
        ss->timeout = SSL_get_default_timeout(s);
    else
        ss->timeout = s->session_ctx->session_timeout;

    SSL_SESSION_free(s->session);
    s->session = NULL;

    if (session) {
        if (s->version == SSL3_VERSION) {
            ss->ssl_version = SSL3_VERSION;
            ss->session_id_length = SSL3_SSL_SESSION_ID_LENGTH;
        } else if (s->version == TLS1_VERSION) {
            ss->ssl_version = TLS1_VERSION;
            ss->session_id_length = SSL3_SSL_SESSION_ID_LENGTH;
        } else if (s->version == TLS1_1_VERSION) {
            ss->ssl_version = TLS1_1_VERSION;
            ss->session_id_length = SSL3_SSL_SESSION_ID_LENGTH;
        } else if (s->version == TLS1_2_VERSION) {
            ss->ssl_version = TLS1_2_VERSION;
            ss->session_id_length = SSL3_SSL_SESSION_ID_LENGTH;
        } else if (s->version == DTLS1_BAD_VER) {
            ss->ssl_version = DTLS1_BAD_VER;
            ss->session_id_length = SSL3_SSL_SESSION_ID_LENGTH;
        } else if (s->version == DTLS1_VERSION) {
            ss->ssl_version = DTLS1_VERSION;
            ss->session_id_length = SSL3_SSL_SESSION_ID_LENGTH;
        } else if (s->version == DTLS1_2_VERSION) {
            ss->ssl_version = DTLS1_2_VERSION;
            ss->session_id_length = SSL3_SSL_SESSION_ID_LENGTH;
        } else {
            SSLerr(SSL_F_SSL_GET_NEW_SESSION, SSL_R_UNSUPPORTED_SSL_VERSION);
            SSL_SESSION_free(ss);
            return (0);
        }

        /*-
         * If RFC5077 ticket, use empty session ID (as server).
         * Note that:
         * (a) ssl_get_prev_session() does lookahead into the
         *     ClientHello extensions to find the session ticket.
         *     When ssl_get_prev_session() fails, s3_srvr.c calls
         *     ssl_get_new_session() in ssl3_get_client_hello().
         *     At that point, it has not yet parsed the extensions,
         *     however, because of the lookahead, it already knows
         *     whether a ticket is expected or not.
         *
         * (b) s3_clnt.c calls ssl_get_new_session() before parsing
         *     ServerHello extensions, and before recording the session
         *     ID received from the server, so this block is a noop.
         */
        if (s->tlsext_ticket_expected) {
            ss->session_id_length = 0;
            goto sess_id_done;
        }

        /* Choose which callback will set the session ID */
        CRYPTO_r_lock(CRYPTO_LOCK_SSL_CTX);
        if (s->generate_session_id)
            cb = s->generate_session_id;
        else if (s->session_ctx->generate_session_id)
            cb = s->session_ctx->generate_session_id;
        CRYPTO_r_unlock(CRYPTO_LOCK_SSL_CTX);
        /* Choose a session ID */
        tmp = ss->session_id_length;
        if (!cb(s, ss->session_id, &tmp)) {
            /* The callback failed */
            SSLerr(SSL_F_SSL_GET_NEW_SESSION,
                   SSL_R_SSL_SESSION_ID_CALLBACK_FAILED);
            SSL_SESSION_free(ss);
            return (0);
        }
        /*
         * Don't allow the callback to set the session length to zero,
         * nor set it higher than it was.
*/ if (!tmp || (tmp > ss->session_id_length)) { /* The callback set an illegal length */ SSLerr(SSL_F_SSL_GET_NEW_SESSION, SSL_R_SSL_SESSION_ID_HAS_BAD_LENGTH); SSL_SESSION_free(ss); return (0); } ss->session_id_length = tmp; /* Finally, check for a conflict */ if (SSL_has_matching_session_id(s, ss->session_id, ss->session_id_length)) { SSLerr(SSL_F_SSL_GET_NEW_SESSION, SSL_R_SSL_SESSION_ID_CONFLICT); SSL_SESSION_free(ss); return (0); } sess_id_done: if (s->tlsext_hostname) { ss->tlsext_hostname = BUF_strdup(s->tlsext_hostname); if (ss->tlsext_hostname == NULL) { SSLerr(SSL_F_SSL_GET_NEW_SESSION, ERR_R_INTERNAL_ERROR); SSL_SESSION_free(ss); return 0; } } } else { ss->session_id_length = 0; } if (s->sid_ctx_length > sizeof ss->sid_ctx) { SSLerr(SSL_F_SSL_GET_NEW_SESSION, ERR_R_INTERNAL_ERROR); SSL_SESSION_free(ss); return 0; } memcpy(ss->sid_ctx, s->sid_ctx, s->sid_ctx_length); ss->sid_ctx_length = s->sid_ctx_length; s->session = ss; ss->ssl_version = s->version; ss->verify_result = X509_V_OK; return (1); } /*- * ssl_get_prev attempts to find an SSL_SESSION to be used to resume this * connection. It is only called by servers. * * session_id: points at the session ID in the ClientHello. This code will * read past the end of this in order to parse out the session ticket * extension, if any. * len: the length of the session ID. * limit: a pointer to the first byte after the ClientHello. * * Returns: * -1: error * 0: a session may have been found. * * Side effects: * - If a session is found then s->session is pointed at it (after freeing an * existing session if need be) and s->verify_result is set from the session. * - Both for new and resumed sessions, s->tlsext_ticket_expected is set to 1 * if the server should issue a new session ticket (to 0 otherwise). */ int ssl_get_prev_session(SSL *s, unsigned char *session_id, int len, const unsigned char *limit) { /* This is used only by servers. */ SSL_SESSION *ret = NULL; int fatal = 0; int try_session_cache = 1; int r; if (len < 0 || len > SSL_MAX_SSL_SESSION_ID_LENGTH) goto err; if (session_id + len > limit) { fatal = 1; goto err; } if (len == 0) try_session_cache = 0; /* sets s->tlsext_ticket_expected */ r = tls1_process_ticket(s, session_id, len, limit, &ret); switch (r) { case -1: /* Error during processing */ fatal = 1; goto err; case 0: /* No ticket found */ case 1: /* Zero length ticket found */ break; /* Ok to carry on processing session id. */ case 2: /* Ticket found but not decrypted. */ case 3: /* Ticket decrypted, *ret has been set. 
*/ try_session_cache = 0; break; default: abort(); } if (try_session_cache && ret == NULL && !(s->session_ctx->session_cache_mode & SSL_SESS_CACHE_NO_INTERNAL_LOOKUP)) { SSL_SESSION data; data.ssl_version = s->version; data.session_id_length = len; if (len == 0) return 0; memcpy(data.session_id, session_id, len); CRYPTO_r_lock(CRYPTO_LOCK_SSL_CTX); ret = lh_SSL_SESSION_retrieve(s->session_ctx->sessions, &data); if (ret != NULL) { /* don't allow other threads to steal it: */ CRYPTO_add(&ret->references, 1, CRYPTO_LOCK_SSL_SESSION); } CRYPTO_r_unlock(CRYPTO_LOCK_SSL_CTX); if (ret == NULL) s->session_ctx->stats.sess_miss++; } if (try_session_cache && ret == NULL && s->session_ctx->get_session_cb != NULL) { int copy = 1; if ((ret = s->session_ctx->get_session_cb(s, session_id, len, &copy))) { s->session_ctx->stats.sess_cb_hit++; /* * Increment reference count now if the session callback asks us * to do so (note that if the session structures returned by the * callback are shared between threads, it must handle the * reference count itself [i.e. copy == 0], or things won't be * thread-safe). */ if (copy) CRYPTO_add(&ret->references, 1, CRYPTO_LOCK_SSL_SESSION); /* * Add the externally cached session to the internal cache as * well if and only if we are supposed to. */ if (! (s->session_ctx->session_cache_mode & SSL_SESS_CACHE_NO_INTERNAL_STORE)) { /* * The following should not return 1, otherwise, things are * very strange */ if (SSL_CTX_add_session(s->session_ctx, ret)) goto err; } } } if (ret == NULL) goto err; /* Now ret is non-NULL and we own one of its reference counts. */ if (ret->sid_ctx_length != s->sid_ctx_length || memcmp(ret->sid_ctx, s->sid_ctx, ret->sid_ctx_length)) { /* * We have the session requested by the client, but we don't want to * use it in this context. */ goto err; /* treat like cache miss */ } if ((s->verify_mode & SSL_VERIFY_PEER) && s->sid_ctx_length == 0) { /* * We can't be sure if this session is being used out of context, * which is especially important for SSL_VERIFY_PEER. The application * should have used SSL[_CTX]_set_session_id_context. For this error * case, we generate an error instead of treating the event like a * cache miss (otherwise it would be easy for applications to * effectively disable the session cache by accident without anyone * noticing). 
 */
        SSLerr(SSL_F_SSL_GET_PREV_SESSION,
               SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED);
        fatal = 1;
        goto err;
    }

    if (ret->cipher == NULL) {
        unsigned char buf[5], *p;
        unsigned long l;

        p = buf;
        l = ret->cipher_id;
        l2n(l, p);
        if ((ret->ssl_version >> 8) >= SSL3_VERSION_MAJOR)
            ret->cipher = ssl_get_cipher_by_char(s, &(buf[2]));
        else
            ret->cipher = ssl_get_cipher_by_char(s, &(buf[1]));
        if (ret->cipher == NULL)
            goto err;
    }

    if (ret->timeout < (long)(time(NULL) - ret->time)) { /* timeout */
        s->session_ctx->stats.sess_timeout++;
        if (try_session_cache) {
            /* session was from the cache, so remove it */
            SSL_CTX_remove_session(s->session_ctx, ret);
        }
        goto err;
    }

    s->session_ctx->stats.sess_hit++;

    SSL_SESSION_free(s->session);
    s->session = ret;
    s->verify_result = s->session->verify_result;
    return 1;

 err:
    if (ret != NULL) {
        SSL_SESSION_free(ret);
        if (!try_session_cache) {
            /*
             * The session was from a ticket, so we should issue a ticket for
             * the new session
             */
            s->tlsext_ticket_expected = 1;
        }
    }
    if (fatal)
        return -1;
    else
        return 0;
}

int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *c)
{
    int ret = 0;
    SSL_SESSION *s;

    /*
     * add just 1 reference count for the SSL_CTX's session cache even though
     * it has two ways of access: each session is in a doubly linked list and
     * an lhash
     */
    CRYPTO_add(&c->references, 1, CRYPTO_LOCK_SSL_SESSION);
    /*
     * if session c is already in the cache, we take back the increment later
     */
    CRYPTO_w_lock(CRYPTO_LOCK_SSL_CTX);
    s = lh_SSL_SESSION_insert(ctx->sessions, c);

    /*
     * s != NULL iff we already had a session with the given session ID. In
     * this case, s == c should hold (then we did not really modify
     * ctx->sessions), or we're in trouble.
     */
    if (s != NULL && s != c) {
        /* We *are* in trouble ... */
        SSL_SESSION_list_remove(ctx, s);
        SSL_SESSION_free(s);
        /*
         * ... so pretend the other session did not exist in cache (we cannot
         * handle two SSL_SESSION structures with identical session ID in the
         * same cache, which could happen e.g.
when two threads concurrently * obtain the same session from an external cache) */ s = NULL; } /* Put at the head of the queue unless it is already in the cache */ if (s == NULL) SSL_SESSION_list_add(ctx, c); if (s != NULL) { /* * existing cache entry -- decrement previously incremented reference * count because it already takes into account the cache */ SSL_SESSION_free(s); /* s == c */ ret = 0; } else { /* * new cache entry -- remove old ones if cache has become too large */ ret = 1; if (SSL_CTX_sess_get_cache_size(ctx) > 0) { while (SSL_CTX_sess_number(ctx) > SSL_CTX_sess_get_cache_size(ctx)) { if (!remove_session_lock(ctx, ctx->session_cache_tail, 0)) break; else ctx->stats.sess_cache_full++; } } } CRYPTO_w_unlock(CRYPTO_LOCK_SSL_CTX); return (ret); } int SSL_CTX_remove_session(SSL_CTX *ctx, SSL_SESSION *c) { return remove_session_lock(ctx, c, 1); } static int remove_session_lock(SSL_CTX *ctx, SSL_SESSION *c, int lck) { SSL_SESSION *r; int ret = 0; if ((c != NULL) && (c->session_id_length != 0)) { if (lck) CRYPTO_w_lock(CRYPTO_LOCK_SSL_CTX); if ((r = lh_SSL_SESSION_retrieve(ctx->sessions, c)) == c) { ret = 1; r = lh_SSL_SESSION_delete(ctx->sessions, c); SSL_SESSION_list_remove(ctx, c); } if (lck) CRYPTO_w_unlock(CRYPTO_LOCK_SSL_CTX); if (ret) { r->not_resumable = 1; if (ctx->remove_session_cb != NULL) ctx->remove_session_cb(ctx, r); SSL_SESSION_free(r); } } else ret = 0; return (ret); } void SSL_SESSION_free(SSL_SESSION *ss) { int i; if (ss == NULL) return; i = CRYPTO_add(&ss->references, -1, CRYPTO_LOCK_SSL_SESSION); #ifdef REF_PRINT REF_PRINT("SSL_SESSION", ss); #endif if (i > 0) return; #ifdef REF_CHECK if (i < 0) { fprintf(stderr, "SSL_SESSION_free, bad reference count\n"); abort(); /* ok */ } #endif CRYPTO_free_ex_data(CRYPTO_EX_INDEX_SSL_SESSION, ss, &ss->ex_data); OPENSSL_cleanse(ss->master_key, sizeof ss->master_key); OPENSSL_cleanse(ss->session_id, sizeof ss->session_id); ssl_sess_cert_free(ss->sess_cert); X509_free(ss->peer); sk_SSL_CIPHER_free(ss->ciphers); OPENSSL_free(ss->tlsext_hostname); OPENSSL_free(ss->tlsext_tick); #ifndef OPENSSL_NO_EC ss->tlsext_ecpointformatlist_length = 0; OPENSSL_free(ss->tlsext_ecpointformatlist); ss->tlsext_ellipticcurvelist_length = 0; OPENSSL_free(ss->tlsext_ellipticcurvelist); #endif /* OPENSSL_NO_EC */ #ifndef OPENSSL_NO_PSK OPENSSL_free(ss->psk_identity_hint); OPENSSL_free(ss->psk_identity); #endif #ifndef OPENSSL_NO_SRP OPENSSL_free(ss->srp_username); #endif OPENSSL_clear_free(ss, sizeof(*ss)); } int SSL_set_session(SSL *s, SSL_SESSION *session) { int ret = 0; const SSL_METHOD *meth; if (session != NULL) { meth = s->ctx->method->get_ssl_method(session->ssl_version); if (meth == NULL) meth = s->method->get_ssl_method(session->ssl_version); if (meth == NULL) { SSLerr(SSL_F_SSL_SET_SESSION, SSL_R_UNABLE_TO_FIND_SSL_METHOD); return (0); } if (meth != s->method) { if (!SSL_set_ssl_method(s, meth)) return (0); } /* CRYPTO_w_lock(CRYPTO_LOCK_SSL); */ CRYPTO_add(&session->references, 1, CRYPTO_LOCK_SSL_SESSION); SSL_SESSION_free(s->session); s->session = session; s->verify_result = s->session->verify_result; /* CRYPTO_w_unlock(CRYPTO_LOCK_SSL); */ ret = 1; } else { SSL_SESSION_free(s->session); s->session = NULL; meth = s->ctx->method; if (meth != s->method) { if (!SSL_set_ssl_method(s, meth)) return (0); } ret = 1; } return (ret); } long SSL_SESSION_set_timeout(SSL_SESSION *s, long t) { if (s == NULL) return (0); s->timeout = t; return (1); } long SSL_SESSION_get_timeout(const SSL_SESSION *s) { if (s == NULL) return (0); return (s->timeout); } 
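#if 0
/*
 * Editor's sketch (not part of the original file): typical application use
 * of the timeout accessors above. SSL_CTX_set_timeout() only affects
 * sessions created afterwards -- ssl_get_new_session() copies the context's
 * session_timeout into each new session -- while SSL_SESSION_set_timeout()
 * adjusts one existing session. demo_tune_session_lifetime() is a
 * hypothetical name; the SSL_* calls are the ones defined in this file.
 */
static void demo_tune_session_lifetime(SSL_CTX *ctx, SSL_SESSION *sess)
{
    /*
     * Sessions created under this context from now on live for one hour;
     * the previous context-wide timeout is returned.
     */
    long old = SSL_CTX_set_timeout(ctx, 60 * 60);

    (void)old;

    /* An already-established session can be adjusted individually. */
    if (SSL_SESSION_get_timeout(sess) < 60 * 60)
        SSL_SESSION_set_timeout(sess, 60 * 60);
}
#endif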
long SSL_SESSION_get_time(const SSL_SESSION *s) { if (s == NULL) return (0); return (s->time); } long SSL_SESSION_set_time(SSL_SESSION *s, long t) { if (s == NULL) return (0); s->time = t; return (t); } int SSL_SESSION_has_ticket(const SSL_SESSION *s) { return (s->tlsext_ticklen > 0) ? 1 : 0; } unsigned long SSL_SESSION_get_ticket_lifetime_hint(const SSL_SESSION *s) { return s->tlsext_tick_lifetime_hint; } void SSL_SESSION_get0_ticket(const SSL_SESSION *s, unsigned char **tick, size_t *len) { *len = s->tlsext_ticklen; if (tick != NULL) *tick = s->tlsext_tick; } X509 *SSL_SESSION_get0_peer(SSL_SESSION *s) { return s->peer; } int SSL_SESSION_set1_id_context(SSL_SESSION *s, const unsigned char *sid_ctx, unsigned int sid_ctx_len) { if (sid_ctx_len > SSL_MAX_SID_CTX_LENGTH) { SSLerr(SSL_F_SSL_SESSION_SET1_ID_CONTEXT, SSL_R_SSL_SESSION_ID_CONTEXT_TOO_LONG); return 0; } s->sid_ctx_length = sid_ctx_len; memcpy(s->sid_ctx, sid_ctx, sid_ctx_len); return 1; } long SSL_CTX_set_timeout(SSL_CTX *s, long t) { long l; if (s == NULL) return (0); l = s->session_timeout; s->session_timeout = t; return (l); } long SSL_CTX_get_timeout(const SSL_CTX *s) { if (s == NULL) return (0); return (s->session_timeout); } int SSL_set_session_secret_cb(SSL *s, int (*tls_session_secret_cb) (SSL *s, void *secret, int *secret_len, STACK_OF(SSL_CIPHER) *peer_ciphers, SSL_CIPHER **cipher, void *arg), void *arg) { if (s == NULL) return (0); s->tls_session_secret_cb = tls_session_secret_cb; s->tls_session_secret_cb_arg = arg; return (1); } int SSL_set_session_ticket_ext_cb(SSL *s, tls_session_ticket_ext_cb_fn cb, void *arg) { if (s == NULL) return (0); s->tls_session_ticket_ext_cb = cb; s->tls_session_ticket_ext_cb_arg = arg; return (1); } int SSL_set_session_ticket_ext(SSL *s, void *ext_data, int ext_len) { if (s->version >= TLS1_VERSION) { OPENSSL_free(s->tlsext_session_ticket); s->tlsext_session_ticket = NULL; s->tlsext_session_ticket = OPENSSL_malloc(sizeof(TLS_SESSION_TICKET_EXT) + ext_len); if (!s->tlsext_session_ticket) { SSLerr(SSL_F_SSL_SET_SESSION_TICKET_EXT, ERR_R_MALLOC_FAILURE); return 0; } if (ext_data) { s->tlsext_session_ticket->length = ext_len; s->tlsext_session_ticket->data = s->tlsext_session_ticket + 1; memcpy(s->tlsext_session_ticket->data, ext_data, ext_len); } else { s->tlsext_session_ticket->length = 0; s->tlsext_session_ticket->data = NULL; } return 1; } return 0; } typedef struct timeout_param_st { SSL_CTX *ctx; long time; LHASH_OF(SSL_SESSION) *cache; } TIMEOUT_PARAM; static void timeout_doall_arg(SSL_SESSION *s, TIMEOUT_PARAM *p) { if ((p->time == 0) || (p->time > (s->time + s->timeout))) { /* timeout */ /* * The reason we don't call SSL_CTX_remove_session() is to save on * locking overhead */ (void)lh_SSL_SESSION_delete(p->cache, s); SSL_SESSION_list_remove(p->ctx, s); s->not_resumable = 1; if (p->ctx->remove_session_cb != NULL) p->ctx->remove_session_cb(p->ctx, s); SSL_SESSION_free(s); } } static IMPLEMENT_LHASH_DOALL_ARG_FN(timeout, SSL_SESSION, TIMEOUT_PARAM) void SSL_CTX_flush_sessions(SSL_CTX *s, long t) { unsigned long i; TIMEOUT_PARAM tp; tp.ctx = s; tp.cache = s->sessions; if (tp.cache == NULL) return; tp.time = t; CRYPTO_w_lock(CRYPTO_LOCK_SSL_CTX); i = CHECKED_LHASH_OF(SSL_SESSION, tp.cache)->down_load; CHECKED_LHASH_OF(SSL_SESSION, tp.cache)->down_load = 0; lh_SSL_SESSION_doall_arg(tp.cache, LHASH_DOALL_ARG_FN(timeout), TIMEOUT_PARAM, &tp); CHECKED_LHASH_OF(SSL_SESSION, tp.cache)->down_load = i; CRYPTO_w_unlock(CRYPTO_LOCK_SSL_CTX); } int ssl_clear_bad_session(SSL *s) { if ((s->session != 
NULL) && !(s->shutdown & SSL_SENT_SHUTDOWN) && !(SSL_in_init(s) || SSL_in_before(s))) { SSL_CTX_remove_session(s->ctx, s->session); return (1); } else return (0); } /* locked by SSL_CTX in the calling function */ static void SSL_SESSION_list_remove(SSL_CTX *ctx, SSL_SESSION *s) { if ((s->next == NULL) || (s->prev == NULL)) return; if (s->next == (SSL_SESSION *)&(ctx->session_cache_tail)) { /* last element in list */ if (s->prev == (SSL_SESSION *)&(ctx->session_cache_head)) { /* only one element in list */ ctx->session_cache_head = NULL; ctx->session_cache_tail = NULL; } else { ctx->session_cache_tail = s->prev; s->prev->next = (SSL_SESSION *)&(ctx->session_cache_tail); } } else { if (s->prev == (SSL_SESSION *)&(ctx->session_cache_head)) { /* first element in list */ ctx->session_cache_head = s->next; s->next->prev = (SSL_SESSION *)&(ctx->session_cache_head); } else { /* middle of list */ s->next->prev = s->prev; s->prev->next = s->next; } } s->prev = s->next = NULL; } static void SSL_SESSION_list_add(SSL_CTX *ctx, SSL_SESSION *s) { if ((s->next != NULL) && (s->prev != NULL)) SSL_SESSION_list_remove(ctx, s); if (ctx->session_cache_head == NULL) { ctx->session_cache_head = s; ctx->session_cache_tail = s; s->prev = (SSL_SESSION *)&(ctx->session_cache_head); s->next = (SSL_SESSION *)&(ctx->session_cache_tail); } else { s->next = ctx->session_cache_head; s->next->prev = s; s->prev = (SSL_SESSION *)&(ctx->session_cache_head); ctx->session_cache_head = s; } } void SSL_CTX_sess_set_new_cb(SSL_CTX *ctx, int (*cb) (struct ssl_st *ssl, SSL_SESSION *sess)) { ctx->new_session_cb = cb; } int (*SSL_CTX_sess_get_new_cb(SSL_CTX *ctx)) (SSL *ssl, SSL_SESSION *sess) { return ctx->new_session_cb; } void SSL_CTX_sess_set_remove_cb(SSL_CTX *ctx, void (*cb) (SSL_CTX *ctx, SSL_SESSION *sess)) { ctx->remove_session_cb = cb; } void (*SSL_CTX_sess_get_remove_cb(SSL_CTX *ctx)) (SSL_CTX *ctx, SSL_SESSION *sess) { return ctx->remove_session_cb; } void SSL_CTX_sess_set_get_cb(SSL_CTX *ctx, SSL_SESSION *(*cb) (struct ssl_st *ssl, unsigned char *data, int len, int *copy)) { ctx->get_session_cb = cb; } SSL_SESSION *(*SSL_CTX_sess_get_get_cb(SSL_CTX *ctx)) (SSL *ssl, unsigned char *data, int len, int *copy) { return ctx->get_session_cb; } void SSL_CTX_set_info_callback(SSL_CTX *ctx, void (*cb) (const SSL *ssl, int type, int val)) { ctx->info_callback = cb; } void (*SSL_CTX_get_info_callback(SSL_CTX *ctx)) (const SSL *ssl, int type, int val) { return ctx->info_callback; } void SSL_CTX_set_client_cert_cb(SSL_CTX *ctx, int (*cb) (SSL *ssl, X509 **x509, EVP_PKEY **pkey)) { ctx->client_cert_cb = cb; } int (*SSL_CTX_get_client_cert_cb(SSL_CTX *ctx)) (SSL *ssl, X509 **x509, EVP_PKEY **pkey) { return ctx->client_cert_cb; } #ifndef OPENSSL_NO_ENGINE int SSL_CTX_set_client_cert_engine(SSL_CTX *ctx, ENGINE *e) { if (!ENGINE_init(e)) { SSLerr(SSL_F_SSL_CTX_SET_CLIENT_CERT_ENGINE, ERR_R_ENGINE_LIB); return 0; } if (!ENGINE_get_ssl_client_cert_function(e)) { SSLerr(SSL_F_SSL_CTX_SET_CLIENT_CERT_ENGINE, SSL_R_NO_CLIENT_CERT_METHOD); ENGINE_finish(e); return 0; } ctx->client_cert_engine = e; return 1; } #endif void SSL_CTX_set_cookie_generate_cb(SSL_CTX *ctx, int (*cb) (SSL *ssl, unsigned char *cookie, unsigned int *cookie_len)) { ctx->app_gen_cookie_cb = cb; } void SSL_CTX_set_cookie_verify_cb(SSL_CTX *ctx, int (*cb) (SSL *ssl, unsigned char *cookie, unsigned int cookie_len)) { ctx->app_verify_cookie_cb = cb; } IMPLEMENT_PEM_rw(SSL_SESSION, SSL_SESSION, PEM_STRING_SSL_SESSION, SSL_SESSION)
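#if 0
/*
 * Editor's sketch (not part of the original file): wiring a server to the
 * external session-cache hooks installed by the setters defined above. The
 * callback signatures match SSL_CTX_sess_set_new_cb()/_get_cb()/_remove_cb();
 * the demo_cache_* helpers are hypothetical placeholders for real storage.
 */
extern void demo_cache_store(SSL_SESSION *sess);
extern SSL_SESSION *demo_cache_lookup(const unsigned char *id, int id_len);
extern void demo_cache_drop(SSL_SESSION *sess);

static int demo_new_session_cb(SSL *ssl, SSL_SESSION *sess)
{
    demo_cache_store(sess);
    /*
     * Per the documented convention, returning 1 means we keep the
     * reference handed to the callback, so the cache must eventually
     * release it with SSL_SESSION_free().
     */
    return 1;
}

static SSL_SESSION *demo_get_session_cb(SSL *ssl, unsigned char *id,
                                        int id_len, int *copy)
{
    /*
     * ssl_get_prev_session() honours *copy: leaving it at 1 makes the
     * library take the extra reference for us (see the CRYPTO_add() call
     * in ssl_get_prev_session() above).
     */
    *copy = 1;
    return demo_cache_lookup(id, id_len);
}

static void demo_remove_session_cb(SSL_CTX *ctx, SSL_SESSION *sess)
{
    demo_cache_drop(sess);
}

static void demo_enable_external_cache(SSL_CTX *ctx)
{
    SSL_CTX_sess_set_new_cb(ctx, demo_new_session_cb);
    SSL_CTX_sess_set_get_cb(ctx, demo_get_session_cb);
    SSL_CTX_sess_set_remove_cb(ctx, demo_remove_session_cb);
}
#endif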
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1496_4
crossvul-cpp_data_good_1450_0
/****************************************************************************** * emulate.c * * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. * * Copyright (c) 2005 Keir Fraser * * Linux coding style, mod r/m decoder, segment base fixes, real-mode * privileged instructions: * * Copyright (C) 2006 Qumranet * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 */ #include <linux/kvm_host.h> #include "kvm_cache_regs.h" #include <linux/module.h> #include <asm/kvm_emulate.h> #include <linux/stringify.h> #include "x86.h" #include "tss.h" /* * Operand types */ #define OpNone 0ull #define OpImplicit 1ull /* No generic decode */ #define OpReg 2ull /* Register */ #define OpMem 3ull /* Memory */ #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */ #define OpDI 5ull /* ES:DI/EDI/RDI */ #define OpMem64 6ull /* Memory, 64-bit */ #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */ #define OpDX 8ull /* DX register */ #define OpCL 9ull /* CL register (for shifts) */ #define OpImmByte 10ull /* 8-bit sign extended immediate */ #define OpOne 11ull /* Implied 1 */ #define OpImm 12ull /* Sign extended up to 32-bit immediate */ #define OpMem16 13ull /* Memory operand (16-bit). */ #define OpMem32 14ull /* Memory operand (32-bit). */ #define OpImmU 15ull /* Immediate operand, zero extended */ #define OpSI 16ull /* SI/ESI/RSI */ #define OpImmFAddr 17ull /* Immediate far address */ #define OpMemFAddr 18ull /* Far address in memory */ #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */ #define OpES 20ull /* ES */ #define OpCS 21ull /* CS */ #define OpSS 22ull /* SS */ #define OpDS 23ull /* DS */ #define OpFS 24ull /* FS */ #define OpGS 25ull /* GS */ #define OpMem8 26ull /* 8-bit zero extended memory operand */ #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */ #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */ #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */ #define OpBits 5 /* Width of operand field */ #define OpMask ((1ull << OpBits) - 1) /* * Opcode effective-address decode tables. * Note that we only emulate instructions that have at least one memory * operand (excluding implicit stack references). We assume that stack * references and instruction fetches will never occur in special memory * areas that require emulation. So, for example, 'mov <imm>,<reg>' need * not be handled. */ /* Operand sizes: 8-bit operands or specified/overridden size. */ #define ByteOp (1<<0) /* 8-bit operands. */ /* Destination operand type. */ #define DstShift 1 #define ImplicitOps (OpImplicit << DstShift) #define DstReg (OpReg << DstShift) #define DstMem (OpMem << DstShift) #define DstAcc (OpAcc << DstShift) #define DstDI (OpDI << DstShift) #define DstMem64 (OpMem64 << DstShift) #define DstImmUByte (OpImmUByte << DstShift) #define DstDX (OpDX << DstShift) #define DstAccLo (OpAccLo << DstShift) #define DstMask (OpMask << DstShift) /* Source operand type. 
*/ #define SrcShift 6 #define SrcNone (OpNone << SrcShift) #define SrcReg (OpReg << SrcShift) #define SrcMem (OpMem << SrcShift) #define SrcMem16 (OpMem16 << SrcShift) #define SrcMem32 (OpMem32 << SrcShift) #define SrcImm (OpImm << SrcShift) #define SrcImmByte (OpImmByte << SrcShift) #define SrcOne (OpOne << SrcShift) #define SrcImmUByte (OpImmUByte << SrcShift) #define SrcImmU (OpImmU << SrcShift) #define SrcSI (OpSI << SrcShift) #define SrcXLat (OpXLat << SrcShift) #define SrcImmFAddr (OpImmFAddr << SrcShift) #define SrcMemFAddr (OpMemFAddr << SrcShift) #define SrcAcc (OpAcc << SrcShift) #define SrcImmU16 (OpImmU16 << SrcShift) #define SrcImm64 (OpImm64 << SrcShift) #define SrcDX (OpDX << SrcShift) #define SrcMem8 (OpMem8 << SrcShift) #define SrcAccHi (OpAccHi << SrcShift) #define SrcMask (OpMask << SrcShift) #define BitOp (1<<11) #define MemAbs (1<<12) /* Memory operand is absolute displacement */ #define String (1<<13) /* String instruction (rep capable) */ #define Stack (1<<14) /* Stack instruction (push/pop) */ #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ #define Escape (5<<15) /* Escape to coprocessor instruction */ #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */ #define Sse (1<<18) /* SSE Vector instruction */ /* Generic ModRM decode. */ #define ModRM (1<<19) /* Destination is only written; never read. */ #define Mov (1<<20) /* Misc flags */ #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */ #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ #define Undefined (1<<25) /* No Such Instruction */ #define Lock (1<<26) /* lock prefix is allowed for the instruction */ #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ #define No64 (1<<28) #define PageTable (1 << 29) /* instruction used to write page table */ #define NotImpl (1 << 30) /* instruction is not implemented */ /* Source 2 operand type */ #define Src2Shift (31) #define Src2None (OpNone << Src2Shift) #define Src2Mem (OpMem << Src2Shift) #define Src2CL (OpCL << Src2Shift) #define Src2ImmByte (OpImmByte << Src2Shift) #define Src2One (OpOne << Src2Shift) #define Src2Imm (OpImm << Src2Shift) #define Src2ES (OpES << Src2Shift) #define Src2CS (OpCS << Src2Shift) #define Src2SS (OpSS << Src2Shift) #define Src2DS (OpDS << Src2Shift) #define Src2FS (OpFS << Src2Shift) #define Src2GS (OpGS << Src2Shift) #define Src2Mask (OpMask << Src2Shift) #define Mmx ((u64)1 << 40) /* MMX Vector instruction */ #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */ #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. 
MOVDQU) */ #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */ #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ #define NoWrite ((u64)1 << 45) /* No writeback */ #define SrcWrite ((u64)1 << 46) /* Write back src operand */ #define NoMod ((u64)1 << 47) /* Mod field is ignored */ #define Intercept ((u64)1 << 48) /* Has valid intercept field */ #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */ #define NoBigReal ((u64)1 << 50) /* No big real mode */ #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */ #define NearBranch ((u64)1 << 52) /* Near branches */ #define No16 ((u64)1 << 53) /* No 16 bit operand */ #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) #define X2(x...) x, x #define X3(x...) X2(x), x #define X4(x...) X2(x), X2(x) #define X5(x...) X4(x), x #define X6(x...) X4(x), X2(x) #define X7(x...) X4(x), X3(x) #define X8(x...) X4(x), X4(x) #define X16(x...) X8(x), X8(x) #define NR_FASTOP (ilog2(sizeof(ulong)) + 1) #define FASTOP_SIZE 8 /* * fastop functions have a special calling convention: * * dst: rax (in/out) * src: rdx (in/out) * src2: rcx (in) * flags: rflags (in/out) * ex: rsi (in:fastop pointer, out:zero if exception) * * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for * different operand sizes can be reached by calculation, rather than a jump * table (which would be bigger than the code). * * fastop functions are declared as taking a never-defined fastop parameter, * so they can't be called from C directly. */ struct fastop; struct opcode { u64 flags : 56; u64 intercept : 8; union { int (*execute)(struct x86_emulate_ctxt *ctxt); const struct opcode *group; const struct group_dual *gdual; const struct gprefix *gprefix; const struct escape *esc; const struct instr_dual *idual; void (*fastop)(struct fastop *fake); } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); }; struct group_dual { struct opcode mod012[8]; struct opcode mod3[8]; }; struct gprefix { struct opcode pfx_no; struct opcode pfx_66; struct opcode pfx_f2; struct opcode pfx_f3; }; struct escape { struct opcode op[8]; struct opcode high[64]; }; struct instr_dual { struct opcode mod012; struct opcode mod3; }; /* EFLAGS bit definitions. 
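Bit positions follow the architectural EFLAGS layout.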
*/ #define EFLG_ID (1<<21) #define EFLG_VIP (1<<20) #define EFLG_VIF (1<<19) #define EFLG_AC (1<<18) #define EFLG_VM (1<<17) #define EFLG_RF (1<<16) #define EFLG_IOPL (3<<12) #define EFLG_NT (1<<14) #define EFLG_OF (1<<11) #define EFLG_DF (1<<10) #define EFLG_IF (1<<9) #define EFLG_TF (1<<8) #define EFLG_SF (1<<7) #define EFLG_ZF (1<<6) #define EFLG_AF (1<<4) #define EFLG_PF (1<<2) #define EFLG_CF (1<<0) #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a #define EFLG_RESERVED_ONE_MASK 2 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) { if (!(ctxt->regs_valid & (1 << nr))) { ctxt->regs_valid |= 1 << nr; ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); } return ctxt->_regs[nr]; } static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) { ctxt->regs_valid |= 1 << nr; ctxt->regs_dirty |= 1 << nr; return &ctxt->_regs[nr]; } static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr) { reg_read(ctxt, nr); return reg_write(ctxt, nr); } static void writeback_registers(struct x86_emulate_ctxt *ctxt) { unsigned reg; for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16) ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]); } static void invalidate_registers(struct x86_emulate_ctxt *ctxt) { ctxt->regs_dirty = 0; ctxt->regs_valid = 0; } /* * These EFLAGS bits are restored from saved value during emulation, and * any changes are written back to the saved value after emulation. */ #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) #ifdef CONFIG_X86_64 #define ON64(x) x #else #define ON64(x) #endif static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t" #define FOP_RET "ret \n\t" #define FOP_START(op) \ extern void em_##op(struct fastop *fake); \ asm(".pushsection .text, \"ax\" \n\t" \ ".global em_" #op " \n\t" \ FOP_ALIGN \ "em_" #op ": \n\t" #define FOP_END \ ".popsection") #define FOPNOP() FOP_ALIGN FOP_RET #define FOP1E(op, dst) \ FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET #define FOP1EEX(op, dst) \ FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception) #define FASTOP1(op) \ FOP_START(op) \ FOP1E(op##b, al) \ FOP1E(op##w, ax) \ FOP1E(op##l, eax) \ ON64(FOP1E(op##q, rax)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m) */ #define FASTOP1SRC2(op, name) \ FOP_START(name) \ FOP1E(op, cl) \ FOP1E(op, cx) \ FOP1E(op, ecx) \ ON64(FOP1E(op, rcx)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */ #define FASTOP1SRC2EX(op, name) \ FOP_START(name) \ FOP1EEX(op, cl) \ FOP1EEX(op, cx) \ FOP1EEX(op, ecx) \ ON64(FOP1EEX(op, rcx)) \ FOP_END #define FOP2E(op, dst, src) \ FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET #define FASTOP2(op) \ FOP_START(op) \ FOP2E(op##b, al, dl) \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, word only */ #define FASTOP2W(op) \ FOP_START(op) \ FOPNOP() \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, src is CL */ #define FASTOP2CL(op) \ FOP_START(op) \ FOP2E(op##b, al, cl) \ FOP2E(op##w, ax, cl) \ FOP2E(op##l, eax, cl) \ ON64(FOP2E(op##q, rax, cl)) \ FOP_END /* 2 operand, src and dest are reversed */ #define FASTOP2R(op, name) \ FOP_START(name) \ FOP2E(op##b, dl, al) \ FOP2E(op##w, dx, ax) \ FOP2E(op##l, edx, eax) \ ON64(FOP2E(op##q, rdx, rax)) \ FOP_END #define FOP3E(op, dst, src, src2) \ FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET /* 3-operand, word-only, src2=cl */ #define FASTOP3WCL(op) \ 
FOP_START(op) \ FOPNOP() \ FOP3E(op##w, ax, dx, cl) \ FOP3E(op##l, eax, edx, cl) \ ON64(FOP3E(op##q, rax, rdx, cl)) \ FOP_END /* Special case for SETcc - 1 instruction per cc */ #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t" asm(".global kvm_fastop_exception \n" "kvm_fastop_exception: xor %esi, %esi; ret"); FOP_START(setcc) FOP_SETCC(seto) FOP_SETCC(setno) FOP_SETCC(setc) FOP_SETCC(setnc) FOP_SETCC(setz) FOP_SETCC(setnz) FOP_SETCC(setbe) FOP_SETCC(setnbe) FOP_SETCC(sets) FOP_SETCC(setns) FOP_SETCC(setp) FOP_SETCC(setnp) FOP_SETCC(setl) FOP_SETCC(setnl) FOP_SETCC(setle) FOP_SETCC(setnle) FOP_END; FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET FOP_END; static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, enum x86_intercept intercept, enum x86_intercept_stage stage) { struct x86_instruction_info info = { .intercept = intercept, .rep_prefix = ctxt->rep_prefix, .modrm_mod = ctxt->modrm_mod, .modrm_reg = ctxt->modrm_reg, .modrm_rm = ctxt->modrm_rm, .src_val = ctxt->src.val64, .dst_val = ctxt->dst.val64, .src_bytes = ctxt->src.bytes, .dst_bytes = ctxt->dst.bytes, .ad_bytes = ctxt->ad_bytes, .next_rip = ctxt->eip, }; return ctxt->ops->intercept(ctxt, &info, stage); } static void assign_masked(ulong *dest, ulong src, ulong mask) { *dest = (*dest & ~mask) | (src & mask); } static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) { return (1UL << (ctxt->ad_bytes << 3)) - 1; } static ulong stack_mask(struct x86_emulate_ctxt *ctxt) { u16 sel; struct desc_struct ss; if (ctxt->mode == X86EMUL_MODE_PROT64) return ~0UL; ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS); return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ } static int stack_size(struct x86_emulate_ctxt *ctxt) { return (__fls(stack_mask(ctxt)) + 1) >> 3; } /* Access/update address held in a register, based on addressing mode. */ static inline unsigned long address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) { if (ctxt->ad_bytes == sizeof(unsigned long)) return reg; else return reg & ad_mask(ctxt); } static inline unsigned long register_address(struct x86_emulate_ctxt *ctxt, int reg) { return address_mask(ctxt, reg_read(ctxt, reg)); } static void masked_increment(ulong *reg, ulong mask, int inc) { assign_masked(reg, *reg + inc, mask); } static inline void register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc) { ulong mask; if (ctxt->ad_bytes == sizeof(unsigned long)) mask = ~0UL; else mask = ad_mask(ctxt); masked_increment(reg_rmw(ctxt, reg), mask, inc); } static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) { masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); } static u32 desc_limit_scaled(struct desc_struct *desc) { u32 limit = get_desc_limit(desc); return desc->g ? 
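/* G set: the limit is in 4 KiB units, so scale it and fill in the low 12 bits */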
(limit << 12) | 0xfff : limit; } static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) { if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) return 0; return ctxt->ops->get_cached_segment_base(ctxt, seg); } static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, u32 error, bool valid) { WARN_ON(vec > 0x1f); ctxt->exception.vector = vec; ctxt->exception.error_code = error; ctxt->exception.error_code_valid = valid; return X86EMUL_PROPAGATE_FAULT; } static int emulate_db(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DB_VECTOR, 0, false); } static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, GP_VECTOR, err, true); } static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, SS_VECTOR, err, true); } static int emulate_ud(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, UD_VECTOR, 0, false); } static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, TS_VECTOR, err, true); } static int emulate_de(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DE_VECTOR, 0, false); } static int emulate_nm(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, NM_VECTOR, 0, false); } static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) { u16 selector; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); return selector; } static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, unsigned seg) { u16 dummy; u32 base3; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); } /* * x86 defines three classes of vector instructions: explicitly * aligned, explicitly unaligned, and the rest, which change behaviour * depending on whether they're AVX encoded or not. * * Also included is CMPXCHG16B which is not a vector instruction, yet it is * subject to the same check. */ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) { if (likely(size < 16)) return false; if (ctxt->d & Aligned) return true; else if (ctxt->d & Unaligned) return false; else if (ctxt->d & Avx) return false; else return true; } static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned *max_size, unsigned size, bool write, bool fetch, enum x86emul_mode mode, ulong *linear) { struct desc_struct desc; bool usable; ulong la; u32 lim; u16 sel; la = seg_base(ctxt, addr.seg) + addr.ea; *max_size = 0; switch (mode) { case X86EMUL_MODE_PROT64: if (is_noncanonical_address(la)) goto bad; *max_size = min_t(u64, ~0u, (1ull << 48) - la); if (size > *max_size) goto bad; break; default: usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, addr.seg); if (!usable) goto bad; /* code segment in protected mode or read-only data segment */ if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) && write) goto bad; /* unreadable code segment */ if (!fetch && (desc.type & 8) && !(desc.type & 2)) goto bad; lim = desc_limit_scaled(&desc); if (!(desc.type & 8) && (desc.type & 4)) { /* expand-down segment */ if (addr.ea <= lim) goto bad; lim = desc.d ? 
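/* for an expand-down segment the D/B bit selects the upper bound of the usable range */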
0xffffffff : 0xffff; } if (addr.ea > lim) goto bad; *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea); if (size > *max_size) goto bad; la &= (u32)-1; break; } if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) return emulate_gp(ctxt, 0); *linear = la; return X86EMUL_CONTINUE; bad: if (addr.seg == VCPU_SREG_SS) return emulate_ss(ctxt, 0); else return emulate_gp(ctxt, 0); } static int linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned size, bool write, ulong *linear) { unsigned max_size; return __linearize(ctxt, addr, &max_size, size, write, false, ctxt->mode, linear); } static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, enum x86emul_mode mode) { ulong linear; int rc; unsigned max_size; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = dst }; if (ctxt->op_bytes != sizeof(unsigned long)) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear); if (rc == X86EMUL_CONTINUE) ctxt->_eip = addr.ea; return rc; } static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) { return assign_eip(ctxt, dst, ctxt->mode); } static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, const struct desc_struct *cs_desc) { enum x86emul_mode mode = ctxt->mode; #ifdef CONFIG_X86_64 if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) mode = X86EMUL_MODE_PROT64; } #endif if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32) mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; return assign_eip(ctxt, dst, mode); } static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) { return assign_eip_near(ctxt, ctxt->_eip + rel); } static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); } /* * Prefetch the remaining bytes of the instruction without crossing page * boundary if they are not in fetch_cache yet. */ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) { int rc; unsigned size, max_size; unsigned long linear; int cur_size = ctxt->fetch.end - ctxt->fetch.data; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = ctxt->eip + cur_size }; /* * We do not know exactly how many bytes will be needed, and * __linearize is expensive, so fetch as much as possible. We * just have to avoid going beyond the 15 byte limit, the end * of the segment, or the end of the page. * * __linearize is called with size 0 so that it does not do any * boundary check itself. Instead, we use max_size to check * against op_size. */ rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode, &linear); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; size = min_t(unsigned, 15UL ^ cur_size, max_size); size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); /* * One instruction can only straddle two pages, * and one has been loaded at the beginning of * x86_decode_insn. So, if not enough bytes * still, we must have hit the 15-byte boundary. 
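* Note that cur_size never exceeds 15 here, so the 15UL ^ cur_size * expression above is simply 15 - cur_size computed without a subtraction.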
*/ if (unlikely(size < op_size)) return emulate_gp(ctxt, 0); rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, size, &ctxt->exception); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; ctxt->fetch.end += size; return X86EMUL_CONTINUE; } static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, unsigned size) { unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr; if (unlikely(done_size < size)) return __do_insn_fetch_bytes(ctxt, size - done_size); else return X86EMUL_CONTINUE; } /* Fetch next part of the instruction being emulated. */ #define insn_fetch(_type, _ctxt) \ ({ _type _x; \ \ rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += sizeof(_type); \ _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \ ctxt->fetch.ptr += sizeof(_type); \ _x; \ }) #define insn_fetch_arr(_arr, _size, _ctxt) \ ({ \ rc = do_insn_fetch_bytes(_ctxt, _size); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += (_size); \ memcpy(_arr, ctxt->fetch.ptr, _size); \ ctxt->fetch.ptr += (_size); \ }) /* * Given the 'reg' portion of a ModRM byte, and a register block, return a * pointer into the block that addresses the relevant register. * @highbyte_regs specifies whether to decode AH,CH,DH,BH. */ static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, int byteop) { void *p; int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; else p = reg_rmw(ctxt, modrm_reg); return p; } static int read_descriptor(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, u16 *size, unsigned long *address, int op_bytes) { int rc; if (op_bytes == 2) op_bytes = 3; *address = 0; rc = segmented_read_std(ctxt, addr, size, 2); if (rc != X86EMUL_CONTINUE) return rc; addr.ea += 2; rc = segmented_read_std(ctxt, addr, address, op_bytes); return rc; } FASTOP2(add); FASTOP2(or); FASTOP2(adc); FASTOP2(sbb); FASTOP2(and); FASTOP2(sub); FASTOP2(xor); FASTOP2(cmp); FASTOP2(test); FASTOP1SRC2(mul, mul_ex); FASTOP1SRC2(imul, imul_ex); FASTOP1SRC2EX(div, div_ex); FASTOP1SRC2EX(idiv, idiv_ex); FASTOP3WCL(shld); FASTOP3WCL(shrd); FASTOP2W(imul); FASTOP1(not); FASTOP1(neg); FASTOP1(inc); FASTOP1(dec); FASTOP2CL(rol); FASTOP2CL(ror); FASTOP2CL(rcl); FASTOP2CL(rcr); FASTOP2CL(shl); FASTOP2CL(shr); FASTOP2CL(sar); FASTOP2W(bsf); FASTOP2W(bsr); FASTOP2W(bt); FASTOP2W(bts); FASTOP2W(btr); FASTOP2W(btc); FASTOP2(xadd); FASTOP2R(cmp, cmp_r); static u8 test_cc(unsigned int condition, unsigned long flags) { u8 rc; void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; asm("push %[flags]; popf; call *%[fastop]" : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags)); return rc; } static void fetch_register_operand(struct operand *op) { switch (op->bytes) { case 1: op->val = *(u8 *)op->addr.reg; break; case 2: op->val = *(u16 *)op->addr.reg; break; case 4: op->val = *(u32 *)op->addr.reg; break; case 8: op->val = *(u64 *)op->addr.reg; break; } } static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break; case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break; case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break; case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break; case 6: asm("movdqa 
%%xmm6, %0" : "=m"(*data)); break; case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break; case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break; case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break; case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break; case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break; case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break; case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break; case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break; case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break; case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break; case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break; case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break; case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break; #ifdef CONFIG_X86_64 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break; case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break; case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break; case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break; case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break; case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break; case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break; case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break; #endif default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; case 2: asm("movq %%mm2, %0" : "=m"(*data)); break; case 3: asm("movq %%mm3, %0" : "=m"(*data)); break; case 4: asm("movq %%mm4, %0" : "=m"(*data)); break; case 5: asm("movq %%mm5, %0" : "=m"(*data)); break; case 6: asm("movq %%mm6, %0" : "=m"(*data)); break; case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) { ctxt->ops->get_fpu(ctxt); switch (reg) { case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; case 2: asm("movq %0, %%mm2" : : "m"(*data)); break; case 3: asm("movq %0, %%mm3" : : "m"(*data)); break; case 4: asm("movq %0, %%mm4" : : "m"(*data)); break; case 5: asm("movq %0, %%mm5" : : "m"(*data)); break; case 6: asm("movq %0, %%mm6" : : "m"(*data)); break; case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; default: BUG(); } ctxt->ops->put_fpu(ctxt); } static int em_fninit(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fninit"); ctxt->ops->put_fpu(ctxt); return X86EMUL_CONTINUE; } static int em_fnstcw(struct x86_emulate_ctxt *ctxt) { u16 fcw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstcw %0": "+m"(fcw)); ctxt->ops->put_fpu(ctxt); /* force 2 byte destination */ ctxt->dst.bytes = 2; ctxt->dst.val = fcw; return X86EMUL_CONTINUE; } static int em_fnstsw(struct x86_emulate_ctxt *ctxt) { u16 fsw; if 
(ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); ctxt->ops->get_fpu(ctxt); asm volatile("fnstsw %0": "+m"(fsw)); ctxt->ops->put_fpu(ctxt); /* force 2 byte destination */ ctxt->dst.bytes = 2; ctxt->dst.val = fsw; return X86EMUL_CONTINUE; } static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { unsigned reg = ctxt->modrm_reg; if (!(ctxt->d & ModRM)) reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = reg; read_sse_reg(ctxt, &op->vec_val, reg); return; } if (ctxt->d & Mmx) { reg &= 7; op->type = OP_MM; op->bytes = 8; op->addr.mm = reg; return; } op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp); fetch_register_operand(op); op->orig_val = op->val; } static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg) { if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP) ctxt->modrm_seg = VCPU_SREG_SS; } static int decode_modrm(struct x86_emulate_ctxt *ctxt, struct operand *op) { u8 sib; int index_reg, base_reg, scale; int rc = X86EMUL_CONTINUE; ulong modrm_ea = 0; ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); ctxt->modrm_seg = VCPU_SREG_DS; if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = ctxt->modrm_rm; read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); return rc; } if (ctxt->d & Mmx) { op->type = OP_MM; op->bytes = 8; op->addr.mm = ctxt->modrm_rm & 7; return rc; } fetch_register_operand(op); return rc; } op->type = OP_MEM; if (ctxt->ad_bytes == 2) { unsigned bx = reg_read(ctxt, VCPU_REGS_RBX); unsigned bp = reg_read(ctxt, VCPU_REGS_RBP); unsigned si = reg_read(ctxt, VCPU_REGS_RSI); unsigned di = reg_read(ctxt, VCPU_REGS_RDI); /* 16-bit ModR/M decode. */ switch (ctxt->modrm_mod) { case 0: if (ctxt->modrm_rm == 6) modrm_ea += insn_fetch(u16, ctxt); break; case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(u16, ctxt); break; } switch (ctxt->modrm_rm) { case 0: modrm_ea += bx + si; break; case 1: modrm_ea += bx + di; break; case 2: modrm_ea += bp + si; break; case 3: modrm_ea += bp + di; break; case 4: modrm_ea += si; break; case 5: modrm_ea += di; break; case 6: if (ctxt->modrm_mod != 0) modrm_ea += bp; break; case 7: modrm_ea += bx; break; } if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) ctxt->modrm_seg = VCPU_SREG_SS; modrm_ea = (u16)modrm_ea; } else { /* 32/64-bit ModR/M decode. 
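rm == 4 selects a SIB byte; rm == 5 with mod == 0 is a bare disp32, which is RIP-relative in 64-bit mode.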
*/ if ((ctxt->modrm_rm & 7) == 4) { sib = insn_fetch(u8, ctxt); index_reg |= (sib >> 3) & 7; base_reg |= sib & 7; scale = sib >> 6; if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) modrm_ea += insn_fetch(s32, ctxt); else { modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } if (index_reg != 4) modrm_ea += reg_read(ctxt, index_reg) << scale; } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { modrm_ea += insn_fetch(s32, ctxt); if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->rip_relative = 1; } else { base_reg = ctxt->modrm_rm; modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } switch (ctxt->modrm_mod) { case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(s32, ctxt); break; } } op->addr.mem.ea = modrm_ea; if (ctxt->ad_bytes != 8) ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; done: return rc; } static int decode_abs(struct x86_emulate_ctxt *ctxt, struct operand *op) { int rc = X86EMUL_CONTINUE; op->type = OP_MEM; switch (ctxt->ad_bytes) { case 2: op->addr.mem.ea = insn_fetch(u16, ctxt); break; case 4: op->addr.mem.ea = insn_fetch(u32, ctxt); break; case 8: op->addr.mem.ea = insn_fetch(u64, ctxt); break; } done: return rc; } static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) { long sv = 0, mask; if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { mask = ~((long)ctxt->dst.bytes * 8 - 1); if (ctxt->src.bytes == 2) sv = (s16)ctxt->src.val & (s16)mask; else if (ctxt->src.bytes == 4) sv = (s32)ctxt->src.val & (s32)mask; else sv = (s64)ctxt->src.val & (s64)mask; ctxt->dst.addr.mem.ea = address_mask(ctxt, ctxt->dst.addr.mem.ea + (sv >> 3)); } /* only subword offset */ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; } static int read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *dest, unsigned size) { int rc; struct read_cache *mc = &ctxt->mem_read; if (mc->pos < mc->end) goto read_cached; WARN_ON((mc->end + size) >= sizeof(mc->data)); rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; mc->end += size; read_cached: memcpy(dest, mc->data + mc->pos, size); mc->pos += size; return X86EMUL_CONTINUE; } static int segmented_read(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return read_emulated(ctxt, linear, data, size); } static int segmented_write(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->write_emulated(ctxt, linear, data, size, &ctxt->exception); } static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *orig_data, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, size, &ctxt->exception); } static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, unsigned int size, unsigned short port, void *dest) { struct read_cache *rc = &ctxt->io_read; if (rc->pos == rc->end) { /* refill pio read ahead */ unsigned int in_page, n; unsigned int count = ctxt->rep_prefix ? address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; in_page = (ctxt->eflags & EFLG_DF) ? 
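/* DF set: the string op walks downward, so only the part of the page at or below RDI is usable */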
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); if (n == 0) n = 1; rc->pos = rc->end = 0; if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) return 0; rc->end = n * size; } if (ctxt->rep_prefix && (ctxt->d & String) && !(ctxt->eflags & EFLG_DF)) { ctxt->dst.data = rc->data + rc->pos; ctxt->dst.type = OP_MEM_STR; ctxt->dst.count = (rc->end - rc->pos) / size; rc->pos = rc->end; } else { memcpy(dest, rc->data + rc->pos, size); rc->pos += size; } return 1; } static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, u16 index, struct desc_struct *desc) { struct desc_ptr dt; ulong addr; ctxt->ops->get_idt(ctxt, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_ptr *dt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 base3 = 0; if (selector & 1 << 2) { struct desc_struct desc; u16 sel; memset (dt, 0, sizeof *dt); if (!ops->get_segment(ctxt, &sel, &desc, &base3, VCPU_SREG_LDTR)) return; dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ dt->address = get_desc_base(&desc) | ((u64)base3 << 32); } else ops->get_gdt(ctxt, dt); } /* allowed just for 8 bytes segments */ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, ulong *desc_addr_p) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); *desc_addr_p = addr = dt.address + index * 8; return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } /* allowed just for 8 bytes segments */ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); addr = dt.address + index * 8; return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); } /* Does not support long mode */ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, bool in_task_switch, struct desc_struct *desc) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ ulong desc_addr; int ret; u16 dummy; u32 base3 = 0; memset(&seg_desc, 0, sizeof seg_desc); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. 
for * unreal mode) */ ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); set_desc_base(&seg_desc, selector << 4); goto load; } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { /* VM86 needs a clean new segment descriptor */ set_desc_base(&seg_desc, selector << 4); set_desc_limit(&seg_desc, 0xffff); seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = 3; goto load; } rpl = selector & 3; /* NULL selector is not valid for TR, CS and SS (except for long mode) */ if ((seg == VCPU_SREG_CS || (seg == VCPU_SREG_SS && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) || seg == VCPU_SREG_TR) && null_selector) goto exception; /* TR should be in GDT only */ if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; if (null_selector) /* for NULL selector skip all following checks */ goto load; ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; err_code = selector & 0xfffc; err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR; /* can't load system descriptor into segment selector */ if (seg <= VCPU_SREG_GS && !seg_desc.s) goto exception; if (!seg_desc.p) { err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; goto exception; } dpl = seg_desc.dpl; switch (seg) { case VCPU_SREG_SS: /* * segment is not a writable data segment, or segment * selector's RPL != CPL, or descriptor's DPL != CPL */ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) goto exception; break; case VCPU_SREG_CS: if (!(seg_desc.type & 8)) goto exception; if (seg_desc.type & 4) { /* conforming */ if (dpl > cpl) goto exception; } else { /* nonconforming */ if (rpl > cpl || dpl != cpl) goto exception; } /* in long-mode d/b must be clear if l is set */ if (seg_desc.d && seg_desc.l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) goto exception; } /* CS(RPL) <- CPL */ selector = (selector & 0xfffc) | cpl; break; case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, sizeof(seg_desc), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) goto exception; break; default: /* DS, ES, FS, or GS */ /* * segment is not a data or readable code segment or * ((segment is a data or nonconforming code segment) * and (both RPL and CPL > DPL)) */ if ((seg_desc.type & 0xa) == 0x8 || (((seg_desc.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl))) goto exception; break; } if (seg_desc.s) { /* mark segment as accessed */ seg_desc.type |= 1; ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } else if (ctxt->mode == X86EMUL_MODE_PROT64) { ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, sizeof(base3), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; if (is_noncanonical_address(get_desc_base(&seg_desc) | ((u64)base3 << 32))) return emulate_gp(ctxt, 0); } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); if (desc) *desc = seg_desc; return X86EMUL_CONTINUE; exception: return emulate_exception(ctxt, err_vec, err_code, true); } static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg) { u8 cpl = ctxt->ops->cpl(ctxt); return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL); } static void write_register_operand(struct operand *op) { /* The 4-byte case *is*
correct: in 64-bit mode we zero-extend. */ switch (op->bytes) { case 1: *(u8 *)op->addr.reg = (u8)op->val; break; case 2: *(u16 *)op->addr.reg = (u16)op->val; break; case 4: *op->addr.reg = (u32)op->val; break; /* 64b: zero-extend */ case 8: *op->addr.reg = op->val; break; } } static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) { switch (op->type) { case OP_REG: write_register_operand(op); break; case OP_MEM: if (ctxt->lock_prefix) return segmented_cmpxchg(ctxt, op->addr.mem, &op->orig_val, &op->val, op->bytes); else return segmented_write(ctxt, op->addr.mem, &op->val, op->bytes); break; case OP_MEM_STR: return segmented_write(ctxt, op->addr.mem, op->data, op->bytes * op->count); break; case OP_XMM: write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); break; case OP_MM: write_mmx_reg(ctxt, &op->mm_val, op->addr.mm); break; case OP_NONE: /* no writeback */ break; default: break; } return X86EMUL_CONTINUE; } static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) { struct segmented_address addr; rsp_increment(ctxt, -bytes); addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; return segmented_write(ctxt, addr, data, bytes); } static int em_push(struct x86_emulate_ctxt *ctxt) { /* Disable writeback. */ ctxt->dst.type = OP_NONE; return push(ctxt, &ctxt->src.val, ctxt->op_bytes); } static int emulate_pop(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; struct segmented_address addr; addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; rc = segmented_read(ctxt, addr, dest, len); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, len); return rc; } static int em_pop(struct x86_emulate_ctxt *ctxt) { return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int emulate_popf(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; unsigned long val, change_mask; int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &val, len); if (rc != X86EMUL_CONTINUE) return rc; change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID; switch(ctxt->mode) { case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT16: if (cpl == 0) change_mask |= EFLG_IOPL; if (cpl <= iopl) change_mask |= EFLG_IF; break; case X86EMUL_MODE_VM86: if (iopl < 3) return emulate_gp(ctxt, 0); change_mask |= EFLG_IF; break; default: /* real mode */ change_mask |= (EFLG_IOPL | EFLG_IF); break; } *(unsigned long *)dest = (ctxt->eflags & ~change_mask) | (val & change_mask); return rc; } static int em_popf(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.addr.reg = &ctxt->eflags; ctxt->dst.bytes = ctxt->op_bytes; return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int em_enter(struct x86_emulate_ctxt *ctxt) { int rc; unsigned frame_size = ctxt->src.val; unsigned nesting_level = ctxt->src2.val & 31; ulong rbp; if (nesting_level) return X86EMUL_UNHANDLEABLE; rbp = reg_read(ctxt, VCPU_REGS_RBP); rc = push(ctxt, &rbp, stack_size(ctxt)); if (rc != X86EMUL_CONTINUE) return rc; assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP), stack_mask(ctxt)); assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RSP) - frame_size, stack_mask(ctxt)); return X86EMUL_CONTINUE; } static int em_leave(struct x86_emulate_ctxt *ctxt) { assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP), stack_mask(ctxt)); 
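/* LEAVE: RSP now points at the saved frame pointer; pop it back into RBP */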
return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes); } static int em_push_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; ctxt->src.val = get_segment_selector(ctxt, seg); if (ctxt->op_bytes == 4) { rsp_increment(ctxt, -2); ctxt->op_bytes = 2; } return em_push(ctxt); } static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned long selector; int rc; rc = emulate_pop(ctxt, &selector, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; rc = load_segment_descriptor(ctxt, (u16)selector, seg); return rc; } static int em_pusha(struct x86_emulate_ctxt *ctxt) { unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP); int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RAX; while (reg <= VCPU_REGS_RDI) { (reg == VCPU_REGS_RSP) ? (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg)); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ++reg; } return rc; } static int em_pushf(struct x86_emulate_ctxt *ctxt) { ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM; return em_push(ctxt); } static int em_popa(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RDI; while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { rsp_increment(ctxt, ctxt->op_bytes); --reg; } rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) break; --reg; } return rc; } static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { const struct x86_emulate_ops *ops = ctxt->ops; int rc; struct desc_ptr dt; gva_t cs_addr; gva_t eip_addr; u16 cs, eip; /* TODO: Add limit checks */ ctxt->src.val = ctxt->eflags; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = ctxt->_eip; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ops->get_idt(ctxt, &dt); eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = eip; return rc; } int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { int rc; invalidate_registers(ctxt); rc = __emulate_int_real(ctxt, irq); if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return rc; } static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return __emulate_int_real(ctxt, irq); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* Protected mode interrupts unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; unsigned long temp_eip = 0; unsigned long temp_eflags = 0; unsigned long cs = 0; unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF | EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */ unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP; /* TODO: Add stack limit check */ rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); if (rc != 
X86EMUL_CONTINUE) return rc; if (temp_eip & ~0xffff) return emulate_gp(ctxt, 0); rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = temp_eip; if (ctxt->op_bytes == 4) ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); else if (ctxt->op_bytes == 2) { ctxt->eflags &= ~0xffff; ctxt->eflags |= temp_eflags; } ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ ctxt->eflags |= EFLG_RESERVED_ONE_MASK; return rc; } static int em_iret(struct x86_emulate_ctxt *ctxt) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return emulate_iret_real(ctxt); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* iret from protected mode unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned short sel, old_sel; struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; u8 cpl = ctxt->ops->cpl(ctxt); /* Assignment of RIP may only fail in 64-bit mode */ if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_sel, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); if (rc != X86EMUL_CONTINUE) { WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); /* assigning eip failed; restore the old cs */ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); return rc; } return rc; } static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) { return assign_eip_near(ctxt, ctxt->src.val); } static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) { int rc; long int old_eip; old_eip = ctxt->_eip; rc = assign_eip_near(ctxt, ctxt->src.val); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = old_eip; rc = em_push(ctxt); return rc; } static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) { u64 old = ctxt->dst.orig_val64; if (ctxt->dst.bytes == 16) return X86EMUL_UNHANDLEABLE; if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); ctxt->eflags &= ~EFLG_ZF; } else { ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | (u32) reg_read(ctxt, VCPU_REGS_RBX); ctxt->eflags |= EFLG_ZF; } return X86EMUL_CONTINUE; } static int em_ret(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; return assign_eip_near(ctxt, eip); } static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip, cs; u16 old_cs; int cpl = ctxt->ops->cpl(ctxt); struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; /* Outer-privilege level return is not implemented */ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) return X86EMUL_UNHANDLEABLE; rc = 
__load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, eip, &new_desc); if (rc != X86EMUL_CONTINUE) { WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); } return rc; } static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) { int rc; rc = em_ret_far(ctxt); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) { /* Save real source value, then compare EAX against destination. */ ctxt->dst.orig_val = ctxt->dst.val; ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); ctxt->src.orig_val = ctxt->src.val; ctxt->src.val = ctxt->dst.orig_val; fastop(ctxt, em_cmp); if (ctxt->eflags & EFLG_ZF) { /* Success: write back to memory. */ ctxt->dst.val = ctxt->src.orig_val; } else { /* Failure: write the value we saw to EAX. */ ctxt->dst.type = OP_REG; ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); ctxt->dst.val = ctxt->dst.orig_val; } return X86EMUL_CONTINUE; } static int em_lseg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned short sel; int rc; memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = load_segment_descriptor(ctxt, sel, seg); if (rc != X86EMUL_CONTINUE) return rc; ctxt->dst.val = ctxt->src.val; return rc; } static void setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, struct desc_struct *cs, struct desc_struct *ss) { cs->l = 0; /* will be adjusted later */ set_desc_base(cs, 0); /* flat segment */ cs->g = 1; /* 4kb granularity */ set_desc_limit(cs, 0xfffff); /* 4GB limit */ cs->type = 0x0b; /* Read, Execute, Accessed */ cs->s = 1; cs->dpl = 0; /* will be adjusted later */ cs->p = 1; cs->d = 1; cs->avl = 0; set_desc_base(ss, 0); /* flat segment */ set_desc_limit(ss, 0xfffff); /* 4GB limit */ ss->g = 1; /* 4kb granularity */ ss->s = 1; ss->type = 0x03; /* Read/Write, Accessed */ ss->d = 1; /* 32bit stack segment */ ss->dpl = 0; ss->p = 1; ss->l = 0; ss->avl = 0; } static bool vendor_intel(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; eax = ecx = 0; ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx; } static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 eax, ebx, ecx, edx; /* * syscall should always be enabled in longmode - so only become * vendor specific (cpuid) if other modes are active... */ if (ctxt->mode == X86EMUL_MODE_PROT64) return true; eax = 0x00000000; ecx = 0x00000000; ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); /* * Intel ("GenuineIntel") * remark: Intel CPUs only support "syscall" in 64bit * longmode. Also, a 64bit guest with a * 32bit compat-app running will #UD !! While this * behaviour could be fixed here by emulating the AMD * response, CPUs of AMD can't be made to behave like Intel.
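* The checks below therefore key off the CPUID vendor string: the * GenuineIntel ID keeps the #UD behaviour, the AuthenticAMD and * AMDisbetter! IDs allow the legacy-mode syscall, and unknown vendors * get Intel's stricter rules.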
*/ if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) return false; /* AMD ("AuthenticAMD") */ if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) return true; /* AMD ("AMDisbetter!") */ if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) return true; /* default: (not Intel, not AMD), apply Intel's stricter rules... */ return false; } static int em_syscall(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; /* syscall is not available in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_ud(ctxt); if (!(em_syscall_is_enabled(ctxt))) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_EFER, &efer); setup_syscalls_segments(ctxt, &cs, &ss); if (!(efer & EFER_SCE)) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); ss_sel = (u16)(msr_data + 8); if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; if (efer & EFER_LMA) { #ifdef CONFIG_X86_64 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; ops->get_msr(ctxt, ctxt->mode == X86EMUL_MODE_PROT64 ? MSR_LSTAR : MSR_CSTAR, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt->eflags &= ~msr_data; ctxt->eflags |= EFLG_RESERVED_ONE_MASK; #endif } else { /* legacy mode */ ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt->_eip = (u32)msr_data; ctxt->eflags &= ~(EFLG_VM | EFLG_IF); } return X86EMUL_CONTINUE; } static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); /* * Not recognized on AMD in compat mode (but is recognized in legacy * mode). */ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); /* sysenter/sysexit have not been tested in 64bit mode. */ if (ctxt->mode == X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ctxt->eflags &= ~(EFLG_VM | EFLG_IF); cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK; ss_sel = cs_sel + 8; if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? 
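/* long mode uses the full MSR_IA32_SYSENTER_ESP value; legacy mode truncates it to 32 bits */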
msr_data : (u32)msr_data; return X86EMUL_CONTINUE; } static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data, rcx, rdx; int usermode; u16 cs_sel = 0, ss_sel = 0; /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); setup_syscalls_segments(ctxt, &cs, &ss); if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; rcx = reg_read(ctxt, VCPU_REGS_RCX); rdx = reg_read(ctxt, VCPU_REGS_RDX); cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); rcx = (u32)rcx; rdx = (u32)rdx; break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); if (msr_data == 0x0) return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; if (is_noncanonical_address(rcx) || is_noncanonical_address(rdx)) return emulate_gp(ctxt, 0); break; } cs_sel |= SELECTOR_RPL_MASK; ss_sel |= SELECTOR_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = rdx; *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; } static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) { int iopl; if (ctxt->mode == X86EMUL_MODE_REAL) return false; if (ctxt->mode == X86EMUL_MODE_VM86) return true; iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; return ctxt->ops->cpl(ctxt) > iopl; } static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct tr_seg; u32 base3; int r; u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; unsigned mask = (1 << len) - 1; unsigned long base; ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); if (!tr_seg.p) return false; if (desc_limit_scaled(&tr_seg) < 103) return false; base = get_desc_base(&tr_seg); #ifdef CONFIG_X86_64 base |= ((u64)base3) << 32; #endif r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) return false; r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); if (r != X86EMUL_CONTINUE) return false; if ((perm >> bit_idx) & mask) return false; return true; } static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { if (ctxt->perm_ok) return true; if (emulator_bad_iopl(ctxt)) if (!emulator_io_port_access_allowed(ctxt, port, len)) return false; ctxt->perm_ok = true; return true; } static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { tss->ip = ctxt->_eip; tss->flag = ctxt->eflags; tss->ax = reg_read(ctxt, VCPU_REGS_RAX); tss->cx = reg_read(ctxt, VCPU_REGS_RCX); tss->dx = reg_read(ctxt, VCPU_REGS_RDX); tss->bx = reg_read(ctxt, VCPU_REGS_RBX); tss->sp = reg_read(ctxt, VCPU_REGS_RSP); tss->bp = reg_read(ctxt, VCPU_REGS_RBP); tss->si = reg_read(ctxt, VCPU_REGS_RSI); tss->di = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); } static int 
load_state_from_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { int ret; u8 cpl; ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; /* * SDM says that segment selectors are loaded before segment * descriptors */ set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); cpl = tss->cs & 3; /* * Now load segment descriptors. If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true, NULL); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; } static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 tss_selector, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { const struct x86_emulate_ops *ops = ctxt->ops; struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss16(ctxt, &tss_seg); ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = ops->write_std(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof tss_seg.prev_task_link, &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; } return load_state_from_tss16(ctxt, &tss_seg); } static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { /* CR3 and ldt selector are not saved intentionally */ tss->eip = ctxt->_eip; tss->eflags = ctxt->eflags; tss->eax = reg_read(ctxt, VCPU_REGS_RAX); tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); tss->edx = reg_read(ctxt, VCPU_REGS_RDX); tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); tss->esp = reg_read(ctxt, VCPU_REGS_RSP); tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); tss->esi = reg_read(ctxt, VCPU_REGS_RSI); tss->edi = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); } static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { int ret; u8 cpl; if (ctxt->ops->set_cr(ctxt, 3, 
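/* CR3 of the incoming task */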
tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret !=
X86EMUL_CONTINUE) /* FIXME: need to provide precise fault address */ return ret; } return load_state_from_tss32(ctxt, &tss_seg); } static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct curr_tss_desc, next_tss_desc; int ret; u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); ulong old_tss_base = ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); u32 desc_limit; ulong desc_addr; /* FIXME: old_tss_base == ~0 ? */ ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; /* FIXME: check that next_tss_desc is tss */ /* * Check privileges. The three cases are task switch caused by... * * 1. jmp/call/int to task gate: Check against DPL of the task gate * 2. Exception/IRQ/iret: No check is performed * 3. jmp/call to TSS/task-gate: No check is performed since the * hardware checks it before exiting. */ if (reason == TASK_SWITCH_GATE) { if (idt_index != -1) { /* Software interrupts */ struct desc_struct task_gate_desc; int dpl; ret = read_interrupt_descriptor(ctxt, idt_index, &task_gate_desc); if (ret != X86EMUL_CONTINUE) return ret; dpl = task_gate_desc.dpl; if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) return emulate_gp(ctxt, (idt_index << 3) | 0x2); } } desc_limit = desc_limit_scaled(&next_tss_desc); if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) { return emulate_ts(ctxt, tss_selector & 0xfffc); } if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); } if (reason == TASK_SWITCH_IRET) ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; /* set back link to prev task only if NT bit is set in eflags note that old_tss_sel is not used after this point */ if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) old_tss_sel = 0xffff; if (next_tss_desc.type & 8) ret = task_switch_32(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); else ret = task_switch_16(ctxt, tss_selector, old_tss_sel, old_tss_base, &next_tss_desc); if (ret != X86EMUL_CONTINUE) return ret; if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; if (reason != TASK_SWITCH_IRET) { next_tss_desc.type |= (1 << 1); /* set busy flag */ write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); } ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); if (has_error_code) { ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; ctxt->lock_prefix = 0; ctxt->src.val = (unsigned long) error_code; ret = em_push(ctxt); } return ret; } int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { int rc; invalidate_registers(ctxt); ctxt->_eip = ctxt->eip; ctxt->dst.type = OP_NONE; rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (rc == X86EMUL_CONTINUE) { ctxt->eip = ctxt->_eip; writeback_registers(ctxt); } return (rc == X86EMUL_UNHANDLEABLE) ? 
EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but at the very least we
	   should restore cs */
	if (rc != X86EMUL_CONTINUE)
		goto fail;
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost by hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback.
*/ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_wrmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int em_rdmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_data; if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; return X86EMUL_CONTINUE; } static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) { if (ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; return X86EMUL_CONTINUE; } static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); } static int em_lldt(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); } static int em_ltr(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); } static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; ulong linear; rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_clts(struct x86_emulate_ctxt *ctxt) { ulong cr0; cr0 = ctxt->ops->get_cr(ctxt, 0); cr0 &= ~X86_CR0_TS; ctxt->ops->set_cr(ctxt, 0, cr0); return X86EMUL_CONTINUE; } static int em_vmcall(struct x86_emulate_ctxt *ctxt) { int rc = ctxt->ops->fix_hypercall(ctxt); if (rc != X86EMUL_CONTINUE) return rc; /* Let the processor re-execute the fixed hypercall */ ctxt->_eip = ctxt->eip; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, void (*get)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *ptr)) { struct desc_ptr desc_ptr; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; get(ctxt, &desc_ptr); if (ctxt->op_bytes == 2) { ctxt->op_bytes = 4; desc_ptr.address &= 0x00ffffff; } /* Disable writeback. 
*/ ctxt->dst.type = OP_NONE; return segmented_write(ctxt, ctxt->dst.addr.mem, &desc_ptr, 2 + ctxt->op_bytes); } static int em_sgdt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); } static int em_sidt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); } static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) { struct desc_ptr desc_ptr; int rc; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->mode == X86EMUL_MODE_PROT64 && is_noncanonical_address(desc_ptr.address)) return emulate_gp(ctxt, 0); if (lgdt) ctxt->ops->set_gdt(ctxt, &desc_ptr); else ctxt->ops->set_idt(ctxt, &desc_ptr); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_lgdt(struct x86_emulate_ctxt *ctxt) { return em_lgdt_lidt(ctxt, true); } static int em_vmmcall(struct x86_emulate_ctxt *ctxt) { int rc; rc = ctxt->ops->fix_hypercall(ctxt); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return rc; } static int em_lidt(struct x86_emulate_ctxt *ctxt) { return em_lgdt_lidt(ctxt, false); } static int em_smsw(struct x86_emulate_ctxt *ctxt) { if (ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); return X86EMUL_CONTINUE; } static int em_lmsw(struct x86_emulate_ctxt *ctxt) { ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) | (ctxt->src.val & 0x0f)); ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_loop(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; register_address_increment(ctxt, VCPU_REGS_RCX, -1); if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_jcxz(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_in(struct x86_emulate_ctxt *ctxt) { if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, &ctxt->dst.val)) return X86EMUL_IO_NEEDED; return X86EMUL_CONTINUE; } static int em_out(struct x86_emulate_ctxt *ctxt) { ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, &ctxt->src.val, 1); /* Disable writeback. 
 */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.GD (general detect enable, bit 13) is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~15;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6,
dr6); return emulate_db(ctxt); } return X86EMUL_CONTINUE; } static int check_dr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int dr = ctxt->modrm_reg; if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) return emulate_gp(ctxt, 0); return check_dr_read(ctxt); } static int check_svme(struct x86_emulate_ctxt *ctxt) { u64 efer; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (!(efer & EFER_SVME)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_svme_pa(struct x86_emulate_ctxt *ctxt) { u64 rax = reg_read(ctxt, VCPU_REGS_RAX); /* Valid physical address? */ if (rax & 0xffff000000000000ULL) return emulate_gp(ctxt, 0); return check_svme(ctxt); } static int check_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || ctxt->ops->check_pmc(ctxt, rcx)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_in(struct x86_emulate_ctxt *ctxt) { ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_out(struct x86_emulate_ctxt *ctxt) { ctxt->src.bytes = min(ctxt->src.bytes, 4u); if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } #define D(_y) { .flags = (_y) } #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define N D(NotImpl) #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) } #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } #define II(_f, _e, _i) \ { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } #define IIP(_f, _e, _i, _p) \ { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } #define D2bv(_f) D((_f) | ByteOp), D(_f) #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) #define I2bvIP(_f, _e, _i, _p) \ IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) static const struct opcode group7_rm0[] = { N, I(SrcNone | Priv | EmulateOnUD, em_vmcall), N, N, N, N, N, N, }; static const struct opcode group7_rm1[] = { DI(SrcNone | Priv, monitor), DI(SrcNone | Priv, mwait), N, N, N, N, N, N, }; static const struct opcode group7_rm3[] = { DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), 
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), DIP(SrcNone | Prot | Priv, stgi, check_svme), DIP(SrcNone | Prot | Priv, clgi, check_svme), DIP(SrcNone | Prot | Priv, skinit, check_svme), DIP(SrcNone | Prot | Priv, invlpga, check_svme), }; static const struct opcode group7_rm7[] = { N, DIP(SrcNone, rdtscp, check_rdtsc), N, N, N, N, N, N, }; static const struct opcode group1[] = { F(Lock, em_add), F(Lock | PageTable, em_or), F(Lock, em_adc), F(Lock, em_sbb), F(Lock | PageTable, em_and), F(Lock, em_sub), F(Lock, em_xor), F(NoWrite, em_cmp), }; static const struct opcode group1A[] = { I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N, }; static const struct opcode group2[] = { F(DstMem | ModRM, em_rol), F(DstMem | ModRM, em_ror), F(DstMem | ModRM, em_rcl), F(DstMem | ModRM, em_rcr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_shr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_sar), }; static const struct opcode group3[] = { F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcNone | Lock, em_not), F(DstMem | SrcNone | Lock, em_neg), F(DstXacc | Src2Mem, em_mul_ex), F(DstXacc | Src2Mem, em_imul_ex), F(DstXacc | Src2Mem, em_div_ex), F(DstXacc | Src2Mem, em_idiv_ex), }; static const struct opcode group4[] = { F(ByteOp | DstMem | SrcNone | Lock, em_inc), F(ByteOp | DstMem | SrcNone | Lock, em_dec), N, N, N, N, N, N, }; static const struct opcode group5[] = { F(DstMem | SrcNone | Lock, em_inc), F(DstMem | SrcNone | Lock, em_dec), I(SrcMem | NearBranch, em_call_near_abs), I(SrcMemFAddr | ImplicitOps | Stack, em_call_far), I(SrcMem | NearBranch, em_jmp_abs), I(SrcMemFAddr | ImplicitOps, em_jmp_far), I(SrcMem | Stack, em_push), D(Undefined), }; static const struct opcode group6[] = { DI(Prot | DstMem, sldt), DI(Prot | DstMem, str), II(Prot | Priv | SrcMem16, em_lldt, lldt), II(Prot | Priv | SrcMem16, em_ltr, ltr), N, N, N, N, }; static const struct group_dual group7 = { { II(Mov | DstMem, em_sgdt, sgdt), II(Mov | DstMem, em_sidt, sidt), II(SrcMem | Priv, em_lgdt, lgdt), II(SrcMem | Priv, em_lidt, lidt), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg), }, { EXT(0, group7_rm0), EXT(0, group7_rm1), N, EXT(0, group7_rm3), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7), } }; static const struct opcode group8[] = { N, N, N, N, F(DstMem | SrcImmByte | NoWrite, em_bt), F(DstMem | SrcImmByte | Lock | PageTable, em_bts), F(DstMem | SrcImmByte | Lock, em_btr), F(DstMem | SrcImmByte | Lock | PageTable, em_btc), }; static const struct group_dual group9 = { { N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, }, { N, N, N, N, N, N, N, N, } }; static const struct opcode group11[] = { I(DstMem | SrcImm | Mov | PageTable, em_mov), X7(D(Undefined)), }; static const struct gprefix pfx_0f_ae_7 = { I(SrcMem | ByteOp, em_clflush), N, N, N, }; static const struct group_dual group15 = { { N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7), }, { N, N, N, N, N, N, N, N, } }; static const struct gprefix pfx_0f_6f_0f_7f = { I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), }; static const struct instr_dual instr_dual_0f_2b = { I(0, em_mov), N }; static const struct gprefix pfx_0f_2b = { ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N, }; static const struct gprefix pfx_0f_28_0f_29 = { I(Aligned, em_mov), I(Aligned, em_mov), N, N, }; static const struct gprefix pfx_0f_e7 = { 
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM,
em_mov_sreg_rm), G(0, group1A), /* 0x90 - 0x97 */ DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), /* 0x98 - 0x9F */ D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), I(SrcImmFAddr | No64, em_call_far), N, II(ImplicitOps | Stack, em_pushf, pushf), II(ImplicitOps | Stack, em_popf, popf), I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), /* 0xA0 - 0xA7 */ I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), I2bv(SrcSI | DstDI | Mov | String, em_mov), F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r), /* 0xA8 - 0xAF */ F2bv(DstAcc | SrcImm | NoWrite, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstAcc | Mov | String, em_mov), F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r), /* 0xB0 - 0xB7 */ X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), /* 0xB8 - 0xBF */ X8(I(DstReg | SrcImm64 | Mov, em_mov)), /* 0xC0 - 0xC7 */ G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm), I(ImplicitOps | NearBranch, em_ret), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave), I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm), I(ImplicitOps | Stack, em_ret_far), D(ImplicitOps), DI(SrcImmByte, intn), D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret), /* 0xD0 - 0xD7 */ G(Src2One | ByteOp, group2), G(Src2One, group2), G(Src2CL | ByteOp, group2), G(Src2CL, group2), I(DstAcc | SrcImmUByte | No64, em_aam), I(DstAcc | SrcImmUByte | No64, em_aad), F(DstAcc | ByteOp | No64, em_salc), I(DstAcc | SrcXLat | ByteOp, em_mov), /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ X3(I(SrcImmByte | NearBranch, em_loop)), I(SrcImmByte | NearBranch, em_jcxz), I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out), /* 0xE8 - 0xEF */ I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch), I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps | NearBranch), I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), /* 0xF0 - 0xF7 */ N, DI(ImplicitOps, icebp), N, N, DI(ImplicitOps | Priv, hlt), D(ImplicitOps), G(ByteOp, group3), G(0, group3), /* 0xF8 - 0xFF */ D(ImplicitOps), D(ImplicitOps), I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), }; static const struct opcode twobyte_table[256] = { /* 0x00 - 0x0F */ G(0, group6), GD(0, &group7), N, N, N, I(ImplicitOps | EmulateOnUD, em_syscall), II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, /* 0x10 - 0x1F */ N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 0x20 - 0x2F */ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, check_cr_write), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, check_dr_write), N, N, N, N, GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29), N, GP(ModRM | DstMem | SrcReg | Mov | Sse, 
&pfx_0f_2b), N, N, N, N, /* 0x30 - 0x3F */ II(ImplicitOps | Priv, em_wrmsr, wrmsr), IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), II(ImplicitOps | Priv, em_rdmsr, rdmsr), IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), I(ImplicitOps | EmulateOnUD, em_sysenter), I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), N, N, N, N, N, N, N, N, N, N, /* 0x40 - 0x4F */ X16(D(DstReg | SrcMem | ModRM)), /* 0x50 - 0x5F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0x60 - 0x6F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x70 - 0x7F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x80 - 0x8F */ X16(D(SrcImm | NearBranch)), /* 0x90 - 0x9F */ X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), /* 0xA0 - 0xA7 */ I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), II(ImplicitOps, em_cpuid, cpuid), F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, /* 0xA8 - 0xAF */ I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), DI(ImplicitOps, rsm), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xB8 - 0xBF */ N, N, G(BitOp, group8), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xC0 - 0xC7 */ F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), N, ID(0, &instr_dual_0f_c3), N, N, N, GD(0, &group9), /* 0xC8 - 0xCF */ X8(I(DstReg, em_bswap)), /* 0xD0 - 0xDF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0xE0 - 0xEF */ N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7), N, N, N, N, N, N, N, N, /* 0xF0 - 0xFF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N }; static const struct instr_dual instr_dual_0f_38_f0 = { I(DstReg | SrcMem | Mov, em_movbe), N }; static const struct instr_dual instr_dual_0f_38_f1 = { I(DstMem | SrcReg | Mov, em_movbe), N }; static const struct gprefix three_byte_0f_38_f0 = { ID(0, &instr_dual_0f_38_f0), N, N, N }; static const struct gprefix three_byte_0f_38_f1 = { ID(0, &instr_dual_0f_38_f1), N, N, N }; /* * Insns below are selected by the prefix which indexed by the third opcode * byte. */ static const struct opcode opcode_map_0f_38[256] = { /* 0x00 - 0x7f */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0x80 - 0xef */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0xf0 - 0xf1 */ GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0), GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1), /* 0xf2 - 0xff */ N, N, X4(N), X8(N) }; #undef D #undef N #undef G #undef GD #undef I #undef GP #undef EXT #undef D2bv #undef D2bvIP #undef I2bv #undef I2bvIP #undef I6ALU static unsigned imm_size(struct x86_emulate_ctxt *ctxt) { unsigned size; size = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; if (size == 8) size = 4; return size; } static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned size, bool sign_extension) { int rc = X86EMUL_CONTINUE; op->type = OP_IMM; op->bytes = size; op->addr.mem.ea = ctxt->_eip; /* NB. Immediates are sign-extended as necessary. */ switch (op->bytes) { case 1: op->val = insn_fetch(s8, ctxt); break; case 2: op->val = insn_fetch(s16, ctxt); break; case 4: op->val = insn_fetch(s32, ctxt); break; case 8: op->val = insn_fetch(s64, ctxt); break; } if (!sign_extension) { switch (op->bytes) { case 1: op->val &= 0xff; break; case 2: op->val &= 0xffff; break; case 4: op->val &= 0xffffffff; break; } } done: return rc; } static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned d) { int rc = X86EMUL_CONTINUE; switch (d) { case OpReg: decode_register_operand(ctxt, op); break; case OpImmUByte: rc = decode_imm(ctxt, op, 1, false); break; case OpMem: ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; mem_common: *op = ctxt->memop; ctxt->memopp = op; if (ctxt->d & BitOp) fetch_bit_operand(ctxt); op->orig_val = op->val; break; case OpMem64: ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; goto mem_common; case OpAcc: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccLo: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccHi: if (ctxt->d & ByteOp) { op->type = OP_NONE; break; } op->type = OP_REG; op->bytes = ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); op->orig_val = op->val; break; case OpDI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RDI); op->addr.mem.seg = VCPU_SREG_ES; op->val = 0; op->count = 1; break; case OpDX: op->type = OP_REG; op->bytes = 2; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); break; case OpCL: op->type = OP_IMM; op->bytes = 1; op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; break; case OpImmByte: rc = decode_imm(ctxt, op, 1, true); break; case OpOne: op->type = OP_IMM; op->bytes = 1; op->val = 1; break; case OpImm: rc = decode_imm(ctxt, op, imm_size(ctxt), true); break; case OpImm64: rc = decode_imm(ctxt, op, ctxt->op_bytes, true); break; case OpMem8: ctxt->memop.bytes = 1; if (ctxt->memop.type == OP_REG) { ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, true); fetch_register_operand(&ctxt->memop); } goto mem_common; case OpMem16: ctxt->memop.bytes = 2; goto mem_common; case OpMem32: ctxt->memop.bytes = 4; goto mem_common; case OpImmU16: rc = decode_imm(ctxt, op, 2, false); break; case OpImmU: rc = decode_imm(ctxt, op, imm_size(ctxt), false); break; case OpSI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RSI); op->addr.mem.seg = ctxt->seg_override; op->val = 0; op->count = 1; break; case OpXLat: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; op->addr.mem.ea = address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RBX) + (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); op->addr.mem.seg = ctxt->seg_override; op->val = 0; break; case OpImmFAddr: op->type = OP_IMM; op->addr.mem.ea = ctxt->_eip; op->bytes = ctxt->op_bytes + 2; insn_fetch_arr(op->valptr, op->bytes, ctxt); break; case OpMemFAddr: ctxt->memop.bytes = ctxt->op_bytes + 2; goto mem_common; case OpES: op->type = OP_IMM; op->val = VCPU_SREG_ES; break; case OpCS: op->type = OP_IMM; op->val = VCPU_SREG_CS; break; case OpSS: op->type = OP_IMM; op->val = VCPU_SREG_SS; break; case OpDS: op->type = OP_IMM; op->val = VCPU_SREG_DS; break; case OpFS: op->type = OP_IMM; op->val = VCPU_SREG_FS; break; case OpGS: op->type = OP_IMM; op->val = VCPU_SREG_GS; break; case OpImplicit: /* Special instructions do their own operand decoding. */ default: op->type = OP_NONE; /* Disable writeback. */ break; } done: return rc; } int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; ctxt->_eip = ctxt->eip; ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { rc = __do_insn_fetch_bytes(ctxt, 1); if (rc != X86EMUL_CONTINUE) return rc; } switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; case X86EMUL_MODE_PROT32: def_op_bytes = def_ad_bytes = 4; break; #ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: def_op_bytes = 4; def_ad_bytes = 8; break; #endif default: return EMULATION_FAILED; } ctxt->op_bytes = def_op_bytes; ctxt->ad_bytes = def_ad_bytes; /* Legacy prefixes. */ for (;;) { switch (ctxt->b = insn_fetch(u8, ctxt)) { case 0x66: /* operand-size override */ op_prefix = true; /* switch between 2/4 bytes */ ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) /* switch between 4/8 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 12; else /* switch between 2/4 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ case 0x2e: /* CS override */ case 0x36: /* SS override */ case 0x3e: /* DS override */ has_seg_override = true; ctxt->seg_override = (ctxt->b >> 3) & 3; break; case 0x64: /* FS override */ case 0x65: /* GS override */ has_seg_override = true; ctxt->seg_override = ctxt->b & 7; break; case 0x40 ... 0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; ctxt->rex_prefix = ctxt->b; continue; case 0xf0: /* LOCK */ ctxt->lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; } /* Any legacy prefix after a REX prefix nullifies its effect. */ ctxt->rex_prefix = 0; } done_prefixes: /* REX prefix. */ if (ctxt->rex_prefix & 8) ctxt->op_bytes = 8; /* REX.W */ /* Opcode byte(s). */ opcode = opcode_table[ctxt->b]; /* Two-byte opcode? 
*/ if (ctxt->b == 0x0f) { ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; /* 0F_38 opcode map */ if (ctxt->b == 0x38) { ctxt->opcode_len = 3; ctxt->b = insn_fetch(u8, ctxt); opcode = opcode_map_0f_38[ctxt->b]; } } ctxt->d = opcode.flags; if (ctxt->d & ModRM) ctxt->modrm = insn_fetch(u8, ctxt); /* vex-prefix instructions are not implemented */ if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) { ctxt->d = NotImpl; } while (ctxt->d & GroupMask) { switch (ctxt->d & GroupMask) { case Group: goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: goffset = (ctxt->modrm >> 3) & 7; if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: if (ctxt->rep_prefix && op_prefix) return EMULATION_FAILED; simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; case Escape: if (ctxt->modrm > 0xbf) opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; else opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; break; case InstrDual: if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.idual->mod3; else opcode = opcode.u.idual->mod012; break; default: return EMULATION_FAILED; } ctxt->d &= ~(u64)GroupMask; ctxt->d |= opcode.flags; } /* Unrecognised? */ if (ctxt->d == 0) return EMULATION_FAILED; ctxt->execute = opcode.u.execute; if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch| No16))) { /* * These are copied unconditionally here, and checked unconditionally * in x86_emulate_insn. */ ctxt->check_perm = opcode.check_perm; ctxt->intercept = opcode.intercept; if (ctxt->d & NotImpl) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64) { if (ctxt->op_bytes == 4 && (ctxt->d & Stack)) ctxt->op_bytes = 8; else if (ctxt->d & NearBranch) ctxt->op_bytes = 8; } if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; else ctxt->op_bytes = 4; } if ((ctxt->d & No16) && ctxt->op_bytes == 2) ctxt->op_bytes = 4; if (ctxt->d & Sse) ctxt->op_bytes = 16; else if (ctxt->d & Mmx) ctxt->op_bytes = 8; } /* ModRM and SIB bytes. */ if (ctxt->d & ModRM) { rc = decode_modrm(ctxt, &ctxt->memop); if (!has_seg_override) { has_seg_override = true; ctxt->seg_override = ctxt->modrm_seg; } } else if (ctxt->d & MemAbs) rc = decode_abs(ctxt, &ctxt->memop); if (rc != X86EMUL_CONTINUE) goto done; if (!has_seg_override) ctxt->seg_override = VCPU_SREG_DS; ctxt->memop.addr.mem.seg = ctxt->seg_override; /* * Decode and fetch the source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* * Decode and fetch the second source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* Decode and fetch the destination operand: register or memory. 
*/ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); if (ctxt->rip_relative) ctxt->memopp->addr.mem.ea = address_mask(ctxt, ctxt->memopp->addr.mem.ea + ctxt->_eip); done: return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; } bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) { return ctxt->d & PageTable; } static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) { /* The second termination condition only applies for REPE * and REPNE. Test if the repeat string operation prefix is * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the * corresponding termination condition according to: * - if REPE/REPZ and ZF = 0 then done * - if REPNE/REPNZ and ZF = 1 then done */ if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || (ctxt->b == 0xae) || (ctxt->b == 0xaf)) && (((ctxt->rep_prefix == REPE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == 0)) || ((ctxt->rep_prefix == REPNE_PREFIX) && ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)))) return true; return false; } static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) { bool fault = false; ctxt->ops->get_fpu(ctxt); asm volatile("1: fwait \n\t" "2: \n\t" ".pushsection .fixup,\"ax\" \n\t" "3: \n\t" "movb $1, %[fault] \n\t" "jmp 2b \n\t" ".popsection \n\t" _ASM_EXTABLE(1b, 3b) : [fault]"+qm"(fault)); ctxt->ops->put_fpu(ctxt); if (unlikely(fault)) return emulate_exception(ctxt, MF_VECTOR, 0, false); return X86EMUL_CONTINUE; } static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { if (op->type == OP_MM) read_mmx_reg(ctxt, &op->mm_val, op->addr.mm); } static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) { ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), [fastop]"+S"(fop) : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); if (!fop) /* exception is returned in fop variable */ return emulate_de(ctxt); return X86EMUL_CONTINUE; } void init_decode_cache(struct x86_emulate_ctxt *ctxt) { memset(&ctxt->rip_relative, 0, (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); ctxt->io_read.pos = 0; ctxt->io_read.end = 0; ctxt->mem_read.end = 0; } int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; ctxt->mem_read.pos = 0; /* LOCK prefix is allowed only with some instructions */ if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { rc = emulate_ud(ctxt); goto done; } if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || (ctxt->d & Undefined)) { rc = emulate_ud(ctxt); goto done; } if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { rc = emulate_nm(ctxt); goto done; } if (ctxt->d & Mmx) { rc = flush_pending_x87_faults(ctxt); if (rc != X86EMUL_CONTINUE) goto done; /* * Now that we know the fpu is exception safe, we can fetch * operands from it. 
*/ fetch_possible_mmx_operand(ctxt, &ctxt->src); fetch_possible_mmx_operand(ctxt, &ctxt->src2); if (!(ctxt->d & Mov)) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } /* Instruction can only be executed in protected mode */ if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { rc = emulate_ud(ctxt); goto done; } /* Privileged instruction can be executed only in CPL=0 */ if ((ctxt->d & Priv) && ops->cpl(ctxt)) { if (ctxt->d & PrivUD) rc = emulate_ud(ctxt); else rc = emulate_gp(ctxt, 0); goto done; } /* Do instruction specific permission checks */ if (ctxt->d & CheckPerm) { rc = ctxt->check_perm(ctxt); if (rc != X86EMUL_CONTINUE) goto done; } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) { /* All REP prefixes have the same first termination condition */ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { ctxt->eip = ctxt->_eip; ctxt->eflags &= ~EFLG_RF; goto done; } } } if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { rc = segmented_read(ctxt, ctxt->src.addr.mem, ctxt->src.valptr, ctxt->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; ctxt->src.orig_val64 = ctxt->src.val64; } if (ctxt->src2.type == OP_MEM) { rc = segmented_read(ctxt, ctxt->src2.addr.mem, &ctxt->src2.val, ctxt->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; } if ((ctxt->d & DstMask) == ImplicitOps) goto special_insn; if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { /* optimisation - avoid slow emulated read if Mov */ rc = segmented_read(ctxt, ctxt->dst.addr.mem, &ctxt->dst.val, ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) goto done; } ctxt->dst.orig_val = ctxt->dst.val; special_insn: if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) ctxt->eflags |= EFLG_RF; else ctxt->eflags &= ~EFLG_RF; if (ctxt->execute) { if (ctxt->d & Fastop) { void (*fop)(struct fastop *) = (void *)ctxt->execute; rc = fastop(ctxt, fop); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } if (ctxt->opcode_len == 2) goto twobyte_insn; else if (ctxt->opcode_len == 3) goto threebyte_insn; switch (ctxt->b) { case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) goto cannot_emulate; ctxt->dst.val = (s32) ctxt->src.val; break; case 0x70 ... 0x7f: /* jcc (short) */ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ ctxt->dst.val = ctxt->src.addr.mem.ea; break; case 0x90 ... 
0x97: /* nop / xchg reg, rax */ if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) ctxt->dst.type = OP_NONE; else rc = em_xchg(ctxt); break; case 0x98: /* cbw/cwde/cdqe */ switch (ctxt->op_bytes) { case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; case 0xcc: /* int3 */ rc = emulate_int(ctxt, 3); break; case 0xcd: /* int n */ rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: /* into */ if (ctxt->eflags & EFLG_OF) rc = emulate_int(ctxt, 4); break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xf4: /* hlt */ ctxt->ops->halt(ctxt); break; case 0xf5: /* cmc */ /* complement carry flag from eflags reg */ ctxt->eflags ^= EFLG_CF; break; case 0xf8: /* clc */ ctxt->eflags &= ~EFLG_CF; break; case 0xf9: /* stc */ ctxt->eflags |= EFLG_CF; break; case 0xfc: /* cld */ ctxt->eflags &= ~EFLG_DF; break; case 0xfd: /* std */ ctxt->eflags |= EFLG_DF; break; default: goto cannot_emulate; } if (rc != X86EMUL_CONTINUE) goto done; writeback: if (ctxt->d & SrcWrite) { BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); rc = writeback(ctxt, &ctxt->src); if (rc != X86EMUL_CONTINUE) goto done; } if (!(ctxt->d & NoWrite)) { rc = writeback(ctxt, &ctxt->dst); if (rc != X86EMUL_CONTINUE) goto done; } /* * restore dst type in case the decoding will be reused * (happens for string instruction ) */ ctxt->dst.type = saved_dst_type; if ((ctxt->d & SrcMask) == SrcSI) string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); if ((ctxt->d & DstMask) == DstDI) string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); if (ctxt->rep_prefix && (ctxt->d & String)) { unsigned int count; struct read_cache *r = &ctxt->io_read; if ((ctxt->d & SrcMask) == SrcSI) count = ctxt->src.count; else count = ctxt->dst.count; register_address_increment(ctxt, VCPU_REGS_RCX, -count); if (!string_insn_completed(ctxt)) { /* * Re-enter guest when pio read ahead buffer is empty * or, if it is not used, after each 1024 iteration. */ if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && (r->end == 0 || r->end != r->pos)) { /* * Reset read cache. Usually happens before * decode, but since instruction is restarted * we have to do it here. */ ctxt->mem_read.end = 0; writeback_registers(ctxt); return EMULATION_RESTART; } goto done; /* skip rip writeback */ } ctxt->eflags &= ~EFLG_RF; } ctxt->eip = ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) { WARN_ON(ctxt->exception.vector > 0x1f); ctxt->have_exception = true; } if (rc == X86EMUL_INTERCEPTED) return EMULATION_INTERCEPTED; if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; twobyte_insn: switch (ctxt->b) { case 0x09: /* wbinvd */ (ctxt->ops->wbinvd)(ctxt); break; case 0x08: /* invd */ case 0x0d: /* GrpP (prefetch) */ case 0x18: /* Grp16 (prefetch/nop) */ case 0x1f: /* nop */ break; case 0x20: /* mov cr, reg */ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: /* mov from dr to reg */ ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); break; case 0x40 ... 0x4f: /* cmov */ if (test_cc(ctxt->b, ctxt->eflags)) ctxt->dst.val = ctxt->src.val; else if (ctxt->mode != X86EMUL_MODE_PROT64 || ctxt->op_bytes != 4) ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x80 ... 
0x8f: /* jnz rel, etc*/ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; case 0xb6 ... 0xb7: /* movzx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val : (u16) ctxt->src.val; break; case 0xbe ... 0xbf: /* movsx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : (s16) ctxt->src.val; break; default: goto cannot_emulate; } threebyte_insn: if (rc != X86EMUL_CONTINUE) goto done; goto writeback; cannot_emulate: return EMULATION_FAILED; } void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) { invalidate_registers(ctxt); } void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) { writeback_registers(ctxt); }
./CrossVul/dataset_final_sorted/CWE-362/c/good_1450_0
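The REP-prefix bookkeeping in good_1450_0 above (stop when RCX reaches zero; hand control back to the guest every 1024 iterations) is easier to see in isolation. A minimal stand-alone sketch with hypothetical names — the real emulator also consults the pio read-ahead buffer before restarting, which is omitted here:

#include <stdint.h>
#include <stdio.h>

enum emu_status { EMU_DONE, EMU_CONTINUE, EMU_RESTART };

/* One string iteration: every REP prefix shares the RCX == 0 stop
 * test, and the emulator yields back to its caller whenever the
 * remaining count hits a multiple of 1024 (RCX & 0x3ff == 0). */
static enum emu_status rep_step(uint64_t *rcx)
{
    if (*rcx == 0)
        return EMU_DONE;
    --*rcx;
    if ((*rcx & 0x3ff) == 0)
        return EMU_RESTART;
    return EMU_CONTINUE;
}

int main(void)
{
    uint64_t rcx = 3000;
    unsigned int restarts = 0;
    enum emu_status s;

    while ((s = rep_step(&rcx)) != EMU_DONE)
        if (s == EMU_RESTART)
            restarts++;    /* the real code re-enters the guest here */
    printf("restarts: %u\n", restarts);    /* prints 3 for a count of 3000 */
    return 0;
}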
crossvul-cpp_data_bad_5222_2
/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "mysys_priv.h" #include <my_dir.h> #include <m_string.h> #include "mysys_err.h" #if defined(HAVE_UTIME_H) #include <utime.h> #elif defined(HAVE_SYS_UTIME_H) #include <sys/utime.h> #elif !defined(HPUX10) struct utimbuf { time_t actime; time_t modtime; }; #endif /* Rename with copy stat form old file Copy stats from old file to new file, deletes orginal and changes new file name to old file name if MY_REDEL_MAKE_COPY is given, then the orginal file is renamed to org_name-'current_time'.BAK */ #define REDEL_EXT ".BAK" int my_redel(const char *org_name, const char *tmp_name, myf MyFlags) { int error=1; DBUG_ENTER("my_redel"); DBUG_PRINT("my",("org_name: '%s' tmp_name: '%s' MyFlags: %d", org_name,tmp_name,MyFlags)); if (my_copystat(org_name,tmp_name,MyFlags) < 0) goto end; if (MyFlags & MY_REDEL_MAKE_BACKUP) { char name_buff[FN_REFLEN+20]; char ext[20]; ext[0]='-'; get_date(ext+1,2+4,(time_t) 0); strmov(strend(ext),REDEL_EXT); if (my_rename(org_name, fn_format(name_buff, org_name, "", ext, 2), MyFlags)) goto end; } else if (my_delete_allow_opened(org_name, MyFlags)) goto end; if (my_rename(tmp_name,org_name,MyFlags)) goto end; error=0; end: DBUG_RETURN(error); } /* my_redel */ /* Copy stat from one file to another */ /* Return -1 if can't get stat, 1 if wrong type of file */ int my_copystat(const char *from, const char *to, int MyFlags) { struct stat statbuf; if (stat(from, &statbuf)) { my_errno=errno; if (MyFlags & (MY_FAE+MY_WME)) my_error(EE_STAT, MYF(ME_BELL+ME_WAITTANG),from,errno); return -1; /* Can't get stat on input file */ } if ((statbuf.st_mode & S_IFMT) != S_IFREG) return 1; /* Copy modes */ if (chmod(to, statbuf.st_mode & 07777)) { my_errno= errno; if (MyFlags & (MY_FAE+MY_WME)) my_error(EE_CHANGE_PERMISSIONS, MYF(ME_BELL+ME_WAITTANG), from, errno); return -1; } #if !defined(__WIN__) if (statbuf.st_nlink > 1 && MyFlags & MY_LINK_WARNING) { if (MyFlags & MY_LINK_WARNING) my_error(EE_LINK_WARNING,MYF(ME_BELL+ME_WAITTANG),from,statbuf.st_nlink); } /* Copy ownership */ if (chown(to, statbuf.st_uid, statbuf.st_gid)) { my_errno= errno; if (MyFlags & (MY_FAE+MY_WME)) my_error(EE_CHANGE_OWNERSHIP, MYF(ME_BELL+ME_WAITTANG), from, errno); return -1; } #endif /* !__WIN__ */ if (MyFlags & MY_COPYTIME) { struct utimbuf timep; timep.actime = statbuf.st_atime; timep.modtime = statbuf.st_mtime; (void) utime((char*) to, &timep);/* Update last accessed and modified times */ } return 0; } /* my_copystat */
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5222_2
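bad_5222_2 above is the vulnerable revision: my_copystat() re-resolves `to` by name for chmod() and chown() after the stat() check, leaving a window in which the path can be swapped for a symlink — the check-to-use race that places this file under CWE-362. A hedged sketch of the descriptor-based idiom that closes that window (a generic illustration, not MySQL's actual fix; the function name is mine):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Copy permissions and ownership from 'from' to 'to' without a
 * stat-then-chmod window: open 'to' once (refusing symlinks) and do
 * all metadata updates through the descriptor, so the name is never
 * re-resolved after the check. */
static int copy_modes_by_fd(const char *from, const char *to)
{
    struct stat st;
    int fd, rc = -1;

    if (stat(from, &st) != 0)
        return -1;
    fd = open(to, O_WRONLY | O_NOFOLLOW);
    if (fd < 0)
        return -1;
    if (fchmod(fd, st.st_mode & 07777) == 0 &&
        fchown(fd, st.st_uid, st.st_gid) == 0)
        rc = 0;
    close(fd);
    return rc;
}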
crossvul-cpp_data_good_836_0
/* * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/module.h> #include <net/tcp.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/addrconf.h> #include "rds.h" #include "tcp.h" /* only for info exporting */ static DEFINE_SPINLOCK(rds_tcp_tc_list_lock); static LIST_HEAD(rds_tcp_tc_list); /* rds_tcp_tc_count counts only IPv4 connections. * rds6_tcp_tc_count counts both IPv4 and IPv6 connections. 
*/ static unsigned int rds_tcp_tc_count; #if IS_ENABLED(CONFIG_IPV6) static unsigned int rds6_tcp_tc_count; #endif /* Track rds_tcp_connection structs so they can be cleaned up */ static DEFINE_SPINLOCK(rds_tcp_conn_lock); static LIST_HEAD(rds_tcp_conn_list); static atomic_t rds_tcp_unloading = ATOMIC_INIT(0); static struct kmem_cache *rds_tcp_conn_slab; static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos); static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF; static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF; static struct ctl_table rds_tcp_sysctl_table[] = { #define RDS_TCP_SNDBUF 0 { .procname = "rds_tcp_sndbuf", /* data is per-net pointer */ .maxlen = sizeof(int), .mode = 0644, .proc_handler = rds_tcp_skbuf_handler, .extra1 = &rds_tcp_min_sndbuf, }, #define RDS_TCP_RCVBUF 1 { .procname = "rds_tcp_rcvbuf", /* data is per-net pointer */ .maxlen = sizeof(int), .mode = 0644, .proc_handler = rds_tcp_skbuf_handler, .extra1 = &rds_tcp_min_rcvbuf, }, { } }; /* doing it this way avoids calling tcp_sk() */ void rds_tcp_nonagle(struct socket *sock) { int val = 1; kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (void *)&val, sizeof(val)); } u32 rds_tcp_write_seq(struct rds_tcp_connection *tc) { /* seq# of the last byte of data in tcp send buffer */ return tcp_sk(tc->t_sock->sk)->write_seq; } u32 rds_tcp_snd_una(struct rds_tcp_connection *tc) { return tcp_sk(tc->t_sock->sk)->snd_una; } void rds_tcp_restore_callbacks(struct socket *sock, struct rds_tcp_connection *tc) { rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc); write_lock_bh(&sock->sk->sk_callback_lock); /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_del_init(&tc->t_list_item); #if IS_ENABLED(CONFIG_IPV6) rds6_tcp_tc_count--; #endif if (!tc->t_cpath->cp_conn->c_isv6) rds_tcp_tc_count--; spin_unlock(&rds_tcp_tc_list_lock); tc->t_sock = NULL; sock->sk->sk_write_space = tc->t_orig_write_space; sock->sk->sk_data_ready = tc->t_orig_data_ready; sock->sk->sk_state_change = tc->t_orig_state_change; sock->sk->sk_user_data = NULL; write_unlock_bh(&sock->sk->sk_callback_lock); } /* * rds_tcp_reset_callbacks() switches the to the new sock and * returns the existing tc->t_sock. * * The only functions that set tc->t_sock are rds_tcp_set_callbacks * and rds_tcp_reset_callbacks. Send and receive trust that * it is set. The absence of RDS_CONN_UP bit protects those paths * from being called while it isn't set. */ void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp) { struct rds_tcp_connection *tc = cp->cp_transport_data; struct socket *osock = tc->t_sock; if (!osock) goto newsock; /* Need to resolve a duelling SYN between peers. * We have an outstanding SYN to this peer, which may * potentially have transitioned to the RDS_CONN_UP state, * so we must quiesce any send threads before resetting * cp_transport_data. We quiesce these threads by setting * cp_state to something other than RDS_CONN_UP, and then * waiting for any existing threads in rds_send_xmit to * complete release_in_xmit(). (Subsequent threads entering * rds_send_xmit() will bail on !rds_conn_up(). 
* * However an incoming syn-ack at this point would end up * marking the conn as RDS_CONN_UP, and would again permit * rds_send_xmi() threads through, so ideally we would * synchronize on RDS_CONN_UP after lock_sock(), but cannot * do that: waiting on !RDS_IN_XMIT after lock_sock() may * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT * would not get set. As a result, we set c_state to * RDS_CONN_RESETTTING, to ensure that rds_tcp_state_change * cannot mark rds_conn_path_up() in the window before lock_sock() */ atomic_set(&cp->cp_state, RDS_CONN_RESETTING); wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags)); lock_sock(osock->sk); /* reset receive side state for rds_tcp_data_recv() for osock */ cancel_delayed_work_sync(&cp->cp_send_w); cancel_delayed_work_sync(&cp->cp_recv_w); if (tc->t_tinc) { rds_inc_put(&tc->t_tinc->ti_inc); tc->t_tinc = NULL; } tc->t_tinc_hdr_rem = sizeof(struct rds_header); tc->t_tinc_data_rem = 0; rds_tcp_restore_callbacks(osock, tc); release_sock(osock->sk); sock_release(osock); newsock: rds_send_path_reset(cp); lock_sock(sock->sk); rds_tcp_set_callbacks(sock, cp); release_sock(sock->sk); } /* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments * above rds_tcp_reset_callbacks for notes about synchronization * with data path */ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp) { struct rds_tcp_connection *tc = cp->cp_transport_data; rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc); write_lock_bh(&sock->sk->sk_callback_lock); /* done under the callback_lock to serialize with write_space */ spin_lock(&rds_tcp_tc_list_lock); list_add_tail(&tc->t_list_item, &rds_tcp_tc_list); #if IS_ENABLED(CONFIG_IPV6) rds6_tcp_tc_count++; #endif if (!tc->t_cpath->cp_conn->c_isv6) rds_tcp_tc_count++; spin_unlock(&rds_tcp_tc_list_lock); /* accepted sockets need our listen data ready undone */ if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready) sock->sk->sk_data_ready = sock->sk->sk_user_data; tc->t_sock = sock; tc->t_cpath = cp; tc->t_orig_data_ready = sock->sk->sk_data_ready; tc->t_orig_write_space = sock->sk->sk_write_space; tc->t_orig_state_change = sock->sk->sk_state_change; sock->sk->sk_user_data = cp; sock->sk->sk_data_ready = rds_tcp_data_ready; sock->sk->sk_write_space = rds_tcp_write_space; sock->sk->sk_state_change = rds_tcp_state_change; write_unlock_bh(&sock->sk->sk_callback_lock); } /* Handle RDS_INFO_TCP_SOCKETS socket option. It only returns IPv4 * connections for backward compatibility. 
*/ static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { struct rds_info_tcp_socket tsinfo; struct rds_tcp_connection *tc; unsigned long flags; spin_lock_irqsave(&rds_tcp_tc_list_lock, flags); if (len / sizeof(tsinfo) < rds_tcp_tc_count) goto out; list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) { struct inet_sock *inet = inet_sk(tc->t_sock->sk); if (tc->t_cpath->cp_conn->c_isv6) continue; tsinfo.local_addr = inet->inet_saddr; tsinfo.local_port = inet->inet_sport; tsinfo.peer_addr = inet->inet_daddr; tsinfo.peer_port = inet->inet_dport; tsinfo.hdr_rem = tc->t_tinc_hdr_rem; tsinfo.data_rem = tc->t_tinc_data_rem; tsinfo.last_sent_nxt = tc->t_last_sent_nxt; tsinfo.last_expected_una = tc->t_last_expected_una; tsinfo.last_seen_una = tc->t_last_seen_una; tsinfo.tos = tc->t_cpath->cp_conn->c_tos; rds_info_copy(iter, &tsinfo, sizeof(tsinfo)); } out: lens->nr = rds_tcp_tc_count; lens->each = sizeof(tsinfo); spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } #if IS_ENABLED(CONFIG_IPV6) /* Handle RDS6_INFO_TCP_SOCKETS socket option. It returns both IPv4 and * IPv6 connections. IPv4 connection address is returned in an IPv4 mapped * address. */ static void rds6_tcp_tc_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) { struct rds6_info_tcp_socket tsinfo6; struct rds_tcp_connection *tc; unsigned long flags; spin_lock_irqsave(&rds_tcp_tc_list_lock, flags); if (len / sizeof(tsinfo6) < rds6_tcp_tc_count) goto out; list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) { struct sock *sk = tc->t_sock->sk; struct inet_sock *inet = inet_sk(sk); tsinfo6.local_addr = sk->sk_v6_rcv_saddr; tsinfo6.local_port = inet->inet_sport; tsinfo6.peer_addr = sk->sk_v6_daddr; tsinfo6.peer_port = inet->inet_dport; tsinfo6.hdr_rem = tc->t_tinc_hdr_rem; tsinfo6.data_rem = tc->t_tinc_data_rem; tsinfo6.last_sent_nxt = tc->t_last_sent_nxt; tsinfo6.last_expected_una = tc->t_last_expected_una; tsinfo6.last_seen_una = tc->t_last_seen_una; rds_info_copy(iter, &tsinfo6, sizeof(tsinfo6)); } out: lens->nr = rds6_tcp_tc_count; lens->each = sizeof(tsinfo6); spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } #endif static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr, __u32 scope_id) { struct net_device *dev = NULL; #if IS_ENABLED(CONFIG_IPV6) int ret; #endif if (ipv6_addr_v4mapped(addr)) { if (inet_addr_type(net, addr->s6_addr32[3]) == RTN_LOCAL) return 0; return -EADDRNOTAVAIL; } /* If the scope_id is specified, check only those addresses * hosted on the specified interface. */ if (scope_id != 0) { rcu_read_lock(); dev = dev_get_by_index_rcu(net, scope_id); /* scope_id is not valid... 
*/ if (!dev) { rcu_read_unlock(); return -EADDRNOTAVAIL; } rcu_read_unlock(); } #if IS_ENABLED(CONFIG_IPV6) ret = ipv6_chk_addr(net, addr, dev, 0); if (ret) return 0; #endif return -EADDRNOTAVAIL; } static void rds_tcp_conn_free(void *arg) { struct rds_tcp_connection *tc = arg; unsigned long flags; rdsdebug("freeing tc %p\n", tc); spin_lock_irqsave(&rds_tcp_conn_lock, flags); if (!tc->t_tcp_node_detached) list_del(&tc->t_tcp_node); spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); kmem_cache_free(rds_tcp_conn_slab, tc); } static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) { struct rds_tcp_connection *tc; int i, j; int ret = 0; for (i = 0; i < RDS_MPATH_WORKERS; i++) { tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp); if (!tc) { ret = -ENOMEM; goto fail; } mutex_init(&tc->t_conn_path_lock); tc->t_sock = NULL; tc->t_tinc = NULL; tc->t_tinc_hdr_rem = sizeof(struct rds_header); tc->t_tinc_data_rem = 0; conn->c_path[i].cp_transport_data = tc; tc->t_cpath = &conn->c_path[i]; tc->t_tcp_node_detached = true; rdsdebug("rds_conn_path [%d] tc %p\n", i, conn->c_path[i].cp_transport_data); } spin_lock_irq(&rds_tcp_conn_lock); for (i = 0; i < RDS_MPATH_WORKERS; i++) { tc = conn->c_path[i].cp_transport_data; tc->t_tcp_node_detached = false; list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list); } spin_unlock_irq(&rds_tcp_conn_lock); fail: if (ret) { for (j = 0; j < i; j++) rds_tcp_conn_free(conn->c_path[j].cp_transport_data); } return ret; } static bool list_has_conn(struct list_head *list, struct rds_connection *conn) { struct rds_tcp_connection *tc, *_tc; list_for_each_entry_safe(tc, _tc, list, t_tcp_node) { if (tc->t_cpath->cp_conn == conn) return true; } return false; } static void rds_tcp_set_unloading(void) { atomic_set(&rds_tcp_unloading, 1); } static bool rds_tcp_is_unloading(struct rds_connection *conn) { return atomic_read(&rds_tcp_unloading) != 0; } static void rds_tcp_destroy_conns(void) { struct rds_tcp_connection *tc, *_tc; LIST_HEAD(tmp_list); /* avoid calling conn_destroy with irqs off */ spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) list_move_tail(&tc->t_tcp_node, &tmp_list); } spin_unlock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) rds_conn_destroy(tc->t_cpath->cp_conn); } static void rds_tcp_exit(void); static u8 rds_tcp_get_tos_map(u8 tos) { /* all user tos mapped to default 0 for TCP transport */ return 0; } struct rds_transport rds_tcp_transport = { .laddr_check = rds_tcp_laddr_check, .xmit_path_prepare = rds_tcp_xmit_path_prepare, .xmit_path_complete = rds_tcp_xmit_path_complete, .xmit = rds_tcp_xmit, .recv_path = rds_tcp_recv_path, .conn_alloc = rds_tcp_conn_alloc, .conn_free = rds_tcp_conn_free, .conn_path_connect = rds_tcp_conn_path_connect, .conn_path_shutdown = rds_tcp_conn_path_shutdown, .inc_copy_to_user = rds_tcp_inc_copy_to_user, .inc_free = rds_tcp_inc_free, .stats_info_copy = rds_tcp_stats_info_copy, .exit = rds_tcp_exit, .get_tos_map = rds_tcp_get_tos_map, .t_owner = THIS_MODULE, .t_name = "tcp", .t_type = RDS_TRANS_TCP, .t_prefer_loopback = 1, .t_mp_capable = 1, .t_unloading = rds_tcp_is_unloading, }; static unsigned int rds_tcp_netid; /* per-network namespace private data for this module */ struct rds_tcp_net { struct socket *rds_tcp_listen_sock; struct work_struct rds_tcp_accept_w; struct ctl_table_header *rds_tcp_sysctl; struct ctl_table *ctl_table; int sndbuf_size; int rcvbuf_size; }; /* All module 
specific customizations to the RDS-TCP socket should be done in * rds_tcp_tune() and applied after socket creation. */ void rds_tcp_tune(struct socket *sock) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); rds_tcp_nonagle(sock); lock_sock(sk); if (rtn->sndbuf_size > 0) { sk->sk_sndbuf = rtn->sndbuf_size; sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } if (rtn->rcvbuf_size > 0) { sk->sk_sndbuf = rtn->rcvbuf_size; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } release_sock(sk); } static void rds_tcp_accept_worker(struct work_struct *work) { struct rds_tcp_net *rtn = container_of(work, struct rds_tcp_net, rds_tcp_accept_w); while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0) cond_resched(); } void rds_tcp_accept_work(struct sock *sk) { struct net *net = sock_net(sk); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); queue_work(rds_wq, &rtn->rds_tcp_accept_w); } static __net_init int rds_tcp_init_net(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct ctl_table *tbl; int err = 0; memset(rtn, 0, sizeof(*rtn)); /* {snd, rcv}buf_size default to 0, which implies we let the * stack pick the value, and permit auto-tuning of buffer size. */ if (net == &init_net) { tbl = rds_tcp_sysctl_table; } else { tbl = kmemdup(rds_tcp_sysctl_table, sizeof(rds_tcp_sysctl_table), GFP_KERNEL); if (!tbl) { pr_warn("could not set allocate syctl table\n"); return -ENOMEM; } rtn->ctl_table = tbl; } tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size; tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size; rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl); if (!rtn->rds_tcp_sysctl) { pr_warn("could not register sysctl\n"); err = -ENOMEM; goto fail; } #if IS_ENABLED(CONFIG_IPV6) rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true); #else rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false); #endif if (!rtn->rds_tcp_listen_sock) { pr_warn("could not set up IPv6 listen sock\n"); #if IS_ENABLED(CONFIG_IPV6) /* Try IPv4 as some systems disable IPv6 */ rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false); if (!rtn->rds_tcp_listen_sock) { #endif unregister_net_sysctl_table(rtn->rds_tcp_sysctl); rtn->rds_tcp_sysctl = NULL; err = -EAFNOSUPPORT; goto fail; #if IS_ENABLED(CONFIG_IPV6) } #endif } INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker); return 0; fail: if (net != &init_net) kfree(tbl); return err; } static void rds_tcp_kill_sock(struct net *net) { struct rds_tcp_connection *tc, *_tc; LIST_HEAD(tmp_list); struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct socket *lsock = rtn->rds_tcp_listen_sock; rtn->rds_tcp_listen_sock = NULL; rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net) continue; if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) { list_move_tail(&tc->t_tcp_node, &tmp_list); } else { list_del(&tc->t_tcp_node); tc->t_tcp_node_detached = true; } } spin_unlock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) rds_conn_destroy(tc->t_cpath->cp_conn); } static void __net_exit rds_tcp_exit_net(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); rds_tcp_kill_sock(net); if (rtn->rds_tcp_sysctl) unregister_net_sysctl_table(rtn->rds_tcp_sysctl); if (net != &init_net) kfree(rtn->ctl_table); } static struct pernet_operations rds_tcp_net_ops = { 
.init = rds_tcp_init_net, .exit = rds_tcp_exit_net, .id = &rds_tcp_netid, .size = sizeof(struct rds_tcp_net), }; void *rds_tcp_listen_sock_def_readable(struct net *net) { struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); struct socket *lsock = rtn->rds_tcp_listen_sock; if (!lsock) return NULL; return lsock->sk->sk_user_data; } /* when sysctl is used to modify some kernel socket parameters,this * function resets the RDS connections in that netns so that we can * restart with new parameters. The assumption is that such reset * events are few and far-between. */ static void rds_tcp_sysctl_reset(struct net *net) { struct rds_tcp_connection *tc, *_tc; spin_lock_irq(&rds_tcp_conn_lock); list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); if (net != c_net || !tc->t_sock) continue; /* reconnect with new parameters */ rds_conn_path_drop(tc->t_cpath, false); } spin_unlock_irq(&rds_tcp_conn_lock); } static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *fpos) { struct net *net = current->nsproxy->net_ns; int err; err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos); if (err < 0) { pr_warn("Invalid input. Must be >= %d\n", *(int *)(ctl->extra1)); return err; } if (write) rds_tcp_sysctl_reset(net); return 0; } static void rds_tcp_exit(void) { rds_tcp_set_unloading(); synchronize_rcu(); rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); #if IS_ENABLED(CONFIG_IPV6) rds_info_deregister_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); #endif unregister_pernet_device(&rds_tcp_net_ops); rds_tcp_destroy_conns(); rds_trans_unregister(&rds_tcp_transport); rds_tcp_recv_exit(); kmem_cache_destroy(rds_tcp_conn_slab); } module_exit(rds_tcp_exit); static int rds_tcp_init(void) { int ret; rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection", sizeof(struct rds_tcp_connection), 0, 0, NULL); if (!rds_tcp_conn_slab) { ret = -ENOMEM; goto out; } ret = rds_tcp_recv_init(); if (ret) goto out_slab; ret = register_pernet_device(&rds_tcp_net_ops); if (ret) goto out_recv; rds_trans_register(&rds_tcp_transport); rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); #if IS_ENABLED(CONFIG_IPV6) rds_info_register_func(RDS6_INFO_TCP_SOCKETS, rds6_tcp_tc_info); #endif goto out; out_recv: rds_tcp_recv_exit(); out_slab: kmem_cache_destroy(rds_tcp_conn_slab); out: return ret; } module_init(rds_tcp_init); MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>"); MODULE_DESCRIPTION("RDS: TCP transport"); MODULE_LICENSE("Dual BSD/GPL");
./CrossVul/dataset_final_sorted/CWE-362/c/good_836_0
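A pattern worth isolating from good_836_0 above: rds_tcp_conn_free() and rds_tcp_kill_sock() can both try to unlink the same node, so each takes rds_tcp_conn_lock and consults t_tcp_node_detached, guaranteeing the unlink happens at most once. A user-space sketch of that idiom, with pthreads standing in for the kernel spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>

struct node {
    struct node *prev, *next;
    bool detached;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Safe to call from either teardown path: the flag is tested and the
 * unlink performed under one lock, so two racing callers cannot both
 * rewrite prev/next — the double-unlink a CWE-362 race would cause. */
static void node_detach(struct node *n)
{
    pthread_mutex_lock(&list_lock);
    if (!n->detached) {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->detached = true;
    }
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    struct node a = { &a, &a, false };           /* one-element ring */
    struct node b = { &a, &a, false };           /* insert b after a */

    a.next = a.prev = &b;
    node_detach(&b);                             /* first unlink wins */
    node_detach(&b);                             /* second call is a no-op */
    return a.next == &a ? 0 : 1;
}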
crossvul-cpp_data_good_1664_3
/* BEGIN_ICS_COPYRIGHT5 **************************************** Copyright (c) 2015, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ** END_ICS_COPYRIGHT5 ****************************************/ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <signal.h> #include <unistd.h> #include <string.h> #include "hsm_com_client_api.h" #include "hsm_com_client_data.h" hsm_com_errno_t hcom_client_init ( OUT p_hsm_com_client_hdl_t *p_hdl, IN char *server_path, IN char *client_path, IN int max_data_len ) { hsm_com_client_hdl_t *hdl = NULL; hsm_com_errno_t res = HSM_COM_OK; if((strlen(server_path) > (HSM_COM_SVR_MAX_PATH - 1)) || (strlen(server_path) == 0)){ res = HSM_COM_PATH_ERR; goto cleanup; } if((strlen(client_path) > (HSM_COM_SVR_MAX_PATH - 1)) || (strlen(client_path) == 0)){ res = HSM_COM_PATH_ERR; goto cleanup; } if((hdl = calloc(1,sizeof(hsm_com_client_hdl_t))) == NULL) { res = HSM_COM_NO_MEM; goto cleanup; } if((hdl->scr.scratch = malloc(max_data_len)) == NULL) { res = HSM_COM_NO_MEM; goto cleanup; } if((hdl->recv_buf = malloc(max_data_len)) == NULL) { res = HSM_COM_NO_MEM; goto cleanup; } if((hdl->send_buf = malloc(max_data_len)) == NULL) { res = HSM_COM_NO_MEM; goto cleanup; } hdl->scr.scratch_fill = 0; hdl->scr.scratch_len = max_data_len; hdl->buf_len = max_data_len; hdl->trans_id = 1; strcpy(hdl->s_path,server_path); strcpy(hdl->c_path,client_path); if (mkstemp(hdl->c_path) == -1) { res = HSM_COM_PATH_ERR; goto cleanup; } hdl->client_state = HSM_COM_C_STATE_IN; *p_hdl = hdl; return res; cleanup: if(hdl) { if (hdl->scr.scratch) { free(hdl->scr.scratch); } if (hdl->recv_buf) { free(hdl->recv_buf); } free(hdl); } return res; } hsm_com_errno_t hcom_client_connect ( IN p_hsm_com_client_hdl_t p_hdl ) { return unix_client_connect(p_hdl); } hsm_com_errno_t hcom_client_disconnect ( IN p_hsm_com_client_hdl_t p_hdl ) { return unix_client_disconnect(p_hdl); } hsm_com_errno_t hcom_client_send_ping ( IN p_hsm_com_client_hdl_t p_hdl, IN int timeout_s ) { return unix_sck_send_ping(p_hdl,timeout_s); } hsm_com_errno_t hcom_client_send_data ( IN p_hsm_com_client_hdl_t p_hdl, IN int timeout_s, IN hsm_com_datagram_t *data, 
OUT hsm_com_datagram_t *res ) { if(p_hdl->client_state == HSM_COM_C_STATE_CT) return unix_sck_send_data(p_hdl, timeout_s, data, res); return HSM_COM_NOT_CONNECTED; } hsm_com_errno_t hcom_client_create_stream ( OUT p_hsm_com_stream_hdl_t *p_stream_hdl, IN p_hsm_com_client_hdl_t p_client_hdl, IN char *socket_path, IN int max_conx, IN int max_data_len ) { return HSM_COM_OK; } hsm_com_errno_t hcom_client_destroy_stream ( IN p_hsm_com_stream_hdl_t p_stream_hdl, IN p_hsm_com_client_hdl_t p_client_hdl ) { return HSM_COM_OK; }
./CrossVul/dataset_final_sorted/CWE-362/c/good_1664_3
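The mkstemp() call is the notable line in good_1664_3 above: the client path is chosen and created in one atomic step instead of being picked first and created later, which is the usual CWE-362 temp-file cure (the returned descriptor is only tested against -1 there; a caller with no use for it would normally close it). A generic sketch of the idiom, with a hypothetical template path:

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Reserve a unique path: mkstemp() both picks the XXXXXX suffix and
 * creates the file with O_EXCL semantics, so no other process can
 * slip in between "choose a name" and "create it". */
static int reserve_unique_path(char *buf, size_t len)
{
    static const char tmpl[] = "/tmp/hsm_clientXXXXXX";
    int fd;

    if (len < sizeof(tmpl))
        return -1;
    memcpy(buf, tmpl, sizeof(tmpl));
    fd = mkstemp(buf);          /* buf is rewritten in place */
    if (fd < 0)
        return -1;
    return close(fd);           /* keep the name, drop the descriptor */
}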
crossvul-cpp_data_bad_4933_1
#include <linux/init.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cache.h> #include <asm/apic.h> #include <asm/uv/uv.h> #include <linux/debugfs.h> /* * Smarter SMP flushing macros. * c/o Linus Torvalds. * * These mean you can really definitely utterly forget about * writing to user space from interrupts. (Its not allowed anyway). * * Optimizations Manfred Spraul <manfred@colorfullife.com> * * More scalable flush, from Andi Kleen * * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ struct flush_tlb_info { struct mm_struct *flush_mm; unsigned long flush_start; unsigned long flush_end; }; /* * We cannot call mmdrop() because we are in interrupt context, * instead update mm->cpu_vm_mask. */ void leave_mm(int cpu) { struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) BUG(); if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); load_cr3(swapper_pg_dir); /* * This gets called in the idle path where RCU * functions differently. Tracing normally * uses RCU, so we have to call the tracepoint * specially here. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } } EXPORT_SYMBOL_GPL(leave_mm); /* * The flush IPI assumes that a thread switch happens in this order: * [cpu0: the cpu that switches] * 1) switch_mm() either 1a) or 1b) * 1a) thread switch to a different mm * 1a1) set cpu_tlbstate to TLBSTATE_OK * Now the tlb flush NMI handler flush_tlb_func won't call leave_mm * if cpu0 was in lazy tlb mode. * 1a2) update cpu active_mm * Now cpu0 accepts tlb flushes for the new mm. * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask); * Now the other cpus will send tlb flush ipis. * 1a4) change cr3. * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask); * Stop ipi delivery for the old mm. This is not synchronized with * the other cpus, but flush_tlb_func ignore flush ipis for the wrong * mm, and in the worst case we perform a superfluous tlb flush. * 1b) thread switch without mm change * cpu active_mm is correct, cpu0 already handles flush ipis. * 1b1) set cpu_tlbstate to TLBSTATE_OK * 1b2) test_and_set the cpu bit in cpu_vm_mask. * Atomically set the bit [other cpus will start sending flush ipis], * and test the bit. * 1b3) if the bit was 0: leave_mm was called, flush the tlb. * 2) switch %%esp, ie current * * The interrupt must handle 2 special cases: * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm. * - the cpu performs speculative tlb reads, i.e. even if the cpu only * runs in kernel space, the cpu could load tlb entries for user space * pages. * * The good news is that cpu_tlbstate is local to each cpu, no * write/read ordering problems. */ /* * TLB flush funcation: * 1) Flush the tlb entries if the cpu uses the mm that's being flushed. * 2) Leave the mm if we are in the lazy tlb mode. 
*/ static void flush_tlb_func(void *info) { struct flush_tlb_info *f = info; inc_irq_stat(irq_tlb_count); if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) return; if (!f->flush_end) f->flush_end = f->flush_start + PAGE_SIZE; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { if (f->flush_end == TLB_FLUSH_ALL) { local_flush_tlb(); trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL); } else { unsigned long addr; unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); addr += PAGE_SIZE; } trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); } } else leave_mm(smp_processor_id()); } void native_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, unsigned long end) { struct flush_tlb_info info; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start); if (is_uv_system()) { unsigned int cpu; cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func, &info, 1); return; } smp_call_function_many(cpumask, flush_tlb_func, &info, 1); } void flush_tlb_current_task(void) { struct mm_struct *mm = current->mm; preempt_disable(); count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); preempt_enable(); } /* * See Documentation/x86/tlb.txt for details. We choose 33 * because it is large enough to cover the vast majority (at * least 95%) of allocations, and is small enough that we are * confident it will not cause too much overhead. Each single * flush is about 100 ns, so this caps the maximum overhead at * _about_ 3,000 ns. * * This is in units of pages. 
*/ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33; void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long vmflag) { unsigned long addr; /* do a global flush by default */ unsigned long base_pages_to_flush = TLB_FLUSH_ALL; preempt_disable(); if (current->active_mm != mm) goto out; if (!current->mm) { leave_mm(smp_processor_id()); goto out; } if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) base_pages_to_flush = (end - start) >> PAGE_SHIFT; if (base_pages_to_flush > tlb_single_page_flush_ceiling) { base_pages_to_flush = TLB_FLUSH_ALL; count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); } else { /* flush range by one by one 'invlpg' */ for (addr = start; addr < end; addr += PAGE_SIZE) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); } } trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); out: if (base_pages_to_flush == TLB_FLUSH_ALL) { start = 0UL; end = TLB_FLUSH_ALL; } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, end); preempt_enable(); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (current->active_mm == mm) { if (current->mm) __flush_tlb_one(start); else leave_mm(smp_processor_id()); } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); preempt_enable(); } static void do_flush_tlb_all(void *info) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); __flush_tlb_all(); if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY) leave_mm(smp_processor_id()); } void flush_tlb_all(void) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); on_each_cpu(do_flush_tlb_all, NULL, 1); } static void do_kernel_range_flush(void *info) { struct flush_tlb_info *f = info; unsigned long addr; /* flush range by one by one 'invlpg' */ for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE) __flush_tlb_single(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { /* Balance as user space task's flush, a bit conservative */ if (end == TLB_FLUSH_ALL || (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) { on_each_cpu(do_flush_tlb_all, NULL, 1); } else { struct flush_tlb_info info; info.flush_start = start; info.flush_end = end; on_each_cpu(do_kernel_range_flush, &info, 1); } } static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; unsigned int len; len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t tlbflush_write_file(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { char buf[32]; ssize_t len; int ceiling; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtoint(buf, 0, &ceiling)) return -EINVAL; if (ceiling < 0) return -EINVAL; tlb_single_page_flush_ceiling = ceiling; return count; } static const struct file_operations fops_tlbflush = { .read = tlbflush_read_file, .write = tlbflush_write_file, .llseek = default_llseek, }; static int __init create_tlb_single_page_flush_ceiling(void) { debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, arch_debugfs_dir, NULL, &fops_tlbflush); return 0; } late_initcall(create_tlb_single_page_flush_ceiling);
./CrossVul/dataset_final_sorted/CWE-362/c/bad_4933_1
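Setting aside whichever race earned bad_4933_1 its label, the tlb_single_page_flush_ceiling heuristic above is a self-contained decision: flush page by page only while the range covers at most 33 pages (about 100 ns per single flush, so roughly 3 us worst case), otherwise fall back to one global flush. A stand-alone sketch, with `huge` standing in for the VM_HUGETLB test:

#include <stdbool.h>

#define PAGE_SHIFT  12
#define FLUSH_ALL   (~0UL)

/* The 33-page ceiling is the kernel's own constant; everything else
 * here is illustrative. */
static unsigned long flush_ceiling = 33;

static bool use_full_flush(unsigned long start, unsigned long end, bool huge)
{
    /* Whole address space requested, or huge pages whose count the
     * PAGE_SHIFT arithmetic below would misjudge: flush everything. */
    if (end == FLUSH_ALL || huge)
        return true;
    /* Otherwise compare the page count against the ceiling. */
    return ((end - start) >> PAGE_SHIFT) > flush_ceiling;
}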
crossvul-cpp_data_bad_1819_4
/* * linux/fs/ext4/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/parser.h> #include <linux/buffer_head.h> #include <linux/exportfs.h> #include <linux/vfs.h> #include <linux/random.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/log2.h> #include <linux/crc16.h> #include <linux/cleancache.h> #include <asm/uaccess.h> #include <linux/kthread.h> #include <linux/freezer.h> #include "ext4.h" #include "ext4_extents.h" /* Needed for trace points definition */ #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" #include "mballoc.h" #define CREATE_TRACE_POINTS #include <trace/events/ext4.h> static struct ext4_lazy_init *ext4_li_info; static struct mutex ext4_li_mtx; static int ext4_mballoc_ready; static struct ratelimit_state ext4_mount_msg_ratelimit; static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_show_options(struct seq_file *seq, struct dentry *root); static int ext4_commit_super(struct super_block *sb, int sync); static void ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); static int ext4_unfreeze(struct super_block *sb); static int ext4_freeze(struct super_block *sb); static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); static inline int ext2_feature_set_ok(struct super_block *sb); static inline int ext3_feature_set_ok(struct super_block *sb); static int ext4_feature_set_ok(struct super_block *sb, int readonly); static void ext4_destroy_lazyinit_thread(void); static void ext4_unregister_li_request(struct super_block *sb); static void ext4_clear_request_list(void); #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) static struct file_system_type ext2_fs_type = { .owner = THIS_MODULE, .name = "ext2", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext2"); MODULE_ALIAS("ext2"); #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type) #else #define IS_EXT2_SB(sb) (0) #endif static struct file_system_type ext3_fs_type = { .owner = THIS_MODULE, .name = "ext3", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext3"); MODULE_ALIAS("ext3"); #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type) static int ext4_verify_csum_type(struct super_block *sb, struct ext4_super_block *es) { if (!ext4_has_feature_metadata_csum(sb)) return 1; return es->s_checksum_type == EXT4_CRC32C_CHKSUM; } static __le32 ext4_superblock_csum(struct 
super_block *sb, struct ext4_super_block *es) { struct ext4_sb_info *sbi = EXT4_SB(sb); int offset = offsetof(struct ext4_super_block, s_checksum); __u32 csum; csum = ext4_chksum(sbi, ~0, (char *)es, offset); return cpu_to_le32(csum); } static int ext4_superblock_csum_verify(struct super_block *sb, struct ext4_super_block *es) { if (!ext4_has_metadata_csum(sb)) return 1; return es->s_checksum == ext4_superblock_csum(sb, es); } void ext4_superblock_csum_set(struct super_block *sb) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; if (!ext4_has_metadata_csum(sb)) return; es->s_checksum = ext4_superblock_csum(sb, es); } void *ext4_kvmalloc(size_t size, gfp_t flags) { void *ret; ret = kmalloc(size, flags | __GFP_NOWARN); if (!ret) ret = __vmalloc(size, flags, PAGE_KERNEL); return ret; } void *ext4_kvzalloc(size_t size, gfp_t flags) { void *ret; ret = kzalloc(size, flags | __GFP_NOWARN); if (!ret) ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL); return ret; } ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_block_bitmap_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0); } ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_inode_bitmap_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0); } ext4_fsblk_t ext4_inode_table(struct super_block *sb, struct ext4_group_desc *bg) { return le32_to_cpu(bg->bg_inode_table_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0); } __u32 ext4_free_group_clusters(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_free_blocks_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0); } __u32 ext4_free_inodes_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_free_inodes_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0); } __u32 ext4_used_dirs_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_used_dirs_count_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0); } __u32 ext4_itable_unused_count(struct super_block *sb, struct ext4_group_desc *bg) { return le16_to_cpu(bg->bg_itable_unused_lo) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 
(__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0); } void ext4_block_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32); } void ext4_inode_bitmap_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32); } void ext4_inode_table_set(struct super_block *sb, struct ext4_group_desc *bg, ext4_fsblk_t blk) { bg->bg_inode_table_lo = cpu_to_le32((u32)blk); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_inode_table_hi = cpu_to_le32(blk >> 32); } void ext4_free_group_clusters_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16); } void ext4_free_inodes_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16); } void ext4_used_dirs_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16); } void ext4_itable_unused_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { bg->bg_itable_unused_lo = cpu_to_le16((__u16)count); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) bg->bg_itable_unused_hi = cpu_to_le16(count >> 16); } static void __save_error_info(struct super_block *sb, const char *func, unsigned int line) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; if (bdev_read_only(sb->s_bdev)) return; es->s_state |= cpu_to_le16(EXT4_ERROR_FS); es->s_last_error_time = cpu_to_le32(get_seconds()); strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func)); es->s_last_error_line = cpu_to_le32(line); if (!es->s_first_error_time) { es->s_first_error_time = es->s_last_error_time; strncpy(es->s_first_error_func, func, sizeof(es->s_first_error_func)); es->s_first_error_line = cpu_to_le32(line); es->s_first_error_ino = es->s_last_error_ino; es->s_first_error_block = es->s_last_error_block; } /* * Start the daily error reporting function if it hasn't been * started already */ if (!es->s_error_count) mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ); le32_add_cpu(&es->s_error_count, 1); } static void save_error_info(struct super_block *sb, const char *func, unsigned int line) { __save_error_info(sb, func, line); ext4_commit_super(sb, 1); } /* * The del_gendisk() function uninitializes the disk-specific data * structures, including the bdi structure, without telling anyone * else. Once this happens, any attempt to call mark_buffer_dirty() * (for example, by ext4_commit_super), will cause a kernel OOPS. * This is a kludge to prevent these oops until we can put in a proper * hook in del_gendisk() to inform the VFS and file system layers. 
*/ static int block_device_ejected(struct super_block *sb) { struct inode *bd_inode = sb->s_bdev->bd_inode; struct backing_dev_info *bdi = inode_to_bdi(bd_inode); return bdi->dev == NULL; } static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) { struct super_block *sb = journal->j_private; struct ext4_sb_info *sbi = EXT4_SB(sb); int error = is_journal_aborted(journal); struct ext4_journal_cb_entry *jce; BUG_ON(txn->t_state == T_FINISHED); spin_lock(&sbi->s_md_lock); while (!list_empty(&txn->t_private_list)) { jce = list_entry(txn->t_private_list.next, struct ext4_journal_cb_entry, jce_list); list_del_init(&jce->jce_list); spin_unlock(&sbi->s_md_lock); jce->jce_func(sb, jce, error); spin_lock(&sbi->s_md_lock); } spin_unlock(&sbi->s_md_lock); } /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * * On ext2, we can store the error state of the filesystem in the * superblock. That is not possible on ext4, because we may have other * write ordering constraints on the superblock which prevent us from * writing it out straight away; and given that the journal is about to * be aborted, we can't rely on the current, or future, transactions to * write out the superblock safely. * * We'll just use the jbd2_journal_abort() error code to record an error in * the journal instead. On recovery, the journal will complain about * that error until we've noted it down and cleared it. */ static void ext4_handle_error(struct super_block *sb) { if (sb->s_flags & MS_RDONLY) return; if (!test_opt(sb, ERRORS_CONT)) { journal_t *journal = EXT4_SB(sb)->s_journal; EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; if (journal) jbd2_journal_abort(journal, -EIO); } if (test_opt(sb, ERRORS_RO)) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); /* * Make sure updated value of ->s_mount_flags will be visible * before ->s_flags update */ smp_wmb(); sb->s_flags |= MS_RDONLY; } if (test_opt(sb, ERRORS_PANIC)) { if (EXT4_SB(sb)->s_journal && !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) return; panic("EXT4-fs (device %s): panic forced after error\n", sb->s_id); } } #define ext4_error_ratelimit(sb) \ ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state), \ "EXT4-fs error") void __ext4_error(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; if (ext4_error_ratelimit(sb)) { va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n", sb->s_id, function, line, current->comm, &vaf); va_end(args); } save_error_info(sb, function, line); ext4_handle_error(sb); } void __ext4_error_inode(struct inode *inode, const char *function, unsigned int line, ext4_fsblk_t block, const char *fmt, ...) 
{ va_list args; struct va_format vaf; struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); es->s_last_error_block = cpu_to_le64(block); if (ext4_error_ratelimit(inode->i_sb)) { va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (block) printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " "inode #%lu: block %llu: comm %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, block, current->comm, &vaf); else printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: " "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, current->comm, &vaf); va_end(args); } save_error_info(inode->i_sb, function, line); ext4_handle_error(inode->i_sb); } void __ext4_error_file(struct file *file, const char *function, unsigned int line, ext4_fsblk_t block, const char *fmt, ...) { va_list args; struct va_format vaf; struct ext4_super_block *es; struct inode *inode = file_inode(file); char pathname[80], *path; es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); if (ext4_error_ratelimit(inode->i_sb)) { path = file_path(file, pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (block) printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: " "block %llu: comm %s: path %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, block, current->comm, path, &vaf); else printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: " "comm %s: path %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, current->comm, path, &vaf); va_end(args); } save_error_info(inode->i_sb, function, line); ext4_handle_error(inode->i_sb); } const char *ext4_decode_error(struct super_block *sb, int errno, char nbuf[16]) { char *errstr = NULL; switch (errno) { case -EFSCORRUPTED: errstr = "Corrupt filesystem"; break; case -EFSBADCRC: errstr = "Filesystem failed CRC"; break; case -EIO: errstr = "IO failure"; break; case -ENOMEM: errstr = "Out of memory"; break; case -EROFS: if (!sb || (EXT4_SB(sb)->s_journal && EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT)) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; break; default: /* If the caller passed in an extra buffer for unknown * errors, textualise them now. Else we just return * NULL. */ if (nbuf) { /* Check for truncated error codes... */ if (snprintf(nbuf, 16, "error %d", -errno) >= 0) errstr = nbuf; } break; } return errstr; } /* __ext4_std_error decodes expected errors from journaling functions * automatically and invokes the appropriate error response. */ void __ext4_std_error(struct super_block *sb, const char *function, unsigned int line, int errno) { char nbuf[16]; const char *errstr; /* Special case: if the error is EROFS, and we're not already * inside a transaction, then there's really no point in logging * an error. */ if (errno == -EROFS && journal_current_handle() == NULL && (sb->s_flags & MS_RDONLY)) return; if (ext4_error_ratelimit(sb)) { errstr = ext4_decode_error(sb, errno, nbuf); printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n", sb->s_id, function, line, errstr); } save_error_info(sb, function, line); ext4_handle_error(sb); } /* * ext4_abort is a much stronger failure handler than ext4_error. The * abort function may be used to deal with unrecoverable failures such * as journal IO errors or ENOMEM at a critical moment in log management. 
* * We unconditionally force the filesystem into an ABORT|READONLY state, * unless the error response on the fs has been set to panic in which * case we take the easy way out and panic immediately. */ void __ext4_abort(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { va_list args; save_error_info(sb, function, line); va_start(args, fmt); printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id, function, line); vprintk(fmt, args); printk("\n"); va_end(args); if ((sb->s_flags & MS_RDONLY) == 0) { ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; /* * Make sure updated value of ->s_mount_flags will be visible * before ->s_flags update */ smp_wmb(); sb->s_flags |= MS_RDONLY; if (EXT4_SB(sb)->s_journal) jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); save_error_info(sb, function, line); } if (test_opt(sb, ERRORS_PANIC)) { if (EXT4_SB(sb)->s_journal && !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) return; panic("EXT4-fs panic from previous error\n"); } } void __ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { struct va_format vaf; va_list args; if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs")) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } #define ext4_warning_ratelimit(sb) \ ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \ "EXT4-fs warning") void __ext4_warning(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; if (!ext4_warning_ratelimit(sb)) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n", sb->s_id, function, line, &vaf); va_end(args); } void __ext4_warning_inode(const struct inode *inode, const char *function, unsigned int line, const char *fmt, ...) { struct va_format vaf; va_list args; if (!ext4_warning_ratelimit(inode->i_sb)) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: " "inode #%lu: comm %s: %pV\n", inode->i_sb->s_id, function, line, inode->i_ino, current->comm, &vaf); va_end(args); } void __ext4_grp_locked_error(const char *function, unsigned int line, struct super_block *sb, ext4_group_t grp, unsigned long ino, ext4_fsblk_t block, const char *fmt, ...) __releases(bitlock) __acquires(bitlock) { struct va_format vaf; va_list args; struct ext4_super_block *es = EXT4_SB(sb)->s_es; es->s_last_error_ino = cpu_to_le32(ino); es->s_last_error_block = cpu_to_le64(block); __save_error_info(sb, function, line); if (ext4_error_ratelimit(sb)) { va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ", sb->s_id, function, line, grp); if (ino) printk(KERN_CONT "inode %lu: ", ino); if (block) printk(KERN_CONT "block %llu:", (unsigned long long) block); printk(KERN_CONT "%pV\n", &vaf); va_end(args); } if (test_opt(sb, ERRORS_CONT)) { ext4_commit_super(sb, 0); return; } ext4_unlock_group(sb, grp); ext4_handle_error(sb); /* * We only get here in the ERRORS_RO case; relocking the group * may be dangerous, but nothing bad will happen since the * filesystem will have already been marked read/only and the * journal has been aborted. 
We return 1 as a hint to callers * who might what to use the return value from * ext4_grp_locked_error() to distinguish between the * ERRORS_CONT and ERRORS_RO case, and perhaps return more * aggressively from the ext4 function in question, with a * more appropriate error code. */ ext4_lock_group(sb, grp); return; } void ext4_update_dynamic_rev(struct super_block *sb) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV) return; ext4_warning(sb, "updating to rev %d because of new feature flag, " "running e2fsck is recommended", EXT4_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. */ } /* * Open the external journal device */ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; } /* * Release the journal device */ static void ext4_blkdev_put(struct block_device *bdev) { blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static void ext4_blkdev_remove(struct ext4_sb_info *sbi) { struct block_device *bdev; bdev = sbi->journal_bdev; if (bdev) { ext4_blkdev_put(bdev); sbi->journal_bdev = NULL; } } static inline struct inode *orphan_list_entry(struct list_head *l) { return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode; } static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi) { struct list_head *l; ext4_msg(sb, KERN_ERR, "sb orphan head is %d", le32_to_cpu(sbi->s_es->s_last_orphan)); printk(KERN_ERR "sb_info orphan list:\n"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); printk(KERN_ERR " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, NEXT_ORPHAN(inode)); } } static void ext4_put_super(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int i, err; ext4_unregister_li_request(sb); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); flush_workqueue(sbi->rsv_conversion_wq); destroy_workqueue(sbi->rsv_conversion_wq); if (sbi->s_journal) { err = jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext4_abort(sb, "Couldn't clean up the journal"); } ext4_unregister_sysfs(sb); ext4_es_unregister_shrinker(sbi); del_timer_sync(&sbi->s_err_report); ext4_release_system_zone(sb); ext4_mb_release(sb); ext4_ext_release(sb); ext4_xattr_put_super(sb); if (!(sb->s_flags & MS_RDONLY)) { ext4_clear_feature_journal_needs_recovery(sb); es->s_state = cpu_to_le16(sbi->s_mount_state); } if (!(sb->s_flags & MS_RDONLY)) ext4_commit_super(sb, 1); for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); kvfree(sbi->s_group_desc); kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); 
	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
	brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
	for (i = 0; i < EXT4_MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * be hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	if (sbi->s_mb_cache) {
		ext4_xattr_destroy_cache(sbi->s_mb_cache);
		sbi->s_mb_cache = NULL;
	}
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
}

static struct kmem_cache *ext4_inode_cachep;

/*
 * Called inside transaction, so use GFP_NOFS
 */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->vfs_inode.i_version = 1;
	spin_lock_init(&ei->i_raw_lock);
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	spin_lock_init(&ei->i_prealloc_lock);
	ext4_es_init_tree(&ei->i_es_tree);
	rwlock_init(&ei->i_es_lock);
	INIT_LIST_HEAD(&ei->i_es_list);
	ei->i_es_all_nr = 0;
	ei->i_es_shk_nr = 0;
	ei->i_es_shrink_lblk = 0;
	ei->i_reserved_data_blocks = 0;
	ei->i_reserved_meta_blocks = 0;
	ei->i_allocated_meta_blocks = 0;
	ei->i_da_metadata_calc_len = 0;
	ei->i_da_metadata_calc_last_lblock = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_ioend_count, 0);
	atomic_set(&ei->i_unwritten, 0);
	INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	ei->i_crypt_info = NULL;
#endif
	return &ei->vfs_inode;
}

static int ext4_drop_inode(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}

static void ext4_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}

static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext4_i_callback);
}

static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
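	/*
	 * The inode cache is reclaimable (SLAB_RECLAIM_ACCOUNT) and its
	 * pages may be spread across cpuset memory nodes (SLAB_MEM_SPREAD).
	 * init_once() runs only when a fresh slab page is allocated, so
	 * everything it initializes (orphan list head, rwsems, the VFS
	 * inode) must be back in that pristine state whenever an object
	 * is freed back to the cache.
	 */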
ext4_inode_cachep = kmem_cache_create("ext4_inode_cache", sizeof(struct ext4_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (ext4_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(ext4_inode_cachep); } void ext4_clear_inode(struct inode *inode) { invalidate_inode_buffers(inode); clear_inode(inode); dquot_drop(inode); ext4_discard_preallocations(inode); ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); if (EXT4_I(inode)->jinode) { jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), EXT4_I(inode)->jinode); jbd2_free_inode(EXT4_I(inode)->jinode); EXT4_I(inode)->jinode = NULL; } #ifdef CONFIG_EXT4_FS_ENCRYPTION if (EXT4_I(inode)->i_crypt_info) ext4_free_encryption_info(inode, EXT4_I(inode)->i_crypt_info); #endif } static struct inode *ext4_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) return ERR_PTR(-ESTALE); if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) return ERR_PTR(-ESTALE); /* iget isn't really right if the inode is currently unallocated!! * * ext4_read_inode will return a bad_inode if the inode had been * deleted, so we should be safe. * * Currently we don't know the generation for parent directory, so * a generation of 0 means "accept any" */ inode = ext4_iget_normal(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ext4_nfs_get_inode); } static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ext4_nfs_get_inode); } /* * Try to release metadata pages (indirect blocks, directories) which are * mapped via the block device. Since these pages could have journal heads * which would prevent try_to_free_buffers() from freeing them, we must use * jbd2 layer's try_to_free_buffers() function to release them. */ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_t wait) { journal_t *journal = EXT4_SB(sb)->s_journal; WARN_ON(PageChecked(page)); if (!page_has_buffers(page)) return 0; if (journal) return jbd2_journal_try_to_free_buffers(journal, page, wait & ~__GFP_DIRECT_RECLAIM); return try_to_free_buffers(page); } #ifdef CONFIG_QUOTA #define QTYPE2NAME(t) ((t) == USRQUOTA ? 
"user" : "group") #define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) static int ext4_write_dquot(struct dquot *dquot); static int ext4_acquire_dquot(struct dquot *dquot); static int ext4_release_dquot(struct dquot *dquot); static int ext4_mark_dquot_dirty(struct dquot *dquot); static int ext4_write_info(struct super_block *sb, int type); static int ext4_quota_on(struct super_block *sb, int type, int format_id, struct path *path); static int ext4_quota_off(struct super_block *sb, int type); static int ext4_quota_on_mount(struct super_block *sb, int type); static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags); static int ext4_enable_quotas(struct super_block *sb); static struct dquot **ext4_get_dquots(struct inode *inode) { return EXT4_I(inode)->i_dquot; } static const struct dquot_operations ext4_quota_operations = { .get_reserved_space = ext4_get_reserved_space, .write_dquot = ext4_write_dquot, .acquire_dquot = ext4_acquire_dquot, .release_dquot = ext4_release_dquot, .mark_dirty = ext4_mark_dquot_dirty, .write_info = ext4_write_info, .alloc_dquot = dquot_alloc, .destroy_dquot = dquot_destroy, }; static const struct quotactl_ops ext4_qctl_operations = { .quota_on = ext4_quota_on, .quota_off = ext4_quota_off, .quota_sync = dquot_quota_sync, .get_state = dquot_get_state, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk }; #endif static const struct super_operations ext4_sops = { .alloc_inode = ext4_alloc_inode, .destroy_inode = ext4_destroy_inode, .write_inode = ext4_write_inode, .dirty_inode = ext4_dirty_inode, .drop_inode = ext4_drop_inode, .evict_inode = ext4_evict_inode, .put_super = ext4_put_super, .sync_fs = ext4_sync_fs, .freeze_fs = ext4_freeze, .unfreeze_fs = ext4_unfreeze, .statfs = ext4_statfs, .remount_fs = ext4_remount, .show_options = ext4_show_options, #ifdef CONFIG_QUOTA .quota_read = ext4_quota_read, .quota_write = ext4_quota_write, .get_dquots = ext4_get_dquots, #endif .bdev_try_to_free_page = bdev_try_to_free_page, }; static const struct export_operations ext4_export_ops = { .fh_to_dentry = ext4_fh_to_dentry, .fh_to_parent = ext4_fh_to_parent, .get_parent = ext4_get_parent, }; enum { Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_nouid32, Opt_debug, Opt_removed, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev, Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax, Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, Opt_lazytime, Opt_nolazytime, Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, Opt_inode_readahead_blks, Opt_journal_ioprio, Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, Opt_max_dir_size_kb, 
Opt_nojournal_checksum, }; static const match_table_t tokens = { {Opt_bsd_df, "bsddf"}, {Opt_minix_df, "minixdf"}, {Opt_grpid, "grpid"}, {Opt_grpid, "bsdgroups"}, {Opt_nogrpid, "nogrpid"}, {Opt_nogrpid, "sysvgroups"}, {Opt_resgid, "resgid=%u"}, {Opt_resuid, "resuid=%u"}, {Opt_sb, "sb=%u"}, {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_nouid32, "nouid32"}, {Opt_debug, "debug"}, {Opt_removed, "oldalloc"}, {Opt_removed, "orlov"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_noload, "norecovery"}, {Opt_noload, "noload"}, {Opt_removed, "nobh"}, {Opt_removed, "bh"}, {Opt_commit, "commit=%u"}, {Opt_min_batch_time, "min_batch_time=%u"}, {Opt_max_batch_time, "max_batch_time=%u"}, {Opt_journal_dev, "journal_dev=%u"}, {Opt_journal_path, "journal_path=%s"}, {Opt_journal_checksum, "journal_checksum"}, {Opt_nojournal_checksum, "nojournal_checksum"}, {Opt_journal_async_commit, "journal_async_commit"}, {Opt_abort, "abort"}, {Opt_data_journal, "data=journal"}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, {Opt_data_err_abort, "data_err=abort"}, {Opt_data_err_ignore, "data_err=ignore"}, {Opt_offusrjquota, "usrjquota="}, {Opt_usrjquota, "usrjquota=%s"}, {Opt_offgrpjquota, "grpjquota="}, {Opt_grpjquota, "grpjquota=%s"}, {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, {Opt_grpquota, "grpquota"}, {Opt_noquota, "noquota"}, {Opt_quota, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_barrier, "barrier=%u"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_i_version, "i_version"}, {Opt_dax, "dax"}, {Opt_stripe, "stripe=%u"}, {Opt_delalloc, "delalloc"}, {Opt_lazytime, "lazytime"}, {Opt_nolazytime, "nolazytime"}, {Opt_nodelalloc, "nodelalloc"}, {Opt_removed, "mblk_io_submit"}, {Opt_removed, "nomblk_io_submit"}, {Opt_block_validity, "block_validity"}, {Opt_noblock_validity, "noblock_validity"}, {Opt_inode_readahead_blks, "inode_readahead_blks=%u"}, {Opt_journal_ioprio, "journal_ioprio=%u"}, {Opt_auto_da_alloc, "auto_da_alloc=%u"}, {Opt_auto_da_alloc, "auto_da_alloc"}, {Opt_noauto_da_alloc, "noauto_da_alloc"}, {Opt_dioread_nolock, "dioread_nolock"}, {Opt_dioread_lock, "dioread_lock"}, {Opt_discard, "discard"}, {Opt_nodiscard, "nodiscard"}, {Opt_init_itable, "init_itable=%u"}, {Opt_init_itable, "init_itable"}, {Opt_noinit_itable, "noinit_itable"}, {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, {Opt_removed, "check=none"}, /* mount option from ext2/3 */ {Opt_removed, "nocheck"}, /* mount option from ext2/3 */ {Opt_removed, "reservation"}, /* mount option from ext2/3 */ {Opt_removed, "noreservation"}, /* mount option from ext2/3 */ {Opt_removed, "journal=%u"}, /* mount option from ext2/3 */ {Opt_err, NULL}, }; static ext4_fsblk_t get_sb_block(void **data) { ext4_fsblk_t sb_block; char *options = (char *) *data; if (!options || strncmp(options, "sb=", 3) != 0) return 1; /* Default location */ options += 3; /* TODO: use simple_strtoll with >32bit ext4 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n", (char *) *data); return 1; } if (*options == ',') options++; *data = (void *) options; return sb_block; } #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n" "Contact 
linux-ext4@vger.kernel.org if you think we should keep it.\n"; #ifdef CONFIG_QUOTA static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) { struct ext4_sb_info *sbi = EXT4_SB(sb); char *qname; int ret = -1; if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { ext4_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return -1; } if (ext4_has_feature_quota(sb)) { ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options " "when QUOTA feature is enabled"); return -1; } qname = match_strdup(args); if (!qname) { ext4_msg(sb, KERN_ERR, "Not enough memory for storing quotafile name"); return -1; } if (sbi->s_qf_names[qtype]) { if (strcmp(sbi->s_qf_names[qtype], qname) == 0) ret = 1; else ext4_msg(sb, KERN_ERR, "%s quota file already specified", QTYPE2NAME(qtype)); goto errout; } if (strchr(qname, '/')) { ext4_msg(sb, KERN_ERR, "quotafile must be on filesystem root"); goto errout; } sbi->s_qf_names[qtype] = qname; set_opt(sb, QUOTA); return 1; errout: kfree(qname); return ret; } static int clear_qf_name(struct super_block *sb, int qtype) { struct ext4_sb_info *sbi = EXT4_SB(sb); if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return -1; } kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; return 1; } #endif #define MOPT_SET 0x0001 #define MOPT_CLEAR 0x0002 #define MOPT_NOSUPPORT 0x0004 #define MOPT_EXPLICIT 0x0008 #define MOPT_CLEAR_ERR 0x0010 #define MOPT_GTE0 0x0020 #ifdef CONFIG_QUOTA #define MOPT_Q 0 #define MOPT_QFMT 0x0040 #else #define MOPT_Q MOPT_NOSUPPORT #define MOPT_QFMT MOPT_NOSUPPORT #endif #define MOPT_DATAJ 0x0080 #define MOPT_NO_EXT2 0x0100 #define MOPT_NO_EXT3 0x0200 #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3) #define MOPT_STRING 0x0400 static const struct mount_opts { int token; int mount_opt; int flags; } ext4_mount_opts[] = { {Opt_minix_df, EXT4_MOUNT_MINIX_DF, MOPT_SET}, {Opt_bsd_df, EXT4_MOUNT_MINIX_DF, MOPT_CLEAR}, {Opt_grpid, EXT4_MOUNT_GRPID, MOPT_SET}, {Opt_nogrpid, EXT4_MOUNT_GRPID, MOPT_CLEAR}, {Opt_block_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_SET}, {Opt_noblock_validity, EXT4_MOUNT_BLOCK_VALIDITY, MOPT_CLEAR}, {Opt_dioread_nolock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_EXT4_ONLY | MOPT_SET}, {Opt_dioread_lock, EXT4_MOUNT_DIOREAD_NOLOCK, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_discard, EXT4_MOUNT_DISCARD, MOPT_SET}, {Opt_nodiscard, EXT4_MOUNT_DISCARD, MOPT_CLEAR}, {Opt_delalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_CLEAR}, {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | EXT4_MOUNT_JOURNAL_CHECKSUM), MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, {Opt_noload, EXT4_MOUNT_NOLOAD, MOPT_NO_EXT2 | MOPT_SET}, {Opt_err_panic, EXT4_MOUNT_ERRORS_PANIC, MOPT_SET | MOPT_CLEAR_ERR}, {Opt_err_ro, EXT4_MOUNT_ERRORS_RO, MOPT_SET | MOPT_CLEAR_ERR}, {Opt_err_cont, EXT4_MOUNT_ERRORS_CONT, MOPT_SET | MOPT_CLEAR_ERR}, {Opt_data_err_abort, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2 | MOPT_SET}, {Opt_data_err_ignore, EXT4_MOUNT_DATA_ERR_ABORT, MOPT_NO_EXT2 | MOPT_CLEAR}, {Opt_barrier, EXT4_MOUNT_BARRIER, MOPT_SET}, {Opt_nobarrier, EXT4_MOUNT_BARRIER, MOPT_CLEAR}, {Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET}, {Opt_auto_da_alloc, 
EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR}, {Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR}, {Opt_commit, 0, MOPT_GTE0}, {Opt_max_batch_time, 0, MOPT_GTE0}, {Opt_min_batch_time, 0, MOPT_GTE0}, {Opt_inode_readahead_blks, 0, MOPT_GTE0}, {Opt_init_itable, 0, MOPT_GTE0}, {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET}, {Opt_stripe, 0, MOPT_GTE0}, {Opt_resuid, 0, MOPT_GTE0}, {Opt_resgid, 0, MOPT_GTE0}, {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0}, {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING}, {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0}, {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET}, {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR}, #ifdef CONFIG_EXT4_FS_POSIX_ACL {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET}, {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR}, #else {Opt_acl, 0, MOPT_NOSUPPORT}, {Opt_noacl, 0, MOPT_NOSUPPORT}, #endif {Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET}, {Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET}, {Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, {Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q}, {Opt_grpquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_GRPQUOTA, MOPT_SET | MOPT_Q}, {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA | EXT4_MOUNT_GRPQUOTA), MOPT_CLEAR | MOPT_Q}, {Opt_usrjquota, 0, MOPT_Q}, {Opt_grpjquota, 0, MOPT_Q}, {Opt_offusrjquota, 0, MOPT_Q}, {Opt_offgrpjquota, 0, MOPT_Q}, {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT}, {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT}, {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT}, {Opt_max_dir_size_kb, 0, MOPT_GTE0}, {Opt_test_dummy_encryption, 0, MOPT_GTE0}, {Opt_err, 0, 0} }; static int handle_mount_opt(struct super_block *sb, char *opt, int token, substring_t *args, unsigned long *journal_devnum, unsigned int *journal_ioprio, int is_remount) { struct ext4_sb_info *sbi = EXT4_SB(sb); const struct mount_opts *m; kuid_t uid; kgid_t gid; int arg = 0; #ifdef CONFIG_QUOTA if (token == Opt_usrjquota) return set_qf_name(sb, USRQUOTA, &args[0]); else if (token == Opt_grpjquota) return set_qf_name(sb, GRPQUOTA, &args[0]); else if (token == Opt_offusrjquota) return clear_qf_name(sb, USRQUOTA); else if (token == Opt_offgrpjquota) return clear_qf_name(sb, GRPQUOTA); #endif switch (token) { case Opt_noacl: case Opt_nouser_xattr: ext4_msg(sb, KERN_WARNING, deprecated_msg, opt, "3.5"); break; case Opt_sb: return 1; /* handled by get_sb_block() */ case Opt_removed: ext4_msg(sb, KERN_WARNING, "Ignoring removed %s option", opt); return 1; case Opt_abort: sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; return 1; case Opt_i_version: sb->s_flags |= MS_I_VERSION; return 1; case Opt_lazytime: sb->s_flags |= MS_LAZYTIME; return 1; case Opt_nolazytime: sb->s_flags &= ~MS_LAZYTIME; return 1; } for (m = ext4_mount_opts; m->token != Opt_err; m++) if (token == m->token) break; if (m->token == Opt_err) { ext4_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" " "or missing value", opt); return -1; } if ((m->flags & MOPT_NO_EXT2) && IS_EXT2_SB(sb)) { ext4_msg(sb, KERN_ERR, "Mount option \"%s\" incompatible with ext2", opt); return -1; } if ((m->flags & MOPT_NO_EXT3) && IS_EXT3_SB(sb)) { ext4_msg(sb, KERN_ERR, "Mount option \"%s\" incompatible with ext3", opt); return -1; } if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg)) return -1; if (args->from && (m->flags & MOPT_GTE0) && (arg < 
0)) return -1; if (m->flags & MOPT_EXPLICIT) { if (m->mount_opt & EXT4_MOUNT_DELALLOC) { set_opt2(sb, EXPLICIT_DELALLOC); } else if (m->mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) { set_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM); } else return -1; } if (m->flags & MOPT_CLEAR_ERR) clear_opt(sb, ERRORS_MASK); if (token == Opt_noquota && sb_any_quota_loaded(sb)) { ext4_msg(sb, KERN_ERR, "Cannot change quota " "options when quota turned on"); return -1; } if (m->flags & MOPT_NOSUPPORT) { ext4_msg(sb, KERN_ERR, "%s option not supported", opt); } else if (token == Opt_commit) { if (arg == 0) arg = JBD2_DEFAULT_MAX_COMMIT_AGE; sbi->s_commit_interval = HZ * arg; } else if (token == Opt_max_batch_time) { sbi->s_max_batch_time = arg; } else if (token == Opt_min_batch_time) { sbi->s_min_batch_time = arg; } else if (token == Opt_inode_readahead_blks) { if (arg && (arg > (1 << 30) || !is_power_of_2(arg))) { ext4_msg(sb, KERN_ERR, "EXT4-fs: inode_readahead_blks must be " "0 or a power of 2 smaller than 2^31"); return -1; } sbi->s_inode_readahead_blks = arg; } else if (token == Opt_init_itable) { set_opt(sb, INIT_INODE_TABLE); if (!args->from) arg = EXT4_DEF_LI_WAIT_MULT; sbi->s_li_wait_mult = arg; } else if (token == Opt_max_dir_size_kb) { sbi->s_max_dir_size_kb = arg; } else if (token == Opt_stripe) { sbi->s_stripe = arg; } else if (token == Opt_resuid) { uid = make_kuid(current_user_ns(), arg); if (!uid_valid(uid)) { ext4_msg(sb, KERN_ERR, "Invalid uid value %d", arg); return -1; } sbi->s_resuid = uid; } else if (token == Opt_resgid) { gid = make_kgid(current_user_ns(), arg); if (!gid_valid(gid)) { ext4_msg(sb, KERN_ERR, "Invalid gid value %d", arg); return -1; } sbi->s_resgid = gid; } else if (token == Opt_journal_dev) { if (is_remount) { ext4_msg(sb, KERN_ERR, "Cannot specify journal on remount"); return -1; } *journal_devnum = arg; } else if (token == Opt_journal_path) { char *journal_path; struct inode *journal_inode; struct path path; int error; if (is_remount) { ext4_msg(sb, KERN_ERR, "Cannot specify journal on remount"); return -1; } journal_path = match_strdup(&args[0]); if (!journal_path) { ext4_msg(sb, KERN_ERR, "error: could not dup " "journal device string"); return -1; } error = kern_path(journal_path, LOOKUP_FOLLOW, &path); if (error) { ext4_msg(sb, KERN_ERR, "error: could not find " "journal device path: error %d", error); kfree(journal_path); return -1; } journal_inode = d_inode(path.dentry); if (!S_ISBLK(journal_inode->i_mode)) { ext4_msg(sb, KERN_ERR, "error: journal path %s " "is not a block device", journal_path); path_put(&path); kfree(journal_path); return -1; } *journal_devnum = new_encode_dev(journal_inode->i_rdev); path_put(&path); kfree(journal_path); } else if (token == Opt_journal_ioprio) { if (arg > 7) { ext4_msg(sb, KERN_ERR, "Invalid journal IO priority" " (must be 0-7)"); return -1; } *journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg); } else if (token == Opt_test_dummy_encryption) { #ifdef CONFIG_EXT4_FS_ENCRYPTION sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION; ext4_msg(sb, KERN_WARNING, "Test dummy encryption mode enabled"); #else ext4_msg(sb, KERN_WARNING, "Test dummy encryption mount option ignored"); #endif } else if (m->flags & MOPT_DATAJ) { if (is_remount) { if (!sbi->s_journal) ext4_msg(sb, KERN_WARNING, "Remounting file system with no journal so ignoring journalled data option"); else if (test_opt(sb, DATA_FLAGS) != m->mount_opt) { ext4_msg(sb, KERN_ERR, "Cannot change data mode on remount"); return -1; } } else { clear_opt(sb, DATA_FLAGS); sbi->s_mount_opt 
|= m->mount_opt; } #ifdef CONFIG_QUOTA } else if (m->flags & MOPT_QFMT) { if (sb_any_quota_loaded(sb) && sbi->s_jquota_fmt != m->mount_opt) { ext4_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return -1; } if (ext4_has_feature_quota(sb)) { ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options " "when QUOTA feature is enabled"); return -1; } sbi->s_jquota_fmt = m->mount_opt; #endif } else if (token == Opt_dax) { #ifdef CONFIG_FS_DAX ext4_msg(sb, KERN_WARNING, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); sbi->s_mount_opt |= m->mount_opt; #else ext4_msg(sb, KERN_INFO, "dax option not supported"); return -1; #endif } else { if (!args->from) arg = 1; if (m->flags & MOPT_CLEAR) arg = !arg; else if (unlikely(!(m->flags & MOPT_SET))) { ext4_msg(sb, KERN_WARNING, "buggy handling of option %s", opt); WARN_ON(1); return -1; } if (arg != 0) sbi->s_mount_opt |= m->mount_opt; else sbi->s_mount_opt &= ~m->mount_opt; } return 1; } static int parse_options(char *options, struct super_block *sb, unsigned long *journal_devnum, unsigned int *journal_ioprio, int is_remount) { struct ext4_sb_info *sbi = EXT4_SB(sb); char *p; substring_t args[MAX_OPT_ARGS]; int token; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; /* * Initialize args struct so we know whether arg was * found; some options take optional arguments. */ args[0].to = args[0].from = NULL; token = match_token(p, tokens, args); if (handle_mount_opt(sb, p, token, args, journal_devnum, journal_ioprio, is_remount) < 0) return 0; } #ifdef CONFIG_QUOTA if (ext4_has_feature_quota(sb) && (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) { ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA " "feature is enabled"); return 0; } if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) clear_opt(sb, USRQUOTA); if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) clear_opt(sb, GRPQUOTA); if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { ext4_msg(sb, KERN_ERR, "old and new quota " "format mixing"); return 0; } if (!sbi->s_jquota_fmt) { ext4_msg(sb, KERN_ERR, "journaled quota format " "not specified"); return 0; } } #endif if (test_opt(sb, DIOREAD_NOLOCK)) { int blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); if (blocksize < PAGE_CACHE_SIZE) { ext4_msg(sb, KERN_ERR, "can't mount with " "dioread_nolock if block size != PAGE_SIZE"); return 0; } } if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA && test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit " "in data=ordered mode"); return 0; } return 1; } static inline void ext4_show_quota_options(struct seq_file *seq, struct super_block *sb) { #if defined(CONFIG_QUOTA) struct ext4_sb_info *sbi = EXT4_SB(sb); if (sbi->s_jquota_fmt) { char *fmtname = ""; switch (sbi->s_jquota_fmt) { case QFMT_VFS_OLD: fmtname = "vfsold"; break; case QFMT_VFS_V0: fmtname = "vfsv0"; break; case QFMT_VFS_V1: fmtname = "vfsv1"; break; } seq_printf(seq, ",jqfmt=%s", fmtname); } if (sbi->s_qf_names[USRQUOTA]) seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]); if (sbi->s_qf_names[GRPQUOTA]) seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]); #endif } static const char *token2str(int token) { const struct match_token *t; for (t = tokens; t->token != Opt_err; t++) if (t->token == token && !strchr(t->pattern, '=')) break; return t->pattern; } /* * Show an option if * - it's set 
to a non-default value OR * - if the per-sb default is different from the global default */ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, int nodefs) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int def_errors, def_mount_opt = nodefs ? 0 : sbi->s_def_mount_opt; const struct mount_opts *m; char sep = nodefs ? '\n' : ','; #define SEQ_OPTS_PUTS(str) seq_printf(seq, "%c" str, sep) #define SEQ_OPTS_PRINT(str, arg) seq_printf(seq, "%c" str, sep, arg) if (sbi->s_sb_block != 1) SEQ_OPTS_PRINT("sb=%llu", sbi->s_sb_block); for (m = ext4_mount_opts; m->token != Opt_err; m++) { int want_set = m->flags & MOPT_SET; if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || (m->flags & MOPT_CLEAR_ERR)) continue; if (!(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt))) continue; /* skip if same as the default */ if ((want_set && (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) || (!want_set && (sbi->s_mount_opt & m->mount_opt))) continue; /* select Opt_noFoo vs Opt_Foo */ SEQ_OPTS_PRINT("%s", token2str(m->token)); } if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) || le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) SEQ_OPTS_PRINT("resuid=%u", from_kuid_munged(&init_user_ns, sbi->s_resuid)); if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) || le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) SEQ_OPTS_PRINT("resgid=%u", from_kgid_munged(&init_user_ns, sbi->s_resgid)); def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors); if (test_opt(sb, ERRORS_RO) && def_errors != EXT4_ERRORS_RO) SEQ_OPTS_PUTS("errors=remount-ro"); if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE) SEQ_OPTS_PUTS("errors=continue"); if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC) SEQ_OPTS_PUTS("errors=panic"); if (nodefs || sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) SEQ_OPTS_PRINT("commit=%lu", sbi->s_commit_interval / HZ); if (nodefs || sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); if (sb->s_flags & MS_I_VERSION) SEQ_OPTS_PUTS("i_version"); if (nodefs || sbi->s_stripe) SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ def_mount_opt)) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) SEQ_OPTS_PUTS("data=journal"); else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) SEQ_OPTS_PUTS("data=ordered"); else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) SEQ_OPTS_PUTS("data=writeback"); } if (nodefs || sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS) SEQ_OPTS_PRINT("inode_readahead_blks=%u", sbi->s_inode_readahead_blks); if (nodefs || (test_opt(sb, INIT_INODE_TABLE) && (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT))) SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult); if (nodefs || sbi->s_max_dir_size_kb) SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); ext4_show_quota_options(seq, sb); return 0; } static int ext4_show_options(struct seq_file *seq, struct dentry *root) { return _ext4_show_options(seq, root->d_sb, 0); } int ext4_seq_options_show(struct seq_file *seq, void *offset) { struct super_block *sb = seq->private; int rc; seq_puts(seq, (sb->s_flags & MS_RDONLY) ? 
"ro" : "rw"); rc = _ext4_show_options(seq, sb, 1); seq_puts(seq, "\n"); return rc; } static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, int read_only) { struct ext4_sb_info *sbi = EXT4_SB(sb); int res = 0; if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) { ext4_msg(sb, KERN_ERR, "revision level too high, " "forcing read-only mode"); res = MS_RDONLY; } if (read_only) goto done; if (!(sbi->s_mount_state & EXT4_VALID_FS)) ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, " "running e2fsck is recommended"); else if (sbi->s_mount_state & EXT4_ERROR_FS) ext4_msg(sb, KERN_WARNING, "warning: mounting fs with errors, " "running e2fsck is recommended"); else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && le16_to_cpu(es->s_mnt_count) >= (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) ext4_msg(sb, KERN_WARNING, "warning: maximal mount count reached, " "running e2fsck is recommended"); else if (le32_to_cpu(es->s_checkinterval) && (le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds())) ext4_msg(sb, KERN_WARNING, "warning: checktime reached, " "running e2fsck is recommended"); if (!sbi->s_journal) es->s_state &= cpu_to_le16(~EXT4_VALID_FS); if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); es->s_mtime = cpu_to_le32(get_seconds()); ext4_update_dynamic_rev(sb); if (sbi->s_journal) ext4_set_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); done: if (test_opt(sb, DEBUG)) printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, " "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n", sb->s_blocksize, sbi->s_groups_count, EXT4_BLOCKS_PER_GROUP(sb), EXT4_INODES_PER_GROUP(sb), sbi->s_mount_opt, sbi->s_mount_opt2); cleancache_init_fs(sb); return res; } int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct flex_groups *new_groups; int size; if (!sbi->s_log_groups_per_flex) return 0; size = ext4_flex_group(sbi, ngroup - 1) + 1; if (size <= sbi->s_flex_groups_allocated) return 0; size = roundup_pow_of_two(size * sizeof(struct flex_groups)); new_groups = ext4_kvzalloc(size, GFP_KERNEL); if (!new_groups) { ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups", size / (int) sizeof(struct flex_groups)); return -ENOMEM; } if (sbi->s_flex_groups) { memcpy(new_groups, sbi->s_flex_groups, (sbi->s_flex_groups_allocated * sizeof(struct flex_groups))); kvfree(sbi->s_flex_groups); } sbi->s_flex_groups = new_groups; sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups); return 0; } static int ext4_fill_flex_info(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp = NULL; ext4_group_t flex_group; int i, err; sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) { sbi->s_log_groups_per_flex = 0; return 1; } err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count); if (err) goto failed; for (i = 0; i < sbi->s_groups_count; i++) { gdp = ext4_get_group_desc(sb, i, NULL); flex_group = ext4_flex_group(sbi, i); atomic_add(ext4_free_inodes_count(sb, gdp), &sbi->s_flex_groups[flex_group].free_inodes); atomic64_add(ext4_free_group_clusters(sb, gdp), &sbi->s_flex_groups[flex_group].free_clusters); atomic_add(ext4_used_dirs_count(sb, gdp), &sbi->s_flex_groups[flex_group].used_dirs); } return 1; failed: return 0; } static __le16 ext4_group_desc_csum(struct super_block *sb, 
__u32 block_group, struct ext4_group_desc *gdp) { int offset; __u16 crc = 0; __le32 le_group = cpu_to_le32(block_group); struct ext4_sb_info *sbi = EXT4_SB(sb); if (ext4_has_metadata_csum(sbi->s_sb)) { /* Use new metadata_csum algorithm */ __le16 save_csum; __u32 csum32; save_csum = gdp->bg_checksum; gdp->bg_checksum = 0; csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group, sizeof(le_group)); csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, sbi->s_desc_size); gdp->bg_checksum = save_csum; crc = csum32 & 0xFFFF; goto out; } /* old crc16 code */ if (!ext4_has_feature_gdt_csum(sb)) return 0; offset = offsetof(struct ext4_group_desc, bg_checksum); crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); crc = crc16(crc, (__u8 *)gdp, offset); offset += sizeof(gdp->bg_checksum); /* skip checksum */ /* for checksum of struct ext4_group_desc do the rest...*/ if (ext4_has_feature_64bit(sb) && offset < le16_to_cpu(sbi->s_es->s_desc_size)) crc = crc16(crc, (__u8 *)gdp + offset, le16_to_cpu(sbi->s_es->s_desc_size) - offset); out: return cpu_to_le16(crc); } int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group, struct ext4_group_desc *gdp) { if (ext4_has_group_desc_csum(sb) && (gdp->bg_checksum != ext4_group_desc_csum(sb, block_group, gdp))) return 0; return 1; } void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, struct ext4_group_desc *gdp) { if (!ext4_has_group_desc_csum(sb)) return; gdp->bg_checksum = ext4_group_desc_csum(sb, block_group, gdp); } /* Called at mount-time, super-block is locked */ static int ext4_check_descriptors(struct super_block *sb, ext4_group_t *first_not_zeroed) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); ext4_fsblk_t last_block; ext4_fsblk_t block_bitmap; ext4_fsblk_t inode_bitmap; ext4_fsblk_t inode_table; int flexbg_flag = 0; ext4_group_t i, grp = sbi->s_groups_count; if (ext4_has_feature_flex_bg(sb)) flexbg_flag = 1; ext4_debug("Checking group descriptors"); for (i = 0; i < sbi->s_groups_count; i++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); if (i == sbi->s_groups_count - 1 || flexbg_flag) last_block = ext4_blocks_count(sbi->s_es) - 1; else last_block = first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1); if ((grp == sbi->s_groups_count) && !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) grp = i; block_bitmap = ext4_block_bitmap(sb, gdp); if (block_bitmap < first_block || block_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Block bitmap for group %u not in group " "(block %llu)!", i, block_bitmap); return 0; } inode_bitmap = ext4_inode_bitmap(sb, gdp); if (inode_bitmap < first_block || inode_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode bitmap for group %u not in group " "(block %llu)!", i, inode_bitmap); return 0; } inode_table = ext4_inode_table(sb, gdp); if (inode_table < first_block || inode_table + sbi->s_itb_per_group - 1 > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode table for group %u not in group " "(block %llu)!", i, inode_table); return 0; } ext4_lock_group(sb, i); if (!ext4_group_desc_csum_verify(sb, i, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Checksum for group %u failed (%u!=%u)", i, le16_to_cpu(ext4_group_desc_csum(sb, i, gdp)), le16_to_cpu(gdp->bg_checksum)); if (!(sb->s_flags & MS_RDONLY)) { ext4_unlock_group(sb, i); return 0; } } ext4_unlock_group(sb, 
i); if (!flexbg_flag) first_block += EXT4_BLOCKS_PER_GROUP(sb); } if (NULL != first_not_zeroed) *first_not_zeroed = grp; return 1; } /* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at * the superblock) which were deleted from all directories, but held open by * a process at the time of a crash. We walk the list and try to delete these * inodes at recovery time (only with a read-write filesystem). * * In order to keep the orphan inode chain consistent during traversal (in * case of crash during recovery), we link each inode into the superblock * orphan list_head and handle it the same way as an inode deletion during * normal operation (which journals the operations for us). * * We only do an iget() and an iput() on each inode, which is very safe if we * accidentally point at an in-use or already deleted inode. The worst that * can happen in this case is that we get a "bit already cleared" message from * ext4_free_inode(). The only reason we would point at a wrong inode is if * e2fsck was run on this filesystem, and it must have already done the orphan * inode cleanup for us, so we can safely abort without any further action. */ static void ext4_orphan_cleanup(struct super_block *sb, struct ext4_super_block *es) { unsigned int s_flags = sb->s_flags; int nr_orphans = 0, nr_truncates = 0; #ifdef CONFIG_QUOTA int i; #endif if (!es->s_last_orphan) { jbd_debug(4, "no orphan inodes to clean up\n"); return; } if (bdev_read_only(sb->s_bdev)) { ext4_msg(sb, KERN_ERR, "write access " "unavailable, skipping orphan cleanup"); return; } /* Check if feature set would not allow a r/w mount */ if (!ext4_feature_set_ok(sb, 0)) { ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " "unknown ROCOMPAT features"); return; } if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { /* don't clear list on RO mount w/ errors */ if (es->s_last_orphan && !(s_flags & MS_RDONLY)) { ext4_msg(sb, KERN_INFO, "Errors on filesystem, " "clearing orphan list.\n"); es->s_last_orphan = 0; } jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); return; } if (s_flags & MS_RDONLY) { ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); sb->s_flags &= ~MS_RDONLY; } #ifdef CONFIG_QUOTA /* Needed for iput() to work correctly and not trash data */ sb->s_flags |= MS_ACTIVE; /* Turn on quotas so that they are updated correctly */ for (i = 0; i < EXT4_MAXQUOTAS; i++) { if (EXT4_SB(sb)->s_qf_names[i]) { int ret = ext4_quota_on_mount(sb, i); if (ret < 0) ext4_msg(sb, KERN_ERR, "Cannot turn on journaled " "quota: error %d", ret); } } #endif while (es->s_last_orphan) { struct inode *inode; inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); if (IS_ERR(inode)) { es->s_last_orphan = 0; break; } list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); dquot_initialize(inode); if (inode->i_nlink) { if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "%s: truncating inode %lu to %lld bytes", __func__, inode->i_ino, inode->i_size); jbd_debug(2, "truncating inode %lu to %lld bytes\n", inode->i_ino, inode->i_size); mutex_lock(&inode->i_mutex); truncate_inode_pages(inode->i_mapping, inode->i_size); ext4_truncate(inode); mutex_unlock(&inode->i_mutex); nr_truncates++; } else { if (test_opt(sb, DEBUG)) ext4_msg(sb, KERN_DEBUG, "%s: deleting unreferenced inode %lu", __func__, inode->i_ino); jbd_debug(2, "deleting unreferenced inode %lu\n", inode->i_ino); nr_orphans++; } iput(inode); /* The delete magic happens here! */ } #define PLURAL(x) (x), ((x) == 1) ? 
"" : "s" if (nr_orphans) ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted", PLURAL(nr_orphans)); if (nr_truncates) ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up", PLURAL(nr_truncates)); #ifdef CONFIG_QUOTA /* Turn quotas off */ for (i = 0; i < EXT4_MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) dquot_quota_off(sb, i); } #endif sb->s_flags = s_flags; /* Restore MS_RDONLY status */ } /* * Maximal extent format file size. * Resulting logical blkno at s_maxbytes must fit in our on-disk * extent format containers, within a sector_t, and within i_blocks * in the vfs. ext4 inode has 48 bits of i_block in fsblock units, * so that won't be a limiting factor. * * However there is other limiting factor. We do store extents in the form * of starting block and length, hence the resulting length of the extent * covering maximum file size must fit into on-disk format containers as * well. Given that length is always by 1 unit bigger than max unit (because * we count 0 as well) we have to lower the s_maxbytes by one fs block. * * Note, this does *not* consider any metadata overhead for vfs i_blocks. */ static loff_t ext4_max_size(int blkbits, int has_huge_files) { loff_t res; loff_t upper_limit = MAX_LFS_FILESIZE; /* small i_blocks in vfs inode? */ if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { /* * CONFIG_LBDAF is not enabled implies the inode * i_block represent total blocks in 512 bytes * 32 == size of vfs inode i_blocks * 8 */ upper_limit = (1LL << 32) - 1; /* total blocks in file system block size */ upper_limit >>= (blkbits - 9); upper_limit <<= blkbits; } /* * 32-bit extent-start container, ee_block. We lower the maxbytes * by one fs block, so ee_len can cover the extent of maximum file * size */ res = (1LL << 32) - 1; res <<= blkbits; /* Sanity check against vm- & vfs- imposed limits */ if (res > upper_limit) res = upper_limit; return res; } /* * Maximal bitmap file size. There is a direct, and {,double-,triple-}indirect * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks. * We need to be 1 filesystem block less than the 2^48 sector limit. */ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files) { loff_t res = EXT4_NDIR_BLOCKS; int meta_blocks; loff_t upper_limit; /* This is calculated to be the largest file size for a dense, block * mapped file such that the file's total number of 512-byte sectors, * including data and all indirect blocks, does not exceed (2^48 - 1). * * __u32 i_blocks_lo and _u16 i_blocks_high represent the total * number of 512-byte sectors of the file. 
	 */
	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
		/*
		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
		 * the inode i_block field represents total file blocks in
		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
		 */
		upper_limit = (1LL << 32) - 1;

		/* total blocks in file system block size */
		upper_limit >>= (bits - 9);

	} else {
		/*
		 * We use 48 bit ext4_inode i_blocks
		 * With EXT4_HUGE_FILE_FL set the i_blocks
		 * represent total number of blocks in
		 * file system block size
		 */
		upper_limit = (1LL << 48) - 1;

	}

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

static ext4_fsblk_t descriptor_loc(struct super_block *sb,
				   ext4_fsblk_t logical_sb_block, int nr)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!ext4_has_feature_meta_bg(sb) || nr < first_meta_bg)
		return logical_sb_block + nr + 1;
	bg = sbi->s_desc_per_block * nr;
	if (ext4_bg_has_super(sb, bg))
		has_super = 1;

	/*
	 * If we have a meta_bg fs with 1k blocks, group 0's GDT is at
	 * block 2, not 1.  If s_first_data_block == 0 (bigalloc is enabled
	 * on modern mke2fs or blksize > 1k on older mke2fs) then we must
	 * compensate.
	 */
	if (sb->s_blocksize == 1024 && nr == 0 &&
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) == 0)
		has_super++;

	return (has_super + ext4_group_first_block_no(sb, bg));
}

/**
 * ext4_get_stripe_size: Get the stripe size.
 * @sbi: In memory super block info
 *
 * If we have specified it via mount option, then
 * use the mount option value.  If the value specified at mount time is
 * greater than the blocks per group use the super block value.
 * If the super block value is greater than blocks per group return 0.
 * Allocator needs it to be less than blocks per group.
 *
 */
static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
{
	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
	int ret;

	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
		ret = sbi->s_stripe;
	else if (stripe_width <= sbi->s_blocks_per_group)
		ret = stripe_width;
	else if (stride <= sbi->s_blocks_per_group)
		ret = stride;
	else
		ret = 0;

	/*
	 * If the stripe width is 1, this makes no sense and
	 * we set it to 0 to turn off stripe handling code.
	 */
	if (ret <= 1)
		ret = 0;

	return ret;
}

/*
 * Check whether this filesystem can be mounted based on
 * the features present and the RDONLY/RDWR mount requested.
 * Returns 1 if this filesystem can be mounted as requested,
 * 0 if it cannot be.
 */
static int ext4_feature_set_ok(struct super_block *sb, int readonly)
{
	if (ext4_has_unknown_ext4_incompat_features(sb)) {
		ext4_msg(sb, KERN_ERR,
			"Couldn't mount because of "
			"unsupported optional features (%x)",
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
			~EXT4_FEATURE_INCOMPAT_SUPP));
		return 0;
	}

	if (readonly)
		return 1;

	if (ext4_has_feature_readonly(sb)) {
		ext4_msg(sb, KERN_INFO, "filesystem is read-only");
		sb->s_flags |= MS_RDONLY;
		return 1;
	}

	/* Check that feature set is OK for a read-write mount */
	if (ext4_has_unknown_ext4_ro_compat_features(sb)) {
		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
			 "unsupported optional features (%x)",
			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
			  ~EXT4_FEATURE_RO_COMPAT_SUPP));
		return 0;
	}
	/*
	 * Large file size enabled file system can only be mounted
	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
	 */
	if (ext4_has_feature_huge_file(sb)) {
		if (sizeof(blkcnt_t) < sizeof(u64)) {
			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
				 "cannot be mounted RDWR without "
				 "CONFIG_LBDAF");
			return 0;
		}
	}
	if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
		ext4_msg(sb, KERN_ERR,
			 "Can't support bigalloc feature without "
			 "extents feature");
		return 0;
	}

#ifndef CONFIG_QUOTA
	if (ext4_has_feature_quota(sb) && !readonly) {
		ext4_msg(sb, KERN_ERR,
			 "Filesystem with quota feature cannot be mounted RDWR "
			 "without CONFIG_QUOTA");
		return 0;
	}
#endif  /* CONFIG_QUOTA */
	return 1;
}

/*
 * This function is called once a day if we have errors logged
 * on the file system
 */
static void print_daily_error_info(unsigned long arg)
{
	struct super_block *sb = (struct super_block *) arg;
	struct ext4_sb_info *sbi;
	struct ext4_super_block *es;

	sbi = EXT4_SB(sb);
	es = sbi->s_es;

	if (es->s_error_count)
		/* fsck newer than v1.41.13 is needed to clean this condition.
		 */
		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
	if (es->s_first_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_first_error_time),
		       (int) sizeof(es->s_first_error_func),
		       es->s_first_error_func,
		       le32_to_cpu(es->s_first_error_line));
		if (es->s_first_error_ino)
			printk(": inode %u",
			       le32_to_cpu(es->s_first_error_ino));
		if (es->s_first_error_block)
			printk(": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_first_error_block));
		printk("\n");
	}
	if (es->s_last_error_time) {
		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
		       sb->s_id, le32_to_cpu(es->s_last_error_time),
		       (int) sizeof(es->s_last_error_func),
		       es->s_last_error_func,
		       le32_to_cpu(es->s_last_error_line));
		if (es->s_last_error_ino)
			printk(": inode %u",
			       le32_to_cpu(es->s_last_error_ino));
		if (es->s_last_error_block)
			printk(": block %llu", (unsigned long long)
			       le64_to_cpu(es->s_last_error_block));
		printk("\n");
	}
	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
}

/* Find next suitable group and run ext4_init_inode_table */
static int ext4_run_li_request(struct ext4_li_request *elr)
{
	struct ext4_group_desc *gdp = NULL;
	ext4_group_t group, ngroups;
	struct super_block *sb;
	unsigned long timeout = 0;
	int ret = 0;

	sb = elr->lr_super;
	ngroups = EXT4_SB(sb)->s_groups_count;

	sb_start_write(sb);
	for (group = elr->lr_next_group; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp) {
			ret = 1;
			break;
		}

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	if (group >= ngroups)
		ret = 1;

	if (!ret) {
		timeout = jiffies;
		ret = ext4_init_inode_table(sb, group,
					    elr->lr_timeout ? 0 : 1);
		if (elr->lr_timeout == 0) {
			timeout = (jiffies - timeout) *
				  elr->lr_sbi->s_li_wait_mult;
			elr->lr_timeout = timeout;
		}
		elr->lr_next_sched = jiffies + elr->lr_timeout;
		elr->lr_next_group = group + 1;
	}
	sb_end_write(sb);

	return ret;
}

/*
 * Remove lr_request from the list_request and free the
 * request structure. Should be called with li_list_mtx held
 */
static void ext4_remove_li_request(struct ext4_li_request *elr)
{
	struct ext4_sb_info *sbi;

	if (!elr)
		return;

	sbi = elr->lr_sbi;

	list_del(&elr->lr_request);
	sbi->s_li_request = NULL;
	kfree(elr);
}

static void ext4_unregister_li_request(struct super_block *sb)
{
	mutex_lock(&ext4_li_mtx);
	if (!ext4_li_info) {
		mutex_unlock(&ext4_li_mtx);
		return;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
	mutex_unlock(&ext4_li_info->li_list_mtx);
	mutex_unlock(&ext4_li_mtx);
}

static struct task_struct *ext4_lazyinit_task;

/*
 * This is the function where the ext4lazyinit thread lives. It walks
 * through the request list searching for the next scheduled filesystem.
 * When such a fs is found, run the lazy initialization request
 * (ext4_run_li_request) and keep track of the time spent in this
 * function. Based on that time we compute the next schedule time of
 * the request. When walking through the list is complete, compute the
 * next waking time and put itself to sleep.
 */
static int ext4_lazyinit_thread(void *arg)
{
	struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
	struct list_head *pos, *n;
	struct ext4_li_request *elr;
	unsigned long next_wakeup, cur;

	BUG_ON(NULL == eli);

cont_thread:
	while (true) {
		next_wakeup = MAX_JIFFY_OFFSET;

		mutex_lock(&eli->li_list_mtx);
		if (list_empty(&eli->li_request_list)) {
			mutex_unlock(&eli->li_list_mtx);
			goto exit_thread;
		}

		list_for_each_safe(pos, n, &eli->li_request_list) {
			elr = list_entry(pos, struct ext4_li_request,
					 lr_request);

			if (time_after_eq(jiffies, elr->lr_next_sched)) {
				if (ext4_run_li_request(elr) != 0) {
					/* error, remove the lazy_init job */
					ext4_remove_li_request(elr);
					continue;
				}
			}

			if (time_before(elr->lr_next_sched, next_wakeup))
				next_wakeup = elr->lr_next_sched;
		}
		mutex_unlock(&eli->li_list_mtx);

		try_to_freeze();

		cur = jiffies;
		if ((time_after_eq(cur, next_wakeup)) ||
		    (MAX_JIFFY_OFFSET == next_wakeup)) {
			cond_resched();
			continue;
		}

		schedule_timeout_interruptible(next_wakeup - cur);

		if (kthread_should_stop()) {
			ext4_clear_request_list();
			goto exit_thread;
		}
	}

exit_thread:
	/*
	 * It looks like the request list is empty, but we need
	 * to check it under the li_list_mtx lock, to prevent any
	 * additions into it, and of course we should lock ext4_li_mtx
	 * to atomically free the list and ext4_li_info, because at
	 * this point another ext4 filesystem could be registering
	 * a new one.
	 */
	mutex_lock(&ext4_li_mtx);
	mutex_lock(&eli->li_list_mtx);
	if (!list_empty(&eli->li_request_list)) {
		mutex_unlock(&eli->li_list_mtx);
		mutex_unlock(&ext4_li_mtx);
		goto cont_thread;
	}
	mutex_unlock(&eli->li_list_mtx);
	kfree(ext4_li_info);
	ext4_li_info = NULL;
	mutex_unlock(&ext4_li_mtx);

	return 0;
}

static void ext4_clear_request_list(void)
{
	struct list_head *pos, *n;
	struct ext4_li_request *elr;

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
		elr = list_entry(pos, struct ext4_li_request,
				 lr_request);
		ext4_remove_li_request(elr);
	}
	mutex_unlock(&ext4_li_info->li_list_mtx);
}

static int ext4_run_lazyinit_thread(void)
{
	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
					 ext4_li_info, "ext4lazyinit");
	if (IS_ERR(ext4_lazyinit_task)) {
		int err = PTR_ERR(ext4_lazyinit_task);
		ext4_clear_request_list();
		kfree(ext4_li_info);
		ext4_li_info = NULL;
		printk(KERN_CRIT "EXT4-fs: error %d creating inode table "
				 "initialization thread\n",
				 err);
		return err;
	}
	ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
	return 0;
}

/*
 * Check whether it makes sense to run the itable init. thread or not.
 * If there is at least one uninitialized inode table, return the
 * corresponding group number, else the loop goes through all
 * groups and returns the total number of groups.
 */
static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
{
	ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *gdp = NULL;

	for (group = 0; group < ngroups; group++) {
		gdp = ext4_get_group_desc(sb, group, NULL);
		if (!gdp)
			continue;

		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
			break;
	}

	return group;
}

static int ext4_li_info_new(void)
{
	struct ext4_lazy_init *eli = NULL;

	eli = kzalloc(sizeof(*eli), GFP_KERNEL);
	if (!eli)
		return -ENOMEM;

	INIT_LIST_HEAD(&eli->li_request_list);
	mutex_init(&eli->li_list_mtx);

	eli->li_state |= EXT4_LAZYINIT_QUIT;

	ext4_li_info = eli;

	return 0;
}

static struct ext4_li_request *ext4_li_request_new(struct super_block *sb,
					    ext4_group_t start)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr;

	elr = kzalloc(sizeof(*elr), GFP_KERNEL);
	if (!elr)
		return NULL;

	elr->lr_super = sb;
	elr->lr_sbi = sbi;
	elr->lr_next_group = start;

	/*
	 * Randomize first schedule time of the request to
	 * spread the inode table initialization requests
	 * better.
	 */
	elr->lr_next_sched = jiffies + (prandom_u32() %
				(EXT4_DEF_LI_MAX_START_DELAY * HZ));
	return elr;
}

int ext4_register_li_request(struct super_block *sb,
			     ext4_group_t first_not_zeroed)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_li_request *elr = NULL;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	int ret = 0;

	mutex_lock(&ext4_li_mtx);
	if (sbi->s_li_request != NULL) {
		/*
		 * Reset timeout so it can be computed again, because
		 * s_li_wait_mult might have changed.
		 */
		sbi->s_li_request->lr_timeout = 0;
		goto out;
	}

	if (first_not_zeroed == ngroups ||
	    (sb->s_flags & MS_RDONLY) ||
	    !test_opt(sb, INIT_INODE_TABLE))
		goto out;

	elr = ext4_li_request_new(sb, first_not_zeroed);
	if (!elr) {
		ret = -ENOMEM;
		goto out;
	}

	if (NULL == ext4_li_info) {
		ret = ext4_li_info_new();
		if (ret)
			goto out;
	}

	mutex_lock(&ext4_li_info->li_list_mtx);
	list_add(&elr->lr_request, &ext4_li_info->li_request_list);
	mutex_unlock(&ext4_li_info->li_list_mtx);

	sbi->s_li_request = elr;
	/*
	 * set elr to NULL here since it has been inserted into
	 * the request_list and the removal and free of it is
	 * handled by ext4_clear_request_list from now on.
	 */
	elr = NULL;

	if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
		ret = ext4_run_lazyinit_thread();
		if (ret)
			goto out;
	}
out:
	mutex_unlock(&ext4_li_mtx);
	if (ret)
		kfree(elr);
	return ret;
}

/*
 * We do not need to lock anything since this is called on
 * module unload.
 */
static void ext4_destroy_lazyinit_thread(void)
{
	/*
	 * If the thread exited earlier
	 * there's nothing to be done.
*/ if (!ext4_li_info || !ext4_lazyinit_task) return; kthread_stop(ext4_lazyinit_task); } static int set_journal_csum_feature_set(struct super_block *sb) { int ret = 1; int compat, incompat; struct ext4_sb_info *sbi = EXT4_SB(sb); if (ext4_has_metadata_csum(sb)) { /* journal checksum v3 */ compat = 0; incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; } else { /* journal checksum v1 */ compat = JBD2_FEATURE_COMPAT_CHECKSUM; incompat = 0; } jbd2_journal_clear_features(sbi->s_journal, JBD2_FEATURE_COMPAT_CHECKSUM, 0, JBD2_FEATURE_INCOMPAT_CSUM_V3 | JBD2_FEATURE_INCOMPAT_CSUM_V2); if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ret = jbd2_journal_set_features(sbi->s_journal, compat, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | incompat); } else if (test_opt(sb, JOURNAL_CHECKSUM)) { ret = jbd2_journal_set_features(sbi->s_journal, compat, 0, incompat); jbd2_journal_clear_features(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } else { jbd2_journal_clear_features(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); } return ret; } /* * Note: calculating the overhead so we can be compatible with * historical BSD practice is quite difficult in the face of * clusters/bigalloc. This is because multiple metadata blocks from * different block group can end up in the same allocation cluster. * Calculating the exact overhead in the face of clustered allocation * requires either O(all block bitmaps) in memory or O(number of block * groups**2) in time. We will still calculate the superblock for * older file systems --- and if we come across with a bigalloc file * system with zero in s_overhead_clusters the estimate will be close to * correct especially for very large cluster sizes --- but for newer * file systems, it's better to calculate this figure once at mkfs * time, and store it in the superblock. If the superblock value is * present (even for non-bigalloc file systems), we will use it. 
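 *
 * Illustrative example (assumed geometry, not from any real image):
 * with bigalloc and a cluster ratio of 16, EXT4_B2C() maps blocks 0-15
 * of a group onto cluster 0, so a block bitmap, an inode bitmap and
 * the first inode table blocks that all land in that range cost one
 * cluster of overhead rather than three-plus blocks, which is exactly
 * why the precise figure is expensive to recompute at mount time.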
*/ static int count_overhead(struct super_block *sb, ext4_group_t grp, char *buf) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_desc *gdp; ext4_fsblk_t first_block, last_block, b; ext4_group_t i, ngroups = ext4_get_groups_count(sb); int s, j, count = 0; if (!ext4_has_feature_bigalloc(sb)) return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) + sbi->s_itb_per_group + 2); first_block = le32_to_cpu(sbi->s_es->s_first_data_block) + (grp * EXT4_BLOCKS_PER_GROUP(sb)); last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1; for (i = 0; i < ngroups; i++) { gdp = ext4_get_group_desc(sb, i, NULL); b = ext4_block_bitmap(sb, gdp); if (b >= first_block && b <= last_block) { ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); count++; } b = ext4_inode_bitmap(sb, gdp); if (b >= first_block && b <= last_block) { ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf); count++; } b = ext4_inode_table(sb, gdp); if (b >= first_block && b + sbi->s_itb_per_group <= last_block) for (j = 0; j < sbi->s_itb_per_group; j++, b++) { int c = EXT4_B2C(sbi, b - first_block); ext4_set_bit(c, buf); count++; } if (i != grp) continue; s = 0; if (ext4_bg_has_super(sb, grp)) { ext4_set_bit(s++, buf); count++; } for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { ext4_set_bit(EXT4_B2C(sbi, s++), buf); count++; } } if (!count) return 0; return EXT4_CLUSTERS_PER_GROUP(sb) - ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8); } /* * Compute the overhead and stash it in sbi->s_overhead */ int ext4_calculate_overhead(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_group_t i, ngroups = ext4_get_groups_count(sb); ext4_fsblk_t overhead = 0; char *buf = (char *) get_zeroed_page(GFP_NOFS); if (!buf) return -ENOMEM; /* * Compute the overhead (FS structures). This is constant * for a given filesystem unless the number of block groups * changes so we cache the previous value until it does. */ /* * All of the blocks before first_data_block are overhead */ overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block)); /* * Add the overhead found in each block group */ for (i = 0; i < ngroups; i++) { int blks; blks = count_overhead(sb, i, buf); overhead += blks; if (blks) memset(buf, 0, PAGE_SIZE); cond_resched(); } /* Add the internal journal blocks as well */ if (sbi->s_journal && !sbi->journal_bdev) overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); sbi->s_overhead = overhead; smp_wmb(); free_page((unsigned long) buf); return 0; } static void ext4_set_resv_clusters(struct super_block *sb) { ext4_fsblk_t resv_clusters; struct ext4_sb_info *sbi = EXT4_SB(sb); /* * There's no need to reserve anything when we aren't using extents. * The space estimates are exact, there are no unwritten extents, * hole punching doesn't need new metadata... This is needed especially * to keep ext2/3 backward compatibility. */ if (!ext4_has_feature_extents(sb)) return; /* * By default we reserve 2% or 4096 clusters, whichever is smaller. * This should cover the situations where we can not afford to run * out of space like for example punch hole, or converting * unwritten extents in delalloc path. In most cases such * allocation would require 1, or 2 blocks, higher numbers are * very rare. 
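 *
 * Worked example (assumed geometry): a 100 GiB filesystem with 4 KiB
 * blocks and no bigalloc has about 26.2 million clusters; 2% of that
 * is roughly 524,000, so the min_t() below clamps the reservation to
 * 4096 clusters (16 MiB). Only filesystems smaller than ~800 MiB
 * actually keep the 2% figure.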
*/ resv_clusters = (ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits); do_div(resv_clusters, 50); resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096); atomic64_set(&sbi->s_resv_clusters, resv_clusters); } static int ext4_fill_super(struct super_block *sb, void *data, int silent) { char *orig_data = kstrdup(data, GFP_KERNEL); struct buffer_head *bh; struct ext4_super_block *es = NULL; struct ext4_sb_info *sbi; ext4_fsblk_t block; ext4_fsblk_t sb_block = get_sb_block(&data); ext4_fsblk_t logical_sb_block; unsigned long offset = 0; unsigned long journal_devnum = 0; unsigned long def_mount_opts; struct inode *root; const char *descr; int ret = -ENOMEM; int blocksize, clustersize; unsigned int db_count; unsigned int i; int needs_recovery, has_huge_files, has_bigalloc; __u64 blocks_count; int err = 0; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; ext4_group_t first_not_zeroed; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) goto out_free_orig; sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); if (!sbi->s_blockgroup_lock) { kfree(sbi); goto out_free_orig; } sb->s_fs_info = sbi; sbi->s_sb = sb; sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; sbi->s_sb_block = sb_block; if (sb->s_bdev->bd_part) sbi->s_sectors_written_start = part_stat_read(sb->s_bdev->bd_part, sectors[1]); /* Cleanup superblock name */ strreplace(sb->s_id, '/', '!'); /* -EINVAL is default */ ret = -EINVAL; blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE); if (!blocksize) { ext4_msg(sb, KERN_ERR, "unable to set blocksize"); goto out_fail; } /* * The ext4 superblock will not be buffer aligned for other than 1kB * block sizes. We need to calculate the offset from buffer start. */ if (blocksize != EXT4_MIN_BLOCK_SIZE) { logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); } else { logical_sb_block = sb_block; } if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) { ext4_msg(sb, KERN_ERR, "unable to read superblock"); goto out_fail; } /* * Note: s_es must be initialized as soon as possible because * some ext4 macro-instructions depend on its value */ es = (struct ext4_super_block *) (bh->b_data + offset); sbi->s_es = es; sb->s_magic = le16_to_cpu(es->s_magic); if (sb->s_magic != EXT4_SUPER_MAGIC) goto cantfind_ext4; sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); /* Warn if metadata_csum and gdt_csum are both set. */ if (ext4_has_feature_metadata_csum(sb) && ext4_has_feature_gdt_csum(sb)) ext4_warning(sb, "metadata_csum and uninit_bg are " "redundant flags; please run fsck."); /* Check for a known checksum algorithm */ if (!ext4_verify_csum_type(sb, es)) { ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " "unknown checksum algorithm."); silent = 1; goto cantfind_ext4; } /* Load the checksum driver */ if (ext4_has_feature_metadata_csum(sb)) { sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(sbi->s_chksum_driver)) { ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); ret = PTR_ERR(sbi->s_chksum_driver); sbi->s_chksum_driver = NULL; goto failed_mount; } } /* Check superblock checksum */ if (!ext4_superblock_csum_verify(sb, es)) { ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " "invalid superblock checksum. 
Run e2fsck?"); silent = 1; ret = -EFSBADCRC; goto cantfind_ext4; } /* Precompute checksum seed for all metadata */ if (ext4_has_feature_csum_seed(sb)) sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed); else if (ext4_has_metadata_csum(sb)) sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, sizeof(es->s_uuid)); /* Set defaults before we parse the mount options */ def_mount_opts = le32_to_cpu(es->s_default_mount_opts); set_opt(sb, INIT_INODE_TABLE); if (def_mount_opts & EXT4_DEFM_DEBUG) set_opt(sb, DEBUG); if (def_mount_opts & EXT4_DEFM_BSDGROUPS) set_opt(sb, GRPID); if (def_mount_opts & EXT4_DEFM_UID16) set_opt(sb, NO_UID32); /* xattr user namespace & acls are now defaulted on */ set_opt(sb, XATTR_USER); #ifdef CONFIG_EXT4_FS_POSIX_ACL set_opt(sb, POSIX_ACL); #endif /* don't forget to enable journal_csum when metadata_csum is enabled. */ if (ext4_has_metadata_csum(sb)) set_opt(sb, JOURNAL_CHECKSUM); if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) set_opt(sb, JOURNAL_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) set_opt(sb, ORDERED_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) set_opt(sb, WRITEBACK_DATA); if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) set_opt(sb, ERRORS_PANIC); else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE) set_opt(sb, ERRORS_CONT); else set_opt(sb, ERRORS_RO); /* block_validity enabled by default; disable with noblock_validity */ set_opt(sb, BLOCK_VALIDITY); if (def_mount_opts & EXT4_DEFM_DISCARD) set_opt(sb, DISCARD); sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) set_opt(sb, BARRIER); /* * enable delayed allocation by default * Use -o nodelalloc to turn it off */ if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) && ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) set_opt(sb, DELALLOC); /* * set default s_li_wait_mult for lazyinit, for the case there is * no mount option specified. */ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, &journal_devnum, &journal_ioprio, 0)) { ext4_msg(sb, KERN_WARNING, "failed to parse options in superblock: %s", sbi->s_es->s_mount_opts); } sbi->s_def_mount_opt = sbi->s_mount_opt; if (!parse_options((char *) data, sb, &journal_devnum, &journal_ioprio, 0)) goto failed_mount; if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { printk_once(KERN_WARNING "EXT4-fs: Warning: mounting " "with data=journal disables delayed " "allocation and O_DIRECT support!\n"); if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and delalloc"); goto failed_mount; } if (test_opt(sb, DIOREAD_NOLOCK)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dioread_nolock"); goto failed_mount; } if (test_opt(sb, DAX)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dax"); goto failed_mount; } if (test_opt(sb, DELALLOC)) clear_opt(sb, DELALLOC); } else { sb->s_iflags |= SB_I_CGROUPWB; } sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && (ext4_has_compat_features(sb) || ext4_has_ro_compat_features(sb) || ext4_has_incompat_features(sb))) ext4_msg(sb, KERN_WARNING, "feature flags set on rev 0 fs, " "running e2fsck is recommended"); if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) { set_opt2(sb, HURD_COMPAT); if (ext4_has_feature_64bit(sb)) { ext4_msg(sb, KERN_ERR, "The Hurd can't support 64-bit file systems"); goto failed_mount; } } if (IS_EXT2_SB(sb)) { if (ext2_feature_set_ok(sb)) ext4_msg(sb, KERN_INFO, "mounting ext2 file system " "using the ext4 subsystem"); else { ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " "to feature incompatibilities"); goto failed_mount; } } if (IS_EXT3_SB(sb)) { if (ext3_feature_set_ok(sb)) ext4_msg(sb, KERN_INFO, "mounting ext3 file system " "using the ext4 subsystem"); else { ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " "to feature incompatibilities"); goto failed_mount; } } /* * Check feature flags regardless of the revision level, since we * previously didn't change the revision level when setting the flags, * so there is a chance incompat flags are set on a rev 0 filesystem. */ if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY))) goto failed_mount; blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (blocksize < EXT4_MIN_BLOCK_SIZE || blocksize > EXT4_MAX_BLOCK_SIZE) { ext4_msg(sb, KERN_ERR, "Unsupported filesystem blocksize %d", blocksize); goto failed_mount; } if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { if (blocksize != PAGE_SIZE) { ext4_msg(sb, KERN_ERR, "error: unsupported blocksize for dax"); goto failed_mount; } if (!sb->s_bdev->bd_disk->fops->direct_access) { ext4_msg(sb, KERN_ERR, "error: device does not support dax"); goto failed_mount; } } if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) { ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d", es->s_encryption_level); goto failed_mount; } if (sb->s_blocksize != blocksize) { /* Validate the filesystem blocksize */ if (!sb_set_blocksize(sb, blocksize)) { ext4_msg(sb, KERN_ERR, "bad block size %d", blocksize); goto failed_mount; } brelse(bh); logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); bh = sb_bread_unmovable(sb, logical_sb_block); if (!bh) { ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); goto failed_mount; } es = (struct ext4_super_block *)(bh->b_data + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!"); goto failed_mount; } } has_huge_files = ext4_has_feature_huge_file(sb); sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, has_huge_files); sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { ext4_msg(sb, KERN_ERR, "unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2); } sbi->s_desc_size = le16_to_cpu(es->s_desc_size); if (ext4_has_feature_64bit(sb)) { if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || sbi->s_desc_size > EXT4_MAX_DESC_SIZE || 
!is_power_of_2(sbi->s_desc_size)) { ext4_msg(sb, KERN_ERR, "unsupported descriptor size %lu", sbi->s_desc_size); goto failed_mount; } } else sbi->s_desc_size = EXT4_MIN_DESC_SIZE; sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) goto cantfind_ext4; sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext4; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); sbi->s_sbh = bh; sbi->s_mount_state = le16_to_cpu(es->s_state); sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); for (i = 0; i < 4; i++) sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); sbi->s_def_hash_version = es->s_def_hash_version; if (ext4_has_feature_dir_index(sb)) { i = le32_to_cpu(es->s_flags); if (i & EXT2_FLAGS_UNSIGNED_HASH) sbi->s_hash_unsigned = 3; else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ if (!(sb->s_flags & MS_RDONLY)) es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); sbi->s_hash_unsigned = 3; #else if (!(sb->s_flags & MS_RDONLY)) es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); #endif } } /* Handle clustersize */ clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); has_bigalloc = ext4_has_feature_bigalloc(sb); if (has_bigalloc) { if (clustersize < blocksize) { ext4_msg(sb, KERN_ERR, "cluster size (%d) smaller than " "block size (%d)", clustersize, blocksize); goto failed_mount; } sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group); if (sbi->s_clusters_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu", sbi->s_clusters_per_group); goto failed_mount; } if (sbi->s_blocks_per_group != (sbi->s_clusters_per_group * (clustersize / blocksize))) { ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and " "clusters per group (%lu) inconsistent", sbi->s_blocks_per_group, sbi->s_clusters_per_group); goto failed_mount; } } else { if (clustersize != blocksize) { ext4_warning(sb, "fragment/cluster size (%d) != " "block size (%d)", clustersize, blocksize); clustersize = blocksize; } if (sbi->s_blocks_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } sbi->s_clusters_per_group = sbi->s_blocks_per_group; sbi->s_cluster_bits = 0; } sbi->s_cluster_ratio = clustersize / blocksize; if (sbi->s_inodes_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } /* Do we have standard group size of clustersize * 8 blocks ? */ if (sbi->s_blocks_per_group == clustersize << 3) set_opt2(sb, STD_GROUP_SIZE); /* * Test whether we have more sectors than will fit in sector_t, * and whether the max offset is addressable by the page cache. 
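 *
 * For instance (illustrative numbers): on a 32-bit build without
 * CONFIG_LBDAF, sector_t is 32 bits wide, so anything past 2^32
 * 512-byte sectors (2 TiB) is unaddressable, and a 32-bit pgoff_t
 * likewise caps the page cache at 2^32 pages (16 TiB with 4 KiB
 * pages). generic_check_addressable() below refuses the mount rather
 * than risking wrapped I/O past those limits.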
 */
	err = generic_check_addressable(sb->s_blocksize_bits,
					ext4_blocks_count(es));
	if (err) {
		ext4_msg(sb, KERN_ERR, "filesystem"
			 " too large to mount safely on this system");
		if (sizeof(sector_t) < 8)
			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
		goto failed_mount;
	}

	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext4;

	/* check blocks count against device size */
	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
		       "exceeds size of device (%llu blocks)",
		       ext4_blocks_count(es), blocks_count);
		goto failed_mount;
	}

	/*
	 * It makes no sense for the first data block to be beyond the end
	 * of the filesystem.
	 */
	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
		ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
			 "block %u is beyond end of filesystem (%llu)",
			 le32_to_cpu(es->s_first_data_block),
			 ext4_blocks_count(es));
		goto failed_mount;
	}
	blocks_count = (ext4_blocks_count(es) -
			le32_to_cpu(es->s_first_data_block) +
			EXT4_BLOCKS_PER_GROUP(sb) - 1);
	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
		       "(block count %llu, first data block %u, "
		       "blocks per group %lu)", sbi->s_groups_count,
		       ext4_blocks_count(es),
		       le32_to_cpu(es->s_first_data_block),
		       EXT4_BLOCKS_PER_GROUP(sb));
		goto failed_mount;
	}
	sbi->s_groups_count = blocks_count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	if (ext4_has_feature_meta_bg(sb)) {
		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
			ext4_msg(sb, KERN_WARNING,
				 "first meta block group too large: %u "
				 "(group descriptor block count %u)",
				 le32_to_cpu(es->s_first_meta_bg), db_count);
			goto failed_mount;
		}
	}
	sbi->s_group_desc = ext4_kvmalloc(db_count *
					  sizeof(struct buffer_head *),
					  GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext4_msg(sb, KERN_ERR, "not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext4_msg(sb, KERN_ERR,
			       "can't read group descriptor %d", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
		ret = -EFSCORRUPTED;
		goto failed_mount2;
	}

	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	setup_timer(&sbi->s_err_report, print_daily_error_info,
		(unsigned long) sb);

	/* Register extent status tree shrinker */
	if (ext4_es_register_shrinker(sbi))
		goto failed_mount3;

	sbi->s_stripe = ext4_get_stripe_size(sbi);
	sbi->s_extent_max_zeroout_kb = 32;

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext4_sops;
	sb->s_export_op = &ext4_export_ops;
	sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
	sb->dq_op = &ext4_quota_operations;
	if (ext4_has_feature_quota(sb))
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
	else
		sb->s_qcop = &ext4_qctl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif
	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));

	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  ext4_has_feature_journal_needs_recovery(sb));

	if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount3a;
	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
		if (ext4_load_journal(sb, es, journal_devnum))
			goto failed_mount3a;
	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
		   ext4_has_feature_journal_needs_recovery(sb)) {
		ext4_msg(sb, KERN_ERR, "required journal recovery "
		       "suppressed and not mounted read-only");
		goto failed_mount_wq;
	} else {
		/* Nojournal mode, all journal mount options are illegal */
		if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_checksum, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "journal_async_commit, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "commit=%lu, fs mounted w/o journal",
				 sbi->s_commit_interval / HZ);
			goto failed_mount_wq;
		}
		if (EXT4_MOUNT_DATA_FLAGS &
		    (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
			ext4_msg(sb, KERN_ERR, "can't mount with "
				 "data=, fs mounted w/o journal");
			goto failed_mount_wq;
		}
		sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
		clear_opt(sb, JOURNAL_CHECKSUM);
		clear_opt(sb, DATA_FLAGS);
		sbi->s_journal = NULL;
		needs_recovery = 0;
		goto no_journal;
	}

	if (ext4_has_feature_64bit(sb) &&
	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
				       JBD2_FEATURE_INCOMPAT_64BIT)) {
		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
		goto failed_mount_wq;
	}

	if (!set_journal_csum_feature_set(sb)) {
		ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
			 "feature set");
		goto failed_mount_wq;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		 * capabilities: ORDERED_DATA if the journal can
		 * cope, else JOURNAL_DATA */
		if (jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
			set_opt(sb, ORDERED_DATA);
		else
			set_opt(sb, JOURNAL_DATA);
		break;

	case EXT4_MOUNT_ORDERED_DATA:
	case EXT4_MOUNT_WRITEBACK_DATA:
		if (!jbd2_journal_check_available_features
		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
			ext4_msg(sb, KERN_ERR, "Journal does not support "
			       "requested data journaling mode");
			goto failed_mount_wq;
		}
	default:
		break;
	}
	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);

	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
	if (ext4_mballoc_ready) {
		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
		if (!sbi->s_mb_cache) {
			ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
			goto failed_mount_wq;
		}
	}

	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
	    (blocksize != PAGE_CACHE_SIZE)) {
		ext4_msg(sb, KERN_ERR,
			 "Unsupported blocksize for fs encryption");
		goto failed_mount_wq;
	}

	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) &&
	    !ext4_has_feature_encrypt(sb)) {
		ext4_set_feature_encrypt(sb);
		ext4_commit_super(sb, 1);
	}

	/*
	 * Get the # of file system overhead blocks from the
	 * superblock if present.
	 */
	if (es->s_overhead_clusters)
		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
	else {
		err = ext4_calculate_overhead(sb);
		if (err)
			goto failed_mount_wq;
	}

	/*
	 * The maximum number of concurrent works can be high and
	 * concurrency isn't really necessary.  Limit it to 1.
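	 * (WQ_MEM_RECLAIM guarantees a rescuer thread so reserved-extent
	 * conversions can make forward progress under memory pressure,
	 * and max_active == 1 below keeps the conversion work items
	 * serialized.)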
*/ EXT4_SB(sb)->rsv_conversion_wq = alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); if (!EXT4_SB(sb)->rsv_conversion_wq) { printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); ret = -ENOMEM; goto failed_mount4; } /* * The jbd2_journal_load will have done any necessary log recovery, * so we can safely mount the rest of the filesystem now. */ root = ext4_iget(sb, EXT4_ROOT_INO); if (IS_ERR(root)) { ext4_msg(sb, KERN_ERR, "get root inode failed"); ret = PTR_ERR(root); root = NULL; goto failed_mount4; } if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); iput(root); goto failed_mount4; } sb->s_root = d_make_root(root); if (!sb->s_root) { ext4_msg(sb, KERN_ERR, "get root dentry failed"); ret = -ENOMEM; goto failed_mount4; } if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY)) sb->s_flags |= MS_RDONLY; /* determine the minimum size of new large inodes, if present */ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { sbi->s_want_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; if (ext4_has_feature_extra_isize(sb)) { if (sbi->s_want_extra_isize < le16_to_cpu(es->s_want_extra_isize)) sbi->s_want_extra_isize = le16_to_cpu(es->s_want_extra_isize); if (sbi->s_want_extra_isize < le16_to_cpu(es->s_min_extra_isize)) sbi->s_want_extra_isize = le16_to_cpu(es->s_min_extra_isize); } } /* Check if enough inode space is available */ if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > sbi->s_inode_size) { sbi->s_want_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; ext4_msg(sb, KERN_INFO, "required extra inode space not" "available"); } ext4_set_resv_clusters(sb); err = ext4_setup_system_zone(sb); if (err) { ext4_msg(sb, KERN_ERR, "failed to initialize system " "zone (%d)", err); goto failed_mount4a; } ext4_ext_init(sb); err = ext4_mb_init(sb); if (err) { ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", err); goto failed_mount5; } block = ext4_count_free_clusters(sb); ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block)); err = percpu_counter_init(&sbi->s_freeclusters_counter, block, GFP_KERNEL); if (!err) { unsigned long freei = ext4_count_free_inodes(sb); sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, GFP_KERNEL); } if (!err) err = percpu_counter_init(&sbi->s_dirs_counter, ext4_count_dirs(sb), GFP_KERNEL); if (!err) err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0, GFP_KERNEL); if (err) { ext4_msg(sb, KERN_ERR, "insufficient memory"); goto failed_mount6; } if (ext4_has_feature_flex_bg(sb)) if (!ext4_fill_flex_info(sb)) { ext4_msg(sb, KERN_ERR, "unable to initialize " "flex_bg meta info!"); goto failed_mount6; } err = ext4_register_li_request(sb, first_not_zeroed); if (err) goto failed_mount6; err = ext4_register_sysfs(sb); if (err) goto failed_mount7; #ifdef CONFIG_QUOTA /* Enable quota usage during mount. 
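	 * With the "quota" feature the quota files are the hidden inodes
	 * named by s_usr_quota_inum/s_grp_quota_inum in the superblock,
	 * so no user-visible quota files are required; see
	 * ext4_enable_quotas() further down.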
*/ if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) { err = ext4_enable_quotas(sb); if (err) goto failed_mount8; } #endif /* CONFIG_QUOTA */ EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; ext4_orphan_cleanup(sb, es); EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); ext4_mark_recovery_complete(sb, es); } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) descr = " journalled data mode"; else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) descr = " ordered data mode"; else descr = " writeback data mode"; } else descr = "out journal"; if (test_opt(sb, DISCARD)) { struct request_queue *q = bdev_get_queue(sb->s_bdev); if (!blk_queue_discard(q)) ext4_msg(sb, KERN_WARNING, "mounting with \"discard\" option, but " "the device does not support discard"); } if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount")) ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, *sbi->s_es->s_mount_opts ? "; " : "", orig_data); if (es->s_error_count) mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ /* Enable message ratelimiting. Default is 10 messages per 5 secs. */ ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); kfree(orig_data); return 0; cantfind_ext4: if (!silent) ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; #ifdef CONFIG_QUOTA failed_mount8: ext4_unregister_sysfs(sb); #endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: ext4_mb_release(sb); if (sbi->s_flex_groups) kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); failed_mount5: ext4_ext_release(sb); ext4_release_system_zone(sb); failed_mount4a: dput(sb->s_root); sb->s_root = NULL; failed_mount4: ext4_msg(sb, KERN_ERR, "mount failed"); if (EXT4_SB(sb)->rsv_conversion_wq) destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); failed_mount_wq: if (sbi->s_journal) { jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; } failed_mount3a: ext4_es_unregister_shrinker(sbi); failed_mount3: del_timer_sync(&sbi->s_err_report); if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); kvfree(sbi->s_group_desc); failed_mount: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); #ifdef CONFIG_QUOTA for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif ext4_blkdev_remove(sbi); brelse(bh); out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); out_free_orig: kfree(orig_data); return err ? err : ret; } /* * Setup any per-fs journal parameters now. We'll do this both on * initial mount, once the journal has been initialised but before we've * done any recovery; and again on any subsequent remount. 
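 *
 * (Both ext4_get_journal() and ext4_get_dev_journal() below call this
 * right after creating the journal, and ext4_remount() calls it again,
 * so the commit interval, batch times and barrier mode picked up by
 * parse_options() take effect without reloading the journal.)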
*/ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal) { struct ext4_sb_info *sbi = EXT4_SB(sb); journal->j_commit_interval = sbi->s_commit_interval; journal->j_min_batch_time = sbi->s_min_batch_time; journal->j_max_batch_time = sbi->s_max_batch_time; write_lock(&journal->j_state_lock); if (test_opt(sb, BARRIER)) journal->j_flags |= JBD2_BARRIER; else journal->j_flags &= ~JBD2_BARRIER; if (test_opt(sb, DATA_ERR_ABORT)) journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR; else journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR; write_unlock(&journal->j_state_lock); } static journal_t *ext4_get_journal(struct super_block *sb, unsigned int journal_inum) { struct inode *journal_inode; journal_t *journal; BUG_ON(!ext4_has_feature_journal(sb)); /* First, test for the existence of a valid inode on disk. Bad * things happen if we iget() an unused inode, as the subsequent * iput() will try to delete it. */ journal_inode = ext4_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { ext4_msg(sb, KERN_ERR, "no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); ext4_msg(sb, KERN_ERR, "journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %lld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { ext4_msg(sb, KERN_ERR, "invalid journal inode"); iput(journal_inode); return NULL; } journal = jbd2_journal_init_inode(journal_inode); if (!journal) { ext4_msg(sb, KERN_ERR, "Could not load journal inode"); iput(journal_inode); return NULL; } journal->j_private = sb; ext4_init_journal_params(sb, journal); return journal; } static journal_t *ext4_get_dev_journal(struct super_block *sb, dev_t j_dev) { struct buffer_head *bh; journal_t *journal; ext4_fsblk_t start; ext4_fsblk_t len; int hblock, blocksize; ext4_fsblk_t sb_block; unsigned long offset; struct ext4_super_block *es; struct block_device *bdev; BUG_ON(!ext4_has_feature_journal(sb)); bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { ext4_msg(sb, KERN_ERR, "blocksize too small for journal device"); goto out_bdev; } sb_block = EXT4_MIN_BLOCK_SIZE / blocksize; offset = EXT4_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { ext4_msg(sb, KERN_ERR, "couldn't read superblock of " "external journal"); goto out_bdev; } es = (struct ext4_super_block *) (bh->b_data + offset); if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { ext4_msg(sb, KERN_ERR, "external journal has " "bad superblock"); brelse(bh); goto out_bdev; } if ((le32_to_cpu(es->s_feature_ro_compat) & EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && es->s_checksum != ext4_superblock_csum(sb, es)) { ext4_msg(sb, KERN_ERR, "external journal has " "corrupt superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext4_msg(sb, KERN_ERR, "journal UUID does not match"); brelse(bh); goto out_bdev; } len = ext4_blocks_count(es); start = sb_block + 1; brelse(bh); /* we're done with the superblock */ journal = jbd2_journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { ext4_msg(sb, KERN_ERR, "failed to create device journal"); goto out_bdev; } journal->j_private = sb; ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer); 
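	/*
	 * ll_rw_block() above only submitted the read; wait for it to
	 * complete and make sure the external journal's superblock really
	 * reached the buffer before j_superblock is examined below.
	 */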
wait_on_buffer(journal->j_sb_buffer); if (!buffer_uptodate(journal->j_sb_buffer)) { ext4_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { ext4_msg(sb, KERN_ERR, "External journal has more than one " "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } EXT4_SB(sb)->journal_bdev = bdev; ext4_init_journal_params(sb, journal); return journal; out_journal: jbd2_journal_destroy(journal); out_bdev: ext4_blkdev_put(bdev); return NULL; } static int ext4_load_journal(struct super_block *sb, struct ext4_super_block *es, unsigned long journal_devnum) { journal_t *journal; unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); dev_t journal_dev; int err = 0; int really_read_only; BUG_ON(!ext4_has_feature_journal(sb)); if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { ext4_msg(sb, KERN_INFO, "external journal device major/minor " "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); really_read_only = bdev_read_only(sb->s_bdev); /* * Are we loading a blank journal or performing recovery after a * crash? For recovery, we need to check in advance whether we * can get read-write access to the device. */ if (ext4_has_feature_journal_needs_recovery(sb)) { if (sb->s_flags & MS_RDONLY) { ext4_msg(sb, KERN_INFO, "INFO: recovery " "required on readonly filesystem"); if (really_read_only) { ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed"); return -EROFS; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } if (journal_inum && journal_dev) { ext4_msg(sb, KERN_ERR, "filesystem has both journal " "and inode journals!"); return -EINVAL; } if (journal_inum) { if (!(journal = ext4_get_journal(sb, journal_inum))) return -EINVAL; } else { if (!(journal = ext4_get_dev_journal(sb, journal_dev))) return -EINVAL; } if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); if (!ext4_has_feature_journal_needs_recovery(sb)) err = jbd2_journal_wipe(journal, !really_read_only); if (!err) { char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL); if (save) memcpy(save, ((char *) es) + EXT4_S_ERR_START, EXT4_S_ERR_LEN); err = jbd2_journal_load(journal); if (save) memcpy(((char *) es) + EXT4_S_ERR_START, save, EXT4_S_ERR_LEN); kfree(save); } if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); jbd2_journal_destroy(journal); return err; } EXT4_SB(sb)->s_journal = journal; ext4_clear_journal_err(sb, es); if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); /* Make sure we flush the recovery flag to disk. */ ext4_commit_super(sb, 1); } return 0; } static int ext4_commit_super(struct super_block *sb, int sync) { struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; int error = 0; if (!sbh || block_device_ejected(sb)) return error; if (buffer_write_io_error(sbh)) { /* * Oh, dear. A previous attempt to write the * superblock failed. This could happen because the * USB device was yanked out. Or it could happen to * be a transient write error and maybe the block will * be remapped. Nothing we can do but to retry the * write and hope for the best. 
*/ ext4_msg(sb, KERN_ERR, "previous I/O error to " "superblock detected"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } /* * If the file system is mounted read-only, don't update the * superblock write time. This avoids updating the superblock * write time when we are mounting the root file system * read/only but we need to replay the journal; at that point, * for people who are east of GMT and who make their clock * tick in localtime for Windows bug-for-bug compatibility, * the clock is set in the future, and this will cause e2fsck * to complain and force a full file system check. */ if (!(sb->s_flags & MS_RDONLY)) es->s_wtime = cpu_to_le32(get_seconds()); if (sb->s_bdev->bd_part) es->s_kbytes_written = cpu_to_le64(EXT4_SB(sb)->s_kbytes_written + ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - EXT4_SB(sb)->s_sectors_written_start) >> 1)); else es->s_kbytes_written = cpu_to_le64(EXT4_SB(sb)->s_kbytes_written); if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter)) ext4_free_blocks_count_set(es, EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive( &EXT4_SB(sb)->s_freeclusters_counter))); if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter)) es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive( &EXT4_SB(sb)->s_freeinodes_counter)); BUFFER_TRACE(sbh, "marking dirty"); ext4_superblock_csum_set(sb); mark_buffer_dirty(sbh); if (sync) { error = __sync_dirty_buffer(sbh, test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC); if (error) return error; error = buffer_write_io_error(sbh); if (error) { ext4_msg(sb, KERN_ERR, "I/O error while writing " "superblock"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } } return error; } /* * Have we just finished recovery? If so, and if we are mounting (or * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ static void ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal = EXT4_SB(sb)->s_journal; if (!ext4_has_feature_journal(sb)) { BUG_ON(journal != NULL); return; } jbd2_journal_lock_updates(journal); if (jbd2_journal_flush(journal) < 0) goto out; if (ext4_has_feature_journal_needs_recovery(sb) && sb->s_flags & MS_RDONLY) { ext4_clear_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); } out: jbd2_journal_unlock_updates(journal); } /* * If we are mounting (or read-write remounting) a filesystem whose journal * has recorded an error from a previous lifetime, move that error to the * main filesystem now. */ static void ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal; int j_errno; const char *errstr; BUG_ON(!ext4_has_feature_journal(sb)); journal = EXT4_SB(sb)->s_journal; /* * Now check for any error status which may have been recorded in the * journal by a prior ext4_error() or ext4_abort() */ j_errno = jbd2_journal_errno(journal); if (j_errno) { char nbuf[16]; errstr = ext4_decode_error(sb, j_errno, nbuf); ext4_warning(sb, "Filesystem error recorded " "from previous mount: %s", errstr); ext4_warning(sb, "Marking fs in need of filesystem check."); EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS; es->s_state |= cpu_to_le16(EXT4_ERROR_FS); ext4_commit_super(sb, 1); jbd2_journal_clear_err(journal); jbd2_journal_update_sb_errno(journal); } } /* * Force the running and committing transactions to commit, * and wait on the commit. 
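 *
 * A hedged usage sketch (illustrative; the function returns 0 on
 * read-only mounts and otherwise propagates the journal's error):
 *
 *	err = ext4_force_commit(inode->i_sb);
 *	if (err)
 *		return err;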
*/ int ext4_force_commit(struct super_block *sb) { journal_t *journal; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT4_SB(sb)->s_journal; return ext4_journal_force_commit(journal); } static int ext4_sync_fs(struct super_block *sb, int wait) { int ret = 0; tid_t target; bool needs_barrier = false; struct ext4_sb_info *sbi = EXT4_SB(sb); trace_ext4_sync_fs(sb, wait); flush_workqueue(sbi->rsv_conversion_wq); /* * Writeback quota in non-journalled quota case - journalled quota has * no dirty dquots */ dquot_writeback_dquots(sb, -1); /* * Data writeback is possible w/o journal transaction, so barrier must * being sent at the end of the function. But we can skip it if * transaction_commit will do it for us. */ if (sbi->s_journal) { target = jbd2_get_latest_transaction(sbi->s_journal); if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) needs_barrier = true; if (jbd2_journal_start_commit(sbi->s_journal, &target)) { if (wait) ret = jbd2_log_wait_commit(sbi->s_journal, target); } } else if (wait && test_opt(sb, BARRIER)) needs_barrier = true; if (needs_barrier) { int err; err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); if (!ret) ret = err; } return ret; } /* * LVM calls this function before a (read-only) snapshot is created. This * gives us a chance to flush the journal completely and mark the fs clean. * * Note that only this function cannot bring a filesystem to be in a clean * state independently. It relies on upper layer to stop all data & metadata * modifications. */ static int ext4_freeze(struct super_block *sb) { int error = 0; journal_t *journal; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT4_SB(sb)->s_journal; if (journal) { /* Now we set up the journal barrier. */ jbd2_journal_lock_updates(journal); /* * Don't clear the needs_recovery flag if we failed to * flush the journal. */ error = jbd2_journal_flush(journal); if (error < 0) goto out; /* Journal blocked and flushed, clear needs_recovery flag. */ ext4_clear_feature_journal_needs_recovery(sb); } error = ext4_commit_super(sb, 1); out: if (journal) /* we rely on upper layer to stop further updates */ jbd2_journal_unlock_updates(journal); return error; } /* * Called by LVM after the snapshot is done. We need to reset the RECOVER * flag here, even though the filesystem is not technically dirty yet. */ static int ext4_unfreeze(struct super_block *sb) { if (sb->s_flags & MS_RDONLY) return 0; if (EXT4_SB(sb)->s_journal) { /* Reset the needs_recovery flag before the fs is unlocked. 
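		 * ext4_freeze() above cleared the flag once the journal was
		 * flushed, so the snapshot looks clean; writes are about to
		 * resume, so the flag must be set again before anything can
		 * dirty the journal.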
*/ ext4_set_feature_journal_needs_recovery(sb); } ext4_commit_super(sb, 1); return 0; } /* * Structure to save mount options for ext4_remount's benefit */ struct ext4_mount_options { unsigned long s_mount_opt; unsigned long s_mount_opt2; kuid_t s_resuid; kgid_t s_resgid; unsigned long s_commit_interval; u32 s_min_batch_time, s_max_batch_time; #ifdef CONFIG_QUOTA int s_jquota_fmt; char *s_qf_names[EXT4_MAXQUOTAS]; #endif }; static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned long old_sb_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; int err = 0; #ifdef CONFIG_QUOTA int i, j; #endif char *orig_data = kstrdup(data, GFP_KERNEL); /* Store the original options */ old_sb_flags = sb->s_flags; old_opts.s_mount_opt = sbi->s_mount_opt; old_opts.s_mount_opt2 = sbi->s_mount_opt2; old_opts.s_resuid = sbi->s_resuid; old_opts.s_resgid = sbi->s_resgid; old_opts.s_commit_interval = sbi->s_commit_interval; old_opts.s_min_batch_time = sbi->s_min_batch_time; old_opts.s_max_batch_time = sbi->s_max_batch_time; #ifdef CONFIG_QUOTA old_opts.s_jquota_fmt = sbi->s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) if (sbi->s_qf_names[i]) { old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i], GFP_KERNEL); if (!old_opts.s_qf_names[i]) { for (j = 0; j < i; j++) kfree(old_opts.s_qf_names[j]); kfree(orig_data); return -ENOMEM; } } else old_opts.s_qf_names[i] = NULL; #endif if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; } if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ test_opt(sb, JOURNAL_CHECKSUM)) { ext4_msg(sb, KERN_ERR, "changing journal_checksum " "during remount not supported; ignoring"); sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM; } if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and delalloc"); err = -EINVAL; goto restore_opts; } if (test_opt(sb, DIOREAD_NOLOCK)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dioread_nolock"); err = -EINVAL; goto restore_opts; } if (test_opt(sb, DAX)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dax"); err = -EINVAL; goto restore_opts; } } if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) { ext4_msg(sb, KERN_WARNING, "warning: refusing change of " "dax flag with busy inodes while remounting"); sbi->s_mount_opt ^= EXT4_MOUNT_DAX; } if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, "Abort forced by user"); sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); es = sbi->s_es; if (sbi->s_journal) { ext4_init_journal_params(sb, sbi->s_journal); set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } if (*flags & MS_LAZYTIME) sb->s_flags |= MS_LAZYTIME; if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; goto restore_opts; } if (*flags & MS_RDONLY) { err = sync_filesystem(sb); if (err < 0) goto restore_opts; err = dquot_suspend(sb, -1); if (err < 0) goto restore_opts; /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount */ sb->s_flags |= MS_RDONLY; /* * OK, test if we are remounting a valid rw partition * readonly, and if so set the rdonly flag and then * mark the partition as valid again. */ if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) && (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); if (sbi->s_journal) ext4_mark_recovery_complete(sb, es); } else { /* Make sure we can mount this feature set readwrite */ if (ext4_has_feature_readonly(sb) || !ext4_feature_set_ok(sb, 0)) { err = -EROFS; goto restore_opts; } /* * Make sure the group descriptor checksums * are sane. If they aren't, refuse to remount r/w. */ for (g = 0; g < sbi->s_groups_count; g++) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, g, NULL); if (!ext4_group_desc_csum_verify(sb, g, gdp)) { ext4_msg(sb, KERN_ERR, "ext4_remount: Checksum for group %u failed (%u!=%u)", g, le16_to_cpu(ext4_group_desc_csum(sb, g, gdp)), le16_to_cpu(gdp->bg_checksum)); err = -EFSBADCRC; goto restore_opts; } } /* * If we have an unprocessed orphan list hanging * around from a previously readonly bdev mount, * require a full umount/remount for now. */ if (es->s_last_orphan) { ext4_msg(sb, KERN_WARNING, "Couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " "umount/remount instead"); err = -EINVAL; goto restore_opts; } /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) */ if (sbi->s_journal) ext4_clear_journal_err(sb, es); sbi->s_mount_state = le16_to_cpu(es->s_state); if (!ext4_setup_super(sb, es, 0)) sb->s_flags &= ~MS_RDONLY; if (ext4_has_feature_mmp(sb)) if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) { err = -EROFS; goto restore_opts; } enable_quota = 1; } } /* * Reinitialize lazy itable initialization thread based on * current settings */ if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE)) ext4_unregister_li_request(sb); else { ext4_group_t first_not_zeroed; first_not_zeroed = ext4_has_uninit_itable(sb); ext4_register_li_request(sb, first_not_zeroed); } ext4_setup_system_zone(sb); if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY)) ext4_commit_super(sb, 1); #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(old_opts.s_qf_names[i]); if (enable_quota) { if (sb_any_quota_suspended(sb)) dquot_resume(sb, -1); else if (ext4_has_feature_quota(sb)) { err = ext4_enable_quotas(sb); if (err) goto restore_opts; } } #endif *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME); ext4_msg(sb, KERN_INFO, "re-mounted. 
Opts: %s", orig_data); kfree(orig_data); return 0; restore_opts: sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_mount_opt2 = old_opts.s_mount_opt2; sbi->s_resuid = old_opts.s_resuid; sbi->s_resgid = old_opts.s_resgid; sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) { kfree(sbi->s_qf_names[i]); sbi->s_qf_names[i] = old_opts.s_qf_names[i]; } #endif kfree(orig_data); return err; } static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_fsblk_t overhead = 0, resv_blocks; u64 fsid; s64 bfree; resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters)); if (!test_opt(sb, MINIX_DF)) overhead = sbi->s_overhead; buf->f_type = EXT4_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead); bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) - percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter); /* prevent underflow in case that few free space is available */ buf->f_bfree = EXT4_C2B(sbi, max_t(s64, bfree, 0)); buf->f_bavail = buf->f_bfree - (ext4_r_blocks_count(es) + resv_blocks); if (buf->f_bfree < (ext4_r_blocks_count(es) + resv_blocks)) buf->f_bavail = 0; buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); buf->f_namelen = EXT4_NAME_LEN; fsid = le64_to_cpup((void *)es->s_uuid) ^ le64_to_cpup((void *)es->s_uuid + sizeof(u64)); buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } /* Helper function for writing quotas on sync - we need to start transaction * before quota file is locked for write. 
Otherwise there are possible deadlocks:
 *   Process 1                          Process 2
 *   ext4_create()                      quota_sync()
 *     jbd2_journal_start()               write_dquot()
 *     dquot_initialize()                   down(dqio_mutex)
 *       down(dqio_mutex)                     jbd2_journal_start()
 *
 */

#ifdef CONFIG_QUOTA

static inline struct inode *dquot_to_inode(struct dquot *dquot)
{
	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
}

static int ext4_write_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;
	struct inode *inode;

	inode = dquot_to_inode(dquot);
	handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_acquire_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_acquire(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* Are we journaling quotas? */
	if (ext4_has_feature_quota(sb) ||
	    sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext4_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

static int ext4_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext4_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
					EXT4_SB(sb)->s_jquota_fmt, type);
}

/*
 * Standard function to be called on quota_on
 */
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT4_SB(sb)->s_qf_names[type]) {
		/* Quotafile not in fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext4_msg(sb, KERN_WARNING,
				"Quota file not on filesystem root. "
				"Journaled quota will not work");
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
	 */
	if (EXT4_SB(sb)->s_journal &&
	    ext4_should_journal_data(d_inode(path->dentry))) {
		/*
		 * We don't need to lock updates but journal_flush() could
		 * otherwise be livelocked...
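		 * (journal_flush() waits for all in-flight handles to
		 * complete, and with updates still allowed new handles can
		 * keep starting faster than the flush can drain them.)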
*/ jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); err = jbd2_journal_flush(EXT4_SB(sb)->s_journal); jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); if (err) return err; } return dquot_quota_on(sb, type, format_id, path); } static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags) { int err; struct inode *qf_inode; unsigned long qf_inums[EXT4_MAXQUOTAS] = { le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) }; BUG_ON(!ext4_has_feature_quota(sb)); if (!qf_inums[type]) return -EPERM; qf_inode = ext4_iget(sb, qf_inums[type]); if (IS_ERR(qf_inode)) { ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]); return PTR_ERR(qf_inode); } /* Don't account quota for quota files to avoid recursion */ qf_inode->i_flags |= S_NOQUOTA; err = dquot_enable(qf_inode, type, format_id, flags); iput(qf_inode); return err; } /* Enable usage tracking for all quota types. */ static int ext4_enable_quotas(struct super_block *sb) { int type, err = 0; unsigned long qf_inums[EXT4_MAXQUOTAS] = { le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) }; sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; for (type = 0; type < EXT4_MAXQUOTAS; type++) { if (qf_inums[type]) { err = ext4_quota_enable(sb, type, QFMT_VFS_V1, DQUOT_USAGE_ENABLED); if (err) { ext4_warning(sb, "Failed to enable quota tracking " "(type=%d, err=%d). Please run " "e2fsck to fix.", type, err); return err; } } } return 0; } static int ext4_quota_off(struct super_block *sb, int type) { struct inode *inode = sb_dqopt(sb)->files[type]; handle_t *handle; /* Force all delayed allocation blocks to be allocated. * Caller already holds s_umount sem */ if (test_opt(sb, DELALLOC)) sync_filesystem(sb); if (!inode) goto out; /* Update modification times of quota files when userspace can * start looking at them */ handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1); if (IS_ERR(handle)) goto out; inode->i_mtime = inode->i_ctime = CURRENT_TIME; ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); out: return dquot_quota_off(sb, type); } /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; bh = ext4_bread(NULL, inode, blk, 0); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) /* A hole? 
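		 * (an unmapped block in the quota file just reads back
		 * as zeroes)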
*/ memset(data, 0, tocopy); else memcpy(data, bh->b_data+offset, tocopy); brelse(bh); offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile (we know the transaction is already started and has * enough credits) */ static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); int err, offset = off & (sb->s_blocksize - 1); int retries = 0; struct buffer_head *bh; handle_t *handle = journal_current_handle(); if (EXT4_SB(sb)->s_journal && !handle) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because transaction is not started", (unsigned long long)off, (unsigned long long)len); return -EIO; } /* * Since we account for only one data block in the transaction credits, * it is impossible to cross a block boundary. */ if (sb->s_blocksize - offset < len) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because not block aligned", (unsigned long long)off, (unsigned long long)len); return -EIO; } do { bh = ext4_bread(handle, inode, blk, EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL); } while (IS_ERR(bh) && (PTR_ERR(bh) == -ENOSPC) && ext4_should_retry_alloc(inode->i_sb, &retries)); if (IS_ERR(bh)) return PTR_ERR(bh); if (!bh) goto out; BUFFER_TRACE(bh, "get write access"); err = ext4_journal_get_write_access(handle, bh); if (err) { brelse(bh); return err; } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); flush_dcache_page(bh->b_page); unlock_buffer(bh); err = ext4_handle_dirty_metadata(handle, NULL, bh); brelse(bh); out: if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT4_I(inode)->i_disksize = inode->i_size; ext4_mark_inode_dirty(handle, inode); } return len; } #endif static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super); } #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2) static inline void register_as_ext2(void) { int err = register_filesystem(&ext2_fs_type); if (err) printk(KERN_WARNING "EXT4-fs: Unable to register as ext2 (%d)\n", err); } static inline void unregister_as_ext2(void) { unregister_filesystem(&ext2_fs_type); } static inline int ext2_feature_set_ok(struct super_block *sb) { if (ext4_has_unknown_ext2_incompat_features(sb)) return 0; if (sb->s_flags & MS_RDONLY) return 1; if (ext4_has_unknown_ext2_ro_compat_features(sb)) return 0; return 1; } #else static inline void register_as_ext2(void) { } static inline void unregister_as_ext2(void) { } static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } #endif static inline void register_as_ext3(void) { int err = register_filesystem(&ext3_fs_type); if (err) printk(KERN_WARNING "EXT4-fs: Unable to register as ext3 (%d)\n", err); } static inline void unregister_as_ext3(void) { unregister_filesystem(&ext3_fs_type); } static inline int ext3_feature_set_ok(struct super_block *sb) { if (ext4_has_unknown_ext3_incompat_features(sb)) return 0; if (!ext4_has_feature_journal(sb)) return 0; if (sb->s_flags & MS_RDONLY) return 1; if (ext4_has_unknown_ext3_ro_compat_features(sb)) return 0; return 1; } static struct file_system_type ext4_fs_type = { .owner = THIS_MODULE, .name = "ext4", .mount = ext4_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext4"); /*
Shared across all ext4 file systems */ wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; static int __init ext4_init_fs(void) { int i, err; ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64); ext4_li_info = NULL; mutex_init(&ext4_li_mtx); /* Build-time check for flags consistency */ ext4_check_flag_values(); for (i = 0; i < EXT4_WQ_HASH_SZ; i++) { mutex_init(&ext4__aio_mutex[i]); init_waitqueue_head(&ext4__ioend_wq[i]); } err = ext4_init_es(); if (err) return err; err = ext4_init_pageio(); if (err) goto out5; err = ext4_init_system_zone(); if (err) goto out4; err = ext4_init_sysfs(); if (err) goto out3; err = ext4_init_mballoc(); if (err) goto out2; else ext4_mballoc_ready = 1; err = init_inodecache(); if (err) goto out1; register_as_ext3(); register_as_ext2(); err = register_filesystem(&ext4_fs_type); if (err) goto out; return 0; out: unregister_as_ext2(); unregister_as_ext3(); destroy_inodecache(); out1: ext4_mballoc_ready = 0; ext4_exit_mballoc(); out2: ext4_exit_sysfs(); out3: ext4_exit_system_zone(); out4: ext4_exit_pageio(); out5: ext4_exit_es(); return err; } static void __exit ext4_exit_fs(void) { ext4_exit_crypto(); ext4_destroy_lazyinit_thread(); unregister_as_ext2(); unregister_as_ext3(); unregister_filesystem(&ext4_fs_type); destroy_inodecache(); ext4_exit_mballoc(); ext4_exit_sysfs(); ext4_exit_system_zone(); ext4_exit_pageio(); ext4_exit_es(); } MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Fourth Extended Filesystem"); MODULE_LICENSE("GPL"); module_init(ext4_init_fs) module_exit(ext4_exit_fs)
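/*
 * Illustrative, hedged sketch (not part of this file): the quota_read and
 * quota_write hooks above serve the generic quota layer that user space
 * reaches through quotactl(2). A minimal separate user-space program that
 * queries the usage these hooks maintain could look like the following,
 * where argv[1] is the block device and argv[2] the uid; names follow the
 * quotactl(2) man page:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/quota.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		struct dqblk dq;
 *
 *		if (argc < 3)
 *			return 1;
 *		if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), argv[1],
 *			     atoi(argv[2]), (caddr_t)&dq)) {
 *			perror("quotactl");
 *			return 1;
 *		}
 *		printf("uid %s: %llu bytes used, soft limit %llu blocks\n",
 *		       argv[2], (unsigned long long)dq.dqb_curspace,
 *		       (unsigned long long)dq.dqb_bsoftlimit);
 *		return 0;
 *	}
 */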
./CrossVul/dataset_final_sorted/CWE-362/c/bad_1819_4
crossvul-cpp_data_bad_5097_1
#include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/memremap.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/sched.h> #include <linux/rwsem.h> #include <linux/hugetlb.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "internal.h" static struct page *no_page_table(struct vm_area_struct *vma, unsigned int flags) { /* * When core dumping an enormous anonymous area that nobody * has touched so far, we don't want to allocate unnecessary pages or * page tables. Return error instead of NULL to skip handle_mm_fault, * then get_dump_page() will return NULL to leave a hole in the dump. * But we can only make this optimization where a hole would surely * be zero-filled if handle_mm_fault() actually did handle it. */ if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) return ERR_PTR(-EFAULT); return NULL; } static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { /* No page to get reference */ if (flags & FOLL_GET) return -EFAULT; if (flags & FOLL_TOUCH) { pte_t entry = *pte; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); if (!pte_same(*pte, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); } } /* Proper page table entry exists, but no corresponding struct page */ return -EEXIST; } static struct page *follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) { struct mm_struct *mm = vma->vm_mm; struct dev_pagemap *pgmap = NULL; struct page *page; spinlock_t *ptl; pte_t *ptep, pte; retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; if (!pte_present(pte)) { swp_entry_t entry; /* * KSM's break_ksm() relies upon recognizing a ksm page * even while it is being migrated, so for that case we * need migration_entry_wait(). */ if (likely(!(flags & FOLL_MIGRATION))) goto no_page; if (pte_none(pte)) goto no_page; entry = pte_to_swp_entry(pte); if (!is_migration_entry(entry)) goto no_page; pte_unmap_unlock(ptep, ptl); migration_entry_wait(mm, pmd, address); goto retry; } if ((flags & FOLL_NUMA) && pte_protnone(pte)) goto no_page; if ((flags & FOLL_WRITE) && !pte_write(pte)) { pte_unmap_unlock(ptep, ptl); return NULL; } page = vm_normal_page(vma, address, pte); if (!page && pte_devmap(pte) && (flags & FOLL_GET)) { /* * Only return device mapping pages in the FOLL_GET case since * they are only valid while holding the pgmap reference. 
*/ pgmap = get_dev_pagemap(pte_pfn(pte), NULL); if (pgmap) page = pte_page(pte); else goto no_page; } else if (unlikely(!page)) { if (flags & FOLL_DUMP) { /* Avoid special (like zero) pages in core dumps */ page = ERR_PTR(-EFAULT); goto out; } if (is_zero_pfn(pte_pfn(pte))) { page = pte_page(pte); } else { int ret; ret = follow_pfn_pte(vma, address, ptep, flags); page = ERR_PTR(ret); goto out; } } if (flags & FOLL_SPLIT && PageTransCompound(page)) { int ret; get_page(page); pte_unmap_unlock(ptep, ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (ret) return ERR_PTR(ret); goto retry; } if (flags & FOLL_GET) { get_page(page); /* drop the pgmap reference now that we hold the page */ if (pgmap) { put_dev_pagemap(pgmap); pgmap = NULL; } } if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). */ mark_page_accessed(page); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Do not mlock pte-mapped THP */ if (PageTransCompound(page)) goto out; /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE * which might bounce very badly if there is contention. * * If the page is already locked, we don't need to * handle it now - vmscan will handle it later if and * when it attempts to reclaim the page. */ if (page->mapping && trylock_page(page)) { lru_add_drain(); /* push cached pages to LRU */ /* * Because we lock page here, and migration is * blocked by the pte's page reference, and we * know the page is still mapped, we don't even * need to check for file-cache page truncation. */ mlock_vma_page(page); unlock_page(page); } } out: pte_unmap_unlock(ptep, ptl); return page; no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return NULL; return no_page_table(vma, flags); } /** * follow_page_mask - look up a page descriptor from a user-virtual address * @vma: vm_area_struct mapping @address * @address: virtual address to look up * @flags: flags modifying lookup behaviour * @page_mask: on output, *page_mask is set according to the size of the page * * @flags can have FOLL_ flags set, defined in <linux/mm.h> * * Returns the mapped (struct page *), %NULL if no mapping exists, or * an error pointer if there is a mapping to something not represented * by a page descriptor (see also vm_normal_page()). 
*/ struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; *page_mask = 0; page = follow_huge_addr(mm, address, flags & FOLL_WRITE); if (!IS_ERR(page)) { BUG_ON(flags & FOLL_GET); return page; } pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) return no_page_table(vma, flags); pud = pud_offset(pgd, address); if (pud_none(*pud)) return no_page_table(vma, flags); if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pud(mm, address, pud, flags); if (page) return page; return no_page_table(vma, flags); } if (unlikely(pud_bad(*pud))) return no_page_table(vma, flags); pmd = pmd_offset(pud, address); if (pmd_none(*pmd)) return no_page_table(vma, flags); if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pmd(mm, address, pmd, flags); if (page) return page; return no_page_table(vma, flags); } if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) return no_page_table(vma, flags); if (pmd_devmap(*pmd)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags); spin_unlock(ptl); if (page) return page; } if (likely(!pmd_trans_huge(*pmd))) return follow_page_pte(vma, address, pmd, flags); ptl = pmd_lock(mm, pmd); if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); return follow_page_pte(vma, address, pmd, flags); } if (flags & FOLL_SPLIT) { int ret; page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); ret = 0; split_huge_pmd(vma, pmd, address); if (pmd_trans_unstable(pmd)) ret = -EBUSY; } else { get_page(page); spin_unlock(ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (pmd_none(*pmd)) return no_page_table(vma, flags); } return ret ? ERR_PTR(ret) : follow_page_pte(vma, address, pmd, flags); } page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(ptl); *page_mask = HPAGE_PMD_NR - 1; return page; } static int get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; int ret = -EFAULT; /* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return -EFAULT; if (address > TASK_SIZE) pgd = pgd_offset_k(address); else pgd = pgd_offset_gate(mm, address); BUG_ON(pgd_none(*pgd)); pud = pud_offset(pgd, address); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, address); if (pmd_none(*pmd)) return -EFAULT; VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, address); if (pte_none(*pte)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; *page = vm_normal_page(*vma, address, *pte); if (!*page) { if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte))) goto unmap; *page = pte_page(*pte); } get_page(*page); out: ret = 0; unmap: pte_unmap(pte); return ret; } /* * mmap_sem must be held on entry. If @nonblocking != NULL and * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released. * If it is, *@nonblocking will be set to 0 and -EBUSY returned. */ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking) { unsigned int fault_flags = 0; int ret; /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; /* For mm_populate(), just skip the stack guard page. 
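* Returning -ENOENT for it below makes __get_user_pages() skip this page and
* advance to the next one instead of faulting it in.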
*/ if ((*flags & FOLL_POPULATE) && (stack_guard_page_start(vma, address) || stack_guard_page_end(vma, address + PAGE_SIZE))) return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) fault_flags |= FAULT_FLAG_REMOTE; if (nonblocking) fault_flags |= FAULT_FLAG_ALLOW_RETRY; if (*flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (*flags & FOLL_TRIED) { VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); fault_flags |= FAULT_FLAG_TRIED; } ret = handle_mm_fault(vma, address, fault_flags); if (ret & VM_FAULT_ERROR) { if (ret & VM_FAULT_OOM) return -ENOMEM; if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) return -EFAULT; BUG(); } if (tsk) { if (ret & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; } if (ret & VM_FAULT_RETRY) { if (nonblocking) *nonblocking = 0; return -EBUSY; } /* * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when * necessary, even if maybe_mkwrite decided not to set pte_write. We * can thus safely do subsequent page lookups as if they were reads. * But only do so when looping for pte_write is futile: in some cases * userspace may also be wanting to write to the gotten user page, * which a read fault here might prevent (a readonly page might get * reCOWed by userspace write). */ if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) *flags &= ~FOLL_WRITE; return 0; } static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) { vm_flags_t vm_flags = vma->vm_flags; int write = (gup_flags & FOLL_WRITE); int foreign = (gup_flags & FOLL_REMOTE); if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; if (write) { if (!(vm_flags & VM_WRITE)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * We used to let the write,force case do COW in a * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could * set a breakpoint in a read-only mapping of an * executable, without corrupting the file (yet only * when that file had been opened for writing!). * Anon pages in shared mappings are surprising: now * just reject it. */ if (!is_cow_mapping(vm_flags)) return -EFAULT; } } else if (!(vm_flags & VM_READ)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * Is there actually any vma we can reach here which does not * have VM_MAYREAD set? */ if (!(vm_flags & VM_MAYREAD)) return -EFAULT; } /* * gups are always data accesses, not instruction * fetches, so execute=false here */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return -EFAULT; return 0; } /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * @nonblocking: whether waiting for disk IO or mmap_sem contention * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. 
* * Must be called with mmap_sem held. It may be released. See below. * * __get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * __get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If * the page is written to, set_page_dirty (or set_page_dirty_lock, as * appropriate) must be called after the page is finished with, and * before put_page is called. * * If @nonblocking != NULL, __get_user_pages will not wait for disk IO * or mmap_sem contention, and if waiting is needed to pin all pages, * *@nonblocking will be set to 0. Further, if @gup_flags does not * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in * this case. * * A caller using such a combination of @nonblocking and @gup_flags * must therefore hold the mmap_sem for reading only, and recognize * when it's been released. Otherwise, it must be held for either * reading or writing and will not be released. * * In most cases, get_user_pages or get_user_pages_fast should be used * instead of __get_user_pages. __get_user_pages should be used only if * you need some special @gup_flags. */ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) { long i = 0; unsigned int page_mask; struct vm_area_struct *vma = NULL; if (!nr_pages) return 0; VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); /* * If FOLL_FORCE is set then do not force a full fault as the hinting * fault information is unrelated to the reference behaviour of a task * using the address space */ if (!(gup_flags & FOLL_FORCE)) gup_flags |= FOLL_NUMA; do { struct page *page; unsigned int foll_flags = gup_flags; unsigned int page_increm; /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { vma = find_extend_vma(mm, start); if (!vma && in_gate_area(mm, start)) { int ret; ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, pages ? &pages[i] : NULL); if (ret) return i ? : ret; page_mask = 0; goto next_page; } if (!vma || check_vma_flags(vma, gup_flags)) return i ? : -EFAULT; if (is_vm_hugetlb_page(vma)) { i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, gup_flags); continue; } } retry: /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. */ if (unlikely(fatal_signal_pending(current))) return i ? i : -ERESTARTSYS; cond_resched(); page = follow_page_mask(vma, start, foll_flags, &page_mask); if (!page) { int ret; ret = faultin_page(tsk, vma, start, &foll_flags, nonblocking); switch (ret) { case 0: goto retry; case -EFAULT: case -ENOMEM: case -EHWPOISON: return i ? 
i : ret; case -EBUSY: return i; case -ENOENT: goto next_page; } BUG(); } else if (PTR_ERR(page) == -EEXIST) { /* * Proper page table entry exists, but no corresponding * struct page. */ goto next_page; } else if (IS_ERR(page)) { return i ? i : PTR_ERR(page); } if (pages) { pages[i] = page; flush_anon_page(vma, page, start); flush_dcache_page(page); page_mask = 0; } next_page: if (vmas) { vmas[i] = vma; page_mask = 0; } page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask); if (page_increm > nr_pages) page_increm = nr_pages; i += page_increm; start += page_increm * PAGE_SIZE; nr_pages -= page_increm; } while (nr_pages); return i; } EXPORT_SYMBOL(__get_user_pages); bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) { bool write = !!(fault_flags & FAULT_FLAG_WRITE); bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; if (!(vm_flags & vma->vm_flags)) return false; /* * The architecture might have a hardware protection * mechanism other than read/write that can deny access. * * gup always represents data access, not instruction * fetches, so execute=false here: */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return false; return true; } /* * fixup_user_fault() - manually resolve a user page fault * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @address: user address * @fault_flags:flags to pass down to handle_mm_fault() * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller * does not allow retry * * This is meant to be called in the specific scenario where for locking reasons * we try to access user memory in atomic context (within a pagefault_disable() * section), this returns -EFAULT, and we want to resolve the user fault before * trying again. * * Typically this is meant to be used by the futex code. * * The main difference with get_user_pages() is that this function will * unconditionally call handle_mm_fault() which will in turn perform all the * necessary SW fixup of the dirty and young bits in the PTE, while * get_user_pages() only guarantees to update these in the struct page. * * This is important for some architectures where those bits also gate the * access permission to the page because they are maintained in software. On * such architectures, gup() will not be enough to make a subsequent access * succeed. * * This function will not return with an unlocked mmap_sem. So it has not the * same semantics wrt the @mm->mmap_sem as does filemap_fault(). 
*/ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { struct vm_area_struct *vma; int ret, major = 0; if (unlocked) fault_flags |= FAULT_FLAG_ALLOW_RETRY; retry: vma = find_extend_vma(mm, address); if (!vma || address < vma->vm_start) return -EFAULT; if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; ret = handle_mm_fault(vma, address, fault_flags); major |= ret & VM_FAULT_MAJOR; if (ret & VM_FAULT_ERROR) { if (ret & VM_FAULT_OOM) return -ENOMEM; if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) return -EHWPOISON; if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) return -EFAULT; BUG(); } if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); if (!(fault_flags & FAULT_FLAG_TRIED)) { *unlocked = true; fault_flags &= ~FAULT_FLAG_ALLOW_RETRY; fault_flags |= FAULT_FLAG_TRIED; goto retry; } } if (tsk) { if (major) tsk->maj_flt++; else tsk->min_flt++; } return 0; } EXPORT_SYMBOL_GPL(fixup_user_fault); static __always_inline long __get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas, int *locked, bool notify_drop, unsigned int flags) { long ret, pages_done; bool lock_dropped; if (locked) { /* if VM_FAULT_RETRY can be returned, vmas become invalid */ BUG_ON(vmas); /* check caller initialized locked */ BUG_ON(*locked != 1); } if (pages) flags |= FOLL_GET; if (write) flags |= FOLL_WRITE; if (force) flags |= FOLL_FORCE; pages_done = 0; lock_dropped = false; for (;;) { ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, locked); if (!locked) /* VM_FAULT_RETRY couldn't trigger, bypass */ return ret; /* VM_FAULT_RETRY cannot return errors */ if (!*locked) { BUG_ON(ret < 0); BUG_ON(ret >= nr_pages); } if (!pages) /* If it's a prefault don't insist harder */ return ret; if (ret > 0) { nr_pages -= ret; pages_done += ret; if (!nr_pages) break; } if (*locked) { /* VM_FAULT_RETRY didn't trigger */ if (!pages_done) pages_done = ret; break; } /* VM_FAULT_RETRY triggered, so seek to the faulting offset */ pages += ret; start += ret << PAGE_SHIFT; /* * Repeat on the address that fired VM_FAULT_RETRY * without FAULT_FLAG_ALLOW_RETRY but with * FAULT_FLAG_TRIED. */ *locked = 1; lock_dropped = true; down_read(&mm->mmap_sem); ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, pages, NULL, NULL); if (ret != 1) { BUG_ON(ret > 1); if (!pages_done) pages_done = ret; break; } nr_pages--; pages_done++; if (!nr_pages) break; pages++; start += PAGE_SIZE; } if (notify_drop && lock_dropped && *locked) { /* * We must let the caller know we temporarily dropped the lock * and so the critical section protected by it was lost. */ up_read(&mm->mmap_sem); *locked = 0; } return pages_done; } /* * We can leverage the VM_FAULT_RETRY functionality in the page fault * paths better by using either get_user_pages_locked() or * get_user_pages_unlocked(). 
* * get_user_pages_locked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * do_something() * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * to: * * int locked = 1; * down_read(&mm->mmap_sem); * do_something() * get_user_pages_locked(tsk, mm, ..., pages, &locked); * if (locked) * up_read(&mm->mmap_sem); */ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, int *locked) { return __get_user_pages_locked(current, current->mm, start, nr_pages, write, force, pages, NULL, locked, true, FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages_locked); /* * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to * pass additional gup_flags as last parameter (like FOLL_HWPOISON). * * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the * caller if required (just like with __get_user_pages). "FOLL_GET", * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed * according to the parameters "pages", "write", "force" * respectively. */ __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, unsigned int gup_flags) { long ret; int locked = 1; down_read(&mm->mmap_sem); ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, pages, NULL, &locked, false, gup_flags); if (locked) up_read(&mm->mmap_sem); return ret; } EXPORT_SYMBOL(__get_user_pages_unlocked); /* * get_user_pages_unlocked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * with: * * get_user_pages_unlocked(tsk, mm, ..., pages); * * It is functionally equivalent to get_user_pages_fast so * get_user_pages_fast should be used instead, if the two parameters * "tsk" and "mm" are respectively equal to current and current->mm, * or if "force" shall be set to 1 (get_user_pages_fast misses the * "force" parameter). */ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages) { return __get_user_pages_unlocked(current, current->mm, start, nr_pages, write, force, pages, FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages_unlocked); /* * get_user_pages_remote() - pin user pages in memory * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @write: whether pages will be written to by the caller * @force: whether to force access even when user mapping is currently * protected (but never forces write access to shared mapping). * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held for read or write. * * get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. 
That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re-faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If write=0, the page must not be written to. If the page is written to, * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called * after the page is finished with, and before put_page is called. * * get_user_pages is typically used for fewer-copy IO operations, to get a * handle on the memory by some means other than accesses via the user virtual * addresses. The pages may be submitted for DMA to devices or accessed via * their kernel linear mapping (via the kmap APIs). Care should be taken to * use the correct cache flushing APIs. * * See also get_user_pages_fast, for performance critical applications. * * get_user_pages should be phased out in favor of * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing * should use get_user_pages because it cannot pass * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. */ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) { return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, pages, vmas, NULL, false, FOLL_TOUCH | FOLL_REMOTE); } EXPORT_SYMBOL(get_user_pages_remote); /* * This is the same as get_user_pages_remote(), just with a * less-flexible calling convention where we assume that the task * and mm being operated on are the current task's. We also * obviously don't pass FOLL_REMOTE in here. */ long get_user_pages(unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) { return __get_user_pages_locked(current, current->mm, start, nr_pages, write, force, pages, vmas, NULL, false, FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages); /** * populate_vma_page_range() - populate a range of pages in the vma. * @vma: target vma * @start: start address * @end: end address * @nonblocking: * * This takes care of mlocking the pages too if VM_LOCKED is set. * * return 0 on success, negative error code on error. * * vma->vm_mm->mmap_sem must be held. * * If @nonblocking is NULL, it may be held for read or write and will * be unperturbed. * * If @nonblocking is non-NULL, it must be held for read only and may be * released. If it's released, *@nonblocking will be set to 0. 
*/ long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking) { struct mm_struct *mm = vma->vm_mm; unsigned long nr_pages = (end - start) / PAGE_SIZE; int gup_flags; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK); VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma); VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; if (vma->vm_flags & VM_LOCKONFAULT) gup_flags &= ~FOLL_POPULATE; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW * and we would not want to dirty them for nothing. */ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; /* * We want mlock to succeed for regions that have any permissions * other than PROT_NONE. */ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) gup_flags |= FOLL_FORCE; /* * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ return __get_user_pages(current, mm, start, nr_pages, gup_flags, NULL, NULL, nonblocking); } /* * __mm_populate - populate and/or mlock pages within a range of address space. * * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap * flags. VMAs must be already marked with the desired vm_flags, and * mmap_sem must not be held. */ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; long ret = 0; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. */ if (!locked) { locked = 1; down_read(&mm->mmap_sem); vma = find_vma(mm, nstart); } else if (nstart >= vma->vm_end) vma = vma->vm_next; if (!vma || vma->vm_start >= end) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. populate_vma_page_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. */ ret = populate_vma_page_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) up_read(&mm->mmap_sem); return ret; /* 0 or negative error code */ } /** * get_dump_page() - pin user page in memory while writing it to core dump * @addr: user address * * Returns struct page pointer of user page pinned for dump, * to be freed afterwards by put_page(). * * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - * allowing a hole to be left in the corefile to save diskspace. * * Called without mmap_sem, but after all other threads have been killed. 
*/ #ifdef CONFIG_ELF_CORE struct page *get_dump_page(unsigned long addr) { struct vm_area_struct *vma; struct page *page; if (__get_user_pages(current, current->mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, NULL) < 1) return NULL; flush_cache_page(vma, addr, page_to_pfn(page)); return page; } #endif /* CONFIG_ELF_CORE */ /* * Generic RCU Fast GUP * * get_user_pages_fast attempts to pin user pages by walking the page * tables directly and avoids taking locks. Thus the walker needs to be * protected from page table pages being freed from under it, and should * block any THP splits. * * One way to achieve this is to have the walker disable interrupts, and * rely on IPIs from the TLB flushing code blocking before the page table * pages are freed. This is unsuitable for architectures that do not need * to broadcast an IPI when invalidating TLBs. * * Another way to achieve this is to batch up page table containing pages * belonging to more than one mm_user, then rcu_sched a callback to free those * pages. Disabling interrupts will allow the fast_gup walker to both block * the rcu_sched callback, and an IPI that we broadcast for splitting THPs * (which is a relatively rare event). The code below adopts this strategy. * * Before activating this code, please be aware that the following assumptions * are currently made: * * *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free * pages containing page tables. * * *) ptes can be read atomically by the architecture. * * *) access_ok is sufficient to validate userspace address ranges. * * The last two assumptions can be relaxed by the addition of helper functions. * * This code is based heavily on the PowerPC implementation by Nick Piggin. */ #ifdef CONFIG_HAVE_GENERIC_RCU_GUP #ifdef __HAVE_ARCH_PTE_SPECIAL static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { pte_t *ptep, *ptem; int ret = 0; ptem = ptep = pte_offset_map(&pmd, addr); do { /* * In the line below we are assuming that the pte can be read * atomically. If this is not the case for your architecture, * please wrap this in a helper function! * * for an example see gup_get_pte in arch/x86/mm/gup.c */ pte_t pte = READ_ONCE(*ptep); struct page *head, *page; /* * Similar to the PMD case below, NUMA hinting must take slow * path using the pte_protnone check. */ if (!pte_present(pte) || pte_special(pte) || pte_protnone(pte) || (write && !pte_write(pte))) goto pte_unmap; if (!arch_pte_access_permitted(pte, write)) goto pte_unmap; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); head = compound_head(page); if (!page_cache_get_speculative(head)) goto pte_unmap; if (unlikely(pte_val(pte) != pte_val(*ptep))) { put_page(head); goto pte_unmap; } VM_BUG_ON_PAGE(compound_head(page) != head, page); pages[*nr] = page; (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); ret = 1; pte_unmap: pte_unmap(ptem); return ret; } #else /* * If we can't determine whether or not a pte is special, then fail immediately * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not * to be special. * * For a futex to be placed on a THP tail page, get_futex_key requires a * __get_user_pages_fast implementation that can pin pages. Thus it's still * useful to have gup_huge_pmd even if we can't operate on ptes. 
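* The stub below therefore just reports zero pinned pages, which makes the
* fast walk bail out so callers fall back to the slow GUP path.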
*/ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { return 0; } #endif /* __HAVE_ARCH_PTE_SPECIAL */ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (write && !pmd_write(orig)) return 0; refs = 0; head = pmd_page(orig); page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { VM_BUG_ON_PAGE(compound_head(page) != head, page); pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } return 1; } static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (write && !pud_write(orig)) return 0; refs = 0; head = pud_page(orig); page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { VM_BUG_ON_PAGE(compound_head(page) != head, page); pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } return 1; } static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { int refs; struct page *head, *page; if (write && !pgd_write(orig)) return 0; refs = 0; head = pgd_page(orig); page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); do { VM_BUG_ON_PAGE(compound_head(page) != head, page); pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } return 1; } static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset(&pud, addr); do { pmd_t pmd = READ_ONCE(*pmdp); next = pmd_addr_end(addr, end); if (pmd_none(pmd)) return 0; if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { /* * NUMA hinting faults need to be handled in the GUP * slowpath for accounting purposes and so that they * can be serialised against THP migration. 
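* Bailing out of the fast walk below leaves the NUMA hinting fault to be
* taken and accounted on the slow path.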
*/ if (pmd_protnone(pmd)) return 0; if (!gup_huge_pmd(pmd, pmdp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { /* * architectures have different formats for * hugetlbfs pmds and THP pmds */ if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); return 1; } static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset(&pgd, addr); do { pud_t pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; if (unlikely(pud_huge(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. It will only return non-negative values. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { struct mm_struct *mm = current->mm; unsigned long addr, len, end; unsigned long next, flags; pgd_t *pgdp; int nr = 0; start &= PAGE_MASK; addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, start, len))) return 0; /* * Disable interrupts. We use the nested form as we can already have * interrupts disabled by get_futex_key. * * With interrupts disabled, we block page table pages from being * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h * for more details. * * We do not adopt an rcu_read_lock(.) here as we also want to * block IPIs that come from THPs splitting. */ local_irq_save(flags); pgdp = pgd_offset(mm, addr); do { pgd_t pgd = READ_ONCE(*pgdp); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) break; if (unlikely(pgd_huge(pgd))) { if (!gup_huge_pgd(pgd, pgdp, addr, next, write, pages, &nr)) break; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, write, pages, &nr)) break; } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) break; } while (pgdp++, addr = next, addr != end); local_irq_restore(flags); return nr; } /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @write: whether pages will be written to * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Attempt to pin user pages in memory without taking mm->mmap_sem. * If not successful, it will fall back to taking the lock and * calling get_user_pages(). * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. 
*/ int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { int nr, ret; start &= PAGE_MASK; nr = __get_user_pages_fast(start, nr_pages, write, pages); ret = nr; if (nr < nr_pages) { /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages); /* Have to be a bit careful with return values */ if (nr > 0) { if (ret < 0) ret = nr; else ret += nr; } } return ret; } #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */
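/*
 * Illustrative, hedged sketch (not an existing function in this file): the
 * caller-side discipline the comments above prescribe for the GUP API --
 * pin, use, dirty what was written, then drop every page reference.
 */
static inline long example_pin_write_release(unsigned long uaddr, int nr_pages,
					     struct page **pages)
{
	long i, pinned;

	/* Lockless attempt first; falls back to mmap_sem internally. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (pinned <= 0)
		return pinned;

	/* ... a device would DMA into the pinned pages at this point ... */

	for (i = 0; i < pinned; i++) {
		/* Written pages must be dirtied before the last put_page(). */
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return pinned;
}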
./CrossVul/dataset_final_sorted/CWE-362/c/bad_5097_1
crossvul-cpp_data_good_4963_0
/* * Timers abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/module.h> #include <linux/string.h> #include <sound/core.h> #include <sound/timer.h> #include <sound/control.h> #include <sound/info.h> #include <sound/minors.h> #include <sound/initval.h> #include <linux/kmod.h> #if IS_ENABLED(CONFIG_SND_HRTIMER) #define DEFAULT_TIMER_LIMIT 4 #elif IS_ENABLED(CONFIG_SND_RTCTIMER) #define DEFAULT_TIMER_LIMIT 2 #else #define DEFAULT_TIMER_LIMIT 1 #endif static int timer_limit = DEFAULT_TIMER_LIMIT; static int timer_tstamp_monotonic = 1; MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ALSA timer interface"); MODULE_LICENSE("GPL"); module_param(timer_limit, int, 0444); MODULE_PARM_DESC(timer_limit, "Maximum global timers in system."); module_param(timer_tstamp_monotonic, int, 0444); MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default)."); MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER); MODULE_ALIAS("devname:snd/timer"); struct snd_timer_user { struct snd_timer_instance *timeri; int tread; /* enhanced read with timestamps and events */ unsigned long ticks; unsigned long overrun; int qhead; int qtail; int qused; int queue_size; struct snd_timer_read *queue; struct snd_timer_tread *tqueue; spinlock_t qlock; unsigned long last_resolution; unsigned int filter; struct timespec tstamp; /* trigger tstamp */ wait_queue_head_t qchange_sleep; struct fasync_struct *fasync; struct mutex ioctl_lock; }; /* list of timers */ static LIST_HEAD(snd_timer_list); /* list of slave instances */ static LIST_HEAD(snd_timer_slave_list); /* lock for slave active lists */ static DEFINE_SPINLOCK(slave_active_lock); static DEFINE_MUTEX(register_mutex); static int snd_timer_free(struct snd_timer *timer); static int snd_timer_dev_free(struct snd_device *device); static int snd_timer_dev_register(struct snd_device *device); static int snd_timer_dev_disconnect(struct snd_device *device); static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left); /* * create a timer instance with the given owner string. * when timer is not NULL, increments the module counter */ static struct snd_timer_instance *snd_timer_instance_new(char *owner, struct snd_timer *timer) { struct snd_timer_instance *timeri; timeri = kzalloc(sizeof(*timeri), GFP_KERNEL); if (timeri == NULL) return NULL; timeri->owner = kstrdup(owner, GFP_KERNEL); if (! 
timeri->owner) { kfree(timeri); return NULL; } INIT_LIST_HEAD(&timeri->open_list); INIT_LIST_HEAD(&timeri->active_list); INIT_LIST_HEAD(&timeri->ack_list); INIT_LIST_HEAD(&timeri->slave_list_head); INIT_LIST_HEAD(&timeri->slave_active_head); timeri->timer = timer; if (timer && !try_module_get(timer->module)) { kfree(timeri->owner); kfree(timeri); return NULL; } return timeri; } /* * find a timer instance from the given timer id */ static struct snd_timer *snd_timer_find(struct snd_timer_id *tid) { struct snd_timer *timer = NULL; list_for_each_entry(timer, &snd_timer_list, device_list) { if (timer->tmr_class != tid->dev_class) continue; if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD || timer->tmr_class == SNDRV_TIMER_CLASS_PCM) && (timer->card == NULL || timer->card->number != tid->card)) continue; if (timer->tmr_device != tid->device) continue; if (timer->tmr_subdevice != tid->subdevice) continue; return timer; } return NULL; } #ifdef CONFIG_MODULES static void snd_timer_request(struct snd_timer_id *tid) { switch (tid->dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: if (tid->device < timer_limit) request_module("snd-timer-%i", tid->device); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (tid->card < snd_ecards_limit) request_module("snd-card-%i", tid->card); break; default: break; } } #endif /* * look for a master instance matching with the slave id of the given slave. * when found, relink the open_link of the slave. * * call this with register_mutex down. */ static void snd_timer_check_slave(struct snd_timer_instance *slave) { struct snd_timer *timer; struct snd_timer_instance *master; /* FIXME: it's really dumb to look up all entries.. */ list_for_each_entry(timer, &snd_timer_list, device_list) { list_for_each_entry(master, &timer->open_list_head, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; spin_unlock_irq(&slave_active_lock); return; } } } } /* * look for slave instances matching with the slave id of the given master. * when found, relink the open_link of slaves. * * call this with register_mutex down. */ static void snd_timer_check_master(struct snd_timer_instance *master) { struct snd_timer_instance *slave, *tmp; /* check all pending slaves */ list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) { if (slave->slave_class == master->slave_class && slave->slave_id == master->slave_id) { list_move_tail(&slave->open_list, &master->slave_list_head); spin_lock_irq(&slave_active_lock); slave->master = master; slave->timer = master->timer; if (slave->flags & SNDRV_TIMER_IFLG_RUNNING) list_add_tail(&slave->active_list, &master->slave_active_head); spin_unlock_irq(&slave_active_lock); } } } /* * open a timer instance * when opening a master, the slave id must be given here. 
*/ int snd_timer_open(struct snd_timer_instance **ti, char *owner, struct snd_timer_id *tid, unsigned int slave_id) { struct snd_timer *timer; struct snd_timer_instance *timeri = NULL; if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) { /* open a slave instance */ if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE || tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) { pr_debug("ALSA: timer: invalid slave class %i\n", tid->dev_sclass); return -EINVAL; } mutex_lock(&register_mutex); timeri = snd_timer_instance_new(owner, NULL); if (!timeri) { mutex_unlock(&register_mutex); return -ENOMEM; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = tid->device; timeri->flags |= SNDRV_TIMER_IFLG_SLAVE; list_add_tail(&timeri->open_list, &snd_timer_slave_list); snd_timer_check_slave(timeri); mutex_unlock(&register_mutex); *ti = timeri; return 0; } /* open a master instance */ mutex_lock(&register_mutex); timer = snd_timer_find(tid); #ifdef CONFIG_MODULES if (!timer) { mutex_unlock(&register_mutex); snd_timer_request(tid); mutex_lock(&register_mutex); timer = snd_timer_find(tid); } #endif if (!timer) { mutex_unlock(&register_mutex); return -ENODEV; } if (!list_empty(&timer->open_list_head)) { timeri = list_entry(timer->open_list_head.next, struct snd_timer_instance, open_list); if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) { mutex_unlock(&register_mutex); return -EBUSY; } } timeri = snd_timer_instance_new(owner, timer); if (!timeri) { mutex_unlock(&register_mutex); return -ENOMEM; } timeri->slave_class = tid->dev_sclass; timeri->slave_id = slave_id; if (list_empty(&timer->open_list_head) && timer->hw.open) timer->hw.open(timer); list_add_tail(&timeri->open_list, &timer->open_list_head); snd_timer_check_master(timeri); mutex_unlock(&register_mutex); *ti = timeri; return 0; } static int _snd_timer_stop(struct snd_timer_instance *timeri, int keep_flag, int event); /* * close a timer instance */ int snd_timer_close(struct snd_timer_instance *timeri) { struct snd_timer *timer = NULL; struct snd_timer_instance *slave, *tmp; if (snd_BUG_ON(!timeri)) return -ENXIO; /* force to stop the timer */ snd_timer_stop(timeri); if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { /* wait, until the active callback is finished */ spin_lock_irq(&slave_active_lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&slave_active_lock); udelay(10); spin_lock_irq(&slave_active_lock); } spin_unlock_irq(&slave_active_lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); mutex_unlock(&register_mutex); } else { timer = timeri->timer; if (snd_BUG_ON(!timer)) goto out; /* wait, until the active callback is finished */ spin_lock_irq(&timer->lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { spin_unlock_irq(&timer->lock); udelay(10); spin_lock_irq(&timer->lock); } spin_unlock_irq(&timer->lock); mutex_lock(&register_mutex); list_del(&timeri->open_list); if (timer && list_empty(&timer->open_list_head) && timer->hw.close) timer->hw.close(timer); /* remove slave links */ list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head, open_list) { spin_lock_irq(&slave_active_lock); _snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION); list_move_tail(&slave->open_list, &snd_timer_slave_list); slave->master = NULL; slave->timer = NULL; spin_unlock_irq(&slave_active_lock); } mutex_unlock(&register_mutex); } out: if (timeri->private_free) timeri->private_free(timeri); kfree(timeri->owner); kfree(timeri); if (timer) module_put(timer->module); return 0; } unsigned long snd_timer_resolution(struct 
snd_timer_instance *timeri) { struct snd_timer * timer; if (timeri == NULL) return 0; if ((timer = timeri->timer) != NULL) { if (timer->hw.c_resolution) return timer->hw.c_resolution(timer); return timer->hw.resolution; } return 0; } static void snd_timer_notify1(struct snd_timer_instance *ti, int event) { struct snd_timer *timer; unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ts; struct timespec tstamp; if (timer_tstamp_monotonic) ktime_get_ts(&tstamp); else getnstimeofday(&tstamp); if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START || event > SNDRV_TIMER_EVENT_PAUSE)) return; if (event == SNDRV_TIMER_EVENT_START || event == SNDRV_TIMER_EVENT_CONTINUE) resolution = snd_timer_resolution(ti); if (ti->ccallback) ti->ccallback(ti, event, &tstamp, resolution); if (ti->flags & SNDRV_TIMER_IFLG_SLAVE) return; timer = ti->timer; if (timer == NULL) return; if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) return; spin_lock_irqsave(&timer->lock, flags); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ti, event + 100, &tstamp, resolution); spin_unlock_irqrestore(&timer->lock, flags); } static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri, unsigned long sticks) { list_move_tail(&timeri->active_list, &timer->active_list_head); if (timer->running) { if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) goto __start_now; timer->flags |= SNDRV_TIMER_FLG_RESCHED; timeri->flags |= SNDRV_TIMER_IFLG_START; return 1; /* delayed start */ } else { timer->sticks = sticks; timer->hw.start(timer); __start_now: timer->running++; timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; return 0; } } static int snd_timer_start_slave(struct snd_timer_instance *timeri) { unsigned long flags; spin_lock_irqsave(&slave_active_lock, flags); timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; if (timeri->master) list_add_tail(&timeri->active_list, &timeri->master->slave_active_head); spin_unlock_irqrestore(&slave_active_lock, flags); return 1; /* delayed start */ } /* * start the timer instance */ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) { struct snd_timer *timer; int result = -EINVAL; unsigned long flags; if (timeri == NULL || ticks < 1) return -EINVAL; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { result = snd_timer_start_slave(timeri); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } timer = timeri->timer; if (timer == NULL) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); timeri->ticks = timeri->cticks = ticks; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, ticks); spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } static int _snd_timer_stop(struct snd_timer_instance * timeri, int keep_flag, int event) { struct snd_timer *timer; unsigned long flags; if (snd_BUG_ON(!timeri)) return -ENXIO; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { if (!keep_flag) { spin_lock_irqsave(&slave_active_lock, flags); timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; spin_unlock_irqrestore(&slave_active_lock, flags); } goto __end; } timer = timeri->timer; if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); list_del_init(&timeri->ack_list); list_del_init(&timeri->active_list); if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) && !(--timer->running)) { timer->hw.stop(timer); if (timer->flags & SNDRV_TIMER_FLG_RESCHED) { timer->flags &= ~SNDRV_TIMER_FLG_RESCHED; snd_timer_reschedule(timer, 0); if (timer->flags & SNDRV_TIMER_FLG_CHANGE) 
{ timer->flags &= ~SNDRV_TIMER_FLG_CHANGE; timer->hw.start(timer); } } } if (!keep_flag) timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); spin_unlock_irqrestore(&timer->lock, flags); __end: if (event != SNDRV_TIMER_EVENT_RESOLUTION) snd_timer_notify1(timeri, event); return 0; } /* * stop the timer instance. * * do not call this from the timer callback! */ int snd_timer_stop(struct snd_timer_instance *timeri) { struct snd_timer *timer; unsigned long flags; int err; err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP); if (err < 0) return err; timer = timeri->timer; if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); timeri->cticks = timeri->ticks; timeri->pticks = 0; spin_unlock_irqrestore(&timer->lock, flags); return 0; } /* * start again.. the tick is kept. */ int snd_timer_continue(struct snd_timer_instance *timeri) { struct snd_timer *timer; int result = -EINVAL; unsigned long flags; if (timeri == NULL) return result; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) return snd_timer_start_slave(timeri); timer = timeri->timer; if (! timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); if (!timeri->cticks) timeri->cticks = 1; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, timer->sticks); spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE); return result; } /* * pause.. remember the ticks left */ int snd_timer_pause(struct snd_timer_instance * timeri) { return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE); } /* * reschedule the timer * * start pending instances and check the scheduling ticks. * when the scheduling ticks is changed set CHANGE flag to reprogram the timer. */ static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti; unsigned long ticks = ~0UL; list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->flags & SNDRV_TIMER_IFLG_START) { ti->flags &= ~SNDRV_TIMER_IFLG_START; ti->flags |= SNDRV_TIMER_IFLG_RUNNING; timer->running++; } if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) { if (ticks > ti->cticks) ticks = ti->cticks; } } if (ticks == ~0UL) { timer->flags &= ~SNDRV_TIMER_FLG_RESCHED; return; } if (ticks > timer->hw.ticks) ticks = timer->hw.ticks; if (ticks_left != ticks) timer->flags |= SNDRV_TIMER_FLG_CHANGE; timer->sticks = ticks; } /* * timer tasklet * */ static void snd_timer_tasklet(unsigned long arg) { struct snd_timer *timer = (struct snd_timer *) arg; struct snd_timer_instance *ti; struct list_head *p; unsigned long resolution, ticks; unsigned long flags; spin_lock_irqsave(&timer->lock, flags); /* now process all callbacks */ while (!list_empty(&timer->sack_list_head)) { p = timer->sack_list_head.next; /* get first item */ ti = list_entry(p, struct snd_timer_instance, ack_list); /* remove from ack_list and make empty */ list_del_init(p); ticks = ti->pticks; ti->pticks = 0; resolution = ti->resolution; ti->flags |= SNDRV_TIMER_IFLG_CALLBACK; spin_unlock(&timer->lock); if (ti->callback) ti->callback(ti, resolution, ticks); spin_lock(&timer->lock); ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK; } spin_unlock_irqrestore(&timer->lock, flags); } /* * timer interrupt * * ticks_left is usually equal to timer->sticks. 
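 * Editor's note: on each hardware tick the handler below decrements every
 * running instance's cticks by ticks_left, queues expired instances on
 * ack_list_head (fast callbacks, invoked right here) or sack_list_head
 * (deferred to snd_timer_tasklet()), and then reprograms or stops the
 * hardware as needed.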
* */ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left) { struct snd_timer_instance *ti, *ts, *tmp; unsigned long resolution, ticks; struct list_head *p, *ack_list_head; unsigned long flags; int use_tasklet = 0; if (timer == NULL) return; spin_lock_irqsave(&timer->lock, flags); /* remember the current resolution */ if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; /* loop for all active instances * Here we cannot use list_for_each_entry because the active_list of a * processed instance is relinked to done_list_head before the callback * is called. */ list_for_each_entry_safe(ti, tmp, &timer->active_list_head, active_list) { if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING)) continue; ti->pticks += ticks_left; ti->resolution = resolution; if (ti->cticks < ticks_left) ti->cticks = 0; else ti->cticks -= ticks_left; if (ti->cticks) /* not expired */ continue; if (ti->flags & SNDRV_TIMER_IFLG_AUTO) { ti->cticks = ti->ticks; } else { ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING; if (--timer->running) list_del_init(&ti->active_list); } if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) || (ti->flags & SNDRV_TIMER_IFLG_FAST)) ack_list_head = &timer->ack_list_head; else ack_list_head = &timer->sack_list_head; if (list_empty(&ti->ack_list)) list_add_tail(&ti->ack_list, ack_list_head); list_for_each_entry(ts, &ti->slave_active_head, active_list) { ts->pticks = ti->pticks; ts->resolution = resolution; if (list_empty(&ts->ack_list)) list_add_tail(&ts->ack_list, ack_list_head); } } if (timer->flags & SNDRV_TIMER_FLG_RESCHED) snd_timer_reschedule(timer, timer->sticks); if (timer->running) { if (timer->hw.flags & SNDRV_TIMER_HW_STOP) { timer->hw.stop(timer); timer->flags |= SNDRV_TIMER_FLG_CHANGE; } if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) || (timer->flags & SNDRV_TIMER_FLG_CHANGE)) { /* restart timer */ timer->flags &= ~SNDRV_TIMER_FLG_CHANGE; timer->hw.start(timer); } } else { timer->hw.stop(timer); } /* now process all fast callbacks */ while (!list_empty(&timer->ack_list_head)) { p = timer->ack_list_head.next; /* get first item */ ti = list_entry(p, struct snd_timer_instance, ack_list); /* remove from ack_list and make empty */ list_del_init(p); ticks = ti->pticks; ti->pticks = 0; ti->flags |= SNDRV_TIMER_IFLG_CALLBACK; spin_unlock(&timer->lock); if (ti->callback) ti->callback(ti, resolution, ticks); spin_lock(&timer->lock); ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK; } /* do we have any slow callbacks? 
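 * (editor's note: i.e. instances queued on sack_list_head above, which
 * snd_timer_tasklet() drains outside hard-IRQ context)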
*/ use_tasklet = !list_empty(&timer->sack_list_head); spin_unlock_irqrestore(&timer->lock, flags); if (use_tasklet) tasklet_schedule(&timer->task_queue); } /* */ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, struct snd_timer **rtimer) { struct snd_timer *timer; int err; static struct snd_device_ops ops = { .dev_free = snd_timer_dev_free, .dev_register = snd_timer_dev_register, .dev_disconnect = snd_timer_dev_disconnect, }; if (snd_BUG_ON(!tid)) return -EINVAL; if (rtimer) *rtimer = NULL; timer = kzalloc(sizeof(*timer), GFP_KERNEL); if (!timer) return -ENOMEM; timer->tmr_class = tid->dev_class; timer->card = card; timer->tmr_device = tid->device; timer->tmr_subdevice = tid->subdevice; if (id) strlcpy(timer->id, id, sizeof(timer->id)); INIT_LIST_HEAD(&timer->device_list); INIT_LIST_HEAD(&timer->open_list_head); INIT_LIST_HEAD(&timer->active_list_head); INIT_LIST_HEAD(&timer->ack_list_head); INIT_LIST_HEAD(&timer->sack_list_head); spin_lock_init(&timer->lock); tasklet_init(&timer->task_queue, snd_timer_tasklet, (unsigned long)timer); if (card != NULL) { timer->module = card->module; err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops); if (err < 0) { snd_timer_free(timer); return err; } } if (rtimer) *rtimer = timer; return 0; } static int snd_timer_free(struct snd_timer *timer) { if (!timer) return 0; mutex_lock(&register_mutex); if (! list_empty(&timer->open_list_head)) { struct list_head *p, *n; struct snd_timer_instance *ti; pr_warn("ALSA: timer %p is busy?\n", timer); list_for_each_safe(p, n, &timer->open_list_head) { list_del_init(p); ti = list_entry(p, struct snd_timer_instance, open_list); ti->timer = NULL; } } list_del(&timer->device_list); mutex_unlock(&register_mutex); if (timer->private_free) timer->private_free(timer); kfree(timer); return 0; } static int snd_timer_dev_free(struct snd_device *device) { struct snd_timer *timer = device->device_data; return snd_timer_free(timer); } static int snd_timer_dev_register(struct snd_device *dev) { struct snd_timer *timer = dev->device_data; struct snd_timer *timer1; if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop)) return -ENXIO; if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) && !timer->hw.resolution && timer->hw.c_resolution == NULL) return -EINVAL; mutex_lock(&register_mutex); list_for_each_entry(timer1, &snd_timer_list, device_list) { if (timer1->tmr_class > timer->tmr_class) break; if (timer1->tmr_class < timer->tmr_class) continue; if (timer1->card && timer->card) { if (timer1->card->number > timer->card->number) break; if (timer1->card->number < timer->card->number) continue; } if (timer1->tmr_device > timer->tmr_device) break; if (timer1->tmr_device < timer->tmr_device) continue; if (timer1->tmr_subdevice > timer->tmr_subdevice) break; if (timer1->tmr_subdevice < timer->tmr_subdevice) continue; /* conflicts.. */ mutex_unlock(&register_mutex); return -EBUSY; } list_add_tail(&timer->device_list, &timer1->device_list); mutex_unlock(&register_mutex); return 0; } static int snd_timer_dev_disconnect(struct snd_device *device) { struct snd_timer *timer = device->device_data; mutex_lock(&register_mutex); list_del_init(&timer->device_list); mutex_unlock(&register_mutex); return 0; } void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp) { unsigned long flags; unsigned long resolution = 0; struct snd_timer_instance *ti, *ts; if (! 
(timer->hw.flags & SNDRV_TIMER_HW_SLAVE)) return; if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART || event > SNDRV_TIMER_EVENT_MRESUME)) return; spin_lock_irqsave(&timer->lock, flags); if (event == SNDRV_TIMER_EVENT_MSTART || event == SNDRV_TIMER_EVENT_MCONTINUE || event == SNDRV_TIMER_EVENT_MRESUME) { if (timer->hw.c_resolution) resolution = timer->hw.c_resolution(timer); else resolution = timer->hw.resolution; } list_for_each_entry(ti, &timer->active_list_head, active_list) { if (ti->ccallback) ti->ccallback(ti, event, tstamp, resolution); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) ts->ccallback(ts, event, tstamp, resolution); } spin_unlock_irqrestore(&timer->lock, flags); } /* * exported functions for global timers */ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer) { struct snd_timer_id tid; tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = -1; tid.device = device; tid.subdevice = 0; return snd_timer_new(NULL, id, &tid, rtimer); } int snd_timer_global_free(struct snd_timer *timer) { return snd_timer_free(timer); } int snd_timer_global_register(struct snd_timer *timer) { struct snd_device dev; memset(&dev, 0, sizeof(dev)); dev.device_data = timer; return snd_timer_dev_register(&dev); } /* * System timer */ struct snd_timer_system_private { struct timer_list tlist; unsigned long last_expires; unsigned long last_jiffies; unsigned long correction; }; static void snd_timer_s_function(unsigned long data) { struct snd_timer *timer = (struct snd_timer *)data; struct snd_timer_system_private *priv = timer->private_data; unsigned long jiff = jiffies; if (time_after(jiff, priv->last_expires)) priv->correction += (long)jiff - (long)priv->last_expires; snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies); } static int snd_timer_s_start(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long njiff; priv = (struct snd_timer_system_private *) timer->private_data; njiff = (priv->last_jiffies = jiffies); if (priv->correction > timer->sticks - 1) { priv->correction -= timer->sticks - 1; njiff++; } else { njiff += timer->sticks - priv->correction; priv->correction = 0; } priv->last_expires = priv->tlist.expires = njiff; add_timer(&priv->tlist); return 0; } static int snd_timer_s_stop(struct snd_timer * timer) { struct snd_timer_system_private *priv; unsigned long jiff; priv = (struct snd_timer_system_private *) timer->private_data; del_timer(&priv->tlist); jiff = jiffies; if (time_before(jiff, priv->last_expires)) timer->sticks = priv->last_expires - jiff; else timer->sticks = 1; priv->correction = 0; return 0; } static struct snd_timer_hardware snd_timer_system = { .flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET, .resolution = 1000000000L / HZ, .ticks = 10000000L, .start = snd_timer_s_start, .stop = snd_timer_s_stop }; static void snd_timer_free_system(struct snd_timer *timer) { kfree(timer->private_data); } static int snd_timer_register_system(void) { struct snd_timer *timer; struct snd_timer_system_private *priv; int err; err = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer); if (err < 0) return err; strcpy(timer->name, "system timer"); timer->hw = snd_timer_system; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) { snd_timer_free(timer); return -ENOMEM; } setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer); timer->private_data = priv; timer->private_free = snd_timer_free_system; return 
snd_timer_global_register(timer); } #ifdef CONFIG_SND_PROC_FS /* * Info interface */ static void snd_timer_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_timer *timer; struct snd_timer_instance *ti; mutex_lock(&register_mutex); list_for_each_entry(timer, &snd_timer_list, device_list) { switch (timer->tmr_class) { case SNDRV_TIMER_CLASS_GLOBAL: snd_iprintf(buffer, "G%i: ", timer->tmr_device); break; case SNDRV_TIMER_CLASS_CARD: snd_iprintf(buffer, "C%i-%i: ", timer->card->number, timer->tmr_device); break; case SNDRV_TIMER_CLASS_PCM: snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number, timer->tmr_device, timer->tmr_subdevice); break; default: snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class, timer->card ? timer->card->number : -1, timer->tmr_device, timer->tmr_subdevice); } snd_iprintf(buffer, "%s :", timer->name); if (timer->hw.resolution) snd_iprintf(buffer, " %lu.%03luus (%lu ticks)", timer->hw.resolution / 1000, timer->hw.resolution % 1000, timer->hw.ticks); if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) snd_iprintf(buffer, " SLAVE"); snd_iprintf(buffer, "\n"); list_for_each_entry(ti, &timer->open_list_head, open_list) snd_iprintf(buffer, " Client %s : %s\n", ti->owner ? ti->owner : "unknown", ti->flags & (SNDRV_TIMER_IFLG_START | SNDRV_TIMER_IFLG_RUNNING) ? "running" : "stopped"); } mutex_unlock(&register_mutex); } static struct snd_info_entry *snd_timer_proc_entry; static void __init snd_timer_proc_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "timers", NULL); if (entry != NULL) { entry->c.text.read = snd_timer_proc_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); entry = NULL; } } snd_timer_proc_entry = entry; } static void __exit snd_timer_proc_done(void) { snd_info_free_entry(snd_timer_proc_entry); } #else /* !CONFIG_SND_PROC_FS */ #define snd_timer_proc_init() #define snd_timer_proc_done() #endif /* * USER SPACE interface */ static void snd_timer_user_interrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_read *r; int prev; spin_lock(&tu->qlock); if (tu->qused > 0) { prev = tu->qtail == 0 ? 
tu->queue_size - 1 : tu->qtail - 1; r = &tu->queue[prev]; if (r->resolution == resolution) { r->ticks += ticks; goto __wake; } } if (tu->qused >= tu->queue_size) { tu->overrun++; } else { r = &tu->queue[tu->qtail++]; tu->qtail %= tu->queue_size; r->resolution = resolution; r->ticks = ticks; tu->qused++; } __wake: spin_unlock(&tu->qlock); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu, struct snd_timer_tread *tread) { if (tu->qused >= tu->queue_size) { tu->overrun++; } else { memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread)); tu->qtail %= tu->queue_size; tu->qused++; } } static void snd_timer_user_ccallback(struct snd_timer_instance *timeri, int event, struct timespec *tstamp, unsigned long resolution) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread r1; unsigned long flags; if (event >= SNDRV_TIMER_EVENT_START && event <= SNDRV_TIMER_EVENT_PAUSE) tu->tstamp = *tstamp; if ((tu->filter & (1 << event)) == 0 || !tu->tread) return; r1.event = event; r1.tstamp = *tstamp; r1.val = resolution; spin_lock_irqsave(&tu->qlock, flags); snd_timer_user_append_to_tqueue(tu, &r1); spin_unlock_irqrestore(&tu->qlock, flags); kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri, unsigned long resolution, unsigned long ticks) { struct snd_timer_user *tu = timeri->callback_data; struct snd_timer_tread *r, r1; struct timespec tstamp; int prev, append = 0; memset(&tstamp, 0, sizeof(tstamp)); spin_lock(&tu->qlock); if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) | (1 << SNDRV_TIMER_EVENT_TICK))) == 0) { spin_unlock(&tu->qlock); return; } if (tu->last_resolution != resolution || ticks > 0) { if (timer_tstamp_monotonic) ktime_get_ts(&tstamp); else getnstimeofday(&tstamp); } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) && tu->last_resolution != resolution) { r1.event = SNDRV_TIMER_EVENT_RESOLUTION; r1.tstamp = tstamp; r1.val = resolution; snd_timer_user_append_to_tqueue(tu, &r1); tu->last_resolution = resolution; append++; } if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0) goto __wake; if (ticks == 0) goto __wake; if (tu->qused > 0) { prev = tu->qtail == 0 ? 
tu->queue_size - 1 : tu->qtail - 1; r = &tu->tqueue[prev]; if (r->event == SNDRV_TIMER_EVENT_TICK) { r->tstamp = tstamp; r->val += ticks; append++; goto __wake; } } r1.event = SNDRV_TIMER_EVENT_TICK; r1.tstamp = tstamp; r1.val = ticks; snd_timer_user_append_to_tqueue(tu, &r1); append++; __wake: spin_unlock(&tu->qlock); if (append == 0) return; kill_fasync(&tu->fasync, SIGIO, POLL_IN); wake_up(&tu->qchange_sleep); } static int snd_timer_user_open(struct inode *inode, struct file *file) { struct snd_timer_user *tu; int err; err = nonseekable_open(inode, file); if (err < 0) return err; tu = kzalloc(sizeof(*tu), GFP_KERNEL); if (tu == NULL) return -ENOMEM; spin_lock_init(&tu->qlock); init_waitqueue_head(&tu->qchange_sleep); mutex_init(&tu->ioctl_lock); tu->ticks = 1; tu->queue_size = 128; tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) { kfree(tu); return -ENOMEM; } file->private_data = tu; return 0; } static int snd_timer_user_release(struct inode *inode, struct file *file) { struct snd_timer_user *tu; if (file->private_data) { tu = file->private_data; file->private_data = NULL; mutex_lock(&tu->ioctl_lock); if (tu->timeri) snd_timer_close(tu->timeri); mutex_unlock(&tu->ioctl_lock); kfree(tu->queue); kfree(tu->tqueue); kfree(tu); } return 0; } static void snd_timer_user_zero_id(struct snd_timer_id *id) { id->dev_class = SNDRV_TIMER_CLASS_NONE; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = -1; id->device = -1; id->subdevice = -1; } static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer) { id->dev_class = timer->tmr_class; id->dev_sclass = SNDRV_TIMER_SCLASS_NONE; id->card = timer->card ? timer->card->number : -1; id->device = timer->tmr_device; id->subdevice = timer->tmr_subdevice; } static int snd_timer_user_next_device(struct snd_timer_id __user *_tid) { struct snd_timer_id id; struct snd_timer *timer; struct list_head *p; if (copy_from_user(&id, _tid, sizeof(id))) return -EFAULT; mutex_lock(&register_mutex); if (id.dev_class < 0) { /* first item */ if (list_empty(&snd_timer_list)) snd_timer_user_zero_id(&id); else { timer = list_entry(snd_timer_list.next, struct snd_timer, device_list); snd_timer_user_copy_id(&id, timer); } } else { switch (id.dev_class) { case SNDRV_TIMER_CLASS_GLOBAL: id.device = id.device < 0 ? 
0 : id.device + 1; list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device >= id.device) { snd_timer_user_copy_id(&id, timer); break; } } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; case SNDRV_TIMER_CLASS_CARD: case SNDRV_TIMER_CLASS_PCM: if (id.card < 0) { id.card = 0; } else { if (id.device < 0) { id.device = 0; } else { if (id.subdevice < 0) { id.subdevice = 0; } else { id.subdevice++; } } } list_for_each(p, &snd_timer_list) { timer = list_entry(p, struct snd_timer, device_list); if (timer->tmr_class > id.dev_class) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_class < id.dev_class) continue; if (timer->card->number > id.card) { snd_timer_user_copy_id(&id, timer); break; } if (timer->card->number < id.card) continue; if (timer->tmr_device > id.device) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_device < id.device) continue; if (timer->tmr_subdevice > id.subdevice) { snd_timer_user_copy_id(&id, timer); break; } if (timer->tmr_subdevice < id.subdevice) continue; snd_timer_user_copy_id(&id, timer); break; } if (p == &snd_timer_list) snd_timer_user_zero_id(&id); break; default: snd_timer_user_zero_id(&id); } } mutex_unlock(&register_mutex); if (copy_to_user(_tid, &id, sizeof(*_tid))) return -EFAULT; return 0; } static int snd_timer_user_ginfo(struct file *file, struct snd_timer_ginfo __user *_ginfo) { struct snd_timer_ginfo *ginfo; struct snd_timer_id tid; struct snd_timer *t; struct list_head *p; int err = 0; ginfo = memdup_user(_ginfo, sizeof(*ginfo)); if (IS_ERR(ginfo)) return PTR_ERR(ginfo); tid = ginfo->tid; memset(ginfo, 0, sizeof(*ginfo)); ginfo->tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { ginfo->card = t->card ?
t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) ginfo->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(ginfo->id, t->id, sizeof(ginfo->id)); strlcpy(ginfo->name, t->name, sizeof(ginfo->name)); ginfo->resolution = t->hw.resolution; if (t->hw.resolution_min > 0) { ginfo->resolution_min = t->hw.resolution_min; ginfo->resolution_max = t->hw.resolution_max; } list_for_each(p, &t->open_list_head) { ginfo->clients++; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo))) err = -EFAULT; kfree(ginfo); return err; } static int snd_timer_user_gparams(struct file *file, struct snd_timer_gparams __user *_gparams) { struct snd_timer_gparams gparams; struct snd_timer *t; int err; if (copy_from_user(&gparams, _gparams, sizeof(gparams))) return -EFAULT; mutex_lock(&register_mutex); t = snd_timer_find(&gparams.tid); if (!t) { err = -ENODEV; goto _error; } if (!list_empty(&t->open_list_head)) { err = -EBUSY; goto _error; } if (!t->hw.set_period) { err = -ENOSYS; goto _error; } err = t->hw.set_period(t, gparams.period_num, gparams.period_den); _error: mutex_unlock(&register_mutex); return err; } static int snd_timer_user_gstatus(struct file *file, struct snd_timer_gstatus __user *_gstatus) { struct snd_timer_gstatus gstatus; struct snd_timer_id tid; struct snd_timer *t; int err = 0; if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus))) return -EFAULT; tid = gstatus.tid; memset(&gstatus, 0, sizeof(gstatus)); gstatus.tid = tid; mutex_lock(&register_mutex); t = snd_timer_find(&tid); if (t != NULL) { if (t->hw.c_resolution) gstatus.resolution = t->hw.c_resolution(t); else gstatus.resolution = t->hw.resolution; if (t->hw.precise_resolution) { t->hw.precise_resolution(t, &gstatus.resolution_num, &gstatus.resolution_den); } else { gstatus.resolution_num = gstatus.resolution; gstatus.resolution_den = 1000000000uL; } } else { err = -ENODEV; } mutex_unlock(&register_mutex); if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus))) err = -EFAULT; return err; } static int snd_timer_user_tselect(struct file *file, struct snd_timer_select __user *_tselect) { struct snd_timer_user *tu; struct snd_timer_select tselect; char str[32]; int err = 0; tu = file->private_data; if (tu->timeri) { snd_timer_close(tu->timeri); tu->timeri = NULL; } if (copy_from_user(&tselect, _tselect, sizeof(tselect))) { err = -EFAULT; goto __err; } sprintf(str, "application %i", current->pid); if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE) tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION; err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid); if (err < 0) goto __err; kfree(tu->queue); tu->queue = NULL; kfree(tu->tqueue); tu->tqueue = NULL; if (tu->tread) { tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread), GFP_KERNEL); if (tu->tqueue == NULL) err = -ENOMEM; } else { tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) err = -ENOMEM; } if (err < 0) { snd_timer_close(tu->timeri); tu->timeri = NULL; } else { tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST; tu->timeri->callback = tu->tread ? 
snd_timer_user_tinterrupt : snd_timer_user_interrupt; tu->timeri->ccallback = snd_timer_user_ccallback; tu->timeri->callback_data = (void *)tu; } __err: return err; } static int snd_timer_user_info(struct file *file, struct snd_timer_info __user *_info) { struct snd_timer_user *tu; struct snd_timer_info *info; struct snd_timer *t; int err = 0; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; info = kzalloc(sizeof(*info), GFP_KERNEL); if (! info) return -ENOMEM; info->card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) info->flags |= SNDRV_TIMER_FLG_SLAVE; strlcpy(info->id, t->id, sizeof(info->id)); strlcpy(info->name, t->name, sizeof(info->name)); info->resolution = t->hw.resolution; if (copy_to_user(_info, info, sizeof(*_info))) err = -EFAULT; kfree(info); return err; } static int snd_timer_user_params(struct file *file, struct snd_timer_params __user *_params) { struct snd_timer_user *tu; struct snd_timer_params params; struct snd_timer *t; struct snd_timer_read *tr; struct snd_timer_tread *ttr; int err; tu = file->private_data; if (!tu->timeri) return -EBADFD; t = tu->timeri->timer; if (!t) return -EBADFD; if (copy_from_user(&params, _params, sizeof(params))) return -EFAULT; if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) { err = -EINVAL; goto _end; } if (params.queue_size > 0 && (params.queue_size < 32 || params.queue_size > 1024)) { err = -EINVAL; goto _end; } if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)| (1<<SNDRV_TIMER_EVENT_TICK)| (1<<SNDRV_TIMER_EVENT_START)| (1<<SNDRV_TIMER_EVENT_STOP)| (1<<SNDRV_TIMER_EVENT_CONTINUE)| (1<<SNDRV_TIMER_EVENT_PAUSE)| (1<<SNDRV_TIMER_EVENT_SUSPEND)| (1<<SNDRV_TIMER_EVENT_RESUME)| (1<<SNDRV_TIMER_EVENT_MSTART)| (1<<SNDRV_TIMER_EVENT_MSTOP)| (1<<SNDRV_TIMER_EVENT_MCONTINUE)| (1<<SNDRV_TIMER_EVENT_MPAUSE)| (1<<SNDRV_TIMER_EVENT_MSUSPEND)| (1<<SNDRV_TIMER_EVENT_MRESUME))) { err = -EINVAL; goto _end; } snd_timer_stop(tu->timeri); spin_lock_irq(&t->lock); tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO| SNDRV_TIMER_IFLG_EXCLUSIVE| SNDRV_TIMER_IFLG_EARLY_EVENT); if (params.flags & SNDRV_TIMER_PSFLG_AUTO) tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO; if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE) tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE; if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT) tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT; spin_unlock_irq(&t->lock); if (params.queue_size > 0 && (unsigned int)tu->queue_size != params.queue_size) { if (tu->tread) { ttr = kmalloc(params.queue_size * sizeof(*ttr), GFP_KERNEL); if (ttr) { kfree(tu->tqueue); tu->queue_size = params.queue_size; tu->tqueue = ttr; } } else { tr = kmalloc(params.queue_size * sizeof(*tr), GFP_KERNEL); if (tr) { kfree(tu->queue); tu->queue_size = params.queue_size; tu->queue = tr; } } } tu->qhead = tu->qtail = tu->qused = 0; if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) { if (tu->tread) { struct snd_timer_tread tread; tread.event = SNDRV_TIMER_EVENT_EARLY; tread.tstamp.tv_sec = 0; tread.tstamp.tv_nsec = 0; tread.val = 0; snd_timer_user_append_to_tqueue(tu, &tread); } else { struct snd_timer_read *r = &tu->queue[0]; r->resolution = 0; r->ticks = 0; tu->qused++; tu->qtail++; } } tu->filter = params.filter; tu->ticks = params.ticks; err = 0; _end: if (copy_to_user(_params, &params, sizeof(params))) return -EFAULT; return err; } static int snd_timer_user_status(struct file *file, struct snd_timer_status __user *_status) { struct snd_timer_user *tu; struct snd_timer_status 
status; tu = file->private_data; if (!tu->timeri) return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp = tu->tstamp; status.resolution = snd_timer_resolution(tu->timeri); status.lost = tu->timeri->lost; status.overrun = tu->overrun; spin_lock_irq(&tu->qlock); status.queue = tu->qused; spin_unlock_irq(&tu->qlock); if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0; } static int snd_timer_user_start(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; snd_timer_stop(tu->timeri); tu->timeri->lost = 0; tu->last_resolution = 0; return (err = snd_timer_start(tu->timeri, tu->ticks)) < 0 ? err : 0; } static int snd_timer_user_stop(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_stop(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_continue(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; tu->timeri->lost = 0; return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0; } static int snd_timer_user_pause(struct file *file) { int err; struct snd_timer_user *tu; tu = file->private_data; if (!tu->timeri) return -EBADFD; return (err = snd_timer_pause(tu->timeri)) < 0 ? err : 0; } enum { SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20), SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21), SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22), SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23), }; static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_timer_user *tu; void __user *argp = (void __user *)arg; int __user *p = argp; tu = file->private_data; switch (cmd) { case SNDRV_TIMER_IOCTL_PVERSION: return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0; case SNDRV_TIMER_IOCTL_NEXT_DEVICE: return snd_timer_user_next_device(argp); case SNDRV_TIMER_IOCTL_TREAD: { int xarg; if (tu->timeri) /* too late */ return -EBUSY; if (get_user(xarg, p)) return -EFAULT; tu->tread = xarg ? 
1 : 0; return 0; } case SNDRV_TIMER_IOCTL_GINFO: return snd_timer_user_ginfo(file, argp); case SNDRV_TIMER_IOCTL_GPARAMS: return snd_timer_user_gparams(file, argp); case SNDRV_TIMER_IOCTL_GSTATUS: return snd_timer_user_gstatus(file, argp); case SNDRV_TIMER_IOCTL_SELECT: return snd_timer_user_tselect(file, argp); case SNDRV_TIMER_IOCTL_INFO: return snd_timer_user_info(file, argp); case SNDRV_TIMER_IOCTL_PARAMS: return snd_timer_user_params(file, argp); case SNDRV_TIMER_IOCTL_STATUS: return snd_timer_user_status(file, argp); case SNDRV_TIMER_IOCTL_START: case SNDRV_TIMER_IOCTL_START_OLD: return snd_timer_user_start(file); case SNDRV_TIMER_IOCTL_STOP: case SNDRV_TIMER_IOCTL_STOP_OLD: return snd_timer_user_stop(file); case SNDRV_TIMER_IOCTL_CONTINUE: case SNDRV_TIMER_IOCTL_CONTINUE_OLD: return snd_timer_user_continue(file); case SNDRV_TIMER_IOCTL_PAUSE: case SNDRV_TIMER_IOCTL_PAUSE_OLD: return snd_timer_user_pause(file); } return -ENOTTY; } static long snd_timer_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_timer_user *tu = file->private_data; long ret; mutex_lock(&tu->ioctl_lock); ret = __snd_timer_user_ioctl(file, cmd, arg); mutex_unlock(&tu->ioctl_lock); return ret; } static int snd_timer_user_fasync(int fd, struct file * file, int on) { struct snd_timer_user *tu; tu = file->private_data; return fasync_helper(fd, file, on, &tu->fasync); } static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) { struct snd_timer_user *tu; long result = 0, unit; int err = 0; tu = file->private_data; unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); spin_lock_irq(&tu->qlock); while ((long)count - result >= unit) { while (!tu->qused) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; break; } set_current_state(TASK_INTERRUPTIBLE); init_waitqueue_entry(&wait, current); add_wait_queue(&tu->qchange_sleep, &wait); spin_unlock_irq(&tu->qlock); schedule(); spin_lock_irq(&tu->qlock); remove_wait_queue(&tu->qchange_sleep, &wait); if (signal_pending(current)) { err = -ERESTARTSYS; break; } } spin_unlock_irq(&tu->qlock); if (err < 0) goto _error; if (tu->tread) { if (copy_to_user(buffer, &tu->tqueue[tu->qhead++], sizeof(struct snd_timer_tread))) { err = -EFAULT; goto _error; } } else { if (copy_to_user(buffer, &tu->queue[tu->qhead++], sizeof(struct snd_timer_read))) { err = -EFAULT; goto _error; } } tu->qhead %= tu->queue_size; result += unit; buffer += unit; spin_lock_irq(&tu->qlock); tu->qused--; } spin_unlock_irq(&tu->qlock); _error: return result > 0 ? 
result : err; } static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait) { unsigned int mask; struct snd_timer_user *tu; tu = file->private_data; poll_wait(file, &tu->qchange_sleep, wait); mask = 0; if (tu->qused) mask |= POLLIN | POLLRDNORM; return mask; } #ifdef CONFIG_COMPAT #include "timer_compat.c" #else #define snd_timer_user_ioctl_compat NULL #endif static const struct file_operations snd_timer_f_ops = { .owner = THIS_MODULE, .read = snd_timer_user_read, .open = snd_timer_user_open, .release = snd_timer_user_release, .llseek = no_llseek, .poll = snd_timer_user_poll, .unlocked_ioctl = snd_timer_user_ioctl, .compat_ioctl = snd_timer_user_ioctl_compat, .fasync = snd_timer_user_fasync, }; /* unregister the system timer */ static void snd_timer_free_all(void) { struct snd_timer *timer, *n; list_for_each_entry_safe(timer, n, &snd_timer_list, device_list) snd_timer_free(timer); } static struct device timer_dev; /* * ENTRY functions */ static int __init alsa_timer_init(void) { int err; snd_device_initialize(&timer_dev, NULL); dev_set_name(&timer_dev, "timer"); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1, "system timer"); #endif err = snd_timer_register_system(); if (err < 0) { pr_err("ALSA: unable to register system timer (%i)\n", err); put_device(&timer_dev); return err; } err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0, &snd_timer_f_ops, NULL, &timer_dev); if (err < 0) { pr_err("ALSA: unable to register timer device (%i)\n", err); snd_timer_free_all(); put_device(&timer_dev); return err; } snd_timer_proc_init(); return 0; } static void __exit alsa_timer_exit(void) { snd_unregister_device(&timer_dev); snd_timer_free_all(); put_device(&timer_dev); snd_timer_proc_done(); #ifdef SNDRV_OSS_INFO_DEV_TIMERS snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1); #endif } module_init(alsa_timer_init) module_exit(alsa_timer_exit) EXPORT_SYMBOL(snd_timer_open); EXPORT_SYMBOL(snd_timer_close); EXPORT_SYMBOL(snd_timer_resolution); EXPORT_SYMBOL(snd_timer_start); EXPORT_SYMBOL(snd_timer_stop); EXPORT_SYMBOL(snd_timer_continue); EXPORT_SYMBOL(snd_timer_pause); EXPORT_SYMBOL(snd_timer_new); EXPORT_SYMBOL(snd_timer_notify); EXPORT_SYMBOL(snd_timer_global_new); EXPORT_SYMBOL(snd_timer_global_free); EXPORT_SYMBOL(snd_timer_global_register); EXPORT_SYMBOL(snd_timer_interrupt);
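/*
 * Editor's note: the block below is an illustrative user-space companion
 * to the character-device interface implemented above, kept inside
 * "#if 0" so it has no effect on this translation unit. It is a minimal
 * sketch, assuming the uapi definitions from <sound/asound.h> and the
 * conventional /dev/snd/timer node; error handling is abbreviated.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
	struct snd_timer_select sel;
	struct snd_timer_params params;
	struct snd_timer_read ev;
	int fd = open("/dev/snd/timer", O_RDONLY);

	if (fd < 0)
		return 1;

	/* Bind this fd to the global system timer registered by
	 * snd_timer_register_system() above (the ioctl is dispatched to
	 * snd_timer_user_tselect()). */
	memset(&sel, 0, sizeof(sel));
	sel.id.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
	sel.id.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
	sel.id.card = -1;
	sel.id.device = SNDRV_TIMER_GLOBAL_SYSTEM;
	sel.id.subdevice = 0;
	if (ioctl(fd, SNDRV_TIMER_IOCTL_SELECT, &sel) < 0)
		return 1;

	/* One tick per period, auto-restarting (snd_timer_user_params()). */
	memset(&params, 0, sizeof(params));
	params.ticks = 1;
	params.flags = SNDRV_TIMER_PSFLG_AUTO;
	if (ioctl(fd, SNDRV_TIMER_IOCTL_PARAMS, &params) < 0)
		return 1;

	if (ioctl(fd, SNDRV_TIMER_IOCTL_START) < 0)
		return 1;

	/* Each read() drains queued tick events (snd_timer_user_read()). */
	if (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
		printf("resolution %u ns, ticks %u\n", ev.resolution, ev.ticks);

	ioctl(fd, SNDRV_TIMER_IOCTL_STOP);
	close(fd);
	return 0;
}
#endif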
./CrossVul/dataset_final_sorted/CWE-362/c/good_4963_0
crossvul-cpp_data_bad_3459_0
/* * Linux NET3: IP/IP protocol decoder. * * Authors: * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 * * Fixes: * Alan Cox : Merged and made usable non modular (its so tiny its silly as * a module taking up 2 pages). * Alan Cox : Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph) * to keep ip_forward happy. * Alan Cox : More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8). * Kai Schulte : Fixed #defines for IP_FIREWALL->FIREWALL * David Woodhouse : Perform some basic ICMP handling. * IPIP Routing without decapsulation. * Carlos Picoto : GRE over IP support * Alexey Kuznetsov: Reworked. Really, now it is truncated version of ipv4/ip_gre.c. * I do not want to merge them together. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ /* tunnel.c: an IP tunnel driver The purpose of this driver is to provide an IP tunnel through which you can tunnel network traffic transparently across subnets. This was written by looking at Nick Holloway's dummy driver Thanks for the great code! -Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95 Minor tweaks: Cleaned up the code a little and added some pre-1.3.0 tweaks. dev->hard_header/hard_header_len changed to use no headers. Comments/bracketing tweaked. Made the tunnels use dev->name not tunnel: when error reporting. Added tx_dropped stat -Alan Cox (alan@lxorguk.ukuu.org.uk) 21 March 95 Reworked: Changed to tunnel to destination gateway in addition to the tunnel's pointopoint address Almost completely rewritten Note: There is currently no firewall or ICMP handling done. -Sam Lantinga (slouken@cs.ucdavis.edu) 02/13/96 */ /* Things I wish I had known when writing the tunnel driver: When the tunnel_xmit() function is called, the skb contains the packet to be sent (plus a great deal of extra info), and dev contains the tunnel device that _we_ are. When we are passed a packet, we are expected to fill in the source address with our source IP address. What is the proper way to allocate, copy and free a buffer? After you allocate it, it is a "0 length" chunk of memory starting at zero. If you want to add headers to the buffer later, you'll have to call "skb_reserve(skb, amount)" with the amount of memory you want reserved. Then, you call "skb_put(skb, amount)" with the amount of space you want in the buffer. skb_put() returns a pointer to the top (#0) of that buffer. skb->len is set to the amount of space you have "allocated" with skb_put(). You can then write up to skb->len bytes to that buffer. If you need more, you can call skb_put() again with the additional amount of space you need. You can find out how much more space you can allocate by calling "skb_tailroom(skb)". Now, to add header space, call "skb_push(skb, header_len)". This creates space at the beginning of the buffer and returns a pointer to this new space. If later you need to strip a header from a buffer, call "skb_pull(skb, header_len)". skb_headroom() will return how much space is left at the top of the buffer (before the main data). Remember, this headroom space must be reserved before the skb_put() function is called. 
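
   An illustrative sketch of that whole sequence (editor's note: this
   snippet is not part of the driver; "hlen", "dlen" and "payload" are
   placeholder names). Reserve the headroom first, append the payload,
   then prepend the header into the reserved space:

	skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
	skb_reserve(skb, hlen);
	memcpy(skb_put(skb, dlen), payload, dlen);
	hdr = skb_push(skb, hlen);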
*/ /* This version of net/ipv4/ipip.c is cloned of net/ipv4/ip_gre.c For comments look at net/ipv4/ip_gre.c --ANK */ #include <linux/capability.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <asm/uaccess.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/in.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/if_arp.h> #include <linux/mroute.h> #include <linux/init.h> #include <linux/netfilter_ipv4.h> #include <linux/if_ether.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/ipip.h> #include <net/inet_ecn.h> #include <net/xfrm.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #define HASH_SIZE 16 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) static int ipip_net_id __read_mostly; struct ipip_net { struct ip_tunnel *tunnels_r_l[HASH_SIZE]; struct ip_tunnel *tunnels_r[HASH_SIZE]; struct ip_tunnel *tunnels_l[HASH_SIZE]; struct ip_tunnel *tunnels_wc[1]; struct ip_tunnel **tunnels[4]; struct net_device *fb_tunnel_dev; }; static void ipip_tunnel_init(struct net_device *dev); static void ipip_tunnel_setup(struct net_device *dev); /* * Locking : hash tables are protected by RCU and a spinlock */ static DEFINE_SPINLOCK(ipip_lock); #define for_each_ip_tunnel_rcu(start) \ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, __be32 remote, __be32 local) { unsigned h0 = HASH(remote); unsigned h1 = HASH(local); struct ip_tunnel *t; struct ipip_net *ipn = net_generic(net, ipip_net_id); for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1]) if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) return t; for_each_ip_tunnel_rcu(ipn->tunnels_r[h0]) if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP)) return t; for_each_ip_tunnel_rcu(ipn->tunnels_l[h1]) if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP)) return t; t = rcu_dereference(ipn->tunnels_wc[0]); if (t && (t->dev->flags&IFF_UP)) return t; return NULL; } static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn, struct ip_tunnel_parm *parms) { __be32 remote = parms->iph.daddr; __be32 local = parms->iph.saddr; unsigned h = 0; int prio = 0; if (remote) { prio |= 2; h ^= HASH(remote); } if (local) { prio |= 1; h ^= HASH(local); } return &ipn->tunnels[prio][h]; } static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn, struct ip_tunnel *t) { return __ipip_bucket(ipn, &t->parms); } static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t) { struct ip_tunnel **tp; for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) { if (t == *tp) { spin_lock_bh(&ipip_lock); *tp = t->next; spin_unlock_bh(&ipip_lock); break; } } } static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t) { struct ip_tunnel **tp = ipip_bucket(ipn, t); spin_lock_bh(&ipip_lock); t->next = *tp; rcu_assign_pointer(*tp, t); spin_unlock_bh(&ipip_lock); } static struct ip_tunnel * ipip_tunnel_locate(struct net *net, struct ip_tunnel_parm *parms, int create) { __be32 remote = parms->iph.daddr; __be32 local = parms->iph.saddr; struct ip_tunnel *t, **tp, *nt; struct net_device *dev; char name[IFNAMSIZ]; struct ipip_net *ipn = net_generic(net, ipip_net_id); for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) { if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) return t; } if (!create) return NULL; if (parms->name[0]) strlcpy(name, parms->name, IFNAMSIZ); else 
sprintf(name, "tunl%%d"); dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup); if (dev == NULL) return NULL; dev_net_set(dev, net); if (strchr(name, '%')) { if (dev_alloc_name(dev, name) < 0) goto failed_free; } nt = netdev_priv(dev); nt->parms = *parms; ipip_tunnel_init(dev); if (register_netdevice(dev) < 0) goto failed_free; dev_hold(dev); ipip_tunnel_link(ipn, nt); return nt; failed_free: free_netdev(dev); return NULL; } static void ipip_tunnel_uninit(struct net_device *dev) { struct net *net = dev_net(dev); struct ipip_net *ipn = net_generic(net, ipip_net_id); if (dev == ipn->fb_tunnel_dev) { spin_lock_bh(&ipip_lock); ipn->tunnels_wc[0] = NULL; spin_unlock_bh(&ipip_lock); } else ipip_tunnel_unlink(ipn, netdev_priv(dev)); dev_put(dev); } static int ipip_err(struct sk_buff *skb, u32 info) { /* All the routers (except for Linux) return only 8 bytes of packet payload. It means, that precise relaying of ICMP in the real Internet is absolutely infeasible. */ struct iphdr *iph = (struct iphdr *)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct ip_tunnel *t; int err; switch (type) { default: case ICMP_PARAMETERPROB: return 0; case ICMP_DEST_UNREACH: switch (code) { case ICMP_SR_FAILED: case ICMP_PORT_UNREACH: /* Impossible event. */ return 0; case ICMP_FRAG_NEEDED: /* Soft state for pmtu is maintained by IP core. */ return 0; default: /* All others are translated to HOST_UNREACH. rfc2003 contains "deep thoughts" about NET_UNREACH, I believe they are just ether pollution. --ANK */ break; } break; case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return 0; break; } err = -ENOENT; rcu_read_lock(); t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); if (t == NULL || t->parms.iph.daddr == 0) goto out; err = 0; if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) goto out; if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO)) t->err_count++; else t->err_count = 1; t->err_time = jiffies; out: rcu_read_unlock(); return err; } static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph, struct sk_buff *skb) { struct iphdr *inner_iph = ip_hdr(skb); if (INET_ECN_is_ce(outer_iph->tos)) IP_ECN_set_ce(inner_iph); } static int ipip_rcv(struct sk_buff *skb) { struct ip_tunnel *tunnel; const struct iphdr *iph = ip_hdr(skb); rcu_read_lock(); if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr)) != NULL) { if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { rcu_read_unlock(); kfree_skb(skb); return 0; } secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); skb->protocol = htons(ETH_P_IP); skb->pkt_type = PACKET_HOST; tunnel->dev->stats.rx_packets++; tunnel->dev->stats.rx_bytes += skb->len; skb->dev = tunnel->dev; skb_dst_drop(skb); nf_reset(skb); ipip_ecn_decapsulate(iph, skb); netif_rx(skb); rcu_read_unlock(); return 0; } rcu_read_unlock(); return -1; } /* * This function assumes it is being called from dev_queue_xmit() * and that skb is filled properly by that function. 
*/ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); struct iphdr *tiph = &tunnel->parms.iph; u8 tos = tunnel->parms.iph.tos; __be16 df = tiph->frag_off; struct rtable *rt; /* Route to the other host */ struct net_device *tdev; /* Device to other host */ struct iphdr *old_iph = ip_hdr(skb); struct iphdr *iph; /* Our new IP header */ unsigned int max_headroom; /* The extra header space needed */ __be32 dst = tiph->daddr; int mtu; if (skb->protocol != htons(ETH_P_IP)) goto tx_error; if (tos&1) tos = old_iph->tos; if (!dst) { /* NBMA tunnel */ if ((rt = skb_rtable(skb)) == NULL) { stats->tx_fifo_errors++; goto tx_error; } if ((dst = rt->rt_gateway) == 0) goto tx_error_icmp; } { struct flowi fl = { .oif = tunnel->parms.link, .nl_u = { .ip4_u = { .daddr = dst, .saddr = tiph->saddr, .tos = RT_TOS(tos) } }, .proto = IPPROTO_IPIP }; if (ip_route_output_key(dev_net(dev), &rt, &fl)) { stats->tx_carrier_errors++; goto tx_error_icmp; } } tdev = rt->u.dst.dev; if (tdev == dev) { ip_rt_put(rt); stats->collisions++; goto tx_error; } df |= old_iph->frag_off & htons(IP_DF); if (df) { mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); if (mtu < 68) { stats->collisions++; ip_rt_put(rt); goto tx_error; } if (skb_dst(skb)) skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); if ((old_iph->frag_off & htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) { icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); ip_rt_put(rt); goto tx_error; } } if (tunnel->err_count > 0) { if (time_before(jiffies, tunnel->err_time + IPTUNNEL_ERR_TIMEO)) { tunnel->err_count--; dst_link_failure(skb); } else tunnel->err_count = 0; } /* * Okay, now see if we can stuff it in the buffer as-is. */ max_headroom = (LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr)); if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) { ip_rt_put(rt); txq->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } if (skb->sk) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb(skb); skb = new_skb; old_iph = ip_hdr(skb); } skb->transport_header = skb->network_header; skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_drop(skb); skb_dst_set(skb, &rt->u.dst); /* * Push down and install the IPIP header. 
*/ iph = ip_hdr(skb); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; iph->frag_off = df; iph->protocol = IPPROTO_IPIP; iph->tos = INET_ECN_encapsulate(tos, old_iph->tos); iph->daddr = rt->rt_dst; iph->saddr = rt->rt_src; if ((iph->ttl = tiph->ttl) == 0) iph->ttl = old_iph->ttl; nf_reset(skb); IPTUNNEL_XMIT(); return NETDEV_TX_OK; tx_error_icmp: dst_link_failure(skb); tx_error: stats->tx_errors++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static void ipip_tunnel_bind_dev(struct net_device *dev) { struct net_device *tdev = NULL; struct ip_tunnel *tunnel; struct iphdr *iph; tunnel = netdev_priv(dev); iph = &tunnel->parms.iph; if (iph->daddr) { struct flowi fl = { .oif = tunnel->parms.link, .nl_u = { .ip4_u = { .daddr = iph->daddr, .saddr = iph->saddr, .tos = RT_TOS(iph->tos) } }, .proto = IPPROTO_IPIP }; struct rtable *rt; if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { tdev = rt->u.dst.dev; ip_rt_put(rt); } dev->flags |= IFF_POINTOPOINT; } if (!tdev && tunnel->parms.link) tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link); if (tdev) { dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); dev->mtu = tdev->mtu - sizeof(struct iphdr); } dev->iflink = tunnel->parms.link; } static int ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) { int err = 0; struct ip_tunnel_parm p; struct ip_tunnel *t; struct net *net = dev_net(dev); struct ipip_net *ipn = net_generic(net, ipip_net_id); switch (cmd) { case SIOCGETTUNNEL: t = NULL; if (dev == ipn->fb_tunnel_dev) { if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { err = -EFAULT; break; } t = ipip_tunnel_locate(net, &p, 0); } if (t == NULL) t = netdev_priv(dev); memcpy(&p, &t->parms, sizeof(p)); if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) err = -EFAULT; break; case SIOCADDTUNNEL: case SIOCCHGTUNNEL: err = -EPERM; if (!capable(CAP_NET_ADMIN)) goto done; err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) goto done; err = -EINVAL; if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP || p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF))) goto done; if (p.iph.ttl) p.iph.frag_off |= htons(IP_DF); t = ipip_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL); if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { if (t != NULL) { if (t->dev != dev) { err = -EEXIST; break; } } else { if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) || (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) { err = -EINVAL; break; } t = netdev_priv(dev); ipip_tunnel_unlink(ipn, t); t->parms.iph.saddr = p.iph.saddr; t->parms.iph.daddr = p.iph.daddr; memcpy(dev->dev_addr, &p.iph.saddr, 4); memcpy(dev->broadcast, &p.iph.daddr, 4); ipip_tunnel_link(ipn, t); netdev_state_change(dev); } } if (t) { err = 0; if (cmd == SIOCCHGTUNNEL) { t->parms.iph.ttl = p.iph.ttl; t->parms.iph.tos = p.iph.tos; t->parms.iph.frag_off = p.iph.frag_off; if (t->parms.link != p.link) { t->parms.link = p.link; ipip_tunnel_bind_dev(dev); netdev_state_change(dev); } } if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p))) err = -EFAULT; } else err = (cmd == SIOCADDTUNNEL ? 
-ENOBUFS : -ENOENT); break; case SIOCDELTUNNEL: err = -EPERM; if (!capable(CAP_NET_ADMIN)) goto done; if (dev == ipn->fb_tunnel_dev) { err = -EFAULT; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) goto done; err = -ENOENT; if ((t = ipip_tunnel_locate(net, &p, 0)) == NULL) goto done; err = -EPERM; if (t->dev == ipn->fb_tunnel_dev) goto done; dev = t->dev; } unregister_netdevice(dev); err = 0; break; default: err = -EINVAL; } done: return err; } static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr)) return -EINVAL; dev->mtu = new_mtu; return 0; } static const struct net_device_ops ipip_netdev_ops = { .ndo_uninit = ipip_tunnel_uninit, .ndo_start_xmit = ipip_tunnel_xmit, .ndo_do_ioctl = ipip_tunnel_ioctl, .ndo_change_mtu = ipip_tunnel_change_mtu, }; static void ipip_tunnel_setup(struct net_device *dev) { dev->netdev_ops = &ipip_netdev_ops; dev->destructor = free_netdev; dev->type = ARPHRD_TUNNEL; dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr); dev->flags = IFF_NOARP; dev->iflink = 0; dev->addr_len = 4; dev->features |= NETIF_F_NETNS_LOCAL; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; } static void ipip_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); tunnel->dev = dev; strcpy(tunnel->parms.name, dev->name); memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); ipip_tunnel_bind_dev(dev); } static void __net_init ipip_fb_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct iphdr *iph = &tunnel->parms.iph; struct ipip_net *ipn = net_generic(dev_net(dev), ipip_net_id); tunnel->dev = dev; strcpy(tunnel->parms.name, dev->name); iph->version = 4; iph->protocol = IPPROTO_IPIP; iph->ihl = 5; dev_hold(dev); ipn->tunnels_wc[0] = tunnel; } static struct xfrm_tunnel ipip_handler = { .handler = ipip_rcv, .err_handler = ipip_err, .priority = 1, }; static const char banner[] __initconst = KERN_INFO "IPv4 over IPv4 tunneling driver\n"; static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head) { int prio; for (prio = 1; prio < 4; prio++) { int h; for (h = 0; h < HASH_SIZE; h++) { struct ip_tunnel *t = ipn->tunnels[prio][h]; while (t != NULL) { unregister_netdevice_queue(t->dev, head); t = t->next; } } } } static int __net_init ipip_init_net(struct net *net) { struct ipip_net *ipn = net_generic(net, ipip_net_id); int err; ipn->tunnels[0] = ipn->tunnels_wc; ipn->tunnels[1] = ipn->tunnels_l; ipn->tunnels[2] = ipn->tunnels_r; ipn->tunnels[3] = ipn->tunnels_r_l; ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "tunl0", ipip_tunnel_setup); if (!ipn->fb_tunnel_dev) { err = -ENOMEM; goto err_alloc_dev; } dev_net_set(ipn->fb_tunnel_dev, net); ipip_fb_tunnel_init(ipn->fb_tunnel_dev); if ((err = register_netdev(ipn->fb_tunnel_dev))) goto err_reg_dev; return 0; err_reg_dev: free_netdev(ipn->fb_tunnel_dev); err_alloc_dev: /* nothing */ return err; } static void __net_exit ipip_exit_net(struct net *net) { struct ipip_net *ipn = net_generic(net, ipip_net_id); LIST_HEAD(list); rtnl_lock(); ipip_destroy_tunnels(ipn, &list); unregister_netdevice_queue(ipn->fb_tunnel_dev, &list); unregister_netdevice_many(&list); rtnl_unlock(); } static struct pernet_operations ipip_net_ops = { .init = ipip_init_net, .exit = ipip_exit_net, .id = &ipip_net_id, .size = sizeof(struct ipip_net), }; static int __init ipip_init(void) { int err; 
printk(banner); if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) { printk(KERN_INFO "ipip init: can't register tunnel\n"); return -EAGAIN; } err = register_pernet_device(&ipip_net_ops); if (err) xfrm4_tunnel_deregister(&ipip_handler, AF_INET); return err; } static void __exit ipip_fini(void) { if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET)) printk(KERN_INFO "ipip close: can't deregister tunnel\n"); unregister_pernet_device(&ipip_net_ops); } module_init(ipip_init); module_exit(ipip_fini); MODULE_LICENSE("GPL");
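/*
 * Editor's note: a minimal user-space sketch of driving the
 * SIOCADDTUNNEL path handled by ipip_tunnel_ioctl() above, kept inside
 * "#if 0" so it has no effect on this translation unit. The interface
 * name "mytun0" and the 192.0.2.x addresses are placeholders, and error
 * handling is abbreviated; the ioctl is issued against the fallback
 * device "tunl0", as iproute2 conventionally does.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>

int main(void)
{
	struct ip_tunnel_parm p;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&p, 0, sizeof(p));
	strncpy(p.name, "mytun0", IFNAMSIZ - 1);
	/* The handler above insists on version 4, ihl 5, IPPROTO_IPIP and
	 * no fragment bits other than DF (and forces DF when ttl != 0). */
	p.iph.version = 4;
	p.iph.ihl = 5;
	p.iph.protocol = IPPROTO_IPIP;
	p.iph.frag_off = htons(IP_DF);
	p.iph.ttl = 64;
	p.iph.saddr = inet_addr("192.0.2.1");
	p.iph.daddr = inet_addr("192.0.2.2");

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "tunl0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&p;

	if (ioctl(fd, SIOCADDTUNNEL, &ifr) < 0) {
		perror("SIOCADDTUNNEL");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
#endif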
./CrossVul/dataset_final_sorted/CWE-362/c/bad_3459_0
crossvul-cpp_data_good_1770_4
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #ifdef ENABLE_SONMP #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> static struct sonmp_chassis sonmp_chassis_types[] = { {1, "unknown (via SONMP)"}, {2, "Nortel 3000"}, {3, "Nortel 3030"}, {4, "Nortel 2310"}, {5, "Nortel 2810"}, {6, "Nortel 2912"}, {7, "Nortel 2914"}, {8, "Nortel 271x"}, {9, "Nortel 2813"}, {10, "Nortel 2814"}, {11, "Nortel 2915"}, {12, "Nortel 5000"}, {13, "Nortel 2813SA"}, {14, "Nortel 2814SA"}, {15, "Nortel 810M"}, {16, "Nortel EtherCell"}, {17, "Nortel 5005"}, {18, "Alcatel Ethernet workgroup conc."}, {20, "Nortel 2715SA"}, {21, "Nortel 2486"}, {22, "Nortel 28000 series"}, {23, "Nortel 23000 series"}, {24, "Nortel 5DN00x series"}, {25, "BayStack Ethernet"}, {26, "Nortel 23100 series"}, {27, "Nortel 100Base-T Hub"}, {28, "Nortel 3000 Fast Ethernet"}, {29, "Nortel Orion switch"}, {30, "unknown"}, {31, "Nortel DDS "}, {32, "Nortel Centillion"}, {33, "Nortel Centillion"}, {34, "Nortel Centillion"}, {35, "BayStack 301"}, {36, "BayStack TokenRing Hub"}, {37, "Nortel FVC Multimedia Switch"}, {38, "Nortel Switch Node"}, {39, "BayStack 302 Switch"}, {40, "BayStack 350 Switch"}, {41, "BayStack 150 Ethernet Hub"}, {42, "Nortel Centillion 50N switch"}, {43, "Nortel Centillion 50T switch"}, {44, "BayStack 303 and 304 Switches"}, {45, "BayStack 200 Ethernet Hub"}, {46, "BayStack 250 10/100 Ethernet Hub"}, {48, "BayStack 450 10/100/1000 Switches"}, {49, "BayStack 410 10/100 Switches"}, {50, "Nortel Ethernet Routing 1200 L3 Switch"}, {51, "Nortel Ethernet Routing 1250 L3 Switch"}, {52, "Nortel Ethernet Routing 1100 L3 Switch"}, {53, "Nortel Ethernet Routing 1150 L3 Switch"}, {54, "Nortel Ethernet Routing 1050 L3 Switch"}, {55, "Nortel Ethernet Routing 1051 L3 Switch"}, {56, "Nortel Ethernet Routing 8610 L3 Switch"}, {57, "Nortel Ethernet Routing 8606 L3 Switch"}, {58, "Nortel Ethernet Routing Switch 8010"}, {59, "Nortel Ethernet Routing Switch 8006"}, {60, "BayStack 670 wireless access point"}, {61, "Nortel Ethernet Routing Switch 740 "}, {62, "Nortel Ethernet Routing Switch 750 "}, {63, "Nortel Ethernet Routing Switch 790"}, {64, "Nortel Business Policy Switch 2000 10/100 Switches"}, {65, "Nortel Ethernet Routing 8110 L2 Switch"}, {66, "Nortel Ethernet Routing 8106 L2 Switch"}, {67, "BayStack 3580 Gig Switch"}, {68, "BayStack 10 Power Supply Unit"}, {69, "BayStack 420 10/100 Switch"}, {70, "OPTera Metro 1200 Ethernet Service Module"}, {71, "Nortel Ethernet Routing Switch 8010co"}, {72, "Nortel Ethernet Routing 8610co L3 switch"}, {73, "Nortel Ethernet Routing 8110co L2 switch"}, {74, "Nortel Ethernet Routing 8003"}, {75, "Nortel Ethernet Routing 8603 L3 switch"}, {76, "Nortel Ethernet Routing 8103 L2 
switch"}, {77, "BayStack 380 10/100/1000 Switch"}, {78, "Nortel Ethernet Switch 470-48T"}, {79, "OPTera Metro 1450 Ethernet Service Module"}, {80, "OPTera Metro 1400 Ethernet Service Module"}, {81, "Alteon Switch Family"}, {82, "Ethernet Switch 460-24T-PWR"}, {83, "OPTera Metro 8010 OPM L2 Switch"}, {84, "OPTera Metro 8010co OPM L2 Switch"}, {85, "OPTera Metro 8006 OPM L2 Switch"}, {86, "OPTera Metro 8003 OPM L2 Switch"}, {87, "Alteon 180e"}, {88, "Alteon AD3"}, {89, "Alteon 184"}, {90, "Alteon AD4"}, {91, "Nortel Ethernet Routing 1424 L3 switch"}, {92, "Nortel Ethernet Routing 1648 L3 switch"}, {93, "Nortel Ethernet Routing 1612 L3 switch"}, {94, "Nortel Ethernet Routing 1624 L3 switch "}, {95, "BayStack 380-24F Fiber 1000 Switch"}, {96, "Nortel Ethernet Routing Switch 5510-24T"}, {97, "Nortel Ethernet Routing Switch 5510-48T"}, {98, "Nortel Ethernet Switch 470-24T"}, {99, "Nortel Networks Wireless LAN Access Point 2220"}, {100, "Ethernet Routing RBS 2402 L3 switch"}, {101, "Alteon Application Switch 2424 "}, {102, "Alteon Application Switch 2224 "}, {103, "Alteon Application Switch 2208 "}, {104, "Alteon Application Switch 2216"}, {105, "Alteon Application Switch 3408"}, {106, "Alteon Application Switch 3416"}, {107, "Nortel Networks Wireless LAN SecuritySwitch 2250"}, {108, "Ethernet Switch 425-48T"}, {109, "Ethernet Switch 425-24T"}, {110, "Nortel Networks Wireless LAN Access Point 2221"}, {111, "Nortel Metro Ethernet Service Unit 24-T SPF switch"}, {112, "Nortel Metro Ethernet Service Unit 24-T LX DC switch"}, {113, "Nortel Ethernet Routing Switch 8300 10-slot chassis"}, {114, "Nortel Ethernet Routing Switch 8300 6-slot chassis"}, {115, "Nortel Ethernet Routing Switch 5520-24T-PWR"}, {116, "Nortel Ethernet Routing Switch 5520-48T-PWR"}, {117, "Nortel Networks VPN Gateway 3050"}, {118, "Alteon SSL 310 10/100"}, {119, "Alteon SSL 310 10/100 Fiber"}, {120, "Alteon SSL 310 10/100 FIPS"}, {121, "Alteon SSL 410 10/100/1000"}, {122, "Alteon SSL 410 10/100/1000 Fiber"}, {123, "Alteon Application Switch 2424-SSL"}, {124, "Nortel Ethernet Switch 325-24T"}, {125, "Nortel Ethernet Switch 325-24G"}, {126, "Nortel Networks Wireless LAN Access Point 2225"}, {127, "Nortel Networks Wireless LAN SecuritySwitch 2270"}, {128, "Nortel 24-port Ethernet Switch 470-24T-PWR"}, {129, "Nortel 48-port Ethernet Switch 470-48T-PWR"}, {130, "Nortel Ethernet Routing Switch 5530-24TFD"}, {131, "Nortel Ethernet Switch 3510-24T"}, {132, "Nortel Metro Ethernet Service Unit 12G AC L3 switch"}, {133, "Nortel Metro Ethernet Service Unit 12G DC L3 switch"}, {134, "Nortel Secure Access Switch"}, {135, "Networks VPN Gateway 3070"}, {136, "OPTera Metro 3500"}, {137, "SMB BES 1010 24T"}, {138, "SMB BES 1010 48T"}, {139, "SMB BES 1020 24T PWR"}, {140, "SMB BES 1020 48T PWR"}, {141, "SMB BES 2010 24T"}, {142, "SMB BES 2010 48T"}, {143, "SMB BES 2020 24T PWR"}, {144, "SMB BES 2020 48T PWR"}, {145, "SMB BES 110 24T"}, {146, "SMB BES 110 48T"}, {147, "SMB BES 120 24T PWR"}, {148, "SMB BES 120 48T PWR"}, {149, "SMB BES 210 24T"}, {150, "SMB BES 210 48T"}, {151, "SMB BES 220 24T PWR"}, {152, "SMB BES 220 48T PWR"}, {153, "OME 6500"}, {0, "unknown (via SONMP)"}, }; int sonmp_send(struct lldpd *global, struct lldpd_hardware *hardware) { const u_int8_t mcastaddr[] = SONMP_MULTICAST_ADDR; const u_int8_t llcorg[] = LLC_ORG_NORTEL; struct lldpd_chassis *chassis; struct lldpd_mgmt *mgmt; u_int8_t *packet, *pos, *pos_pid, *end; int length; struct in_addr address; log_debug("sonmp", "send SONMP PDU to %s", hardware->h_ifname); chassis = 
hardware->h_lport.p_chassis; length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( /* SONMP multicast address as target */ POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && /* Source MAC address */ POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && /* SONMP frame is of fixed size */ POKE_UINT16(SONMP_SIZE))) goto toobig; /* LLC header */ if (!( /* DSAP and SSAP */ POKE_UINT8(0xaa) && POKE_UINT8(0xaa) && /* Control field */ POKE_UINT8(0x03) && /* ORG */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_SAVE(pos_pid) && /* We will modify PID later to create a new frame */ POKE_UINT16(LLC_PID_SONMP_HELLO))) goto toobig; address.s_addr = htonl(INADDR_ANY); TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { if (mgmt->m_family == LLDPD_AF_IPV4) { address.s_addr = mgmt->m_addr.inet.s_addr; } break; } /* SONMP */ if (!( /* Our IP address */ POKE_BYTES(&address, sizeof(struct in_addr)) && /* Segment on three bytes, we don't have slots, so we skip the first two bytes */ POKE_UINT16(0) && POKE_UINT8(hardware->h_ifindex) && POKE_UINT8(1) && /* Chassis: Other */ POKE_UINT8(12) && /* Back: Ethernet, Fast Ethernet and Gigabit */ POKE_UINT8(SONMP_TOPOLOGY_NEW) && /* Should work. We have no state */ POKE_UINT8(1) && /* Links: Dunno what it is */ POKE_SAVE(end))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("sonmp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } POKE_RESTORE(pos_pid); /* Modify LLC PID */ (void)POKE_UINT16(LLC_PID_SONMP_FLATNET); POKE_RESTORE(packet); /* Go to the beginning */ PEEK_DISCARD(ETHER_ADDR_LEN - 1); /* Modify the last byte of the MAC address */ (void)POKE_UINT8(1); if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("sonmp", "unable to send second SONMP packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } free(packet); hardware->h_tx_cnt++; return 0; toobig: free(packet); return -1; } int sonmp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { const u_int8_t mcastaddr[] = SONMP_MULTICAST_ADDR; struct lldpd_chassis *chassis; struct lldpd_port *port; struct lldpd_mgmt *mgmt; int length, i; u_int8_t *pos; u_int8_t seg[3], rchassis; struct in_addr address; log_debug("sonmp", "decode SONMP PDU from %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("sonmp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("sonmp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < SONMP_SIZE) { log_warnx("sonmp", "too short SONMP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(mcastaddr, sizeof(mcastaddr)) != 0) /* There are two multicast addresses. We handle only one of * them.
*/ goto malformed; /* We skip to LLC PID */ PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; PEEK_DISCARD(6); if (PEEK_UINT16 != LLC_PID_SONMP_HELLO) { log_debug("sonmp", "incorrect LLC protocol ID received for SONMP on %s", hardware->h_ifname); goto malformed; } chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_ADDR; if ((chassis->c_id = calloc(1, sizeof(struct in_addr) + 1)) == NULL) { log_warn("sonmp", "unable to allocate memory for chassis id on %s", hardware->h_ifname); goto malformed; } chassis->c_id_len = sizeof(struct in_addr) + 1; chassis->c_id[0] = 1; PEEK_BYTES(&address, sizeof(struct in_addr)); memcpy(chassis->c_id + 1, &address, sizeof(struct in_addr)); if (asprintf(&chassis->c_name, "%s", inet_ntoa(address)) == -1) { log_warnx("sonmp", "unable to write chassis name for %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(seg, sizeof(seg)); rchassis = PEEK_UINT8; for (i=0; sonmp_chassis_types[i].type != 0; i++) { if (sonmp_chassis_types[i].type == rchassis) break; } if (asprintf(&chassis->c_descr, "%s", sonmp_chassis_types[i].description) == -1) { log_warnx("sonmp", "unable to write chassis description for %s", hardware->h_ifname); goto malformed; } mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &address, sizeof(struct in_addr), 0); if (mgmt == NULL) { if (errno == ENOMEM) log_warn("sonmp", "unable to allocate memory for management address"); else log_warn("sonmp", "too large management address received on %s", hardware->h_ifname); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); chassis->c_ttl = cfg?(cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold): LLDPD_TTL; port->p_id_subtype = LLDP_PORTID_SUBTYPE_LOCAL; if (asprintf(&port->p_id, "%02x-%02x-%02x", seg[0], seg[1], seg[2]) == -1) { log_warn("sonmp", "unable to allocate memory for port id on %s", hardware->h_ifname); goto malformed; } port->p_id_len = strlen(port->p_id); /* Port description depends on the number of segments */ if ((seg[0] == 0) && (seg[1] == 0)) { if (asprintf(&port->p_descr, "port %d", seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } else if (seg[0] == 0) { if (asprintf(&port->p_descr, "port %d/%d", seg[1], seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } else { if (asprintf(&port->p_descr, "port %x:%x:%x", seg[0], seg[1], seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } *newchassis = chassis; *newport = port; return 1; malformed: lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #endif /* ENABLE_SONMP */
./CrossVul/dataset_final_sorted/CWE-617/c/good_1770_4
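The sonmp_send() routine above relies entirely on the POKE_* macros from lldpd's frame.h to serialize the frame without ever writing past the allocated buffer: each macro evaluates to 0 once the remaining room is exhausted, which is what makes the long chained `if (!(... && ...)) goto toobig;` pattern safe. Below is a minimal self-contained sketch of that idiom; the names (struct cursor, poke_bytes, poke_u8, poke_u16) are invented for illustration and are not lldpd's actual frame.h implementation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Cursor over a fixed-size output buffer. */
struct cursor { uint8_t *pos; uint8_t *end; };

/* Each writer returns 1 on success and 0 when the buffer is full,
 * so calls can be chained with && exactly like the POKE_* macros. */
static int poke_bytes(struct cursor *c, const void *data, size_t len)
{
	if ((size_t)(c->end - c->pos) < len)
		return 0; /* not enough room left: refuse to write */
	memcpy(c->pos, data, len);
	c->pos += len;
	return 1;
}

static int poke_u8(struct cursor *c, uint8_t v)
{
	return poke_bytes(c, &v, 1);
}

static int poke_u16(struct cursor *c, uint16_t v)
{
	uint8_t be[2] = { (uint8_t)(v >> 8), (uint8_t)v }; /* network byte order */
	return poke_bytes(c, be, sizeof(be));
}

int main(void)
{
	uint8_t frame[8];
	struct cursor c = { frame, frame + sizeof(frame) };

	/* Chained writes: the first failure short-circuits the whole
	 * expression, mirroring the "goto toobig" pattern above. */
	if (!(poke_u8(&c, 0xaa) && poke_u8(&c, 0xaa) && poke_u8(&c, 0x03) &&
	    poke_u16(&c, 0x01a2))) {
		fprintf(stderr, "toobig\n");
		return 1;
	}
	printf("wrote %td bytes\n", (ptrdiff_t)(c.pos - frame));
	return 0;
}

The design choice this models: a single bounds check lives in one helper, every emit goes through it, and a failure aborts the whole frame rather than silently truncating it.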
crossvul-cpp_data_good_1770_1
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* We also support FDP which is very similar to CDPv1 */ #include "lldpd.h" #include "frame.h" #if defined (ENABLE_CDP) || defined (ENABLE_FDP) #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> static int cdp_send(struct lldpd *global, struct lldpd_hardware *hardware, int version) { const char *platform = "Unknown"; struct lldpd_chassis *chassis; struct lldpd_mgmt *mgmt; struct lldpd_port *port; u_int8_t mcastaddr[] = CDP_MULTICAST_ADDR; u_int8_t llcorg[] = LLC_ORG_CISCO; #ifdef ENABLE_FDP char *capstr; #endif u_int16_t checksum; int length, i; u_int32_t cap; u_int8_t *packet; u_int8_t *pos, *pos_len_eh, *pos_llc, *pos_cdp, *pos_checksum, *tlv, *end; log_debug("cdp", "send CDP frame on %s", hardware->h_ifname); port = &(hardware->h_lport); chassis = port->p_chassis; #ifdef ENABLE_FDP if (version == 0) { /* With FDP, change multicast address and LLC PID */ const u_int8_t fdpmcastaddr[] = FDP_MULTICAST_ADDR; const u_int8_t fdpllcorg[] = LLC_ORG_FOUNDRY; memcpy(mcastaddr, fdpmcastaddr, sizeof(mcastaddr)); memcpy(llcorg, fdpllcorg, sizeof(llcorg)); } #endif length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && POKE_SAVE(pos_len_eh) && /* We compute the len later */ POKE_UINT16(0))) goto toobig; /* LLC */ if (!( POKE_SAVE(pos_llc) && POKE_UINT8(0xaa) && /* DSAP */ POKE_UINT8(0xaa) && /* SSAP */ POKE_UINT8(0x03) && /* Control field */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_UINT16(LLC_PID_CDP))) goto toobig; /* CDP header */ if (!( POKE_SAVE(pos_cdp) && POKE_UINT8((version == 0)?1:version) && POKE_UINT8(chassis->c_ttl) && POKE_SAVE(pos_checksum) && /* Save checksum position */ POKE_UINT16(0))) goto toobig; /* Chassis ID */ if (!( POKE_START_CDP_TLV(CDP_TLV_CHASSIS) && (chassis->c_name? POKE_BYTES(chassis->c_name, strlen(chassis->c_name)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Addresses */ /* See: * http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#xtocid12 * * It seems that Cisco implies that CDP supports IPv6 using * 802.2 address format with 0xAAAA03 0x000000 0x0800, but * 0x0800 is the Ethernet protocol type for IPv4. Therefore, * we support only IPv4.
*/ i = 0; TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) if (mgmt->m_family == LLDPD_AF_IPV4) i++; if (i > 0) { if (!( POKE_START_CDP_TLV(CDP_TLV_ADDRESSES) && POKE_UINT32(i))) goto toobig; TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { switch (mgmt->m_family) { case LLDPD_AF_IPV4: if (!( POKE_UINT8(1) && /* Type: NLPID */ POKE_UINT8(1) && /* Length: 1 */ POKE_UINT8(CDP_ADDRESS_PROTO_IP) && /* IP */ POKE_UINT16(sizeof(struct in_addr)) && /* Address length */ POKE_BYTES(&mgmt->m_addr, sizeof(struct in_addr)))) goto toobig; break; } } if (!(POKE_END_CDP_TLV)) goto toobig; } /* Port ID */ if (!( POKE_START_CDP_TLV(CDP_TLV_PORT) && (hardware->h_lport.p_descr? POKE_BYTES(hardware->h_lport.p_descr, strlen(hardware->h_lport.p_descr)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Capabilities */ if (version != 0) { cap = 0; if (chassis->c_cap_enabled & LLDP_CAP_ROUTER) cap |= CDP_CAP_ROUTER; if (chassis->c_cap_enabled & LLDP_CAP_BRIDGE) cap |= CDP_CAP_SWITCH; cap |= CDP_CAP_HOST; if (!( POKE_START_CDP_TLV(CDP_TLV_CAPABILITIES) && POKE_UINT32(cap) && POKE_END_CDP_TLV)) goto toobig; #ifdef ENABLE_FDP } else { /* With FDP, it seems that a string is used in place of an int */ if (chassis->c_cap_enabled & LLDP_CAP_ROUTER) capstr = "Router"; else if (chassis->c_cap_enabled & LLDP_CAP_BRIDGE) capstr = "Switch"; else if (chassis->c_cap_enabled & LLDP_CAP_REPEATER) capstr = "Bridge"; else capstr = "Host"; if (!( POKE_START_CDP_TLV(CDP_TLV_CAPABILITIES) && POKE_BYTES(capstr, strlen(capstr)) && POKE_END_CDP_TLV)) goto toobig; #endif } /* Native VLAN */ #ifdef ENABLE_DOT1 if (version >=2 && hardware->h_lport.p_pvid != 0) { if (!( POKE_START_CDP_TLV(CDP_TLV_NATIVEVLAN) && POKE_UINT16(hardware->h_lport.p_pvid) && POKE_END_CDP_TLV)) goto toobig; } #endif /* Software version */ if (!( POKE_START_CDP_TLV(CDP_TLV_SOFTWARE) && (chassis->c_descr? POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Platform */ if (global && global->g_config.c_platform) platform = global->g_config.c_platform; if (!( POKE_START_CDP_TLV(CDP_TLV_PLATFORM) && POKE_BYTES(platform, strlen(platform)) && POKE_END_CDP_TLV)) goto toobig; #ifdef ENABLE_LLDPMED /* Power use */ if ((version >= 2) && port->p_med_cap_enabled && (port->p_med_power.source != LLDP_MED_POW_SOURCE_LOCAL) && (port->p_med_power.val > 0) && (port->p_med_power.val <= 655)) { if (!( POKE_START_CDP_TLV(CDP_TLV_POWER_CONSUMPTION) && POKE_UINT16(port->p_med_power.val * 100) && POKE_END_CDP_TLV)) goto toobig; } #endif (void)POKE_SAVE(end); /* Compute len and checksum */ POKE_RESTORE(pos_len_eh); if (!(POKE_UINT16(end - pos_llc))) goto toobig; checksum = frame_checksum(pos_cdp, end - pos_cdp, (version != 0) ? 
1 : 0); POKE_RESTORE(pos_checksum); if (!(POKE_UINT16(checksum))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("cdp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } hardware->h_tx_cnt++; free(packet); return 0; toobig: free(packet); return -1; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_len < (x)) { \ log_warnx("cdp", name " CDP/FDP TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) /* cdp_decode also decodes FDP */ int cdp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; struct lldpd_mgmt *mgmt; struct in_addr addr; #if 0 u_int16_t cksum; #endif u_int8_t *software = NULL, *platform = NULL; int software_len = 0, platform_len = 0, proto, version, nb, caps; const unsigned char cdpaddr[] = CDP_MULTICAST_ADDR; #ifdef ENABLE_FDP const unsigned char fdpaddr[] = CDP_MULTICAST_ADDR; int fdp = 0; #endif u_int8_t *pos, *tlv, *pos_address, *pos_next_address; int length, len_eth, tlv_type, tlv_len, addresses_len, address_len; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan; #endif log_debug("cdp", "decode CDP frame received on %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("cdp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("cdp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) /* Ethernet */ + 8 /* LLC */ + 4 /* CDP header */) { log_warn("cdp", "too short CDP/FDP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(cdpaddr, sizeof(cdpaddr)) != 0) { #ifdef ENABLE_FDP PEEK_RESTORE((u_int8_t*)frame); if (PEEK_CMP(fdpaddr, sizeof(fdpaddr)) != 0) fdp = 1; else { #endif log_info("cdp", "frame not targeted at CDP/FDP multicast address received on %s", hardware->h_ifname); goto malformed; #ifdef ENABLE_FDP } #endif } PEEK_DISCARD(ETHER_ADDR_LEN); /* Don't care of source address */ len_eth = PEEK_UINT16; if (len_eth > length) { log_warnx("cdp", "incorrect 802.3 frame size reported on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(6); /* Skip beginning of LLC */ proto = PEEK_UINT16; if (proto != LLC_PID_CDP) { if ((proto != LLC_PID_DRIP) && (proto != LLC_PID_PAGP) && (proto != LLC_PID_PVSTP) && (proto != LLC_PID_UDLD) && (proto != LLC_PID_VTP) && (proto != LLC_PID_DTP) && (proto != LLC_PID_STP)) log_debug("cdp", "incorrect LLC protocol ID received on %s", hardware->h_ifname); goto malformed; } #if 0 /* Check checksum */ cksum = frame_checksum(pos, len_eth - 8, #ifdef ENABLE_FDP !fdp /* fdp = 0 -> cisco checksum */ #else 1 /* cisco checksum */ #endif ); if (cksum != 0) { log_info("cdp", "incorrect CDP/FDP checksum for frame received on %s (%d)", hardware->h_ifname, cksum); goto malformed; } #endif /* Check version */ version = PEEK_UINT8; if ((version != 1) && (version != 2)) { log_warnx("cdp", "incorrect CDP/FDP version (%d) for frame received on %s", version, hardware->h_ifname); goto malformed; } chassis->c_ttl = PEEK_UINT8; /* TTL */ PEEK_DISCARD_UINT16; /* Checksum, already checked */ while (length) { if (length < 4) { log_warnx("cdp", "CDP/FDP TLV header is too 
large for " "frame received on %s", hardware->h_ifname); goto malformed; } tlv_type = PEEK_UINT16; tlv_len = PEEK_UINT16 - 4; (void)PEEK_SAVE(tlv); if ((tlv_len < 0) || (length < tlv_len)) { log_warnx("cdp", "incorrect size in CDP/FDP TLV header for frame " "received on %s", hardware->h_ifname); goto malformed; } switch (tlv_type) { case CDP_TLV_CHASSIS: if ((chassis->c_name = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis name"); goto malformed; } PEEK_BYTES(chassis->c_name, tlv_len); chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LOCAL; if ((chassis->c_id = (char *)malloc(tlv_len)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis ID"); goto malformed; } memcpy(chassis->c_id, chassis->c_name, tlv_len); chassis->c_id_len = tlv_len; break; case CDP_TLV_ADDRESSES: CHECK_TLV_SIZE(4, "Address"); addresses_len = tlv_len - 4; for (nb = PEEK_UINT32; nb > 0; nb--) { (void)PEEK_SAVE(pos_address); /* We first try to get the real length of the packet */ if (addresses_len < 2) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD_UINT8; addresses_len--; address_len = PEEK_UINT8; addresses_len--; if (addresses_len < address_len + 2) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(address_len); addresses_len -= address_len; address_len = PEEK_UINT16; addresses_len -= 2; if (addresses_len < address_len) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(address_len); (void)PEEK_SAVE(pos_next_address); /* Next, we go back and try to extract IPv4 address */ PEEK_RESTORE(pos_address); if ((PEEK_UINT8 == 1) && (PEEK_UINT8 == 1) && (PEEK_UINT8 == CDP_ADDRESS_PROTO_IP) && (PEEK_UINT16 == sizeof(struct in_addr))) { PEEK_BYTES(&addr, sizeof(struct in_addr)); mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &addr, sizeof(struct in_addr), 0); if (mgmt == NULL) { if (errno == ENOMEM) log_warn("cdp", "unable to allocate memory for management address"); else log_warn("cdp", "too large management address received on %s", hardware->h_ifname); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); } /* Go to the end of the address */ PEEK_RESTORE(pos_next_address); } break; case CDP_TLV_PORT: if (tlv_len == 0) { log_warn("cdp", "too short port description received"); goto malformed; } if ((port->p_descr = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for port description"); goto malformed; } PEEK_BYTES(port->p_descr, tlv_len); port->p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME; if ((port->p_id = (char *)calloc(1, tlv_len)) == NULL) { log_warn("cdp", "unable to allocate memory for port ID"); goto malformed; } memcpy(port->p_id, port->p_descr, tlv_len); port->p_id_len = tlv_len; break; case CDP_TLV_CAPABILITIES: #ifdef ENABLE_FDP if (fdp) { /* Capabilities are strings with FDP */ if (!strncmp("Router", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_ROUTER; else if (!strncmp("Switch", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_BRIDGE; else if (!strncmp("Bridge", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_REPEATER; else chassis->c_cap_enabled = LLDP_CAP_STATION; chassis->c_cap_available = chassis->c_cap_enabled; break; } #endif CHECK_TLV_SIZE(4, "Capabilities"); caps = PEEK_UINT32; if (caps & CDP_CAP_ROUTER) chassis->c_cap_enabled |= LLDP_CAP_ROUTER; if (caps & 0x0e)
chassis->c_cap_enabled |= LLDP_CAP_BRIDGE; if (chassis->c_cap_enabled == 0) chassis->c_cap_enabled = LLDP_CAP_STATION; chassis->c_cap_available = chassis->c_cap_enabled; break; case CDP_TLV_SOFTWARE: software_len = tlv_len; (void)PEEK_SAVE(software); break; case CDP_TLV_PLATFORM: platform_len = tlv_len; (void)PEEK_SAVE(platform); break; #ifdef ENABLE_DOT1 case CDP_TLV_NATIVEVLAN: CHECK_TLV_SIZE(2, "Native VLAN"); if ((vlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("cdp", "unable to alloc vlan " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } vlan->v_vid = port->p_pvid = PEEK_UINT16; if (asprintf(&vlan->v_name, "VLAN #%d", vlan->v_vid) == -1) { log_warn("cdp", "unable to alloc VLAN name for " "TLV received on %s", hardware->h_ifname); free(vlan); goto malformed; } TAILQ_INSERT_TAIL(&port->p_vlans, vlan, v_entries); break; #endif default: log_debug("cdp", "unknown CDP/FDP TLV type (%d) received on %s", ntohs(tlv_type), hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } PEEK_DISCARD(tlv + tlv_len - pos); } if (!software && platform) { if ((chassis->c_descr = (char *)calloc(1, platform_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, platform, platform_len); } else if (software && !platform) { if ((chassis->c_descr = (char *)calloc(1, software_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, software, software_len); } else if (software && platform) { #define CONCAT_PLATFORM " running on\n" if ((chassis->c_descr = (char *)calloc(1, software_len + platform_len + strlen(CONCAT_PLATFORM) + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, platform, platform_len); memcpy(chassis->c_descr + platform_len, CONCAT_PLATFORM, strlen(CONCAT_PLATFORM)); memcpy(chassis->c_descr + platform_len + strlen(CONCAT_PLATFORM), software, software_len); } if ((chassis->c_id == NULL) || (port->p_id == NULL) || (chassis->c_name == NULL) || (chassis->c_descr == NULL) || (port->p_descr == NULL) || (chassis->c_ttl == 0) || (chassis->c_cap_enabled == 0)) { log_warnx("cdp", "some mandatory CDP/FDP tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #ifdef ENABLE_CDP int cdpv1_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 1); } int cdpv2_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 2); } #endif #ifdef ENABLE_FDP int fdp_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 0); } #endif #ifdef ENABLE_CDP static int cdp_guess(char *pos, int length, int version) { const u_int8_t mcastaddr[] = CDP_MULTICAST_ADDR; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) /* Ethernet */ + 8 /* LLC */ + 4 /* CDP header */) return 0; if (PEEK_CMP(mcastaddr, ETHER_ADDR_LEN) != 0) return 0; PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; /* Ethernet */ PEEK_DISCARD(8); /* LLC */ return (PEEK_UINT8 == version); } int cdpv1_guess(char *frame, int len) { return cdp_guess(frame, len, 1); } int cdpv2_guess(char *frame, int len) { return cdp_guess(frame, len, 2); } #endif #endif /* defined (ENABLE_CDP) || 
defined (ENABLE_FDP) */
./CrossVul/dataset_final_sorted/CWE-617/c/good_1770_1
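The TLV loop in cdp_decode() above validates twice before touching any payload: first that at least 4 bytes remain for the type/length header, then that the length field (which in CDP counts the header itself) fits inside the bytes actually left. That double check is what stops a forged length from walking the parser off the end of the frame. A compact sketch of the same walk follows; the names (tlv_walk, on_tlv, print_tlv) are invented for illustration and stand in for lldpd's PEEK_* machinery.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Walk a buffer of TLVs: 2-byte type, then 2-byte total length that
 * includes the 4-byte header, both big-endian, as in the CDP layout
 * parsed above. Returns 0 on success, -1 on a malformed frame. */
static int tlv_walk(const uint8_t *p, size_t len,
    void (*on_tlv)(uint16_t type, const uint8_t *val, size_t vlen))
{
	while (len > 0) {
		if (len < 4)
			return -1; /* truncated header */
		uint16_t type  = (uint16_t)((p[0] << 8) | p[1]);
		uint16_t total = (uint16_t)((p[2] << 8) | p[3]);
		if (total < 4 || total > len)
			return -1; /* forged or truncated length field */
		on_tlv(type, p + 4, (size_t)(total - 4));
		p += total;
		len -= total;
	}
	return 0;
}

static void print_tlv(uint16_t type, const uint8_t *val, size_t vlen)
{
	(void)val;
	printf("tlv type=%u len=%zu\n", type, vlen);
}

int main(void)
{
	/* One well-formed TLV: type 1, total length 9, 5 payload bytes. */
	const uint8_t frame[] = {0x00, 0x01, 0x00, 0x09, 'h', 'e', 'l', 'l', 'o'};
	return tlv_walk(frame, sizeof(frame), print_tlv);
}

Note that `total < 4` corresponds to the `tlv_len < 0` guard above and `total > len` to `length < tlv_len`; dropping either one reintroduces the out-of-bounds read the patched code defends against.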
crossvul-cpp_data_good_3363_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP N N GGGG % % P P NN N G % % PPPP N N N G GG % % P N NN G G % % P N N GGG % % % % % % Read/Write Portable Network Graphics Image Format % % % % Software Design % % Cristy % % Glenn Randers-Pehrson % % November 1997 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/quantum-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/static.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/transform.h" #include "magick/utility.h" #if defined(MAGICKCORE_PNG_DELEGATE) /* Suppress libpng pedantic warnings that were added in * libpng-1.2.41 and libpng-1.4.0. If you are working on * migration to libpng-1.5, remove these defines and then * fix any code that generates warnings. */ /* #define PNG_DEPRECATED Use of this function is deprecated */ /* #define PNG_USE_RESULT The result of this function must be checked */ /* #define PNG_NORETURN This function does not return */ /* #define PNG_ALLOCATED The result of the function is new memory */ /* #define PNG_DEPSTRUCT Access to this struct member is deprecated */ /* PNG_PTR_NORETURN does not work on some platforms, in libpng-1.5.x */ #define PNG_PTR_NORETURN #include "png.h" #include "zlib.h" /* ImageMagick differences */ #define first_scene scene #if PNG_LIBPNG_VER > 10011 /* Optional declarations. Define or undefine them as you like. */ /* #define PNG_DEBUG -- turning this on breaks VisualC compiling */ /* Features under construction. Define these to work on them. */ #undef MNG_OBJECT_BUFFERS #undef MNG_BASI_SUPPORTED #define MNG_COALESCE_LAYERS /* In 5.4.4, this interfered with MMAP'ed files. 
*/ #define MNG_INSERT_LAYERS /* Troublesome, but seem to work as of 5.4.4 */ #if defined(MAGICKCORE_JPEG_DELEGATE) # define JNG_SUPPORTED /* Not finished as of 5.5.2. See "To do" comments. */ #endif #if !defined(RGBColorMatchExact) #define IsPNGColorEqual(color,target) \ (((color).red == (target).red) && \ ((color).green == (target).green) && \ ((color).blue == (target).blue)) #endif /* Table of recognized sRGB ICC profiles */ struct sRGB_info_struct { png_uint_32 len; png_uint_32 crc; png_byte intent; }; const struct sRGB_info_struct sRGB_info[] = { /* ICC v2 perceptual sRGB_IEC61966-2-1_black_scaled.icc */ { 3048, 0x3b8772b9UL, 0}, /* ICC v2 relative sRGB_IEC61966-2-1_no_black_scaling.icc */ { 3052, 0x427ebb21UL, 1}, /* ICC v4 perceptual sRGB_v4_ICC_preference_displayclass.icc */ {60988, 0x306fd8aeUL, 0}, /* ICC v4 perceptual sRGB_v4_ICC_preference.icc perceptual */ {60960, 0xbbef7812UL, 0}, /* HP? sRGB v2 media-relative sRGB_IEC61966-2-1_noBPC.icc */ { 3024, 0x5d5129ceUL, 1}, /* HP-Microsoft sRGB v2 perceptual */ { 3144, 0x182ea552UL, 0}, /* HP-Microsoft sRGB v2 media-relative */ { 3144, 0xf29e526dUL, 1}, /* Facebook's "2012/01/25 03:41:57", 524, "TINYsRGB.icc" */ { 524, 0xd4938c39UL, 0}, /* "2012/11/28 22:35:21", 3212, "Argyll_sRGB.icm") */ { 3212, 0x034af5a1UL, 0}, /* Not recognized */ { 0, 0x00000000UL, 0}, }; /* Macros for left-bit-replication to ensure that pixels * and PixelPackets all have the same image->depth, and for use * in PNG8 quantization. */ /* LBR01: Replicate top bit */ #define LBR01PacketRed(pixelpacket) \ (pixelpacket).red=(ScaleQuantumToChar((pixelpacket).red) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketGreen(pixelpacket) \ (pixelpacket).green=(ScaleQuantumToChar((pixelpacket).green) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketBlue(pixelpacket) \ (pixelpacket).blue=(ScaleQuantumToChar((pixelpacket).blue) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketOpacity(pixelpacket) \ (pixelpacket).opacity=(ScaleQuantumToChar((pixelpacket).opacity) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketRGB(pixelpacket) \ { \ LBR01PacketRed((pixelpacket)); \ LBR01PacketGreen((pixelpacket)); \ LBR01PacketBlue((pixelpacket)); \ } #define LBR01PacketRGBO(pixelpacket) \ { \ LBR01PacketRGB((pixelpacket)); \ LBR01PacketOpacity((pixelpacket)); \ } #define LBR01PixelRed(pixel) \ (SetPixelRed((pixel), \ ScaleQuantumToChar(GetPixelRed((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelGreen(pixel) \ (SetPixelGreen((pixel), \ ScaleQuantumToChar(GetPixelGreen((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelBlue(pixel) \ (SetPixelBlue((pixel), \ ScaleQuantumToChar(GetPixelBlue((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelOpacity(pixel) \ (SetPixelOpacity((pixel), \ ScaleQuantumToChar(GetPixelOpacity((pixel))) < 0x10 ? 
\ 0 : QuantumRange)); #define LBR01PixelRGB(pixel) \ { \ LBR01PixelRed((pixel)); \ LBR01PixelGreen((pixel)); \ LBR01PixelBlue((pixel)); \ } #define LBR01PixelRGBO(pixel) \ { \ LBR01PixelRGB((pixel)); \ LBR01PixelOpacity((pixel)); \ } /* LBR02: Replicate top 2 bits */ #define LBR02PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xc0; \ (pixelpacket).red=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xc0; \ (pixelpacket).green=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xc0; \ (pixelpacket).blue=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketOpacity(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).opacity) & 0xc0; \ (pixelpacket).opacity=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketRGB(pixelpacket) \ { \ LBR02PacketRed((pixelpacket)); \ LBR02PacketGreen((pixelpacket)); \ LBR02PacketBlue((pixelpacket)); \ } #define LBR02PacketRGBO(pixelpacket) \ { \ LBR02PacketRGB((pixelpacket)); \ LBR02PacketOpacity((pixelpacket)); \ } #define LBR02PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xc0; \ SetPixelRed((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xc0; \ SetPixelGreen((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelBlue(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelBlue((pixel))) & 0xc0; \ SetPixelBlue((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02Opacity(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelOpacity((pixel))) & 0xc0; \ SetPixelOpacity((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelRGB(pixel) \ { \ LBR02PixelRed((pixel)); \ LBR02PixelGreen((pixel)); \ LBR02PixelBlue((pixel)); \ } #define LBR02PixelRGBO(pixel) \ { \ LBR02PixelRGB((pixel)); \ LBR02Opacity((pixel)); \ } /* LBR03: Replicate top 3 bits (only used with opaque pixels during PNG8 quantization) */ #define LBR03PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xe0; \ (pixelpacket).red=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xe0; \ (pixelpacket).green=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xe0; \ (pixelpacket).blue=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketRGB(pixelpacket) \ { \ LBR03PacketRed((pixelpacket)); \ LBR03PacketGreen((pixelpacket)); \ LBR03PacketBlue((pixelpacket)); \ } #define LBR03PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xe0; \ 
SetPixelRed((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xe0; \ SetPixelGreen((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelBlue(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelBlue((pixel))) \ & 0xe0; \ SetPixelBlue((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelRGB(pixel) \ { \ LBR03PixelRed((pixel)); \ LBR03PixelGreen((pixel)); \ LBR03PixelBlue((pixel)); \ } /* LBR04: Replicate top 4 bits */ #define LBR04PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xf0; \ (pixelpacket).red=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xf0; \ (pixelpacket).green=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xf0; \ (pixelpacket).blue=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketOpacity(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).opacity) & 0xf0; \ (pixelpacket).opacity=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketRGB(pixelpacket) \ { \ LBR04PacketRed((pixelpacket)); \ LBR04PacketGreen((pixelpacket)); \ LBR04PacketBlue((pixelpacket)); \ } #define LBR04PacketRGBO(pixelpacket) \ { \ LBR04PacketRGB((pixelpacket)); \ LBR04PacketOpacity((pixelpacket)); \ } #define LBR04PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xf0; \ SetPixelRed((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xf0; \ SetPixelGreen((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelBlue(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelBlue((pixel))) & 0xf0; \ SetPixelBlue((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelOpacity(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelOpacity((pixel))) & 0xf0; \ SetPixelOpacity((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelRGB(pixel) \ { \ LBR04PixelRed((pixel)); \ LBR04PixelGreen((pixel)); \ LBR04PixelBlue((pixel)); \ } #define LBR04PixelRGBO(pixel) \ { \ LBR04PixelRGB((pixel)); \ LBR04PixelOpacity((pixel)); \ } /* Establish thread safety. setjmp/longjmp is claimed to be safe on these platforms: setjmp/longjmp is alleged to be unsafe on these platforms: */ #ifdef PNG_SETJMP_SUPPORTED # ifndef IMPNG_SETJMP_IS_THREAD_SAFE # define IMPNG_SETJMP_NOT_THREAD_SAFE # endif # ifdef IMPNG_SETJMP_NOT_THREAD_SAFE static SemaphoreInfo *ping_semaphore = (SemaphoreInfo *) NULL; # endif #endif /* This is temporary until I set up malloc'ed object attributes array. Recompile with MNG_MAX_OBJECTS=65536L to avoid this limit but waste more memory. */ #define MNG_MAX_OBJECTS 256 /* If this is not defined, spec is interpreted strictly. If it is defined, an attempt will be made to recover from some errors, including o global PLTE too short */ #undef MNG_LOOSE /* Don't try to define PNG_MNG_FEATURES_SUPPORTED here. Make sure it's defined in libpng/pngconf.h, version 1.0.9 or later.
It won't work with earlier versions of libpng. From libpng-1.0.3a to libpng-1.0.8, PNG_READ|WRITE_EMPTY_PLTE were used but those have been deprecated in libpng in favor of PNG_MNG_FEATURES_SUPPORTED, so we set them here. PNG_MNG_FEATURES_SUPPORTED is disabled by default in libpng-1.0.9 and will be enabled by default in libpng-1.2.0. */ #ifdef PNG_MNG_FEATURES_SUPPORTED # ifndef PNG_READ_EMPTY_PLTE_SUPPORTED # define PNG_READ_EMPTY_PLTE_SUPPORTED # endif # ifndef PNG_WRITE_EMPTY_PLTE_SUPPORTED # define PNG_WRITE_EMPTY_PLTE_SUPPORTED # endif #endif /* Maximum valid size_t in PNG/MNG chunks is (2^31)-1 This macro is only defined in libpng-1.0.3 and later. Previously it was PNG_MAX_UINT but that was deprecated in libpng-1.2.6 */ #ifndef PNG_UINT_31_MAX #define PNG_UINT_31_MAX (png_uint_32) 0x7fffffffL #endif /* Constant strings for known chunk types. If you need to add a chunk, add a string holding the name here. To make the code more portable, we use ASCII numbers like this, not characters. */ /* until registration of eXIf */ static const png_byte mng_exIf[5]={101, 120, 73, 102, (png_byte) '\0'}; /* after registration of eXIf */ static const png_byte mng_eXIf[5]={101, 88, 73, 102, (png_byte) '\0'}; static const png_byte mng_MHDR[5]={ 77, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_BACK[5]={ 66, 65, 67, 75, (png_byte) '\0'}; static const png_byte mng_BASI[5]={ 66, 65, 83, 73, (png_byte) '\0'}; static const png_byte mng_CLIP[5]={ 67, 76, 73, 80, (png_byte) '\0'}; static const png_byte mng_CLON[5]={ 67, 76, 79, 78, (png_byte) '\0'}; static const png_byte mng_DEFI[5]={ 68, 69, 70, 73, (png_byte) '\0'}; static const png_byte mng_DHDR[5]={ 68, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_DISC[5]={ 68, 73, 83, 67, (png_byte) '\0'}; static const png_byte mng_ENDL[5]={ 69, 78, 68, 76, (png_byte) '\0'}; static const png_byte mng_FRAM[5]={ 70, 82, 65, 77, (png_byte) '\0'}; static const png_byte mng_IEND[5]={ 73, 69, 78, 68, (png_byte) '\0'}; static const png_byte mng_IHDR[5]={ 73, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_JHDR[5]={ 74, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_LOOP[5]={ 76, 79, 79, 80, (png_byte) '\0'}; static const png_byte mng_MAGN[5]={ 77, 65, 71, 78, (png_byte) '\0'}; static const png_byte mng_MEND[5]={ 77, 69, 78, 68, (png_byte) '\0'}; static const png_byte mng_MOVE[5]={ 77, 79, 86, 69, (png_byte) '\0'}; static const png_byte mng_PAST[5]={ 80, 65, 83, 84, (png_byte) '\0'}; static const png_byte mng_PLTE[5]={ 80, 76, 84, 69, (png_byte) '\0'}; static const png_byte mng_SAVE[5]={ 83, 65, 86, 69, (png_byte) '\0'}; static const png_byte mng_SEEK[5]={ 83, 69, 69, 75, (png_byte) '\0'}; static const png_byte mng_SHOW[5]={ 83, 72, 79, 87, (png_byte) '\0'}; static const png_byte mng_TERM[5]={ 84, 69, 82, 77, (png_byte) '\0'}; static const png_byte mng_bKGD[5]={ 98, 75, 71, 68, (png_byte) '\0'}; static const png_byte mng_caNv[5]={ 99, 97, 78, 118, (png_byte) '\0'}; static const png_byte mng_cHRM[5]={ 99, 72, 82, 77, (png_byte) '\0'}; static const png_byte mng_gAMA[5]={103, 65, 77, 65, (png_byte) '\0'}; static const png_byte mng_iCCP[5]={105, 67, 67, 80, (png_byte) '\0'}; static const png_byte mng_nEED[5]={110, 69, 69, 68, (png_byte) '\0'}; static const png_byte mng_pHYg[5]={112, 72, 89, 103, (png_byte) '\0'}; static const png_byte mng_vpAg[5]={118, 112, 65, 103, (png_byte) '\0'}; static const png_byte mng_pHYs[5]={112, 72, 89, 115, (png_byte) '\0'}; static const png_byte mng_sBIT[5]={115, 66, 73, 84, (png_byte) '\0'}; static const png_byte 
mng_sRGB[5]={115, 82, 71, 66, (png_byte) '\0'}; static const png_byte mng_tRNS[5]={116, 82, 78, 83, (png_byte) '\0'}; #if defined(JNG_SUPPORTED) static const png_byte mng_IDAT[5]={ 73, 68, 65, 84, (png_byte) '\0'}; static const png_byte mng_JDAT[5]={ 74, 68, 65, 84, (png_byte) '\0'}; static const png_byte mng_JDAA[5]={ 74, 68, 65, 65, (png_byte) '\0'}; static const png_byte mng_JdAA[5]={ 74, 100, 65, 65, (png_byte) '\0'}; static const png_byte mng_JSEP[5]={ 74, 83, 69, 80, (png_byte) '\0'}; static const png_byte mng_oFFs[5]={111, 70, 70, 115, (png_byte) '\0'}; #endif #if 0 /* Other known chunks that are not yet supported by ImageMagick: */ static const png_byte mng_hIST[5]={104, 73, 83, 84, (png_byte) '\0'}; static const png_byte mng_iTXt[5]={105, 84, 88, 116, (png_byte) '\0'}; static const png_byte mng_sPLT[5]={115, 80, 76, 84, (png_byte) '\0'}; static const png_byte mng_sTER[5]={115, 84, 69, 82, (png_byte) '\0'}; static const png_byte mng_tEXt[5]={116, 69, 88, 116, (png_byte) '\0'}; static const png_byte mng_tIME[5]={116, 73, 77, 69, (png_byte) '\0'}; static const png_byte mng_zTXt[5]={122, 84, 88, 116, (png_byte) '\0'}; #endif typedef struct _MngBox { long left, right, top, bottom; } MngBox; typedef struct _MngPair { volatile long a, b; } MngPair; #ifdef MNG_OBJECT_BUFFERS typedef struct _MngBuffer { size_t height, width; Image *image; png_color plte[256]; int reference_count; unsigned char alpha_sample_depth, compression_method, color_type, concrete, filter_method, frozen, image_type, interlace_method, pixel_sample_depth, plte_length, sample_depth, viewable; } MngBuffer; #endif typedef struct _MngInfo { #ifdef MNG_OBJECT_BUFFERS MngBuffer *ob[MNG_MAX_OBJECTS]; #endif Image * image; RectangleInfo page; int adjoin, #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED bytes_in_read_buffer, found_empty_plte, #endif equal_backgrounds, equal_chrms, equal_gammas, #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) equal_palettes, #endif equal_physs, equal_srgbs, framing_mode, have_global_bkgd, have_global_chrm, have_global_gama, have_global_phys, have_global_sbit, have_global_srgb, have_saved_bkgd_index, have_write_global_chrm, have_write_global_gama, have_write_global_plte, have_write_global_srgb, need_fram, object_id, old_framing_mode, saved_bkgd_index; int new_number_colors; ssize_t image_found, loop_count[256], loop_iteration[256], scenes_found, x_off[MNG_MAX_OBJECTS], y_off[MNG_MAX_OBJECTS]; MngBox clip, frame, image_box, object_clip[MNG_MAX_OBJECTS]; unsigned char /* These flags could be combined into one byte */ exists[MNG_MAX_OBJECTS], frozen[MNG_MAX_OBJECTS], loop_active[256], invisible[MNG_MAX_OBJECTS], viewable[MNG_MAX_OBJECTS]; MagickOffsetType loop_jump[256]; png_colorp global_plte; png_color_8 global_sbit; png_byte #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED read_buffer[8], #endif global_trns[256]; float global_gamma; ChromaticityInfo global_chrm; RenderingIntent global_srgb_intent; unsigned int delay, global_plte_length, global_trns_length, global_x_pixels_per_unit, global_y_pixels_per_unit, mng_width, mng_height, ticks_per_second; MagickBooleanType need_blob; unsigned int IsPalette, global_phys_unit_type, basi_warning, clon_warning, dhdr_warning, jhdr_warning, magn_warning, past_warning, phyg_warning, phys_warning, sbit_warning, show_warning, mng_type, write_mng, write_png_colortype, write_png_depth, write_png_compression_level, write_png_compression_strategy, write_png_compression_filter, write_png8, write_png24, write_png32, write_png48, write_png64; #ifdef 
MNG_BASI_SUPPORTED size_t basi_width, basi_height; unsigned int basi_depth, basi_color_type, basi_compression_method, basi_filter_type, basi_interlace_method, basi_red, basi_green, basi_blue, basi_alpha, basi_viewable; #endif png_uint_16 magn_first, magn_last, magn_mb, magn_ml, magn_mr, magn_mt, magn_mx, magn_my, magn_methx, magn_methy; PixelPacket mng_global_bkgd; /* Added at version 6.6.6-7 */ MagickBooleanType ping_exclude_bKGD, ping_exclude_cHRM, ping_exclude_date, ping_exclude_eXIf, ping_exclude_EXIF, ping_exclude_gAMA, ping_exclude_iCCP, /* ping_exclude_iTXt, */ ping_exclude_oFFs, ping_exclude_pHYs, ping_exclude_sRGB, ping_exclude_tEXt, ping_exclude_tRNS, ping_exclude_vpAg, ping_exclude_caNv, ping_exclude_zCCP, /* hex-encoded iCCP */ ping_exclude_zTXt, ping_preserve_colormap, /* Added at version 6.8.5-7 */ ping_preserve_iCCP, /* Added at version 6.8.9-9 */ ping_exclude_tIME; } MngInfo; #endif /* VER */ /* Forward declarations. */ static MagickBooleanType WritePNGImage(const ImageInfo *,Image *); static MagickBooleanType WriteMNGImage(const ImageInfo *,Image *); #if defined(JNG_SUPPORTED) static MagickBooleanType WriteJNGImage(const ImageInfo *,Image *); #endif #if PNG_LIBPNG_VER > 10011 #if (MAGICKCORE_QUANTUM_DEPTH >= 16) static MagickBooleanType LosslessReduceDepthOK(Image *image) { /* Reduce bit depth if it can be reduced losslessly from 16+ to 8. * * This is true if the high byte and the next highest byte of * each sample of the image, the colormap, and the background color * are equal to each other. We check this by seeing if the samples * are unchanged when we scale them down to 8 and back up to Quantum. * * We don't use the method GetImageDepth() because it doesn't check * background and doesn't handle PseudoClass specially. */ #define QuantumToCharToQuantumEqQuantum(quantum) \ ((ScaleCharToQuantum((unsigned char) ScaleQuantumToChar(quantum))) == quantum) MagickBooleanType ok_to_reduce=MagickFalse; if (image->depth >= 16) { const PixelPacket *p; ok_to_reduce= QuantumToCharToQuantumEqQuantum(image->background_color.red) && QuantumToCharToQuantumEqQuantum(image->background_color.green) && QuantumToCharToQuantumEqQuantum(image->background_color.blue) ? MagickTrue : MagickFalse; if (ok_to_reduce != MagickFalse && image->storage_class == PseudoClass) { int indx; for (indx=0; indx < (ssize_t) image->colors; indx++) { ok_to_reduce=( QuantumToCharToQuantumEqQuantum( image->colormap[indx].red) && QuantumToCharToQuantumEqQuantum( image->colormap[indx].green) && QuantumToCharToQuantumEqQuantum( image->colormap[indx].blue)) ? MagickTrue : MagickFalse; if (ok_to_reduce == MagickFalse) break; } } if ((ok_to_reduce != MagickFalse) && (image->storage_class != PseudoClass)) { ssize_t y; register ssize_t x; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) { ok_to_reduce = MagickFalse; break; } for (x=(ssize_t) image->columns-1; x >= 0; x--) { ok_to_reduce= QuantumToCharToQuantumEqQuantum(GetPixelRed(p)) && QuantumToCharToQuantumEqQuantum(GetPixelGreen(p)) && QuantumToCharToQuantumEqQuantum(GetPixelBlue(p)) ? 
MagickTrue : MagickFalse; if (ok_to_reduce == MagickFalse) break; p++; } if (x >= 0) break; } } if (ok_to_reduce != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " OK to reduce PNG bit depth to 8 without loss of info"); } else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Not OK to reduce PNG bit depth to 8 without loss of info"); } } return ok_to_reduce; } #endif /* MAGICKCORE_QUANTUM_DEPTH >= 16 */ static const char* PngColorTypeToString(const unsigned int color_type) { const char *result = "Unknown"; switch (color_type) { case PNG_COLOR_TYPE_GRAY: result = "Gray"; break; case PNG_COLOR_TYPE_GRAY_ALPHA: result = "Gray+Alpha"; break; case PNG_COLOR_TYPE_PALETTE: result = "Palette"; break; case PNG_COLOR_TYPE_RGB: result = "RGB"; break; case PNG_COLOR_TYPE_RGB_ALPHA: result = "RGB+Alpha"; break; } return result; } static int Magick_RenderingIntent_to_PNG_RenderingIntent(const RenderingIntent intent) { switch (intent) { case PerceptualIntent: return 0; case RelativeIntent: return 1; case SaturationIntent: return 2; case AbsoluteIntent: return 3; default: return -1; } } static RenderingIntent Magick_RenderingIntent_from_PNG_RenderingIntent(const int ping_intent) { switch (ping_intent) { case 0: return PerceptualIntent; case 1: return RelativeIntent; case 2: return SaturationIntent; case 3: return AbsoluteIntent; default: return UndefinedIntent; } } static const char * Magick_RenderingIntentString_from_PNG_RenderingIntent(const int ping_intent) { switch (ping_intent) { case 0: return "Perceptual Intent"; case 1: return "Relative Intent"; case 2: return "Saturation Intent"; case 3: return "Absolute Intent"; default: return "Undefined Intent"; } } static const char * Magick_ColorType_from_PNG_ColorType(const int ping_colortype) { switch (ping_colortype) { case 0: return "Grayscale"; case 2: return "Truecolor"; case 3: return "Indexed"; case 4: return "GrayAlpha"; case 6: return "RGBA"; default: return "UndefinedColorType"; } } #endif /* PNG_LIBPNG_VER > 10011 */ #endif /* MAGICKCORE_PNG_DELEGATE */ /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMNG() returns MagickTrue if the image format type, identified by the % magick string, is MNG. % % The format of the IsMNG method is: % % MagickBooleanType IsMNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % % */ static MagickBooleanType IsMNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\212MNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s J N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsJNG() returns MagickTrue if the image format type, identified by the % magick string, is JNG. % % The format of the IsJNG method is: % % MagickBooleanType IsJNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
% % */ static MagickBooleanType IsJNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\213JNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPNG() returns MagickTrue if the image format type, identified by the % magick string, is PNG. % % The format of the IsPNG method is: % % MagickBooleanType IsPNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\211PNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } #if defined(MAGICKCORE_PNG_DELEGATE) #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #if (PNG_LIBPNG_VER > 10011) static size_t WriteBlobMSBULong(Image *image,const size_t value) { unsigned char buffer[4]; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; return((size_t) WriteBlob(image,4,buffer)); } static void PNGLong(png_bytep p,png_uint_32 value) { *p++=(png_byte) ((value >> 24) & 0xff); *p++=(png_byte) ((value >> 16) & 0xff); *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGsLong(png_bytep p,png_int_32 value) { *p++=(png_byte) ((value >> 24) & 0xff); *p++=(png_byte) ((value >> 16) & 0xff); *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGShort(png_bytep p,png_uint_16 value) { *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGType(png_bytep p,const png_byte *type) { (void) CopyMagickMemory(p,type,4*sizeof(png_byte)); } static void LogPNGChunk(MagickBooleanType logging, const png_byte *type, size_t length) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing %c%c%c%c chunk, length: %.20g", type[0],type[1],type[2],type[3],(double) length); } #endif /* PNG_LIBPNG_VER > 10011 */ #if defined(__cplusplus) || defined(c_plusplus) } #endif #if PNG_LIBPNG_VER > 10011 /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPNGImage() reads a Portable Network Graphics (PNG) or % Multiple-image Network Graphics (MNG) image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image or set of images. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the ReadPNGImage method is: % % Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % % To do, more or less in chronological order (as of version 5.5.2, % November 26, 2002 -- glennrp -- see also "To do" under WriteMNGImage): % % Get 16-bit cheap transparency working. 
% % (At this point, PNG decoding is supposed to be in full MNG-LC compliance) % % Preserve all unknown and not-yet-handled known chunks found in input % PNG file and copy them into output PNG files according to the PNG % copying rules. % % (At this point, PNG encoding should be in full MNG compliance) % % Provide options for choice of background to use when the MNG BACK % chunk is not present or is not mandatory (i.e., leave transparent, % user specified, MNG BACK, PNG bKGD) % % Implement LOOP/ENDL [done, but could do discretionary loops more % efficiently by linking in the duplicate frames.]. % % Decode and act on the MHDR simplicity profile (offer option to reject % files or attempt to process them anyway when the profile isn't LC or VLC). % % Upgrade to full MNG without Delta-PNG. % % o BACK [done a while ago except for background image ID] % o MOVE [done 15 May 1999] % o CLIP [done 15 May 1999] % o DISC [done 19 May 1999] % o SAVE [partially done 19 May 1999 (marks objects frozen)] % o SEEK [partially done 19 May 1999 (discard function only)] % o SHOW % o PAST % o BASI % o MNG-level tEXt/iTXt/zTXt % o pHYg % o pHYs % o sBIT % o bKGD % o iTXt (wait for libpng implementation). % % Use the scene signature to discover when an identical scene is % being reused, and just point to the original image->exception instead % of storing another set of pixels. This is not specific to MNG % but could be applied generally. % % Upgrade to full MNG with Delta-PNG. % % JNG tEXt/iTXt/zTXt % % We will not attempt to read files containing the CgBI chunk. % They are really Xcode files meant for display on the iPhone. % These are not valid PNG files and it is impossible to recover % the original PNG from files that have been converted to Xcode-PNG, % since irretrievable loss of color data has occurred due to the % use of premultiplied alpha. */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif /* This is the function that does the actual reading of data. It is the same as the one supplied in libpng, except that it receives the datastream from the ReadBlob() function instead of standard input. */ static void png_get_data(png_structp png_ptr,png_bytep data,png_size_t length) { Image *image; image=(Image *) png_get_io_ptr(png_ptr); if (length != 0) { png_size_t check; check=(png_size_t) ReadBlob(image,(size_t) length,data); if (check != length) { char msg[MaxTextExtent]; (void) FormatLocaleString(msg,MaxTextExtent, "Expected %.20g bytes; found %.20g bytes",(double) length, (double) check); png_warning(png_ptr,msg); png_error(png_ptr,"Read Exception"); } } } #if !defined(PNG_READ_EMPTY_PLTE_SUPPORTED) && \ !defined(PNG_MNG_FEATURES_SUPPORTED) /* We use mng_get_data() instead of png_get_data() if we have a libpng * older than libpng-1.0.3a, which was the first to allow the empty * PLTE, or a newer libpng in which PNG_MNG_FEATURES_SUPPORTED was * ifdef'ed out. Earlier versions would crash if the bKGD chunk was * encountered after an empty PLTE, so we have to look ahead for bKGD * chunks and remove them from the datastream that is passed to libpng, * and store their contents for later use.
*/ static void mng_get_data(png_structp png_ptr,png_bytep data,png_size_t length) { MngInfo *mng_info; Image *image; png_size_t check; register ssize_t i; i=0; mng_info=(MngInfo *) png_get_io_ptr(png_ptr); image=(Image *) mng_info->image; while (mng_info->bytes_in_read_buffer && length) { data[i]=mng_info->read_buffer[i]; mng_info->bytes_in_read_buffer--; length--; i++; } if (length != 0) { check=(png_size_t) ReadBlob(image,(size_t) length,(char *) data); if (check != length) png_error(png_ptr,"Read Exception"); if (length == 4) { if ((data[0] == 0) && (data[1] == 0) && (data[2] == 0) && (data[3] == 0)) { check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->read_buffer[4]=0; mng_info->bytes_in_read_buffer=4; if (memcmp(mng_info->read_buffer,mng_PLTE,4) == 0) mng_info->found_empty_plte=MagickTrue; if (memcmp(mng_info->read_buffer,mng_IEND,4) == 0) { mng_info->found_empty_plte=MagickFalse; mng_info->have_saved_bkgd_index=MagickFalse; } } if ((data[0] == 0) && (data[1] == 0) && (data[2] == 0) && (data[3] == 1)) { check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->read_buffer[4]=0; mng_info->bytes_in_read_buffer=4; if (memcmp(mng_info->read_buffer,mng_bKGD,4) == 0) if (mng_info->found_empty_plte) { /* Skip the bKGD data byte and CRC. */ check=(png_size_t) ReadBlob(image,5,(char *) mng_info->read_buffer); check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->saved_bkgd_index=mng_info->read_buffer[0]; mng_info->have_saved_bkgd_index=MagickTrue; mng_info->bytes_in_read_buffer=0; } } } } } #endif static void png_put_data(png_structp png_ptr,png_bytep data,png_size_t length) { Image *image; image=(Image *) png_get_io_ptr(png_ptr); if (length != 0) { png_size_t check; check=(png_size_t) WriteBlob(image,(size_t) length,data); if (check != length) png_error(png_ptr,"WriteBlob Failed"); } } static void png_flush_data(png_structp png_ptr) { (void) png_ptr; } #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED static int PalettesAreEqual(Image *a,Image *b) { ssize_t i; if ((a == (Image *) NULL) || (b == (Image *) NULL)) return((int) MagickFalse); if (a->storage_class != PseudoClass || b->storage_class != PseudoClass) return((int) MagickFalse); if (a->colors != b->colors) return((int) MagickFalse); for (i=0; i < (ssize_t) a->colors; i++) { if ((a->colormap[i].red != b->colormap[i].red) || (a->colormap[i].green != b->colormap[i].green) || (a->colormap[i].blue != b->colormap[i].blue)) return((int) MagickFalse); } return((int) MagickTrue); } #endif static void MngInfoDiscardObject(MngInfo *mng_info,int i) { if (i && (i < MNG_MAX_OBJECTS) && (mng_info != (MngInfo *) NULL) && mng_info->exists[i] && !mng_info->frozen[i]) { #ifdef MNG_OBJECT_BUFFERS if (mng_info->ob[i] != (MngBuffer *) NULL) { if (mng_info->ob[i]->reference_count > 0) mng_info->ob[i]->reference_count--; if (mng_info->ob[i]->reference_count == 0) { if (mng_info->ob[i]->image != (Image *) NULL) mng_info->ob[i]->image=DestroyImage(mng_info->ob[i]->image); mng_info->ob[i]=DestroyString(mng_info->ob[i]); } } mng_info->ob[i]=(MngBuffer *) NULL; #endif mng_info->exists[i]=MagickFalse; mng_info->invisible[i]=MagickFalse; mng_info->viewable[i]=MagickFalse; mng_info->frozen[i]=MagickFalse; mng_info->x_off[i]=0; mng_info->y_off[i]=0; mng_info->object_clip[i].left=0; mng_info->object_clip[i].right=(ssize_t) PNG_UINT_31_MAX; mng_info->object_clip[i].top=0; mng_info->object_clip[i].bottom=(ssize_t) PNG_UINT_31_MAX; } } static MngInfo 
*MngInfoFreeStruct(MngInfo *mng_info) { register ssize_t i; if (mng_info == (MngInfo *) NULL) return((MngInfo *) NULL); for (i=1; i < MNG_MAX_OBJECTS; i++) MngInfoDiscardObject(mng_info,i); if (mng_info->global_plte != (png_colorp) NULL) mng_info->global_plte=(png_colorp) RelinquishMagickMemory(mng_info->global_plte); return((MngInfo *) RelinquishMagickMemory(mng_info)); } static MngBox mng_minimum_box(MngBox box1,MngBox box2) { MngBox box; box=box1; if (box.left < box2.left) box.left=box2.left; if (box.top < box2.top) box.top=box2.top; if (box.right > box2.right) box.right=box2.right; if (box.bottom > box2.bottom) box.bottom=box2.bottom; return box; } static MngBox mng_read_box(MngBox previous_box,char delta_type,unsigned char *p) { MngBox box; /* Read clipping boundaries from DEFI, CLIP, FRAM, or PAST chunk. */ box.left=(ssize_t) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]); box.right=(ssize_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); box.top=(ssize_t) ((p[8] << 24) | (p[9] << 16) | (p[10] << 8) | p[11]); box.bottom=(ssize_t) ((p[12] << 24) | (p[13] << 16) | (p[14] << 8) | p[15]); if (delta_type != 0) { box.left+=previous_box.left; box.right+=previous_box.right; box.top+=previous_box.top; box.bottom+=previous_box.bottom; } return(box); } static MngPair mng_read_pair(MngPair previous_pair,int delta_type, unsigned char *p) { MngPair pair; /* Read two ssize_ts from CLON, MOVE or PAST chunk */ pair.a=(long) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]); pair.b=(long) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); if (delta_type != 0) { pair.a+=previous_pair.a; pair.b+=previous_pair.b; } return(pair); } static long mng_get_long(unsigned char *p) { return((long) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])); } typedef struct _PNGErrorInfo { Image *image; ExceptionInfo *exception; } PNGErrorInfo; static void MagickPNGErrorHandler(png_struct *ping,png_const_charp message) { Image *image; image=(Image *) png_get_error_ptr(ping); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " libpng-%s error: %s", PNG_LIBPNG_VER_STRING,message); (void) ThrowMagickException(&image->exception,GetMagickModule(),CoderError, message,"`%s'",image->filename); #if (PNG_LIBPNG_VER < 10500) /* A warning about deprecated use of jmpbuf here is unavoidable if you * are building with libpng-1.4.x and can be ignored. */ longjmp(ping->jmpbuf,1); #else png_longjmp(ping,1); #endif } static void MagickPNGWarningHandler(png_struct *ping,png_const_charp message) { Image *image; if (LocaleCompare(message, "Missing PLTE before tRNS") == 0) png_error(ping, message); image=(Image *) png_get_error_ptr(ping); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " libpng-%s warning: %s", PNG_LIBPNG_VER_STRING,message); (void) ThrowMagickException(&image->exception,GetMagickModule(),CoderWarning, message,"`%s'",image->filename); } #ifdef PNG_USER_MEM_SUPPORTED #if PNG_LIBPNG_VER >= 10400 static png_voidp Magick_png_malloc(png_structp png_ptr,png_alloc_size_t size) #else static png_voidp Magick_png_malloc(png_structp png_ptr,png_size_t size) #endif { (void) png_ptr; return((png_voidp) AcquireMagickMemory((size_t) size)); } /* Free a pointer. It is removed from the list at the same time. 
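
   (Magick_png_malloc() and Magick_png_free() are only compiled when
   PNG_USER_MEM_SUPPORTED is defined; they are handed to libpng through
   png_create_read_struct_2(), mirroring the call made later in
   ReadOnePNGImage():

     ping=png_create_read_struct_2(PNG_LIBPNG_VER_STRING,image,
       MagickPNGErrorHandler,MagickPNGWarningHandler,NULL,
       (png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free);

   so that libpng's internal allocations go through the MagickCore
   memory routines.)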
*/
static png_free_ptr Magick_png_free(png_structp png_ptr,png_voidp ptr)
{
  (void) png_ptr;
  ptr=RelinquishMagickMemory(ptr);
  return((png_free_ptr) NULL);
}
#endif

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static int
Magick_png_read_raw_profile(png_struct *ping,Image *image,
   const ImageInfo *image_info, png_textp text,int ii)
{
  register ssize_t
    i;

  register unsigned char
    *dp;

  register png_charp
    sp;

  png_charp
    ep;

  png_uint_32
    length,
    nibbles;

  StringInfo
    *profile;

  const unsigned char
    unhex[103]={0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,1, 2,3,4,5,6,7,8,9,0,0,
                0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,10,11,12, 13,14,15};

  sp=text[ii].text+1;
  ep=text[ii].text+text[ii].text_length;

  /* look for newline; do not scan past the end of the text payload */
  while ((sp < ep) && (*sp != '\n'))
    sp++;

  /* look for length */
  while ((sp < ep) && (*sp == '\0' || *sp == ' ' || *sp == '\n'))
    sp++;

  if (sp == ep)
    {
      png_warning(ping,"truncated raw profile");
      return(MagickFalse);
    }

  length=(png_uint_32) StringToLong(sp);

  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
       "      length: %lu",(unsigned long) length);

  while ((sp < ep) && (*sp != ' ') && (*sp != '\n'))
    sp++;

  /* allocate space */
  if (length == 0)
  {
    png_warning(ping,"invalid profile length");
    return(MagickFalse);
  }

  profile=BlobToStringInfo((const void *) NULL,length);

  if (profile == (StringInfo *) NULL)
  {
    png_warning(ping, "unable to copy profile");
    return(MagickFalse);
  }

  /* copy profile, skipping white space and column 1 "=" signs */
  dp=GetStringInfoDatum(profile);
  nibbles=length*2;

  for (i=0; i < (ssize_t) nibbles; i++)
  {
    while ((sp < ep) && (*sp != '\0') &&
           (*sp < '0' || (*sp > '9' && *sp < 'a') || *sp > 'f'))
      sp++;

    if ((sp == ep) || (*sp == '\0'))
    {
      png_warning(ping, "ran out of profile data");
      profile=DestroyStringInfo(profile);
      return(MagickFalse);
    }

    if (i%2 == 0)
      *dp=(unsigned char) (16*unhex[(int) *sp++]);

    else
      (*dp++)+=unhex[(int) *sp++];
  }

  /* We have already read the "Raw profile type " prefix of the key */
  (void) SetImageProfile(image,&text[ii].key[17],profile);
  profile=DestroyStringInfo(profile);

  if (image_info->verbose)
    (void) printf(" Found a generic profile, type %s\n",&text[ii].key[17]);

  return(MagickTrue);
}

#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
static int read_user_chunk_callback(png_struct *ping,
   png_unknown_chunkp chunk)
{
  Image
    *image;

  /* The unknown chunk structure contains the chunk data:

     png_byte name[5];
     png_byte *data;
     png_size_t size;

     Note that libpng has already taken care of the CRC handling.
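
     The callback contract (also documented with the caNv handler below)
     is: return a negative value if the chunk is in error, 0 if the chunk
     is not recognized (libpng then applies its keep/discard rules), and
     a positive value on success.  A minimal callback recognizing only a
     hypothetical "myCk" chunk would look like this sketch:

       static int my_chunk_callback(png_struct *ping,png_unknown_chunkp chunk)
       {
         if (memcmp(chunk->name,"myCk",4) == 0)
           return(1);   // recognized and handled
         return(0);     // not recognized; fall back to the keep rules
       }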
*/ LogMagickEvent(CoderEvent,GetMagickModule(), " read_user_chunk: found %c%c%c%c chunk", chunk->name[0],chunk->name[1],chunk->name[2],chunk->name[3]); if (chunk->name[0] == 101 && (chunk->name[1] == 88 || chunk->name[1] == 120 ) && chunk->name[2] == 73 && chunk-> name[3] == 102) { /* process eXIf or exIf chunk */ PNGErrorInfo *error_info; StringInfo *profile; unsigned char *p; png_byte *s; int i; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " recognized eXIf|exIf chunk"); image=(Image *) png_get_user_chunk_ptr(ping); error_info=(PNGErrorInfo *) png_get_error_ptr(ping); profile=BlobToStringInfo((const void *) NULL,chunk->size+6); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(error_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1); } p=GetStringInfoDatum(profile); /* Initialize profile with "Exif\0\0" */ *p++ ='E'; *p++ ='x'; *p++ ='i'; *p++ ='f'; *p++ ='\0'; *p++ ='\0'; /* copy chunk->data to profile */ s=chunk->data; for (i=0; i < (ssize_t) chunk->size; i++) *p++ = *s++; (void) SetImageProfile(image,"exif",profile); return(1); } /* vpAg (deprecated, replaced by caNv) */ if (chunk->name[0] == 118 && chunk->name[1] == 112 && chunk->name[2] == 65 && chunk->name[3] == 103) { /* recognized vpAg */ if (chunk->size != 9) return(-1); /* Error return */ if (chunk->data[8] != 0) return(0); /* ImageMagick requires pixel units */ image=(Image *) png_get_user_chunk_ptr(ping); image->page.width=(size_t) ((chunk->data[0] << 24) | (chunk->data[1] << 16) | (chunk->data[2] << 8) | chunk->data[3]); image->page.height=(size_t) ((chunk->data[4] << 24) | (chunk->data[5] << 16) | (chunk->data[6] << 8) | chunk->data[7]); return(1); } /* caNv */ if (chunk->name[0] == 99 && chunk->name[1] == 97 && chunk->name[2] == 78 && chunk->name[3] == 118) { /* recognized caNv */ if (chunk->size != 16) return(-1); /* Error return */ image=(Image *) png_get_user_chunk_ptr(ping); image->page.width=(size_t) ((chunk->data[0] << 24) | (chunk->data[1] << 16) | (chunk->data[2] << 8) | chunk->data[3]); image->page.height=(size_t) ((chunk->data[4] << 24) | (chunk->data[5] << 16) | (chunk->data[6] << 8) | chunk->data[7]); image->page.x=(size_t) ((chunk->data[8] << 24) | (chunk->data[9] << 16) | (chunk->data[10] << 8) | chunk->data[11]); image->page.y=(size_t) ((chunk->data[12] << 24) | (chunk->data[13] << 16) | (chunk->data[14] << 8) | chunk->data[15]); /* Return one of the following: */ /* return(-n); chunk had an error */ /* return(0); did not recognize */ /* return(n); success */ return(1); } return(0); /* Did not recognize */ } #endif #if defined(PNG_tIME_SUPPORTED) static void read_tIME_chunk(Image *image,png_struct *ping,png_info *info) { png_timep time; if (png_get_tIME(ping,info,&time)) { char timestamp[21]; FormatLocaleString(timestamp,21,"%04d-%02d-%02dT%02d:%02d:%02dZ", time->year,time->month,time->day,time->hour,time->minute,time->second); SetImageProperty(image,"png:tIME",timestamp); } } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d O n e P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadOnePNGImage() reads a Portable Network Graphics (PNG) image file % (minus the 8-byte signature) and returns it. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. 
% % The format of the ReadOnePNGImage method is: % % Image *ReadOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o mng_info: Specifies a pointer to a MngInfo structure. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { /* Read one PNG image */ /* To do: Read the tEXt/Creation Time chunk into the date:create property */ Image *image; char im_vers[32], libpng_runv[32], libpng_vers[32], zlib_runv[32], zlib_vers[32]; int intent, /* "PNG Rendering intent", which is ICC intent + 1 */ num_raw_profiles, num_text, num_text_total, num_passes, number_colors, pass, ping_bit_depth, ping_color_type, ping_file_depth, ping_interlace_method, ping_compression_method, ping_filter_method, ping_num_trans, unit_type; double file_gamma; LongPixelPacket transparent_color; MagickBooleanType logging, ping_found_cHRM, ping_found_gAMA, ping_found_iCCP, ping_found_sRGB, ping_found_sRGB_cHRM, ping_preserve_iCCP, status; MemoryInfo *volatile pixel_info; png_bytep ping_trans_alpha; png_color_16p ping_background, ping_trans_color; png_info *end_info, *ping_info; png_struct *ping; png_textp text; png_uint_32 ping_height, ping_width, x_resolution, y_resolution; ssize_t ping_rowbytes, y; register unsigned char *p; register IndexPacket *indexes; register ssize_t i, x; register PixelPacket *q; size_t length, row_offset; ssize_t j; unsigned char *ping_pixels; #ifdef PNG_UNKNOWN_CHUNKS_SUPPORTED png_byte unused_chunks[]= { 104, 73, 83, 84, (png_byte) '\0', /* hIST */ 105, 84, 88, 116, (png_byte) '\0', /* iTXt */ 112, 67, 65, 76, (png_byte) '\0', /* pCAL */ 115, 67, 65, 76, (png_byte) '\0', /* sCAL */ 115, 80, 76, 84, (png_byte) '\0', /* sPLT */ #if !defined(PNG_tIME_SUPPORTED) 116, 73, 77, 69, (png_byte) '\0', /* tIME */ #endif #ifdef PNG_APNG_SUPPORTED /* libpng was built with APNG patch; */ /* ignore the APNG chunks */ 97, 99, 84, 76, (png_byte) '\0', /* acTL */ 102, 99, 84, 76, (png_byte) '\0', /* fcTL */ 102, 100, 65, 84, (png_byte) '\0', /* fdAT */ #endif }; #endif /* Define these outside of the following "if logging()" block so they will * show in debuggers. 
*/ *im_vers='\0'; (void) ConcatenateMagickString(im_vers, MagickLibVersionText,32); (void) ConcatenateMagickString(im_vers, MagickLibAddendum,32); *libpng_vers='\0'; (void) ConcatenateMagickString(libpng_vers, PNG_LIBPNG_VER_STRING,32); *libpng_runv='\0'; (void) ConcatenateMagickString(libpng_runv, png_get_libpng_ver(NULL),32); *zlib_vers='\0'; (void) ConcatenateMagickString(zlib_vers, ZLIB_VERSION,32); *zlib_runv='\0'; (void) ConcatenateMagickString(zlib_runv, zlib_version,32); logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOnePNGImage()\n" " IM version = %s\n" " Libpng version = %s", im_vers, libpng_vers); if (logging != MagickFalse) { if (LocaleCompare(libpng_vers,libpng_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," running with %s", libpng_runv); } (void) LogMagickEvent(CoderEvent,GetMagickModule()," Zlib version = %s", zlib_vers); if (LocaleCompare(zlib_vers,zlib_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," running with %s", zlib_runv); } } #if (PNG_LIBPNG_VER < 10200) if (image_info->verbose) printf("Your PNG library (libpng-%s) is rather old.\n", PNG_LIBPNG_VER_STRING); #endif #if (PNG_LIBPNG_VER >= 10400) # ifndef PNG_TRANSFORM_GRAY_TO_RGB /* Added at libpng-1.4.0beta67 */ if (image_info->verbose) { printf("Your PNG library (libpng-%s) is an old beta version.\n", PNG_LIBPNG_VER_STRING); printf("Please update it.\n"); } # endif #endif image=mng_info->image; if (logging != MagickFalse) { (void)LogMagickEvent(CoderEvent,GetMagickModule(), " Before reading:\n" " image->matte=%d\n" " image->rendering_intent=%d\n" " image->colorspace=%d\n" " image->gamma=%f", (int) image->matte, (int) image->rendering_intent, (int) image->colorspace, image->gamma); } intent=Magick_RenderingIntent_to_PNG_RenderingIntent(image->rendering_intent); /* Set to an out-of-range color unless tRNS chunk is present */ transparent_color.red=65537; transparent_color.green=65537; transparent_color.blue=65537; transparent_color.opacity=65537; number_colors=0; num_text = 0; num_text_total = 0; num_raw_profiles = 0; ping_found_cHRM = MagickFalse; ping_found_gAMA = MagickFalse; ping_found_iCCP = MagickFalse; ping_found_sRGB = MagickFalse; ping_found_sRGB_cHRM = MagickFalse; ping_preserve_iCCP = MagickFalse; /* Allocate the PNG structures */ #ifdef PNG_USER_MEM_SUPPORTED ping=png_create_read_struct_2(PNG_LIBPNG_VER_STRING, image, MagickPNGErrorHandler,MagickPNGWarningHandler, NULL, (png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free); #else ping=png_create_read_struct(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler); #endif if (ping == (png_struct *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); ping_info=png_create_info_struct(ping); if (ping_info == (png_info *) NULL) { png_destroy_read_struct(&ping,(png_info **) NULL,(png_info **) NULL); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } end_info=png_create_info_struct(ping); if (end_info == (png_info *) NULL) { png_destroy_read_struct(&ping,&ping_info,(png_info **) NULL); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixel_info=(MemoryInfo *) NULL; if (setjmp(png_jmpbuf(ping))) { /* PNG image is corrupt. 
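
     Control arrives here via longjmp(): MagickPNGErrorHandler() (or
     libpng itself, through png_longjmp()) jumps back to the setjmp()
     above whenever a fatal error is reported.  Everything that follows
     is cleanup: the read structs, the row buffer and, when
     IMPNG_SETJMP_NOT_THREAD_SAFE is defined, the semaphore taken just
     after this block are all released before the partially-read image
     list is returned.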
*/ png_destroy_read_struct(&ping,&ping_info,&end_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage() with error."); if (image != (Image *) NULL) InheritException(exception,&image->exception); return(GetFirstImageInList(image)); } /* { For navigation to end of SETJMP-protected block. Within this * block, use png_error() instead of Throwing an Exception, to ensure * that libpng is able to clean up, and that the semaphore is unlocked. */ #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE LockSemaphoreInfo(ping_semaphore); #endif #ifdef PNG_BENIGN_ERRORS_SUPPORTED /* Allow benign errors */ png_set_benign_errors(ping, 1); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED /* Reject images with too many rows or columns */ png_set_user_limits(ping, (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(WidthResource)), (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(HeightResource))); #endif /* PNG_SET_USER_LIMITS_SUPPORTED */ /* Prepare PNG for reading. */ mng_info->image_found++; png_set_sig_bytes(ping,8); if (LocaleCompare(image_info->magick,"MNG") == 0) { #if defined(PNG_MNG_FEATURES_SUPPORTED) (void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES); png_set_read_fn(ping,image,png_get_data); #else #if defined(PNG_READ_EMPTY_PLTE_SUPPORTED) png_permit_empty_plte(ping,MagickTrue); png_set_read_fn(ping,image,png_get_data); #else mng_info->image=image; mng_info->bytes_in_read_buffer=0; mng_info->found_empty_plte=MagickFalse; mng_info->have_saved_bkgd_index=MagickFalse; png_set_read_fn(ping,mng_info,mng_get_data); #endif #endif } else png_set_read_fn(ping,image,png_get_data); { const char *value; value=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",value) == MagickFalse) { value=GetImageOption(image_info,"png:preserve-iCCP"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-iCCP"); if (value != NULL) ping_preserve_iCCP=MagickTrue; #if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED) /* Don't let libpng check for ICC/sRGB profile because we're going * to do that anyway. This feature was added at libpng-1.6.12. * If logging, go ahead and check and issue a warning as appropriate. */ if (logging == MagickFalse) png_set_option(ping, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON); #endif } #if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) else { /* Ignore the iCCP chunk */ png_set_keep_unknown_chunks(ping, 1, mng_iCCP, 1); } #endif } #if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) /* Ignore unused chunks and all unknown chunks except for exIf, caNv, and vpAg */ # if PNG_LIBPNG_VER < 10700 /* Avoid libpng16 warning */ png_set_keep_unknown_chunks(ping, 2, NULL, 0); # else png_set_keep_unknown_chunks(ping, 1, NULL, 0); # endif png_set_keep_unknown_chunks(ping, 2, mng_exIf, 1); png_set_keep_unknown_chunks(ping, 2, mng_caNv, 1); png_set_keep_unknown_chunks(ping, 2, mng_vpAg, 1); png_set_keep_unknown_chunks(ping, 1, unused_chunks, (int)sizeof(unused_chunks)/5); /* Callback for other unknown chunks */ png_set_read_user_chunk_fn(ping, image, read_user_chunk_callback); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED #if (PNG_LIBPNG_VER >= 10400) /* Limit the size of the chunk storage cache used for sPLT, text, * and unknown chunks. 
*/ png_set_chunk_cache_max(ping, 32767); #endif #endif #ifdef PNG_READ_CHECK_FOR_INVALID_INDEX_SUPPORTED /* Disable new libpng-1.5.10 feature */ png_set_check_for_invalid_index (ping, 0); #endif #if (PNG_LIBPNG_VER < 10400) # if defined(PNG_USE_PNGGCCRD) && defined(PNG_ASSEMBLER_CODE_SUPPORTED) && \ (PNG_LIBPNG_VER >= 10200) && (PNG_LIBPNG_VER < 10220) && defined(__i386__) /* Disable thread-unsafe features of pnggccrd */ if (png_access_version_number() >= 10200) { png_uint_32 mmx_disable_mask=0; png_uint_32 asm_flags; mmx_disable_mask |= ( PNG_ASM_FLAG_MMX_READ_COMBINE_ROW \ | PNG_ASM_FLAG_MMX_READ_FILTER_SUB \ | PNG_ASM_FLAG_MMX_READ_FILTER_AVG \ | PNG_ASM_FLAG_MMX_READ_FILTER_PAETH ); asm_flags=png_get_asm_flags(ping); png_set_asm_flags(ping, asm_flags & ~mmx_disable_mask); } # endif #endif png_read_info(ping,ping_info); /* Read and check IHDR chunk data */ png_get_IHDR(ping,ping_info,&ping_width,&ping_height, &ping_bit_depth,&ping_color_type, &ping_interlace_method,&ping_compression_method, &ping_filter_method); ping_file_depth = ping_bit_depth; /* Swap bytes if requested */ if (ping_file_depth == 16) { const char *value; value=GetImageOption(image_info,"png:swap-bytes"); if (value == NULL) value=GetImageArtifact(image,"png:swap-bytes"); if (value != NULL) png_set_swap(ping); } /* Save bit-depth and color-type in case we later want to write a PNG00 */ { char msg[MaxTextExtent]; (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_color_type); (void) SetImageProperty(image,"png:IHDR.color-type-orig",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_bit_depth); (void) SetImageProperty(image,"png:IHDR.bit-depth-orig",msg); } (void) png_get_tRNS(ping, ping_info, &ping_trans_alpha, &ping_num_trans, &ping_trans_color); (void) png_get_bKGD(ping, ping_info, &ping_background); if (ping_bit_depth < 8) { png_set_packing(ping); ping_bit_depth = 8; } image->depth=ping_bit_depth; image->depth=GetImageQuantumDepth(image,MagickFalse); image->interlace=ping_interlace_method != 0 ? 
PNGInterlace : NoInterlace; if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)) { image->rendering_intent=UndefinedIntent; intent=Magick_RenderingIntent_to_PNG_RenderingIntent(UndefinedIntent); (void) ResetMagickMemory(&image->chromaticity,0, sizeof(image->chromaticity)); } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG width: %.20g, height: %.20g\n" " PNG color_type: %d, bit_depth: %d\n" " PNG compression_method: %d\n" " PNG interlace_method: %d, filter_method: %d", (double) ping_width, (double) ping_height, ping_color_type, ping_bit_depth, ping_compression_method, ping_interlace_method,ping_filter_method); } if (png_get_valid(ping,ping_info, PNG_INFO_iCCP)) { ping_found_iCCP=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG iCCP chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_gAMA)) { ping_found_gAMA=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG gAMA chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { ping_found_cHRM=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG cHRM chunk."); } if (ping_found_iCCP != MagickTrue && png_get_valid(ping,ping_info, PNG_INFO_sRGB)) { ping_found_sRGB=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG sRGB chunk."); } #ifdef PNG_READ_iCCP_SUPPORTED if (ping_found_iCCP !=MagickTrue && ping_found_sRGB != MagickTrue && png_get_valid(ping,ping_info, PNG_INFO_iCCP)) { ping_found_iCCP=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG iCCP chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_iCCP)) { int compression; #if (PNG_LIBPNG_VER < 10500) png_charp info; #else png_bytep info; #endif png_charp name; png_uint_32 profile_length; (void) png_get_iCCP(ping,ping_info,&name,(int *) &compression,&info, &profile_length); if (profile_length != 0) { StringInfo *profile; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG iCCP chunk."); profile=BlobToStringInfo(info,profile_length); if (profile == (StringInfo *) NULL) { png_warning(ping, "ICC profile is NULL"); profile=DestroyStringInfo(profile); } else { if (ping_preserve_iCCP == MagickFalse) { int icheck, got_crc=0; png_uint_32 length, profile_crc=0; unsigned char *data; length=(png_uint_32) GetStringInfoLength(profile); for (icheck=0; sRGB_info[icheck].len > 0; icheck++) { if (length == sRGB_info[icheck].len) { if (got_crc == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile (potentially sRGB)", (unsigned long) length); data=GetStringInfoDatum(profile); profile_crc=crc32(0,data,length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " with crc=%8x",(unsigned int) profile_crc); got_crc++; } if (profile_crc == sRGB_info[icheck].crc) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " It is sRGB with rendering intent = %s", Magick_RenderingIntentString_from_PNG_RenderingIntent( sRGB_info[icheck].intent)); if (image->rendering_intent==UndefinedIntent) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent( sRGB_info[icheck].intent); } break; } } } if (sRGB_info[icheck].len == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile not recognized as sRGB", (unsigned long) length); (void) 
SetImageProfile(image,"icc",profile); } } else /* Preserve-iCCP */ { (void) SetImageProfile(image,"icc",profile); } profile=DestroyStringInfo(profile); } } } #endif #if defined(PNG_READ_sRGB_SUPPORTED) { if (ping_found_iCCP==MagickFalse && png_get_valid(ping,ping_info, PNG_INFO_sRGB)) { if (png_get_sRGB(ping,ping_info,&intent)) { if (image->rendering_intent == UndefinedIntent) image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent (intent); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG sRGB chunk: rendering_intent: %d",intent); } } else if (mng_info->have_global_srgb) { if (image->rendering_intent == UndefinedIntent) image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent (mng_info->global_srgb_intent); } } #endif { if (!png_get_gAMA(ping,ping_info,&file_gamma)) if (mng_info->have_global_gama) png_set_gAMA(ping,ping_info,mng_info->global_gamma); if (png_get_gAMA(ping,ping_info,&file_gamma)) { image->gamma=(float) file_gamma; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG gAMA chunk: gamma: %f",file_gamma); } } if (!png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { if (mng_info->have_global_chrm != MagickFalse) { (void) png_set_cHRM(ping,ping_info, mng_info->global_chrm.white_point.x, mng_info->global_chrm.white_point.y, mng_info->global_chrm.red_primary.x, mng_info->global_chrm.red_primary.y, mng_info->global_chrm.green_primary.x, mng_info->global_chrm.green_primary.y, mng_info->global_chrm.blue_primary.x, mng_info->global_chrm.blue_primary.y); } } if (png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { (void) png_get_cHRM(ping,ping_info, &image->chromaticity.white_point.x, &image->chromaticity.white_point.y, &image->chromaticity.red_primary.x, &image->chromaticity.red_primary.y, &image->chromaticity.green_primary.x, &image->chromaticity.green_primary.y, &image->chromaticity.blue_primary.x, &image->chromaticity.blue_primary.y); ping_found_cHRM=MagickTrue; if (image->chromaticity.red_primary.x>0.6399f && image->chromaticity.red_primary.x<0.6401f && image->chromaticity.red_primary.y>0.3299f && image->chromaticity.red_primary.y<0.3301f && image->chromaticity.green_primary.x>0.2999f && image->chromaticity.green_primary.x<0.3001f && image->chromaticity.green_primary.y>0.5999f && image->chromaticity.green_primary.y<0.6001f && image->chromaticity.blue_primary.x>0.1499f && image->chromaticity.blue_primary.x<0.1501f && image->chromaticity.blue_primary.y>0.0599f && image->chromaticity.blue_primary.y<0.0601f && image->chromaticity.white_point.x>0.3126f && image->chromaticity.white_point.x<0.3128f && image->chromaticity.white_point.y>0.3289f && image->chromaticity.white_point.y<0.3291f) ping_found_sRGB_cHRM=MagickTrue; } if (image->rendering_intent != UndefinedIntent) { if (ping_found_sRGB != MagickTrue && (ping_found_gAMA != MagickTrue || (image->gamma > .45 && image->gamma < .46)) && (ping_found_cHRM != MagickTrue || ping_found_sRGB_cHRM != MagickFalse) && ping_found_iCCP != MagickTrue) { png_set_sRGB(ping,ping_info, Magick_RenderingIntent_to_PNG_RenderingIntent (image->rendering_intent)); file_gamma=1.000f/2.200f; ping_found_sRGB=MagickTrue; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting sRGB as if in input"); } } #if defined(PNG_oFFs_SUPPORTED) if (png_get_valid(ping,ping_info,PNG_INFO_oFFs)) { image->page.x=(ssize_t) png_get_x_offset_pixels(ping, ping_info); image->page.y=(ssize_t) png_get_y_offset_pixels(ping, ping_info); if (logging != MagickFalse) if 
(image->page.x || image->page.y) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG oFFs chunk: x: %.20g, y: %.20g.",(double) image->page.x,(double) image->page.y); } #endif #if defined(PNG_pHYs_SUPPORTED) if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { if (mng_info->have_global_phys) { png_set_pHYs(ping,ping_info, mng_info->global_x_pixels_per_unit, mng_info->global_y_pixels_per_unit, mng_info->global_phys_unit_type); } } x_resolution=0; y_resolution=0; unit_type=0; if (png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { /* Set image resolution. */ (void) png_get_pHYs(ping,ping_info,&x_resolution,&y_resolution, &unit_type); image->x_resolution=(double) x_resolution; image->y_resolution=(double) y_resolution; if (unit_type == PNG_RESOLUTION_METER) { image->units=PixelsPerCentimeterResolution; image->x_resolution=(double) x_resolution/100.0; image->y_resolution=(double) y_resolution/100.0; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.", (double) x_resolution,(double) y_resolution,unit_type); } #endif if (png_get_valid(ping,ping_info,PNG_INFO_PLTE)) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); if ((number_colors == 0) && ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)) { if (mng_info->global_plte_length) { png_set_PLTE(ping,ping_info,mng_info->global_plte, (int) mng_info->global_plte_length); if (!png_get_valid(ping,ping_info,PNG_INFO_tRNS)) if (mng_info->global_trns_length) { if (mng_info->global_trns_length > mng_info->global_plte_length) { png_warning(ping, "global tRNS has more entries than global PLTE"); } else { png_set_tRNS(ping,ping_info,mng_info->global_trns, (int) mng_info->global_trns_length,NULL); } } #ifdef PNG_READ_bKGD_SUPPORTED if ( #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED mng_info->have_saved_bkgd_index || #endif png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { png_color_16 background; #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED if (mng_info->have_saved_bkgd_index) background.index=mng_info->saved_bkgd_index; #endif if (png_get_valid(ping, ping_info, PNG_INFO_bKGD)) background.index=ping_background->index; background.red=(png_uint_16) mng_info->global_plte[background.index].red; background.green=(png_uint_16) mng_info->global_plte[background.index].green; background.blue=(png_uint_16) mng_info->global_plte[background.index].blue; background.gray=(png_uint_16) mng_info->global_plte[background.index].green; png_set_bKGD(ping,ping_info,&background); } #endif } else png_error(ping,"No global PLTE in file"); } } #ifdef PNG_READ_bKGD_SUPPORTED if (mng_info->have_global_bkgd && (!png_get_valid(ping,ping_info,PNG_INFO_bKGD))) image->background_color=mng_info->mng_global_bkgd; if (png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { unsigned int bkgd_scale; /* Set image background color. * Scale background components to 16-bit, then scale * to quantum depth */ bkgd_scale = 1; if (ping_file_depth == 1) bkgd_scale = 255; else if (ping_file_depth == 2) bkgd_scale = 85; else if (ping_file_depth == 4) bkgd_scale = 17; if (ping_file_depth <= 8) bkgd_scale *= 257; ping_background->red *= bkgd_scale; ping_background->green *= bkgd_scale; ping_background->blue *= bkgd_scale; if (logging != MagickFalse) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG bKGD chunk, raw ping_background=(%d,%d,%d).\n" " bkgd_scale=%d. 
ping_background=(%d,%d,%d).", ping_background->red,ping_background->green, ping_background->blue, bkgd_scale,ping_background->red, ping_background->green,ping_background->blue); } image->background_color.red= ScaleShortToQuantum(ping_background->red); image->background_color.green= ScaleShortToQuantum(ping_background->green); image->background_color.blue= ScaleShortToQuantum(ping_background->blue); image->background_color.opacity=OpaqueOpacity; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->background_color=(%.20g,%.20g,%.20g).", (double) image->background_color.red, (double) image->background_color.green, (double) image->background_color.blue); } #endif /* PNG_READ_bKGD_SUPPORTED */ if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) { /* Image has a tRNS chunk. */ int max_sample; size_t one=1; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG tRNS chunk."); max_sample = (int) ((one << ping_file_depth) - 1); if ((ping_color_type == PNG_COLOR_TYPE_GRAY && (int)ping_trans_color->gray > max_sample) || (ping_color_type == PNG_COLOR_TYPE_RGB && ((int)ping_trans_color->red > max_sample || (int)ping_trans_color->green > max_sample || (int)ping_trans_color->blue > max_sample))) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Ignoring PNG tRNS chunk with out-of-range sample."); png_free_data(ping, ping_info, PNG_FREE_TRNS, 0); png_set_invalid(ping,ping_info,PNG_INFO_tRNS); image->matte=MagickFalse; } else { int scale_to_short; scale_to_short = 65535L/((1UL << ping_file_depth)-1); /* Scale transparent_color to short */ transparent_color.red= scale_to_short*ping_trans_color->red; transparent_color.green= scale_to_short*ping_trans_color->green; transparent_color.blue= scale_to_short*ping_trans_color->blue; transparent_color.opacity= scale_to_short*ping_trans_color->gray; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Raw tRNS graylevel = %d, scaled graylevel = %d.", ping_trans_color->gray,transparent_color.opacity); } transparent_color.red=transparent_color.opacity; transparent_color.green=transparent_color.opacity; transparent_color.blue=transparent_color.opacity; } } } #if defined(PNG_READ_sBIT_SUPPORTED) if (mng_info->have_global_sbit) { if (!png_get_valid(ping,ping_info,PNG_INFO_sBIT)) png_set_sBIT(ping,ping_info,&mng_info->global_sbit); } #endif num_passes=png_set_interlace_handling(ping); png_read_update_info(ping,ping_info); ping_rowbytes=png_get_rowbytes(ping,ping_info); /* Initialize image structure. 
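
     For a standalone PNG (mng_info->mng_type == 0) the MNG frame and
     clip boxes below are simply the full image extent; for a PNG that is
     embedded in an MNG stream, the y-offset recorded for the current
     object is applied to the page geometry instead.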
*/ mng_info->image_box.left=0; mng_info->image_box.right=(ssize_t) ping_width; mng_info->image_box.top=0; mng_info->image_box.bottom=(ssize_t) ping_height; if (mng_info->mng_type == 0) { mng_info->mng_width=ping_width; mng_info->mng_height=ping_height; mng_info->frame=mng_info->image_box; mng_info->clip=mng_info->image_box; } else { image->page.y=mng_info->y_off[mng_info->object_id]; } image->compression=ZipCompression; image->columns=ping_width; image->rows=ping_height; if (((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) || ((int) ping_bit_depth < 16 && (int) ping_color_type == PNG_COLOR_TYPE_GRAY)) { size_t one; image->storage_class=PseudoClass; one=1; image->colors=one << ping_file_depth; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (image->colors > 256) image->colors=256; #else if (image->colors > 65536L) image->colors=65536L; #endif if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); image->colors=(size_t) number_colors; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG PLTE chunk: number_colors: %d.",number_colors); } } if (image->storage_class == PseudoClass) { /* Initialize image colormap. */ if (AcquireImageColormap(image,image->colors) == MagickFalse) png_error(ping,"Memory allocation failed"); if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); for (i=0; i < (ssize_t) number_colors; i++) { image->colormap[i].red=ScaleCharToQuantum(palette[i].red); image->colormap[i].green=ScaleCharToQuantum(palette[i].green); image->colormap[i].blue=ScaleCharToQuantum(palette[i].blue); } for ( ; i < (ssize_t) image->colors; i++) { image->colormap[i].red=0; image->colormap[i].green=0; image->colormap[i].blue=0; } } else { Quantum scale; scale = 65535/((1UL << ping_file_depth)-1); #if (MAGICKCORE_QUANTUM_DEPTH > 16) scale = ScaleShortToQuantum(scale); #endif for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=(Quantum) (i*scale); image->colormap[i].green=(Quantum) (i*scale); image->colormap[i].blue=(Quantum) (i*scale); } } } /* Set some properties for reporting by "identify" */ { char msg[MaxTextExtent]; /* encode ping_width, ping_height, ping_file_depth, ping_color_type, ping_interlace_method in value */ (void) FormatLocaleString(msg,MaxTextExtent, "%d, %d",(int) ping_width, (int) ping_height); (void) SetImageProperty(image,"png:IHDR.width,height",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_file_depth); (void) SetImageProperty(image,"png:IHDR.bit_depth",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d (%s)", (int) ping_color_type, Magick_ColorType_from_PNG_ColorType((int)ping_color_type)); (void) SetImageProperty(image,"png:IHDR.color_type",msg); if (ping_interlace_method == 0) { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Not interlaced)", (int) ping_interlace_method); } else if (ping_interlace_method == 1) { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Adam7 method)", (int) ping_interlace_method); } else { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Unknown method)", (int) ping_interlace_method); } (void) SetImageProperty(image,"png:IHDR.interlace_method",msg); if (number_colors != 0) { (void) FormatLocaleString(msg,MaxTextExtent,"%d", (int) number_colors); (void) SetImageProperty(image,"png:PLTE.number_colors",msg); } } #if defined(PNG_tIME_SUPPORTED) read_tIME_chunk(image,ping,ping_info); #endif /* Read image scanlines. 
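
     num_passes was returned by png_set_interlace_handling() above: 7 for
     an Adam7-interlaced image, 1 otherwise.  With several passes each
     row is visited once per pass, so a whole-image buffer
     (rows * rowbytes) is allocated below; a single row buffer is enough
     for the non-interlaced case.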
*/ if (image->delay != 0) mng_info->scenes_found++; if ((mng_info->mng_type == 0 && (image->ping != MagickFalse)) || ( (image_info->number_scenes != 0) && (mng_info->scenes_found > (ssize_t) (image_info->first_scene+image_info->number_scenes)))) { /* This happens later in non-ping decodes */ if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) image->storage_class=DirectClass; image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? MagickTrue : MagickFalse; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping PNG image data for scene %.20g",(double) mng_info->scenes_found-1); png_destroy_read_struct(&ping,&ping_info,&end_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage()."); return(image); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG IDAT chunk(s)"); if (num_passes > 1) pixel_info=AcquireVirtualMemory(image->rows,ping_rowbytes* sizeof(*ping_pixels)); else pixel_info=AcquireVirtualMemory(ping_rowbytes,sizeof(*ping_pixels)); if (pixel_info == (MemoryInfo *) NULL) png_error(ping,"Memory allocation failed"); ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Converting PNG pixels to pixel packets"); /* Convert PNG pixels to pixel packets. */ { MagickBooleanType found_transparent_pixel; found_transparent_pixel=MagickFalse; if (image->storage_class == DirectClass) { QuantumInfo *quantum_info; quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) png_error(ping,"Failed to allocate quantum_info"); (void) SetQuantumEndian(image,quantum_info,MSBEndian); for (pass=0; pass < num_passes; pass++) { /* Convert image to DirectClass pixel packets. */ image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? 
MagickTrue : MagickFalse; for (y=0; y < (ssize_t) image->rows; y++) { if (num_passes > 1) row_offset=ping_rowbytes*y; else row_offset=0; png_read_row(ping,ping_pixels+row_offset,NULL); if (pass < num_passes-1) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; else { if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayAlphaQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RGBAQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, IndexQuantum,ping_pixels+row_offset,exception); else /* ping_color_type == PNG_COLOR_TYPE_RGB */ (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RGBQuantum,ping_pixels+row_offset,exception); } if (found_transparent_pixel == MagickFalse) { /* Is there a transparent pixel in the row? */ if (y== 0 && logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Looking for cheap transparent pixel"); for (x=(ssize_t) image->columns-1; x >= 0; x--) { if ((ping_color_type == PNG_COLOR_TYPE_RGBA || ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) && (GetPixelOpacity(q) != OpaqueOpacity)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ...got one."); found_transparent_pixel = MagickTrue; break; } if ((ping_color_type == PNG_COLOR_TYPE_RGB || ping_color_type == PNG_COLOR_TYPE_GRAY) && (ScaleQuantumToShort(GetPixelRed(q)) == transparent_color.red && ScaleQuantumToShort(GetPixelGreen(q)) == transparent_color.green && ScaleQuantumToShort(GetPixelBlue(q)) == transparent_color.blue)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ...got one."); found_transparent_pixel = MagickTrue; break; } q++; } } if (num_passes == 1) { status=SetImageProgress(image,LoadImageTag, (MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (num_passes != 1) { status=SetImageProgress(image,LoadImageTag,pass,num_passes); if (status == MagickFalse) break; } } quantum_info=DestroyQuantumInfo(quantum_info); } else /* image->storage_class != DirectClass */ for (pass=0; pass < num_passes; pass++) { Quantum *quantum_scanline; register Quantum *r; /* Convert grayscale image to PseudoClass pixel packets. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Converting grayscale pixels to pixel packets"); image->matte=ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA ? MagickTrue : MagickFalse; quantum_scanline=(Quantum *) AcquireQuantumMemory(image->columns, (image->matte ? 
2 : 1)*sizeof(*quantum_scanline)); if (quantum_scanline == (Quantum *) NULL) png_error(ping,"Memory allocation failed"); for (y=0; y < (ssize_t) image->rows; y++) { Quantum alpha; if (num_passes > 1) row_offset=ping_rowbytes*y; else row_offset=0; png_read_row(ping,ping_pixels+row_offset,NULL); if (pass < num_passes-1) continue; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); p=ping_pixels+row_offset; r=quantum_scanline; switch (ping_bit_depth) { case 8: { if (ping_color_type == 4) for (x=(ssize_t) image->columns-1; x >= 0; x--) { *r++=*p++; /* In image.h, OpaqueOpacity is 0 * TransparentOpacity is QuantumRange * In a PNG datastream, Opaque is QuantumRange * and Transparent is 0. */ alpha=ScaleCharToQuantum((unsigned char)*p++); SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; q++; } else for (x=(ssize_t) image->columns-1; x >= 0; x--) *r++=*p++; break; } case 16: { for (x=(ssize_t) image->columns-1; x >= 0; x--) { #if (MAGICKCORE_QUANTUM_DEPTH >= 16) size_t quantum; if (image->colors > 256) quantum=((*p++) << 8); else quantum=0; quantum|=(*p++); *r=ScaleShortToQuantum(quantum); r++; if (ping_color_type == 4) { if (image->colors > 256) quantum=((*p++) << 8); else quantum=0; quantum|=(*p++); alpha=ScaleShortToQuantum(quantum); SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; q++; } #else /* MAGICKCORE_QUANTUM_DEPTH == 8 */ *r++=(*p++); p++; /* strip low byte */ if (ping_color_type == 4) { alpha=*p++; SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; p++; q++; } #endif } break; } default: break; } /* Transfer image scanline. */ r=quantum_scanline; for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,*r++); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (num_passes == 1) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if (num_passes != 1) { status=SetImageProgress(image,LoadImageTag,pass,num_passes); if (status == MagickFalse) break; } quantum_scanline=(Quantum *) RelinquishMagickMemory(quantum_scanline); } image->matte=found_transparent_pixel; if (logging != MagickFalse) { if (found_transparent_pixel != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found transparent pixel"); else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No transparent pixel was found"); ping_color_type&=0x03; } } } if (image->storage_class == PseudoClass) { MagickBooleanType matte; matte=image->matte; image->matte=MagickFalse; (void) SyncImage(image); image->matte=matte; } png_read_end(ping,end_info); if (image_info->number_scenes != 0 && mng_info->scenes_found-1 < (ssize_t) image_info->first_scene && image->delay != 0) { png_destroy_read_struct(&ping,&ping_info,&end_info); pixel_info=RelinquishVirtualMemory(pixel_info); image->colors=2; (void) SetImageBackgroundColor(image); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage() early."); return(image); } if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) { ClassType storage_class; /* Image has a transparent background. 
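
     Keep the sense of the channel in mind: a PNG tRNS entry of 0 is
     fully transparent and 255 fully opaque, while in this library's
     opacity channel OpaqueOpacity is 0, so each palette entry below is
     assigned ScaleCharToQuantum(255-ping_trans_alpha[x]).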
*/ storage_class=image->storage_class; image->matte=MagickTrue; /* Balfour fix from imagemagick discourse server, 5 Feb 2010 */ if (storage_class == PseudoClass) { if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { for (x=0; x < ping_num_trans; x++) { image->colormap[x].opacity = ScaleCharToQuantum((unsigned char)(255-ping_trans_alpha[x])); } } else if (ping_color_type == PNG_COLOR_TYPE_GRAY) { for (x=0; x < (int) image->colors; x++) { if (ScaleQuantumToShort(image->colormap[x].red) == transparent_color.opacity) { image->colormap[x].opacity = (Quantum) TransparentOpacity; } } } (void) SyncImage(image); } #if 1 /* Should have already been done above, but glennrp problem P10 * needs this. */ else { for (y=0; y < (ssize_t) image->rows; y++) { image->storage_class=storage_class; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); /* Caution: on a Q8 build, this does not distinguish between * 16-bit colors that differ only in the low byte */ for (x=(ssize_t) image->columns-1; x >= 0; x--) { if (ScaleQuantumToShort(GetPixelRed(q)) == transparent_color.red && ScaleQuantumToShort(GetPixelGreen(q)) == transparent_color.green && ScaleQuantumToShort(GetPixelBlue(q)) == transparent_color.blue) { SetPixelOpacity(q,TransparentOpacity); } #if 0 /* I have not found a case where this is needed. */ else { SetPixelOpacity(q)=(Quantum) OpaqueOpacity; } #endif q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #endif image->storage_class=DirectClass; } if ((ping_color_type == PNG_COLOR_TYPE_GRAY) || (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)) { double image_gamma = image->gamma; (void)LogMagickEvent(CoderEvent,GetMagickModule(), " image->gamma=%f",(float) image_gamma); if (image_gamma > 0.75) { /* Set image->rendering_intent to Undefined, * image->colorspace to GRAY, and reset image->chromaticity. */ image->intensity = Rec709LuminancePixelIntensityMethod; SetImageColorspace(image,GRAYColorspace); } else { RenderingIntent save_rendering_intent = image->rendering_intent; ChromaticityInfo save_chromaticity = image->chromaticity; SetImageColorspace(image,GRAYColorspace); image->rendering_intent = save_rendering_intent; image->chromaticity = save_chromaticity; } image->gamma = image_gamma; } (void)LogMagickEvent(CoderEvent,GetMagickModule(), " image->colorspace=%d",(int) image->colorspace); for (j = 0; j < 2; j++) { if (j == 0) status = png_get_text(ping,ping_info,&text,&num_text) != 0 ? MagickTrue : MagickFalse; else status = png_get_text(ping,end_info,&text,&num_text) != 0 ? 
MagickTrue : MagickFalse; if (status != MagickFalse) for (i=0; i < (ssize_t) num_text; i++) { /* Check for a profile */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG text chunk"); if (strlen(text[i].key) > 16 && memcmp(text[i].key, "Raw profile type ",17) == 0) { const char *value; value=GetImageOption(image_info,"profile:skip"); if (IsOptionMember(text[i].key+17,value) == MagickFalse) { (void) Magick_png_read_raw_profile(ping,image,image_info,text, (int) i); num_raw_profiles++; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Read raw profile %s",text[i].key+17); } else { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping raw profile %s",text[i].key+17); } } else { char *value; length=text[i].text_length; value=(char *) AcquireQuantumMemory(length+MaxTextExtent, sizeof(*value)); if (value == (char *) NULL) png_error(ping,"Memory allocation failed"); *value='\0'; (void) ConcatenateMagickString(value,text[i].text,length+2); /* Don't save "density" or "units" property if we have a pHYs * chunk */ if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs) || (LocaleCompare(text[i].key,"density") != 0 && LocaleCompare(text[i].key,"units") != 0)) (void) SetImageProperty(image,text[i].key,value); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " length: %lu\n" " Keyword: %s", (unsigned long) length, text[i].key); } value=DestroyString(value); } } num_text_total += num_text; } #ifdef MNG_OBJECT_BUFFERS /* Store the object if necessary. */ if (object_id && !mng_info->frozen[object_id]) { if (mng_info->ob[object_id] == (MngBuffer *) NULL) { /* create a new object buffer. */ mng_info->ob[object_id]=(MngBuffer *) AcquireMagickMemory(sizeof(MngBuffer)); if (mng_info->ob[object_id] != (MngBuffer *) NULL) { mng_info->ob[object_id]->image=(Image *) NULL; mng_info->ob[object_id]->reference_count=1; } } if ((mng_info->ob[object_id] == (MngBuffer *) NULL) || mng_info->ob[object_id]->frozen) { if (mng_info->ob[object_id] == (MngBuffer *) NULL) png_error(ping,"Memory allocation failed"); if (mng_info->ob[object_id]->frozen) png_error(ping,"Cannot overwrite frozen MNG object buffer"); } else { if (mng_info->ob[object_id]->image != (Image *) NULL) mng_info->ob[object_id]->image=DestroyImage (mng_info->ob[object_id]->image); mng_info->ob[object_id]->image=CloneImage(image,0,0,MagickTrue, &image->exception); if (mng_info->ob[object_id]->image != (Image *) NULL) mng_info->ob[object_id]->image->file=(FILE *) NULL; else png_error(ping, "Cloning image for object buffer failed"); if (ping_width > 250000L || ping_height > 250000L) png_error(ping,"PNG Image dimensions are too large."); mng_info->ob[object_id]->width=ping_width; mng_info->ob[object_id]->height=ping_height; mng_info->ob[object_id]->color_type=ping_color_type; mng_info->ob[object_id]->sample_depth=ping_bit_depth; mng_info->ob[object_id]->interlace_method=ping_interlace_method; mng_info->ob[object_id]->compression_method= ping_compression_method; mng_info->ob[object_id]->filter_method=ping_filter_method; if (png_get_valid(ping,ping_info,PNG_INFO_PLTE)) { png_colorp plte; /* Copy the PLTE to the object buffer. 
*/ png_get_PLTE(ping,ping_info,&plte,&number_colors); mng_info->ob[object_id]->plte_length=number_colors; for (i=0; i < number_colors; i++) { mng_info->ob[object_id]->plte[i]=plte[i]; } } else mng_info->ob[object_id]->plte_length=0; } } #endif /* Set image->matte to MagickTrue if the input colortype supports * alpha or if a valid tRNS chunk is present, no matter whether there * is actual transparency present. */ image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? MagickTrue : MagickFalse; #if 0 /* I'm not sure what's wrong here but it does not work. */ if (image->matte != MagickFalse) { if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) (void) SetImageType(image,GrayscaleMatteType); else if (ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) SetImageType(image,PaletteMatteType); else (void) SetImageType(image,TrueColorMatteType); } else { if (ping_color_type == PNG_COLOR_TYPE_GRAY) (void) SetImageType(image,GrayscaleType); else if (ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) SetImageType(image,PaletteType); else (void) SetImageType(image,TrueColorType); } #endif /* Set more properties for identify to retrieve */ { char msg[MaxTextExtent]; if (num_text_total != 0) { /* libpng doesn't tell us whether they were tEXt, zTXt, or iTXt */ (void) FormatLocaleString(msg,MaxTextExtent, "%d tEXt/zTXt/iTXt chunks were found", num_text_total); (void) SetImageProperty(image,"png:text",msg); } if (num_raw_profiles != 0) { (void) FormatLocaleString(msg,MaxTextExtent, "%d were found", num_raw_profiles); (void) SetImageProperty(image,"png:text-encoded profiles",msg); } /* cHRM chunk: */ if (ping_found_cHRM != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found (see Chromaticity, above)"); (void) SetImageProperty(image,"png:cHRM",msg); } /* bKGD chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found (see Background color, above)"); (void) SetImageProperty(image,"png:bKGD",msg); } (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found"); /* iCCP chunk: */ if (ping_found_iCCP != MagickFalse) (void) SetImageProperty(image,"png:iCCP",msg); if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) (void) SetImageProperty(image,"png:tRNS",msg); #if defined(PNG_sRGB_SUPPORTED) /* sRGB chunk: */ if (ping_found_sRGB != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent, "intent=%d (%s)", (int) intent, Magick_RenderingIntentString_from_PNG_RenderingIntent(intent)); (void) SetImageProperty(image,"png:sRGB",msg); } #endif /* gAMA chunk: */ if (ping_found_gAMA != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent, "gamma=%.8g (See Gamma, above)", file_gamma); (void) SetImageProperty(image,"png:gAMA",msg); } #if defined(PNG_pHYs_SUPPORTED) /* pHYs chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { (void) FormatLocaleString(msg,MaxTextExtent, "x_res=%.10g, y_res=%.10g, units=%d", (double) x_resolution,(double) y_resolution, unit_type); (void) SetImageProperty(image,"png:pHYs",msg); } #endif #if defined(PNG_oFFs_SUPPORTED) /* oFFs chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_oFFs)) { (void) FormatLocaleString(msg,MaxTextExtent,"x_off=%.20g, y_off=%.20g", (double) image->page.x,(double) image->page.y); (void) SetImageProperty(image,"png:oFFs",msg); } #endif #if defined(PNG_tIME_SUPPORTED) read_tIME_chunk(image,ping,end_info); #endif /* caNv chunk: */ if 
((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows) || (image->page.x != 0 || image->page.y != 0)) { (void) FormatLocaleString(msg,MaxTextExtent, "width=%.20g, height=%.20g, x_offset=%.20g, y_offset=%.20g", (double) image->page.width,(double) image->page.height, (double) image->page.x,(double) image->page.y); (void) SetImageProperty(image,"png:caNv",msg); } /* vpAg chunk: */ if ((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows)) { (void) FormatLocaleString(msg,MaxTextExtent, "width=%.20g, height=%.20g", (double) image->page.width,(double) image->page.height); (void) SetImageProperty(image,"png:vpAg",msg); } } /* Relinquish resources. */ png_destroy_read_struct(&ping,&ping_info,&end_info); pixel_info=RelinquishVirtualMemory(pixel_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage()"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif /* } for navigation to beginning of SETJMP-protected block, revert to * Throwing an Exception when an error occurs. */ return(image); /* end of reading one PNG image */ } static Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType logging, status; MngInfo *mng_info; char magic_number[MaxTextExtent]; ssize_t count; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadPNGImage()"); image=AcquireImage(image_info); mng_info=(MngInfo *) NULL; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) ThrowReaderException(FileOpenError,"UnableToOpenFile"); /* Verify PNG signature. */ count=ReadBlob(image,8,(unsigned char *) magic_number); if (count < 8 || memcmp(magic_number,"\211PNG\r\n\032\n",8) != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. 
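    ResetMagickMemory() zero-fills the whole structure below, leaving
    every object slot non-existent and every one-shot warning counter
    cleared, which is the state the readers expect.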
*/ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOnePNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadPNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if ((image->columns == 0) || (image->rows == 0)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadPNGImage() with error."); ThrowReaderException(CorruptImageError,"CorruptImage"); } if ((IssRGBColorspace(image->colorspace) != MagickFalse) && ((image->gamma < .45) || (image->gamma > .46)) && !(image->chromaticity.red_primary.x>0.6399f && image->chromaticity.red_primary.x<0.6401f && image->chromaticity.red_primary.y>0.3299f && image->chromaticity.red_primary.y<0.3301f && image->chromaticity.green_primary.x>0.2999f && image->chromaticity.green_primary.x<0.3001f && image->chromaticity.green_primary.y>0.5999f && image->chromaticity.green_primary.y<0.6001f && image->chromaticity.blue_primary.x>0.1499f && image->chromaticity.blue_primary.x<0.1501f && image->chromaticity.blue_primary.y>0.0599f && image->chromaticity.blue_primary.y<0.0601f && image->chromaticity.white_point.x>0.3126f && image->chromaticity.white_point.x<0.3128f && image->chromaticity.white_point.y>0.3289f && image->chromaticity.white_point.y<0.3291f)) SetImageColorspace(image,RGBColorspace); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " page.w: %.20g, page.h: %.20g,page.x: %.20g, page.y: %.20g.", (double) image->page.width,(double) image->page.height, (double) image->page.x,(double) image->page.y); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadPNGImage()"); return(image); } #if defined(JNG_SUPPORTED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d O n e J N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadOneJNGImage() reads a JPEG Network Graphics (JNG) image file % (minus the 8-byte signature) and returns it. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % JNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the ReadOneJNGImage method is: % % Image *ReadOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o mng_info: Specifies a pointer to a MngInfo structure. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
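%  A JNG datastream interleaves a JPEG-encoded color stream (JDAT chunks)
%  with an optional alpha stream (PNG-encoded IDAT chunks or JPEG-encoded
%  JDAA chunks).  ReadOneJNGImage() spools each stream into a temporary
%  blob, decodes the blobs with the regular PNG and JPEG coders, and then
%  merges the alpha samples into the opacity channel of the color image.
%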
% */ static Image *ReadOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { Image *alpha_image, *color_image, *image, *jng_image; ImageInfo *alpha_image_info, *color_image_info; MagickBooleanType logging; int unique_filenames; ssize_t y; MagickBooleanType status; png_uint_32 jng_height, jng_width; png_byte jng_color_type, jng_image_sample_depth, jng_image_compression_method, jng_image_interlace_method, jng_alpha_sample_depth, jng_alpha_compression_method, jng_alpha_filter_method, jng_alpha_interlace_method; register const PixelPacket *s; register ssize_t i, x; register PixelPacket *q; register unsigned char *p; unsigned int read_JSEP, reading_idat; size_t length; jng_alpha_compression_method=0; jng_alpha_sample_depth=8; jng_color_type=0; jng_height=0; jng_width=0; alpha_image=(Image *) NULL; color_image=(Image *) NULL; alpha_image_info=(ImageInfo *) NULL; color_image_info=(ImageInfo *) NULL; unique_filenames=0; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOneJNGImage()"); image=mng_info->image; if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " AcquireNextImage()"); AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; /* Signature bytes have already been read. */ read_JSEP=MagickFalse; reading_idat=MagickFalse; for (;;) { char type[MaxTextExtent]; unsigned char *chunk; unsigned int count; /* Read a new JNG chunk. */ status=SetImageProgress(image,LoadImagesTag,TellBlob(image), 2*GetBlobSize(image)); if (status == MagickFalse) break; type[0]='\0'; (void) ConcatenateMagickString(type,"errr",MaxTextExtent); length=ReadBlobMSBLong(image); count=(unsigned int) ReadBlob(image,4,(unsigned char *) type); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading JNG chunk type %c%c%c%c, length: %.20g", type[0],type[1],type[2],type[3],(double) length); if (length > PNG_UINT_31_MAX || count == 0) ThrowReaderException(CorruptImageError,"CorruptImage"); p=NULL; chunk=(unsigned char *) NULL; if (length != 0) { chunk=(unsigned char *) AcquireQuantumMemory(length,sizeof(*chunk)); if (chunk == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) length; i++) chunk[i]=(unsigned char) ReadBlobByte(image); p=chunk; } (void) ReadBlobMSBLong(image); /* read crc word */ if (memcmp(type,mng_JHDR,4) == 0) { if (length == 16) { jng_width=(size_t) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]); jng_height=(size_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); if ((jng_width == 0) || (jng_height == 0)) ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); jng_color_type=p[8]; jng_image_sample_depth=p[9]; jng_image_compression_method=p[10]; jng_image_interlace_method=p[11]; image->interlace=jng_image_interlace_method != 0 ? 
PNGInterlace : NoInterlace; jng_alpha_sample_depth=p[12]; jng_alpha_compression_method=p[13]; jng_alpha_filter_method=p[14]; jng_alpha_interlace_method=p[15]; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " jng_width: %16lu, jng_height: %16lu\n" " jng_color_type: %16d, jng_image_sample_depth: %3d\n" " jng_image_compression_method:%3d", (unsigned long) jng_width, (unsigned long) jng_height, jng_color_type, jng_image_sample_depth, jng_image_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " jng_image_interlace_method: %3d" " jng_alpha_sample_depth: %3d", jng_image_interlace_method, jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " jng_alpha_compression_method:%3d\n" " jng_alpha_filter_method: %3d\n" " jng_alpha_interlace_method: %3d", jng_alpha_compression_method, jng_alpha_filter_method, jng_alpha_interlace_method); } } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if ((reading_idat == MagickFalse) && (read_JSEP == MagickFalse) && ((memcmp(type,mng_JDAT,4) == 0) || (memcmp(type,mng_JdAA,4) == 0) || (memcmp(type,mng_IDAT,4) == 0) || (memcmp(type,mng_JDAA,4) == 0))) { /* o create color_image o open color_blob, attached to color_image o if (color type has alpha) open alpha_blob, attached to alpha_image */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating color_blob."); color_image_info=(ImageInfo *)AcquireMagickMemory(sizeof(ImageInfo)); if (color_image_info == (ImageInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); GetImageInfo(color_image_info); color_image=AcquireImage(color_image_info); if (color_image == (Image *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) AcquireUniqueFilename(color_image->filename); unique_filenames++; status=OpenBlob(color_image_info,color_image,WriteBinaryBlobMode, exception); if (status == MagickFalse) { color_image=DestroyImage(color_image); return(DestroyImageList(image)); } if ((image_info->ping == MagickFalse) && (jng_color_type >= 12)) { alpha_image_info=(ImageInfo *) AcquireMagickMemory(sizeof(ImageInfo)); if (alpha_image_info == (ImageInfo *) NULL) { color_image=DestroyImage(color_image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } GetImageInfo(alpha_image_info); alpha_image=AcquireImage(alpha_image_info); if (alpha_image == (Image *) NULL) { alpha_image_info=DestroyImageInfo(alpha_image_info); color_image=DestroyImage(color_image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating alpha_blob."); (void) AcquireUniqueFilename(alpha_image->filename); unique_filenames++; status=OpenBlob(alpha_image_info,alpha_image,WriteBinaryBlobMode, exception); if (status == MagickFalse) { alpha_image=DestroyImage(alpha_image); alpha_image_info=DestroyImageInfo(alpha_image_info); color_image=DestroyImage(color_image); return(DestroyImageList(image)); } if (jng_alpha_compression_method == 0) { unsigned char data[18]; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing IHDR chunk to alpha_blob."); (void) WriteBlob(alpha_image,8,(const unsigned char *) "\211PNG\r\n\032\n"); (void) WriteBlobMSBULong(alpha_image,13L); PNGType(data,mng_IHDR); LogPNGChunk(logging,mng_IHDR,13L); PNGLong(data+4,jng_width); PNGLong(data+8,jng_height); data[12]=jng_alpha_sample_depth; data[13]=0; /* 
color_type gray */ data[14]=0; /* compression method 0 */ data[15]=0; /* filter_method 0 */ data[16]=0; /* interlace_method 0 */ (void) WriteBlob(alpha_image,17,data); (void) WriteBlobMSBULong(alpha_image,crc32(0,data,17)); } } reading_idat=MagickTrue; } if (memcmp(type,mng_JDAT,4) == 0) { /* Copy chunk to color_image->blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying JDAT chunk data to color_blob."); if (length != 0) { (void) WriteBlob(color_image,length,chunk); chunk=(unsigned char *) RelinquishMagickMemory(chunk); } continue; } if (memcmp(type,mng_IDAT,4) == 0) { png_byte data[5]; /* Copy IDAT header and chunk data to alpha_image->blob */ if (alpha_image != NULL && image_info->ping == MagickFalse) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying IDAT chunk data to alpha_blob."); (void) WriteBlobMSBULong(alpha_image,(size_t) length); PNGType(data,mng_IDAT); LogPNGChunk(logging,mng_IDAT,length); (void) WriteBlob(alpha_image,4,data); (void) WriteBlob(alpha_image,length,chunk); (void) WriteBlobMSBULong(alpha_image, crc32(crc32(0,data,4),chunk,(uInt) length)); } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if ((memcmp(type,mng_JDAA,4) == 0) || (memcmp(type,mng_JdAA,4) == 0)) { /* Copy chunk data to alpha_image->blob */ if (alpha_image != NULL && image_info->ping == MagickFalse) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying JDAA chunk data to alpha_blob."); (void) WriteBlob(alpha_image,length,chunk); } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_JSEP,4) == 0) { read_JSEP=MagickTrue; if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_bKGD,4) == 0) { if (length == 2) { image->background_color.red=ScaleCharToQuantum(p[1]); image->background_color.green=image->background_color.red; image->background_color.blue=image->background_color.red; } if (length == 6) { image->background_color.red=ScaleCharToQuantum(p[1]); image->background_color.green=ScaleCharToQuantum(p[3]); image->background_color.blue=ScaleCharToQuantum(p[5]); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_gAMA,4) == 0) { if (length == 4) image->gamma=((float) mng_get_long(p))*0.00001; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_cHRM,4) == 0) { if (length == 32) { image->chromaticity.white_point.x=0.00001*mng_get_long(p); image->chromaticity.white_point.y=0.00001*mng_get_long(&p[4]); image->chromaticity.red_primary.x=0.00001*mng_get_long(&p[8]); image->chromaticity.red_primary.y=0.00001*mng_get_long(&p[12]); image->chromaticity.green_primary.x=0.00001*mng_get_long(&p[16]); image->chromaticity.green_primary.y=0.00001*mng_get_long(&p[20]); image->chromaticity.blue_primary.x=0.00001*mng_get_long(&p[24]); image->chromaticity.blue_primary.y=0.00001*mng_get_long(&p[28]); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_sRGB,4) == 0) { if (length == 1) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent(p[0]); image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; 
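          /*
            These constants are the chromaticities that an sRGB chunk
            implies: the Rec. 709 red, green, and blue primaries set above
            and the D65 white point set below, together with the 1/2.2
            gamma assigned above.
          */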
image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_oFFs,4) == 0) { if (length > 8) { image->page.x=(ssize_t) mng_get_long(p); image->page.y=(ssize_t) mng_get_long(&p[4]); if ((int) p[8] != 0) { image->page.x/=10000; image->page.y/=10000; } } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_pHYs,4) == 0) { if (length > 8) { image->x_resolution=(double) mng_get_long(p); image->y_resolution=(double) mng_get_long(&p[4]); if ((int) p[8] == PNG_RESOLUTION_METER) { image->units=PixelsPerCentimeterResolution; image->x_resolution=image->x_resolution/100.0f; image->y_resolution=image->y_resolution/100.0f; } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #if 0 if (memcmp(type,mng_iCCP,4) == 0) { /* To do: */ if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #endif if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); if (memcmp(type,mng_IEND,4)) continue; break; } /* IEND found */ /* Finish up reading image data: o read main image from color_blob. o close color_blob. o if (color_type has alpha) if alpha_encoding is PNG read secondary image from alpha_blob via ReadPNG if alpha_encoding is JPEG read secondary image from alpha_blob via ReadJPEG o close alpha_blob. o copy intensity of secondary image into opacity samples of main image. o destroy the secondary image. */ if (color_image_info == (ImageInfo *) NULL) { assert(color_image == (Image *) NULL); assert(alpha_image == (Image *) NULL); return(DestroyImageList(image)); } if (color_image == (Image *) NULL) { assert(alpha_image == (Image *) NULL); return(DestroyImageList(image)); } (void) SeekBlob(color_image,0,SEEK_SET); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading jng_image from color_blob."); assert(color_image_info != (ImageInfo *) NULL); (void) FormatLocaleString(color_image_info->filename,MaxTextExtent,"%s", color_image->filename); color_image_info->ping=MagickFalse; /* To do: avoid this */ jng_image=ReadImage(color_image_info,exception); (void) RelinquishUniqueFileResource(color_image->filename); unique_filenames--; color_image=DestroyImage(color_image); color_image_info=DestroyImageInfo(color_image_info); if (jng_image == (Image *) NULL) return(DestroyImageList(image)); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying jng_image pixels to main image."); image->columns=jng_width; image->rows=jng_height; length=image->columns*sizeof(PixelPacket); status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } for (y=0; y < (ssize_t) image->rows; y++) { s=GetVirtualPixels(jng_image,0,y,image->columns,1,&image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,exception); (void) CopyMagickMemory(q,s,length); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } jng_image=DestroyImage(jng_image); if (image_info->ping == MagickFalse) { if (jng_color_type >= 12) { if (jng_alpha_compression_method == 0) { png_byte data[5]; (void) WriteBlobMSBULong(alpha_image,0x00000000L); PNGType(data,mng_IEND); LogPNGChunk(logging,mng_IEND,0L); (void) WriteBlob(alpha_image,4,data); (void) WriteBlobMSBULong(alpha_image,crc32(0,data,4)); } (void) SeekBlob(alpha_image,0,SEEK_SET); if (logging != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " Reading opacity from alpha_blob."); (void) FormatLocaleString(alpha_image_info->filename,MaxTextExtent, "%s",alpha_image->filename); jng_image=ReadImage(alpha_image_info,exception); if (jng_image != (Image *) NULL) for (y=0; y < (ssize_t) image->rows; y++) { s=GetVirtualPixels(jng_image,0,y,image->columns,1, &image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (image->matte != MagickFalse) for (x=(ssize_t) image->columns; x != 0; x--,q++,s++) SetPixelOpacity(q,QuantumRange- GetPixelRed(s)); else for (x=(ssize_t) image->columns; x != 0; x--,q++,s++) { SetPixelAlpha(q,GetPixelRed(s)); if (GetPixelOpacity(q) != OpaqueOpacity) image->matte=MagickTrue; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } (void) RelinquishUniqueFileResource(alpha_image->filename); unique_filenames--; alpha_image=DestroyImage(alpha_image); alpha_image_info=DestroyImageInfo(alpha_image_info); if (jng_image != (Image *) NULL) jng_image=DestroyImage(jng_image); } } /* Read the JNG image. */ if (mng_info->mng_type == 0) { mng_info->mng_width=jng_width; mng_info->mng_height=jng_height; } if (image->page.width == 0 && image->page.height == 0) { image->page.width=jng_width; image->page.height=jng_height; } if (image->page.x == 0 && image->page.y == 0) { image->page.x=mng_info->x_off[mng_info->object_id]; image->page.y=mng_info->y_off[mng_info->object_id]; } else { image->page.y=mng_info->y_off[mng_info->object_id]; } mng_info->image_found++; status=SetImageProgress(image,LoadImagesTag,2*TellBlob(image), 2*GetBlobSize(image)); if (status == MagickFalse) return(DestroyImageList(image)); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOneJNGImage(); unique_filenames=%d",unique_filenames); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d J N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadJNGImage() reads a JPEG Network Graphics (JNG) image file % (including the 8-byte signature) and returns it. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % JNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the ReadJNGImage method is: % % Image *ReadJNGImage(const ImageInfo *image_info, ExceptionInfo % *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadJNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType logging, status; MngInfo *mng_info; char magic_number[MaxTextExtent]; size_t count; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadJNGImage()"); image=AcquireImage(image_info); mng_info=(MngInfo *) NULL; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) return((Image *) NULL); if (LocaleCompare(image_info->magick,"JNG") != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Verify JNG signature. 
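     Like PNG, JNG uses an 8-byte signature: \213 (0x8B) followed by the
     ASCII letters "JNG" and the same \r\n\032\n tail that PNG uses to
     catch line-ending and end-of-file translation damage.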
*/ count=(size_t) ReadBlob(image,8,(unsigned char *) magic_number); if (count < 8 || memcmp(magic_number,"\213JNG\r\n\032\n",8) != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(*mng_info)); if (mng_info == (MngInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOneJNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if (image->columns == 0 || image->rows == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); ThrowReaderException(CorruptImageError,"CorruptImage"); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadJNGImage()"); return(image); } #endif static Image *ReadOneMNGImage(MngInfo* mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { char page_geometry[MaxTextExtent]; Image *image; MagickBooleanType logging; volatile int first_mng_object, object_id, term_chunk_found, skip_to_iend; volatile ssize_t image_count=0; MagickBooleanType status; MagickOffsetType offset; MngBox default_fb, fb, previous_fb; #if defined(MNG_INSERT_LAYERS) PixelPacket mng_background_color; #endif register unsigned char *p; register ssize_t i; size_t count; ssize_t loop_level; volatile short skipping_loop; #if defined(MNG_INSERT_LAYERS) unsigned int mandatory_back=0; #endif volatile unsigned int #ifdef MNG_OBJECT_BUFFERS mng_background_object=0, #endif mng_type=0; /* 0: PNG or JNG; 1: MNG; 2: MNG-LC; 3: MNG-VLC */ size_t default_frame_timeout, frame_timeout, #if defined(MNG_INSERT_LAYERS) image_height, image_width, #endif length; /* These delays are all measured in image ticks_per_second, * not in MNG ticks_per_second */ volatile size_t default_frame_delay, final_delay, final_image_delay, frame_delay, #if defined(MNG_INSERT_LAYERS) insert_layers, #endif mng_iterations=1, simplicity=0, subframe_height=0, subframe_width=0; previous_fb.top=0; previous_fb.bottom=0; previous_fb.left=0; previous_fb.right=0; default_fb.top=0; default_fb.bottom=0; default_fb.left=0; default_fb.right=0; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOneMNGImage()"); image=mng_info->image; if (LocaleCompare(image_info->magick,"MNG") == 0) { char magic_number[MaxTextExtent]; /* Verify MNG signature. */ count=(size_t) ReadBlob(image,8,(unsigned char *) magic_number); if (memcmp(magic_number,"\212MNG\r\n\032\n",8) != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize some nonzero members of the MngInfo structure. 
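     Each object's clip box is widened to the full legal PNG coordinate
     range (right and bottom set to PNG_UINT_31_MAX) and object 0 is
     marked as existing; everything else keeps its zero-filled defaults.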
*/ for (i=0; i < MNG_MAX_OBJECTS; i++) { mng_info->object_clip[i].right=(ssize_t) PNG_UINT_31_MAX; mng_info->object_clip[i].bottom=(ssize_t) PNG_UINT_31_MAX; } mng_info->exists[0]=MagickTrue; } skipping_loop=(-1); first_mng_object=MagickTrue; mng_type=0; #if defined(MNG_INSERT_LAYERS) insert_layers=MagickFalse; /* should be False when converting or mogrifying */ #endif default_frame_delay=0; default_frame_timeout=0; frame_delay=0; final_delay=1; mng_info->ticks_per_second=1UL*image->ticks_per_second; object_id=0; skip_to_iend=MagickFalse; term_chunk_found=MagickFalse; mng_info->framing_mode=1; #if defined(MNG_INSERT_LAYERS) mandatory_back=MagickFalse; #endif #if defined(MNG_INSERT_LAYERS) mng_background_color=image->background_color; #endif default_fb=mng_info->frame; previous_fb=mng_info->frame; do { char type[MaxTextExtent]; if (LocaleCompare(image_info->magick,"MNG") == 0) { unsigned char *chunk; /* Read a new chunk. */ type[0]='\0'; (void) ConcatenateMagickString(type,"errr",MaxTextExtent); length=ReadBlobMSBLong(image); count=(size_t) ReadBlob(image,4,(unsigned char *) type); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading MNG chunk type %c%c%c%c, length: %.20g", type[0],type[1],type[2],type[3],(double) length); if (length > PNG_UINT_31_MAX) { status=MagickFalse; break; } if (count == 0) ThrowReaderException(CorruptImageError,"CorruptImage"); p=NULL; chunk=(unsigned char *) NULL; if (length != 0) { chunk=(unsigned char *) AcquireQuantumMemory(length,sizeof(*chunk)); if (chunk == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) length; i++) chunk[i]=(unsigned char) ReadBlobByte(image); p=chunk; } (void) ReadBlobMSBLong(image); /* read crc word */ #if !defined(JNG_SUPPORTED) if (memcmp(type,mng_JHDR,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->jhdr_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"JNGCompressNotSupported","`%s'",image->filename); mng_info->jhdr_warning++; } #endif if (memcmp(type,mng_DHDR,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->dhdr_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"DeltaPNGNotSupported","`%s'",image->filename); mng_info->dhdr_warning++; } if (memcmp(type,mng_MEND,4) == 0) break; if (skip_to_iend) { if (memcmp(type,mng_IEND,4) == 0) skip_to_iend=MagickFalse; if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skip to IEND."); continue; } if (memcmp(type,mng_MHDR,4) == 0) { if (length != 28) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError,"CorruptImage"); } mng_info->mng_width=(size_t) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]); mng_info->mng_height=(size_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " MNG width: %.20g",(double) mng_info->mng_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " MNG height: %.20g",(double) mng_info->mng_height); } p+=8; mng_info->ticks_per_second=(size_t) mng_get_long(p); if (mng_info->ticks_per_second == 0) default_frame_delay=0; else default_frame_delay=1UL*image->ticks_per_second/ mng_info->ticks_per_second; frame_delay=default_frame_delay; simplicity=0; /* Skip nominal layer count, frame count, and play time */ p+=16; simplicity=(size_t) mng_get_long(p); 
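              /*
                The simplicity profile narrows the MNG type:
                (simplicity | 11) == 11 holds only when no feature bits
                outside bits 0, 1, and 3 are set (MNG-LC), and
                (simplicity | 9) == 9 only when bit 1 is clear as well
                (MNG-VLC).
              */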
mng_type=1; /* Full MNG */ if ((simplicity != 0) && ((simplicity | 11) == 11)) mng_type=2; /* LC */ if ((simplicity != 0) && ((simplicity | 9) == 9)) mng_type=3; /* VLC */ #if defined(MNG_INSERT_LAYERS) if (mng_type != 3) insert_layers=MagickTrue; #endif if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); mng_info->image=image; } if ((mng_info->mng_width > 65535L) || (mng_info->mng_height > 65535L)) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit"); } (void) FormatLocaleString(page_geometry,MaxTextExtent, "%.20gx%.20g+0+0",(double) mng_info->mng_width,(double) mng_info->mng_height); mng_info->frame.left=0; mng_info->frame.right=(ssize_t) mng_info->mng_width; mng_info->frame.top=0; mng_info->frame.bottom=(ssize_t) mng_info->mng_height; mng_info->clip=default_fb=previous_fb=mng_info->frame; for (i=0; i < MNG_MAX_OBJECTS; i++) mng_info->object_clip[i]=mng_info->frame; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_TERM,4) == 0) { int repeat=0; if (length != 0) repeat=p[0]; if (repeat == 3 && length > 8) { final_delay=(png_uint_32) mng_get_long(&p[2]); mng_iterations=(png_uint_32) mng_get_long(&p[6]); if (mng_iterations == PNG_UINT_31_MAX) mng_iterations=0; image->iterations=mng_iterations; term_chunk_found=MagickTrue; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " repeat=%d, final_delay=%.20g, iterations=%.20g", repeat,(double) final_delay, (double) image->iterations); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_DEFI,4) == 0) { if (mng_type == 3) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"DEFI chunk found in MNG-VLC datastream","`%s'", image->filename); if (length > 1) { object_id=(p[0] << 8) | p[1]; if (mng_type == 2 && object_id != 0) (void) ThrowMagickException(&image->exception, GetMagickModule(), CoderError,"Nonzero object_id in MNG-LC datastream", "`%s'", image->filename); if (object_id > MNG_MAX_OBJECTS) { /* Instead of using a warning we should allocate a larger MngInfo structure and continue. */ (void) ThrowMagickException(&image->exception, GetMagickModule(), CoderError, "object id too large","`%s'",image->filename); object_id=MNG_MAX_OBJECTS; } if (mng_info->exists[object_id]) if (mng_info->frozen[object_id]) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "DEFI cannot redefine a frozen MNG object","`%s'", image->filename); continue; } mng_info->exists[object_id]=MagickTrue; if (length > 2) mng_info->invisible[object_id]=p[2]; /* Extract object offset info. */ if (length > 11) { mng_info->x_off[object_id]=(ssize_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); mng_info->y_off[object_id]=(ssize_t) ((p[8] << 24) | (p[9] << 16) | (p[10] << 8) | p[11]); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " x_off[%d]: %.20g, y_off[%d]: %.20g", object_id,(double) mng_info->x_off[object_id], object_id,(double) mng_info->y_off[object_id]); } } /* Extract object clipping info. 
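                    Bytes 12..27 of the DEFI chunk carry the object's clip
                    boundaries (left, right, top, and bottom as four MSB
                    32-bit values); mng_read_box() parses them against the
                    current frame.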
*/ if (length > 27) mng_info->object_clip[object_id]= mng_read_box(mng_info->frame,0, &p[12]); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_bKGD,4) == 0) { mng_info->have_global_bkgd=MagickFalse; if (length > 5) { mng_info->mng_global_bkgd.red= ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1])); mng_info->mng_global_bkgd.green= ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3])); mng_info->mng_global_bkgd.blue= ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5])); mng_info->have_global_bkgd=MagickTrue; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_BACK,4) == 0) { #if defined(MNG_INSERT_LAYERS) if (length > 6) mandatory_back=p[6]; else mandatory_back=0; if (mandatory_back && length > 5) { mng_background_color.red= ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1])); mng_background_color.green= ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3])); mng_background_color.blue= ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5])); mng_background_color.opacity=OpaqueOpacity; } #ifdef MNG_OBJECT_BUFFERS if (length > 8) mng_background_object=(p[7] << 8) | p[8]; #endif #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_PLTE,4) == 0) { /* Read global PLTE. */ if (length && (length < 769)) { if (mng_info->global_plte == (png_colorp) NULL) mng_info->global_plte=(png_colorp) AcquireQuantumMemory(256, sizeof(*mng_info->global_plte)); for (i=0; i < (ssize_t) (length/3); i++) { mng_info->global_plte[i].red=p[3*i]; mng_info->global_plte[i].green=p[3*i+1]; mng_info->global_plte[i].blue=p[3*i+2]; } mng_info->global_plte_length=(unsigned int) (length/3); } #ifdef MNG_LOOSE for ( ; i < 256; i++) { mng_info->global_plte[i].red=i; mng_info->global_plte[i].green=i; mng_info->global_plte[i].blue=i; } if (length != 0) mng_info->global_plte_length=256; #endif else mng_info->global_plte_length=0; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_tRNS,4) == 0) { /* read global tRNS */ if (length > 0 && length < 257) for (i=0; i < (ssize_t) length; i++) mng_info->global_trns[i]=p[i]; #ifdef MNG_LOOSE for ( ; i < 256; i++) mng_info->global_trns[i]=255; #endif mng_info->global_trns_length=(unsigned int) length; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_gAMA,4) == 0) { if (length == 4) { ssize_t igamma; igamma=mng_get_long(p); mng_info->global_gamma=((float) igamma)*0.00001; mng_info->have_global_gama=MagickTrue; } else mng_info->have_global_gama=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_cHRM,4) == 0) { /* Read global cHRM */ if (length == 32) { mng_info->global_chrm.white_point.x=0.00001*mng_get_long(p); mng_info->global_chrm.white_point.y=0.00001*mng_get_long(&p[4]); mng_info->global_chrm.red_primary.x=0.00001*mng_get_long(&p[8]); mng_info->global_chrm.red_primary.y=0.00001* mng_get_long(&p[12]); mng_info->global_chrm.green_primary.x=0.00001* mng_get_long(&p[16]); mng_info->global_chrm.green_primary.y=0.00001* mng_get_long(&p[20]); mng_info->global_chrm.blue_primary.x=0.00001* mng_get_long(&p[24]); mng_info->global_chrm.blue_primary.y=0.00001* mng_get_long(&p[28]); mng_info->have_global_chrm=MagickTrue; } else mng_info->have_global_chrm=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_sRGB,4) == 0) { /* Read global sRGB. 
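                The single payload byte is a PNG rendering intent; it is
                mapped to the corresponding RenderingIntent and kept as
                the default for embedded images that carry no sRGB chunk
                of their own.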
*/ if (length != 0) { mng_info->global_srgb_intent= Magick_RenderingIntent_from_PNG_RenderingIntent(p[0]); mng_info->have_global_srgb=MagickTrue; } else mng_info->have_global_srgb=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_iCCP,4) == 0) { /* To do: */ /* Read global iCCP. */ if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_FRAM,4) == 0) { if (mng_type == 3) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"FRAM chunk found in MNG-VLC datastream","`%s'", image->filename); if ((mng_info->framing_mode == 2) || (mng_info->framing_mode == 4)) image->delay=frame_delay; frame_delay=default_frame_delay; frame_timeout=default_frame_timeout; fb=default_fb; if (length > 0) if (p[0]) mng_info->framing_mode=p[0]; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_mode=%d",mng_info->framing_mode); if (length > 6) { /* Note the delay and frame clipping boundaries. */ p++; /* framing mode */ while (*p && ((p-chunk) < (ssize_t) length)) p++; /* frame name */ p++; /* frame name terminator */ if ((p-chunk) < (ssize_t) (length-4)) { int change_delay, change_timeout, change_clipping; change_delay=(*p++); change_timeout=(*p++); change_clipping=(*p++); p++; /* change_sync */ if (change_delay && (p-chunk) < (ssize_t) (length-4)) { frame_delay=1UL*image->ticks_per_second* mng_get_long(p); if (mng_info->ticks_per_second != 0) frame_delay/=mng_info->ticks_per_second; else frame_delay=PNG_UINT_31_MAX; if (change_delay == 2) default_frame_delay=frame_delay; p+=4; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_delay=%.20g",(double) frame_delay); } if (change_timeout && (p-chunk) < (ssize_t) (length-4)) { frame_timeout=1UL*image->ticks_per_second* mng_get_long(p); if (mng_info->ticks_per_second != 0) frame_timeout/=mng_info->ticks_per_second; else frame_timeout=PNG_UINT_31_MAX; if (change_timeout == 2) default_frame_timeout=frame_timeout; p+=4; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_timeout=%.20g",(double) frame_timeout); } if (change_clipping && (p-chunk) < (ssize_t) (length-17)) { fb=mng_read_box(previous_fb,(char) p[0],&p[1]); p+=17; previous_fb=fb; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Frame_clip: L=%.20g R=%.20g T=%.20g B=%.20g", (double) fb.left,(double) fb.right,(double) fb.top, (double) fb.bottom); if (change_clipping == 2) default_fb=fb; } } } mng_info->clip=fb; mng_info->clip=mng_minimum_box(fb,mng_info->frame); subframe_width=(size_t) (mng_info->clip.right -mng_info->clip.left); subframe_height=(size_t) (mng_info->clip.bottom -mng_info->clip.top); /* Insert a background layer behind the frame if framing_mode is 4. */ #if defined(MNG_INSERT_LAYERS) if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " subframe_width=%.20g, subframe_height=%.20g",(double) subframe_width,(double) subframe_height); if (insert_layers && (mng_info->framing_mode == 4) && (subframe_width) && (subframe_height)) { /* Allocate next image structure. 
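                Framing mode 4 calls for a background layer before each
                subframe, so append a fresh image to the list and paint it
                with the current background color, sized and offset to the
                subframe clipping box.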
*/ if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; image->columns=subframe_width; image->rows=subframe_height; image->page.width=subframe_width; image->page.height=subframe_height; image->page.x=mng_info->clip.left; image->page.y=mng_info->clip.top; image->background_color=mng_background_color; image->matte=MagickFalse; image->delay=0; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Insert backgd layer, L=%.20g, R=%.20g T=%.20g, B=%.20g", (double) mng_info->clip.left,(double) mng_info->clip.right, (double) mng_info->clip.top,(double) mng_info->clip.bottom); } #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_CLIP,4) == 0) { unsigned int first_object, last_object; /* Read CLIP. */ if (length > 3) { first_object=(p[0] << 8) | p[1]; last_object=(p[2] << 8) | p[3]; p+=4; for (i=(int) first_object; i <= (int) last_object; i++) { if (mng_info->exists[i] && !mng_info->frozen[i]) { MngBox box; box=mng_info->object_clip[i]; if ((p-chunk) < (ssize_t) (length-17)) mng_info->object_clip[i]= mng_read_box(box,(char) p[0],&p[1]); } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_SAVE,4) == 0) { for (i=1; i < MNG_MAX_OBJECTS; i++) if (mng_info->exists[i]) { mng_info->frozen[i]=MagickTrue; #ifdef MNG_OBJECT_BUFFERS if (mng_info->ob[i] != (MngBuffer *) NULL) mng_info->ob[i]->frozen=MagickTrue; #endif } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if ((memcmp(type,mng_DISC,4) == 0) || (memcmp(type,mng_SEEK,4) == 0)) { /* Read DISC or SEEK. */ if ((length == 0) || !memcmp(type,mng_SEEK,4)) { for (i=1; i < MNG_MAX_OBJECTS; i++) MngInfoDiscardObject(mng_info,i); } else { register ssize_t j; for (j=1; j < (ssize_t) length; j+=2) { i=p[j-1] << 8 | p[j]; MngInfoDiscardObject(mng_info,i); } } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_MOVE,4) == 0) { size_t first_object, last_object; /* read MOVE */ if (length > 3) { first_object=(p[0] << 8) | p[1]; last_object=(p[2] << 8) | p[3]; p+=4; for (i=(ssize_t) first_object; i <= (ssize_t) last_object; i++) { if (mng_info->exists[i] && !mng_info->frozen[i] && (p-chunk) < (ssize_t) (length-8)) { MngPair new_pair; MngPair old_pair; old_pair.a=mng_info->x_off[i]; old_pair.b=mng_info->y_off[i]; new_pair=mng_read_pair(old_pair,(int) p[0],&p[1]); mng_info->x_off[i]=new_pair.a; mng_info->y_off[i]=new_pair.b; } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_LOOP,4) == 0) { ssize_t loop_iters=1; if (length > 4) { loop_level=chunk[0]; mng_info->loop_active[loop_level]=1; /* mark loop active */ /* Record starting point. 
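                    TellBlob() captures the byte offset of the first chunk
                    after this LOOP so the ENDL handler can SeekBlob() back
                    to it for each remaining iteration; a zero iteration
                    count instead sets skipping_loop until the matching
                    ENDL is seen.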
*/ loop_iters=mng_get_long(&chunk[1]); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " LOOP level %.20g has %.20g iterations ", (double) loop_level, (double) loop_iters); if (loop_iters == 0) skipping_loop=loop_level; else { mng_info->loop_jump[loop_level]=TellBlob(image); mng_info->loop_count[loop_level]=loop_iters; } mng_info->loop_iteration[loop_level]=0; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_ENDL,4) == 0) { if (length > 0) { loop_level=chunk[0]; if (skipping_loop > 0) { if (skipping_loop == loop_level) { /* Found end of zero-iteration loop. */ skipping_loop=(-1); mng_info->loop_active[loop_level]=0; } } else { if (mng_info->loop_active[loop_level] == 1) { mng_info->loop_count[loop_level]--; mng_info->loop_iteration[loop_level]++; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ENDL: LOOP level %.20g has %.20g remaining iters ", (double) loop_level,(double) mng_info->loop_count[loop_level]); if (mng_info->loop_count[loop_level] != 0) { offset=SeekBlob(image, mng_info->loop_jump[loop_level], SEEK_SET); if (offset < 0) { chunk=(unsigned char *) RelinquishMagickMemory( chunk); ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } } else { short last_level; /* Finished loop. */ mng_info->loop_active[loop_level]=0; last_level=(-1); for (i=0; i < loop_level; i++) if (mng_info->loop_active[i] == 1) last_level=(short) i; loop_level=last_level; } } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_CLON,4) == 0) { if (mng_info->clon_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CLON is not implemented yet","`%s'", image->filename); mng_info->clon_warning++; } if (memcmp(type,mng_MAGN,4) == 0) { png_uint_16 magn_first, magn_last, magn_mb, magn_ml, magn_mr, magn_mt, magn_mx, magn_my, magn_methx, magn_methy; if (length > 1) magn_first=(p[0] << 8) | p[1]; else magn_first=0; if (length > 3) magn_last=(p[2] << 8) | p[3]; else magn_last=magn_first; #ifndef MNG_OBJECT_BUFFERS if (magn_first || magn_last) if (mng_info->magn_warning == 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "MAGN is not implemented yet for nonzero objects", "`%s'",image->filename); mng_info->magn_warning++; } #endif if (length > 4) magn_methx=p[4]; else magn_methx=0; if (length > 6) magn_mx=(p[5] << 8) | p[6]; else magn_mx=1; if (magn_mx == 0) magn_mx=1; if (length > 8) magn_my=(p[7] << 8) | p[8]; else magn_my=magn_mx; if (magn_my == 0) magn_my=1; if (length > 10) magn_ml=(p[9] << 8) | p[10]; else magn_ml=magn_mx; if (magn_ml == 0) magn_ml=1; if (length > 12) magn_mr=(p[11] << 8) | p[12]; else magn_mr=magn_mx; if (magn_mr == 0) magn_mr=1; if (length > 14) magn_mt=(p[13] << 8) | p[14]; else magn_mt=magn_my; if (magn_mt == 0) magn_mt=1; if (length > 16) magn_mb=(p[15] << 8) | p[16]; else magn_mb=magn_my; if (magn_mb == 0) magn_mb=1; if (length > 17) magn_methy=p[17]; else magn_methy=magn_methx; if (magn_methx > 5 || magn_methy > 5) if (mng_info->magn_warning == 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "Unknown MAGN method in MNG datastream","`%s'", image->filename); mng_info->magn_warning++; } #ifdef MNG_OBJECT_BUFFERS /* Magnify existing objects in the range magn_first to magn_last */ #endif if (magn_first == 0 || magn_last == 0) { /* Save the magnification factors for object 0 */ mng_info->magn_mb=magn_mb; mng_info->magn_ml=magn_ml; 
mng_info->magn_mr=magn_mr; mng_info->magn_mt=magn_mt; mng_info->magn_mx=magn_mx; mng_info->magn_my=magn_my; mng_info->magn_methx=magn_methx; mng_info->magn_methy=magn_methy; } } if (memcmp(type,mng_PAST,4) == 0) { if (mng_info->past_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"PAST is not implemented yet","`%s'", image->filename); mng_info->past_warning++; } if (memcmp(type,mng_SHOW,4) == 0) { if (mng_info->show_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"SHOW is not implemented yet","`%s'", image->filename); mng_info->show_warning++; } if (memcmp(type,mng_sBIT,4) == 0) { if (length < 4) mng_info->have_global_sbit=MagickFalse; else { mng_info->global_sbit.gray=p[0]; mng_info->global_sbit.red=p[0]; mng_info->global_sbit.green=p[1]; mng_info->global_sbit.blue=p[2]; mng_info->global_sbit.alpha=p[3]; mng_info->have_global_sbit=MagickTrue; } } if (memcmp(type,mng_pHYs,4) == 0) { if (length > 8) { mng_info->global_x_pixels_per_unit= (size_t) mng_get_long(p); mng_info->global_y_pixels_per_unit= (size_t) mng_get_long(&p[4]); mng_info->global_phys_unit_type=p[8]; mng_info->have_global_phys=MagickTrue; } else mng_info->have_global_phys=MagickFalse; } if (memcmp(type,mng_pHYg,4) == 0) { if (mng_info->phyg_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"pHYg is not implemented.","`%s'",image->filename); mng_info->phyg_warning++; } if (memcmp(type,mng_BASI,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->basi_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"BASI is not implemented yet","`%s'", image->filename); mng_info->basi_warning++; #ifdef MNG_BASI_SUPPORTED if (length > 11) { basi_width=(size_t) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]); basi_height=(size_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); basi_color_type=p[8]; basi_compression_method=p[9]; basi_filter_type=p[10]; basi_interlace_method=p[11]; } if (length > 13) basi_red=(p[12] << 8) & p[13]; else basi_red=0; if (length > 15) basi_green=(p[14] << 8) & p[15]; else basi_green=0; if (length > 17) basi_blue=(p[16] << 8) & p[17]; else basi_blue=0; if (length > 19) basi_alpha=(p[18] << 8) & p[19]; else { if (basi_sample_depth == 16) basi_alpha=65535L; else basi_alpha=255; } if (length > 20) basi_viewable=p[20]; else basi_viewable=0; #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_IHDR,4) #if defined(JNG_SUPPORTED) && memcmp(type,mng_JHDR,4) #endif ) { /* Not an IHDR or JHDR chunk */ if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } /* Process IHDR */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Processing %c%c%c%c chunk",type[0],type[1],type[2],type[3]); mng_info->exists[object_id]=MagickTrue; mng_info->viewable[object_id]=MagickTrue; if (mng_info->invisible[object_id]) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping invisible object"); skip_to_iend=MagickTrue; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #if defined(MNG_INSERT_LAYERS) if (length < 8) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } image_width=(size_t) mng_get_long(p); image_height=(size_t) mng_get_long(&p[4]); #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); /* Insert a transparent background layer behind the entire 
animation if it is not full screen. */ #if defined(MNG_INSERT_LAYERS) if (insert_layers && mng_type && first_mng_object) { if ((mng_info->clip.left > 0) || (mng_info->clip.top > 0) || (image_width < mng_info->mng_width) || (mng_info->clip.right < (ssize_t) mng_info->mng_width) || (image_height < mng_info->mng_height) || (mng_info->clip.bottom < (ssize_t) mng_info->mng_height)) { if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; /* Make a background rectangle. */ image->delay=0; image->columns=mng_info->mng_width; image->rows=mng_info->mng_height; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=0; image->page.y=0; image->background_color=mng_background_color; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Inserted transparent background layer, W=%.20g, H=%.20g", (double) mng_info->mng_width,(double) mng_info->mng_height); } } /* Insert a background layer behind the upcoming image if framing_mode is 3, and we haven't already inserted one. */ if (insert_layers && (mng_info->framing_mode == 3) && (subframe_width) && (subframe_height) && (simplicity == 0 || (simplicity & 0x08))) { if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; image->delay=0; image->columns=subframe_width; image->rows=subframe_height; image->page.width=subframe_width; image->page.height=subframe_height; image->page.x=mng_info->clip.left; image->page.y=mng_info->clip.top; image->background_color=mng_background_color; image->matte=MagickFalse; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Insert background layer, L=%.20g, R=%.20g T=%.20g, B=%.20g", (double) mng_info->clip.left,(double) mng_info->clip.right, (double) mng_info->clip.top,(double) mng_info->clip.bottom); } #endif /* MNG_INSERT_LAYERS */ first_mng_object=MagickFalse; if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. 
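              Each embedded PNG or JNG becomes its own frame in the list;
              the delay, page geometry, offsets, and iteration count
              prepared below are attached to it before the datastream is
              re-read by ReadOnePNGImage() or ReadOneJNGImage().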
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; if (term_chunk_found) { image->start_loop=MagickTrue; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; if (mng_info->framing_mode == 1 || mng_info->framing_mode == 3) { image->delay=frame_delay; frame_delay=default_frame_delay; } else image->delay=0; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=mng_info->x_off[object_id]; image->page.y=mng_info->y_off[object_id]; image->iterations=mng_iterations; /* Seek back to the beginning of the IHDR or JHDR chunk's length field. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Seeking back to beginning of %c%c%c%c chunk",type[0],type[1], type[2],type[3]); offset=SeekBlob(image,-((ssize_t) length+12),SEEK_CUR); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } mng_info->image=image; mng_info->mng_type=mng_type; mng_info->object_id=object_id; if (memcmp(type,mng_IHDR,4) == 0) image=ReadOnePNGImage(mng_info,image_info,exception); #if defined(JNG_SUPPORTED) else image=ReadOneJNGImage(mng_info,image_info,exception); #endif if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); return((Image *) NULL); } if (image->columns == 0 || image->rows == 0) { (void) CloseBlob(image); return(DestroyImageList(image)); } mng_info->image=image; if (mng_type) { MngBox crop_box; if (mng_info->magn_methx || mng_info->magn_methy) { png_uint_32 magnified_height, magnified_width; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Processing MNG MAGN chunk"); if (mng_info->magn_methx == 1) { magnified_width=mng_info->magn_ml; if (image->columns > 1) magnified_width += mng_info->magn_mr; if (image->columns > 2) magnified_width += (png_uint_32) ((image->columns-2)*(mng_info->magn_mx)); } else { magnified_width=(png_uint_32) image->columns; if (image->columns > 1) magnified_width += mng_info->magn_ml-1; if (image->columns > 2) magnified_width += mng_info->magn_mr-1; if (image->columns > 3) magnified_width += (png_uint_32) ((image->columns-3)*(mng_info->magn_mx-1)); } if (mng_info->magn_methy == 1) { magnified_height=mng_info->magn_mt; if (image->rows > 1) magnified_height += mng_info->magn_mb; if (image->rows > 2) magnified_height += (png_uint_32) ((image->rows-2)*(mng_info->magn_my)); } else { magnified_height=(png_uint_32) image->rows; if (image->rows > 1) magnified_height += mng_info->magn_mt-1; if (image->rows > 2) magnified_height += mng_info->magn_mb-1; if (image->rows > 3) magnified_height += (png_uint_32) ((image->rows-3)*(mng_info->magn_my-1)); } if (magnified_height > image->rows || magnified_width > image->columns) { Image *large_image; int yy; ssize_t m, y; register ssize_t x; register PixelPacket *n, *q; PixelPacket *next, *prev; png_uint_16 magn_methx, magn_methy; /* Allocate next image structure. 
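                  For magnification method 1 the left and right edges
                  contribute magn_ml and magn_mr copies and each interior
                  column contributes magn_mx copies, so the new width was
                  computed above as

                    magnified_width = magn_ml + magn_mr
                                      + (columns - 2) * magn_mx

                  and the analogous formula over magn_mt, magn_mb, and
                  magn_my gives magnified_height.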
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocate magnified image"); AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); large_image=SyncNextImageInList(image); large_image->columns=magnified_width; large_image->rows=magnified_height; magn_methx=mng_info->magn_methx; magn_methy=mng_info->magn_methy; #if (MAGICKCORE_QUANTUM_DEPTH > 16) #define QM unsigned short if (magn_methx != 1 || magn_methy != 1) { /* Scale pixels to unsigned shorts to prevent overflow of intermediate values of interpolations */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1, exception); for (x=(ssize_t) image->columns-1; x >= 0; x--) { SetPixelRed(q,ScaleQuantumToShort( GetPixelRed(q))); SetPixelGreen(q,ScaleQuantumToShort( GetPixelGreen(q))); SetPixelBlue(q,ScaleQuantumToShort( GetPixelBlue(q))); SetPixelOpacity(q,ScaleQuantumToShort( GetPixelOpacity(q))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #else #define QM Quantum #endif if (image->matte != MagickFalse) (void) SetImageBackgroundColor(large_image); else { large_image->background_color.opacity=OpaqueOpacity; (void) SetImageBackgroundColor(large_image); if (magn_methx == 4) magn_methx=2; if (magn_methx == 5) magn_methx=3; if (magn_methy == 4) magn_methy=2; if (magn_methy == 5) magn_methy=3; } /* magnify the rows into the right side of the large image */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Magnify the rows to %.20g",(double) large_image->rows); m=(ssize_t) mng_info->magn_mt; yy=0; length=(size_t) image->columns; next=(PixelPacket *) AcquireQuantumMemory(length,sizeof(*next)); prev=(PixelPacket *) AcquireQuantumMemory(length,sizeof(*prev)); if ((prev == (PixelPacket *) NULL) || (next == (PixelPacket *) NULL)) { image=DestroyImageList(image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } n=GetAuthenticPixels(image,0,0,image->columns,1,exception); (void) CopyMagickMemory(next,n,length); for (y=0; y < (ssize_t) image->rows; y++) { if (y == 0) m=(ssize_t) mng_info->magn_mt; else if (magn_methy > 1 && y == (ssize_t) image->rows-2) m=(ssize_t) mng_info->magn_mb; else if (magn_methy <= 1 && y == (ssize_t) image->rows-1) m=(ssize_t) mng_info->magn_mb; else if (magn_methy > 1 && y == (ssize_t) image->rows-1) m=1; else m=(ssize_t) mng_info->magn_my; n=prev; prev=next; next=n; if (y < (ssize_t) image->rows-1) { n=GetAuthenticPixels(image,0,y+1,image->columns,1, exception); (void) CopyMagickMemory(next,n,length); } for (i=0; i < m; i++, yy++) { register PixelPacket *pixels; assert(yy < (ssize_t) large_image->rows); pixels=prev; n=next; q=GetAuthenticPixels(large_image,0,yy,large_image->columns, 1,exception); q+=(large_image->columns-image->columns); for (x=(ssize_t) image->columns-1; x >= 0; x--) { /* To do: get color as function of indexes[x] */ /* if (image->storage_class == PseudoClass) { } */ if (magn_methy <= 1) { /* replicate previous */ SetPixelRGBO(q,(pixels)); } else if (magn_methy == 2 || magn_methy == 4) { if (i == 0) { SetPixelRGBO(q,(pixels)); } else { /* Interpolate */ SetPixelRed(q, ((QM) (((ssize_t) (2*i*(GetPixelRed(n) -GetPixelRed(pixels)+m))/ ((ssize_t) (m*2)) +GetPixelRed(pixels))))); SetPixelGreen(q, ((QM) (((ssize_t) (2*i*(GetPixelGreen(n) -GetPixelGreen(pixels)+m))/ ((ssize_t) (m*2)) +GetPixelGreen(pixels))))); SetPixelBlue(q, ((QM) (((ssize_t) (2*i*(GetPixelBlue(n) -GetPixelBlue(pixels)+m))/ 
((ssize_t) (m*2)) +GetPixelBlue(pixels))))); if (image->matte != MagickFalse) SetPixelOpacity(q, ((QM) (((ssize_t) (2*i*(GetPixelOpacity(n) -GetPixelOpacity(pixels)+m)) /((ssize_t) (m*2))+ GetPixelOpacity(pixels))))); } if (magn_methy == 4) { /* Replicate nearest */ if (i <= ((m+1) << 1)) SetPixelOpacity(q, (*pixels).opacity+0); else SetPixelOpacity(q, (*n).opacity+0); } } else /* if (magn_methy == 3 || magn_methy == 5) */ { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelRGBO(q,(pixels)); } else { SetPixelRGBO(q,(n)); } if (magn_methy == 5) { SetPixelOpacity(q, (QM) (((ssize_t) (2*i* (GetPixelOpacity(n) -GetPixelOpacity(pixels)) +m))/((ssize_t) (m*2)) +GetPixelOpacity(pixels))); } } n++; q++; pixels++; } /* x */ if (SyncAuthenticPixels(large_image,exception) == 0) break; } /* i */ } /* y */ prev=(PixelPacket *) RelinquishMagickMemory(prev); next=(PixelPacket *) RelinquishMagickMemory(next); length=image->columns; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Delete original image"); DeleteImageFromList(&image); image=large_image; mng_info->image=image; /* magnify the columns */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Magnify the columns to %.20g",(double) image->columns); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *pixels; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); pixels=q+(image->columns-length); n=pixels+1; for (x=(ssize_t) (image->columns-length); x < (ssize_t) image->columns; x++) { /* To do: Rewrite using Get/Set***PixelComponent() */ if (x == (ssize_t) (image->columns-length)) m=(ssize_t) mng_info->magn_ml; else if (magn_methx > 1 && x == (ssize_t) image->columns-2) m=(ssize_t) mng_info->magn_mr; else if (magn_methx <= 1 && x == (ssize_t) image->columns-1) m=(ssize_t) mng_info->magn_mr; else if (magn_methx > 1 && x == (ssize_t) image->columns-1) m=1; else m=(ssize_t) mng_info->magn_mx; for (i=0; i < m; i++) { if (magn_methx <= 1) { /* replicate previous */ SetPixelRGBO(q,(pixels)); } else if (magn_methx == 2 || magn_methx == 4) { if (i == 0) { SetPixelRGBO(q,(pixels)); } /* To do: Rewrite using Get/Set***PixelComponent() */ else { /* Interpolate */ SetPixelRed(q, (QM) ((2*i*( GetPixelRed(n) -GetPixelRed(pixels))+m) /((ssize_t) (m*2))+ GetPixelRed(pixels))); SetPixelGreen(q, (QM) ((2*i*( GetPixelGreen(n) -GetPixelGreen(pixels))+m) /((ssize_t) (m*2))+ GetPixelGreen(pixels))); SetPixelBlue(q, (QM) ((2*i*( GetPixelBlue(n) -GetPixelBlue(pixels))+m) /((ssize_t) (m*2))+ GetPixelBlue(pixels))); if (image->matte != MagickFalse) SetPixelOpacity(q, (QM) ((2*i*( GetPixelOpacity(n) -GetPixelOpacity(pixels))+m) /((ssize_t) (m*2))+ GetPixelOpacity(pixels))); } if (magn_methx == 4) { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelOpacity(q, GetPixelOpacity(pixels)+0); } else { SetPixelOpacity(q, GetPixelOpacity(n)+0); } } } else /* if (magn_methx == 3 || magn_methx == 5) */ { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelRGBO(q,(pixels)); } else { SetPixelRGBO(q,(n)); } if (magn_methx == 5) { /* Interpolate */ SetPixelOpacity(q, (QM) ((2*i*( GetPixelOpacity(n) -GetPixelOpacity(pixels))+m)/ ((ssize_t) (m*2)) +GetPixelOpacity(pixels))); } } q++; } n++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } #if (MAGICKCORE_QUANTUM_DEPTH > 16) if (magn_methx != 1 || magn_methy != 1) { /* Rescale pixels to Quantum */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); for (x=(ssize_t) 
image->columns-1; x >= 0; x--) { SetPixelRed(q,ScaleShortToQuantum( GetPixelRed(q))); SetPixelGreen(q,ScaleShortToQuantum( GetPixelGreen(q))); SetPixelBlue(q,ScaleShortToQuantum( GetPixelBlue(q))); SetPixelOpacity(q,ScaleShortToQuantum( GetPixelOpacity(q))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished MAGN processing"); } } /* Crop_box is with respect to the upper left corner of the MNG. */ crop_box.left=mng_info->image_box.left+mng_info->x_off[object_id]; crop_box.right=mng_info->image_box.right+mng_info->x_off[object_id]; crop_box.top=mng_info->image_box.top+mng_info->y_off[object_id]; crop_box.bottom=mng_info->image_box.bottom+mng_info->y_off[object_id]; crop_box=mng_minimum_box(crop_box,mng_info->clip); crop_box=mng_minimum_box(crop_box,mng_info->frame); crop_box=mng_minimum_box(crop_box,mng_info->object_clip[object_id]); if ((crop_box.left != (mng_info->image_box.left +mng_info->x_off[object_id])) || (crop_box.right != (mng_info->image_box.right +mng_info->x_off[object_id])) || (crop_box.top != (mng_info->image_box.top +mng_info->y_off[object_id])) || (crop_box.bottom != (mng_info->image_box.bottom +mng_info->y_off[object_id]))) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Crop the PNG image"); if ((crop_box.left < crop_box.right) && (crop_box.top < crop_box.bottom)) { Image *im; RectangleInfo crop_info; /* Crop_info is with respect to the upper left corner of the image. */ crop_info.x=(crop_box.left-mng_info->x_off[object_id]); crop_info.y=(crop_box.top-mng_info->y_off[object_id]); crop_info.width=(size_t) (crop_box.right-crop_box.left); crop_info.height=(size_t) (crop_box.bottom-crop_box.top); image->page.width=image->columns; image->page.height=image->rows; image->page.x=0; image->page.y=0; im=CropImage(image,&crop_info,exception); if (im != (Image *) NULL) { image->columns=im->columns; image->rows=im->rows; im=DestroyImage(im); image->page.width=image->columns; image->page.height=image->rows; image->page.x=crop_box.left; image->page.y=crop_box.top; } } else { /* No pixels in crop area. The MNG spec still requires a layer, though, so make a single transparent pixel in the top left corner. */ image->columns=1; image->rows=1; image->colors=2; (void) SetImageBackgroundColor(image); image->page.width=1; image->page.height=1; image->page.x=0; image->page.y=0; } } #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED image=mng_info->image; #endif } #if (MAGICKCORE_QUANTUM_DEPTH > 16) /* PNG does not handle depths greater than 16 so reduce it even * if lossy, and promote any depths > 8 to 16. 
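 * For example, a Q32 build carries 32-bit samples internally, but the
 * deepest PNG sample format is 16 bits, so depth is clamped to 16 here;
 * depths between 9 and 15 are promoted to 16 so samples fill whole
 * bytes (the low byte is currently just zero-padded, per the "To do"
 * below).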
*/ if (image->depth > 16) image->depth=16; #endif #if (MAGICKCORE_QUANTUM_DEPTH > 8) if (image->depth > 8) { /* To do: fill low byte properly */ image->depth=16; } if (LosslessReduceDepthOK(image) != MagickFalse) image->depth = 8; #endif GetImageException(image,exception); if (image_info->number_scenes != 0) { if (mng_info->scenes_found > (ssize_t) (image_info->first_scene+image_info->number_scenes)) break; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished reading image datastream."); } while (LocaleCompare(image_info->magick,"MNG") == 0); (void) CloseBlob(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished reading all image datastreams."); #if defined(MNG_INSERT_LAYERS) if (insert_layers && !mng_info->image_found && (mng_info->mng_width) && (mng_info->mng_height)) { /* Insert a background layer if nothing else was found. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No images found. Inserting a background layer."); if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocation failed, returning NULL."); return(DestroyImageList(image)); } image=SyncNextImageInList(image); } image->columns=mng_info->mng_width; image->rows=mng_info->mng_height; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=0; image->page.y=0; image->background_color=mng_background_color; image->matte=MagickFalse; if (image_info->ping == MagickFalse) (void) SetImageBackgroundColor(image); mng_info->image_found++; } #endif image->iterations=mng_iterations; if (mng_iterations == 1) image->start_loop=MagickTrue; while (GetPreviousImageInList(image) != (Image *) NULL) { image_count++; if (image_count > 10*mng_info->image_found) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," No beginning"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"Linked list is corrupted, beginning of list not found", "`%s'",image_info->filename); return(DestroyImageList(image)); } image=GetPreviousImageInList(image); if (GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," Corrupt list"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"Linked list is corrupted; next_image is NULL","`%s'", image_info->filename); } } if (mng_info->ticks_per_second && mng_info->image_found > 1 && GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " First image null"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"image->next for first image is NULL but shouldn't be.", "`%s'",image_info->filename); } if (mng_info->image_found == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No visible images found."); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"No visible images in file","`%s'",image_info->filename); return(DestroyImageList(image)); } if (mng_info->ticks_per_second) final_delay=1UL*MagickMax(image->ticks_per_second,1L)* final_delay/mng_info->ticks_per_second; else image->start_loop=MagickTrue; /* Find final nonzero image delay */ 
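  /* A sketch of the intent (delays are illustrative): if the scenes carry
     delays 10, 0, and 25 while the TERM-derived final_delay is 5, the loop
     below leaves final_image_delay at 25, the final nonzero delay, and
     final_delay is raised to match, so the terminal frame never displays
     for less time than the last explicitly delayed frame. */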
final_image_delay=0; while (GetNextImageInList(image) != (Image *) NULL) { if (image->delay) final_image_delay=image->delay; image=GetNextImageInList(image); } if (final_delay < final_image_delay) final_delay=final_image_delay; image->delay=final_delay; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->delay=%.20g, final_delay=%.20g",(double) image->delay, (double) final_delay); if (logging != MagickFalse) { int scene; scene=0; image=GetFirstImageInList(image); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Before coalesce:"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " scene 0 delay=%.20g",(double) image->delay); while (GetNextImageInList(image) != (Image *) NULL) { image=GetNextImageInList(image); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " scene %.20g delay=%.20g",(double) scene++,(double) image->delay); } } image=GetFirstImageInList(image); #ifdef MNG_COALESCE_LAYERS if (insert_layers) { Image *next_image, *next; size_t scene; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," Coalesce Images"); scene=image->scene; next_image=CoalesceImages(image,&image->exception); if (next_image == (Image *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); image=DestroyImageList(image); image=next_image; for (next=image; next != (Image *) NULL; next=next_image) { next->page.width=mng_info->mng_width; next->page.height=mng_info->mng_height; next->page.x=0; next->page.y=0; next->scene=scene++; next_image=GetNextImageInList(next); if (next_image == (Image *) NULL) break; if (next->delay == 0) { scene--; next_image->previous=GetPreviousImageInList(next); if (GetPreviousImageInList(next) == (Image *) NULL) image=next_image; else next->previous->next=next_image; next=DestroyImage(next); } } } #endif while (GetNextImageInList(image) != (Image *) NULL) image=GetNextImageInList(image); image->dispose=BackgroundDispose; if (logging != MagickFalse) { int scene; scene=0; image=GetFirstImageInList(image); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " After coalesce:"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " scene 0 delay=%.20g dispose=%.20g",(double) image->delay, (double) image->dispose); while (GetNextImageInList(image) != (Image *) NULL) { image=GetNextImageInList(image); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " scene %.20g delay=%.20g dispose=%.20g",(double) scene++, (double) image->delay,(double) image->dispose); } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOneJNGImage();"); return(image); } static Image *ReadMNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType logging, status; MngInfo *mng_info; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadMNGImage()"); image=AcquireImage(image_info); mng_info=(MngInfo *) NULL; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) return((Image *) NULL); /* Allocate a MngInfo structure. 
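   The structure is zero-initialized just below; ReadOneMNGImage() then
   does the actual decoding, and MngInfoFreeStruct() releases the
   structure whether or not an image list was produced.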
*/ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOneMNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadMNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadMNGImage()"); return(GetFirstImageInList(image)); } #else /* PNG_LIBPNG_VER > 10011 */ static Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { printf("Your PNG library is too old: You have libpng-%s\n", PNG_LIBPNG_VER_STRING); (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "PNG library is too old","`%s'",image_info->filename); return(Image *) NULL; } static Image *ReadMNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { return(ReadPNGImage(image_info,exception)); } #endif /* PNG_LIBPNG_VER > 10011 */ #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPNGImage() adds properties for the PNG image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPNGImage method is: % % size_t RegisterPNGImage(void) % */ ModuleExport size_t RegisterPNGImage(void) { char version[MaxTextExtent]; MagickInfo *entry; static const char *PNGNote= { "See http://www.libpng.org/ for details about the PNG format." }, *JNGNote= { "See http://www.libpng.org/pub/mng/ for details about the JNG\n" "format." }, *MNGNote= { "See http://www.libpng.org/pub/mng/ for details about the MNG\n" "format." }; *version='\0'; #if defined(PNG_LIBPNG_VER_STRING) (void) ConcatenateMagickString(version,"libpng ",MaxTextExtent); (void) ConcatenateMagickString(version,PNG_LIBPNG_VER_STRING,MaxTextExtent); if (LocaleCompare(PNG_LIBPNG_VER_STRING,png_get_header_ver(NULL)) != 0) { (void) ConcatenateMagickString(version,",",MaxTextExtent); (void) ConcatenateMagickString(version,png_get_libpng_ver(NULL), MaxTextExtent); } #endif entry=SetMagickInfo("MNG"); entry->seekable_stream=MagickTrue; /* To do: eliminate this. 
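   The decoder currently rewinds with SeekBlob() to the start of each
   embedded IHDR/JHDR chunk, so a non-seekable stream would not work yet.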
*/ #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadMNGImage; entry->encoder=(EncodeImageHandler *) WriteMNGImage; #endif entry->magick=(IsImageFormatHandler *) IsMNG; entry->description=ConstantString("Multiple-image Network Graphics"); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("video/x-mng"); entry->module=ConstantString("PNG"); entry->note=ConstantString(MNGNote); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("Portable Network Graphics"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); if (*version != '\0') entry->version=ConstantString(version); entry->note=ConstantString(PNGNote); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG8"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString( "8-bit indexed with optional binary transparency"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG24"); *version='\0'; #if defined(ZLIB_VERSION) (void) ConcatenateMagickString(version,"zlib ",MaxTextExtent); (void) ConcatenateMagickString(version,ZLIB_VERSION,MaxTextExtent); if (LocaleCompare(ZLIB_VERSION,zlib_version) != 0) { (void) ConcatenateMagickString(version,",",MaxTextExtent); (void) ConcatenateMagickString(version,zlib_version,MaxTextExtent); } #endif if (*version != '\0') entry->version=ConstantString(version); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or binary transparent 24-bit RGB"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG32"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or transparent 32-bit RGBA"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG48"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or binary transparent 48-bit RGB"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG64"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or transparent 64-bit RGBA"); 
entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG00"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString( "PNG inheriting bit-depth, color-type from original if possible"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JNG"); #if defined(JNG_SUPPORTED) #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJNGImage; entry->encoder=(EncodeImageHandler *) WriteJNGImage; #endif #endif entry->magick=(IsImageFormatHandler *) IsJNG; entry->adjoin=MagickFalse; entry->description=ConstantString("JPEG Network Graphics"); entry->mime_type=ConstantString("image/x-jng"); entry->module=ConstantString("PNG"); entry->note=ConstantString(JNGNote); (void) RegisterMagickInfo(entry); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE ping_semaphore=AllocateSemaphoreInfo(); #endif return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPNGImage() removes format registrations made by the % PNG module from the list of supported formats. % % The format of the UnregisterPNGImage method is: % % UnregisterPNGImage(void) % */ ModuleExport void UnregisterPNGImage(void) { (void) UnregisterMagickInfo("MNG"); (void) UnregisterMagickInfo("PNG"); (void) UnregisterMagickInfo("PNG8"); (void) UnregisterMagickInfo("PNG24"); (void) UnregisterMagickInfo("PNG32"); (void) UnregisterMagickInfo("PNG48"); (void) UnregisterMagickInfo("PNG64"); (void) UnregisterMagickInfo("PNG00"); (void) UnregisterMagickInfo("JNG"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE if (ping_semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&ping_semaphore); #endif } #if defined(MAGICKCORE_PNG_DELEGATE) #if PNG_LIBPNG_VER > 10011 /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e M N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteMNGImage() writes an image in the Portable Network Graphics % Group's "Multiple-image Network Graphics" encoded image format. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WriteMNGImage method is: % % MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % % To do (as of version 5.5.2, November 26, 2002 -- glennrp -- see also % "To do" under ReadPNGImage): % % Preserve all unknown and not-yet-handled known chunks found in input % PNG file and copy them into output PNG files according to the PNG % copying rules. % % Write the iCCP chunk at MNG level when (icc profile length > 0) % % Improve selection of color type (use indexed-colour or indexed-colour % with tRNS when 256 or fewer unique RGBA values are present). % % Figure out what to do with "dispose=<restore-to-previous>" (dispose == 3) % This will be complicated if we limit ourselves to generating MNG-LC % files. 
For now we ignore disposal method 3 and simply overlay the next % image on it. % % Check for identical PLTE's or PLTE/tRNS combinations and use a % global MNG PLTE or PLTE/tRNS combination when appropriate. % [mostly done 15 June 1999 but still need to take care of tRNS] % % Check for identical sRGB and replace with a global sRGB (and remove % gAMA/cHRM if sRGB is found; check for identical gAMA/cHRM and % replace with global gAMA/cHRM (or with sRGB if appropriate; replace % local gAMA/cHRM with local sRGB if appropriate). % % Check for identical sBIT chunks and write global ones. % % Provide option to skip writing the signature tEXt chunks. % % Use signatures to detect identical objects and reuse the first % instance of such objects instead of writing duplicate objects. % % Use a smaller-than-32k value of compression window size when % appropriate. % % Encode JNG datastreams. Mostly done as of 5.5.2; need to write % ancillary text chunks and save profiles. % % Provide an option to force LC files (to ensure exact framing rate) % instead of VLC. % % Provide an option to force VLC files instead of LC, even when offsets % are present. This will involve expanding the embedded images with a % transparent region at the top and/or left. */ static void Magick_png_write_raw_profile(const ImageInfo *image_info,png_struct *ping, png_info *ping_info, unsigned char *profile_type, unsigned char *profile_description, unsigned char *profile_data, png_uint_32 length) { png_textp text; register ssize_t i; unsigned char *sp; png_charp dp; png_uint_32 allocated_length, description_length; unsigned char hex[16]={'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; if (LocaleNCompare((char *) profile_type+1, "ng-chunk-",9) == 0) return; if (image_info->verbose) { (void) printf("writing raw profile: type=%s, length=%.20g\n", (char *) profile_type, (double) length); } #if PNG_LIBPNG_VER >= 10400 text=(png_textp) png_malloc(ping,(png_alloc_size_t) sizeof(png_text)); #else text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text)); #endif description_length=(png_uint_32) strlen((const char *) profile_description); allocated_length=(png_uint_32) (length*2 + (length >> 5) + 20 + description_length); #if PNG_LIBPNG_VER >= 10400 text[0].text=(png_charp) png_malloc(ping, (png_alloc_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_alloc_size_t) 80); #else text[0].text=(png_charp) png_malloc(ping, (png_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_size_t) 80); #endif text[0].key[0]='\0'; (void) ConcatenateMagickString(text[0].key, "Raw profile type ",MaxTextExtent); (void) ConcatenateMagickString(text[0].key,(const char *) profile_type,62); sp=profile_data; dp=text[0].text; *dp++='\n'; (void) CopyMagickString(dp,(const char *) profile_description, allocated_length); dp+=description_length; *dp++='\n'; (void) FormatLocaleString(dp,allocated_length- (png_size_t) (dp-text[0].text),"%8lu ",(unsigned long) length); dp+=8; for (i=0; i < (ssize_t) length; i++) { if (i%36 == 0) *dp++='\n'; *(dp++)=(char) hex[((*sp >> 4) & 0x0f)]; *(dp++)=(char) hex[((*sp++ ) & 0x0f)]; } *dp++='\n'; *dp='\0'; text[0].text_length=(png_size_t) (dp-text[0].text); text[0].compression=image_info->compression == NoCompression || (image_info->compression == UndefinedCompression && text[0].text_length < 128) ? 
-1 : 0; if (text[0].text_length <= allocated_length) png_set_text(ping,ping_info,text,1); png_free(ping,text[0].text); png_free(ping,text[0].key); png_free(ping,text); } static MagickBooleanType Magick_png_write_chunk_from_profile(Image *image, const char *string, MagickBooleanType logging) { char *name; const StringInfo *profile; unsigned char *data; png_uint_32 length; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (const StringInfo *) NULL) { StringInfo *ping_profile; if (LocaleNCompare(name,string,11) == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found %s profile",name); ping_profile=CloneStringInfo(profile); data=GetStringInfoDatum(ping_profile), length=(png_uint_32) GetStringInfoLength(ping_profile); data[4]=data[3]; data[3]=data[2]; data[2]=data[1]; data[1]=data[0]; (void) WriteBlobMSBULong(image,length-5); /* data length */ (void) WriteBlob(image,length-1,data+1); (void) WriteBlobMSBULong(image,crc32(0,data+1,(uInt) length-1)); ping_profile=DestroyStringInfo(ping_profile); } } name=GetNextImageProfile(image); } return(MagickTrue); } #if defined(PNG_tIME_SUPPORTED) static void write_tIME_chunk(Image *image,png_struct *ping,png_info *info, const char *date) { unsigned int day, hour, minute, month, second, year; png_time ptime; time_t ttime; if (date != (const char *) NULL) { if (sscanf(date,"%d-%d-%dT%d:%d:%dZ",&year,&month,&day,&hour,&minute, &second) != 6) { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError, "Invalid date format specified for png:tIME","`%s'", image->filename); return; } ptime.year=(png_uint_16) year; ptime.month=(png_byte) month; ptime.day=(png_byte) day; ptime.hour=(png_byte) hour; ptime.minute=(png_byte) minute; ptime.second=(png_byte) second; } else { time(&ttime); png_convert_from_time_t(&ptime,ttime); } png_set_tIME(ping,info,&ptime); } #endif /* Write one PNG image */ static MagickBooleanType WriteOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info,Image *image) { char s[2]; char im_vers[32], libpng_runv[32], libpng_vers[32], zlib_runv[32], zlib_vers[32]; const char *name, *property, *value; const StringInfo *profile; int num_passes, pass, ping_wrote_caNv; png_byte ping_trans_alpha[256]; png_color palette[257]; png_color_16 ping_background, ping_trans_color; png_info *ping_info; png_struct *ping; png_uint_32 ping_height, ping_width; ssize_t y; MagickBooleanType image_matte, logging, matte, ping_have_blob, ping_have_cheap_transparency, ping_have_color, ping_have_non_bw, ping_have_PLTE, ping_have_bKGD, ping_have_eXIf, ping_have_iCCP, ping_have_pHYs, ping_have_sRGB, ping_have_tRNS, ping_exclude_bKGD, ping_exclude_cHRM, ping_exclude_date, /* ping_exclude_EXIF, */ ping_exclude_eXIf, ping_exclude_gAMA, ping_exclude_iCCP, /* ping_exclude_iTXt, */ ping_exclude_oFFs, ping_exclude_pHYs, ping_exclude_sRGB, ping_exclude_tEXt, ping_exclude_tIME, /* ping_exclude_tRNS, */ ping_exclude_vpAg, ping_exclude_caNv, ping_exclude_zCCP, /* hex-encoded iCCP */ ping_exclude_zTXt, ping_preserve_colormap, ping_preserve_iCCP, ping_need_colortype_warning, status, tried_332, tried_333, tried_444; MemoryInfo *volatile pixel_info; QuantumInfo *quantum_info; register ssize_t i, x; unsigned char *ping_pixels; volatile int image_colors, ping_bit_depth, ping_color_type, ping_interlace_method, ping_compression_method, ping_filter_method, ping_num_trans; volatile size_t image_depth, old_bit_depth; size_t quality, 
rowbytes, save_image_depth; int j, number_colors, number_opaque, number_semitransparent, number_transparent, ping_pHYs_unit_type; png_uint_32 ping_pHYs_x_resolution, ping_pHYs_y_resolution; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter WriteOnePNGImage()"); /* Define these outside of the following "if logging()" block so they will * show in debuggers. */ *im_vers='\0'; (void) ConcatenateMagickString(im_vers, MagickLibVersionText,MaxTextExtent); (void) ConcatenateMagickString(im_vers, MagickLibAddendum,MaxTextExtent); *libpng_vers='\0'; (void) ConcatenateMagickString(libpng_vers, PNG_LIBPNG_VER_STRING,32); *libpng_runv='\0'; (void) ConcatenateMagickString(libpng_runv, png_get_libpng_ver(NULL),32); *zlib_vers='\0'; (void) ConcatenateMagickString(zlib_vers, ZLIB_VERSION,32); *zlib_runv='\0'; (void) ConcatenateMagickString(zlib_runv, zlib_version,32); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," IM version = %s", im_vers); (void) LogMagickEvent(CoderEvent,GetMagickModule()," Libpng version = %s", libpng_vers); if (LocaleCompare(libpng_vers,libpng_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," running with %s", libpng_runv); } (void) LogMagickEvent(CoderEvent,GetMagickModule()," Zlib version = %s", zlib_vers); if (LocaleCompare(zlib_vers,zlib_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," running with %s", zlib_runv); } } /* Initialize some stuff */ ping_bit_depth=0, ping_color_type=0, ping_interlace_method=0, ping_compression_method=0, ping_filter_method=0, ping_num_trans = 0; ping_background.red = 0; ping_background.green = 0; ping_background.blue = 0; ping_background.gray = 0; ping_background.index = 0; ping_trans_color.red=0; ping_trans_color.green=0; ping_trans_color.blue=0; ping_trans_color.gray=0; ping_pHYs_unit_type = 0; ping_pHYs_x_resolution = 0; ping_pHYs_y_resolution = 0; ping_have_blob=MagickFalse; ping_have_cheap_transparency=MagickFalse; ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; ping_have_PLTE=MagickFalse; ping_have_bKGD=MagickFalse; ping_have_eXIf=MagickTrue; ping_have_iCCP=MagickFalse; ping_have_pHYs=MagickFalse; ping_have_sRGB=MagickFalse; ping_have_tRNS=MagickFalse; ping_exclude_bKGD=mng_info->ping_exclude_bKGD; ping_exclude_caNv=mng_info->ping_exclude_caNv; ping_exclude_cHRM=mng_info->ping_exclude_cHRM; ping_exclude_date=mng_info->ping_exclude_date; /* ping_exclude_EXIF=mng_info->ping_exclude_EXIF; */ ping_exclude_eXIf=mng_info->ping_exclude_eXIf; ping_exclude_gAMA=mng_info->ping_exclude_gAMA; ping_exclude_iCCP=mng_info->ping_exclude_iCCP; /* ping_exclude_iTXt=mng_info->ping_exclude_iTXt; */ ping_exclude_oFFs=mng_info->ping_exclude_oFFs; ping_exclude_pHYs=mng_info->ping_exclude_pHYs; ping_exclude_sRGB=mng_info->ping_exclude_sRGB; ping_exclude_tEXt=mng_info->ping_exclude_tEXt; ping_exclude_tIME=mng_info->ping_exclude_tIME; /* ping_exclude_tRNS=mng_info->ping_exclude_tRNS; */ ping_exclude_vpAg=mng_info->ping_exclude_vpAg; ping_exclude_zCCP=mng_info->ping_exclude_zCCP; /* hex-encoded iCCP in zTXt */ ping_exclude_zTXt=mng_info->ping_exclude_zTXt; ping_preserve_colormap = mng_info->ping_preserve_colormap; ping_preserve_iCCP = mng_info->ping_preserve_iCCP; ping_need_colortype_warning = MagickFalse; property=(const char *) NULL; /* Recognize the ICC sRGB profile and convert it to the sRGB chunk, * i.e., eliminate the ICC profile and set image->rendering_intent. 
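 *
 * The recognition below is a length-plus-CRC match: the profile length
 * is compared with each entry of the sRGB_info[] table and, when the
 * lengths agree, a crc32() over the profile data is compared with the
 * stored CRC; a hit maps directly to a PNG rendering intent.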
* Note that this will not involve any changes to the actual pixels * but merely passes information to applications that read the resulting * PNG image. * * To do: recognize other variants of the sRGB profile, using the CRC to * verify all recognized variants including the 7 already known. * * Work around libpng16+ rejecting some "known invalid sRGB profiles". * * Use something other than image->rendering_intent to record the fact * that the sRGB profile was found. * * Record the ICC version (currently v2 or v4) of the incoming sRGB ICC * profile. Record the Blackpoint Compensation, if any. */ if (ping_exclude_sRGB == MagickFalse && ping_preserve_iCCP == MagickFalse) { char *name; const StringInfo *profile; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0)) { int icheck, got_crc=0; png_uint_32 length, profile_crc=0; unsigned char *data; length=(png_uint_32) GetStringInfoLength(profile); for (icheck=0; sRGB_info[icheck].len > 0; icheck++) { if (length == sRGB_info[icheck].len) { if (got_crc == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile (potentially sRGB)", (unsigned long) length); data=GetStringInfoDatum(profile); profile_crc=crc32(0,data,length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " with crc=%8x",(unsigned int) profile_crc); got_crc++; } if (profile_crc == sRGB_info[icheck].crc) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " It is sRGB with rendering intent = %s", Magick_RenderingIntentString_from_PNG_RenderingIntent( sRGB_info[icheck].intent)); if (image->rendering_intent==UndefinedIntent) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent( sRGB_info[icheck].intent); } ping_exclude_iCCP = MagickTrue; ping_exclude_zCCP = MagickTrue; ping_have_sRGB = MagickTrue; break; } } } if (sRGB_info[icheck].len == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile not recognized as sRGB", (unsigned long) length); } } name=GetNextImageProfile(image); } } number_opaque = 0; number_semitransparent = 0; number_transparent = 0; if (logging != MagickFalse) { if (image->storage_class == UndefinedClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=UndefinedClass"); if (image->storage_class == DirectClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=DirectClass"); if (image->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=PseudoClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_info->magick= %s",image_info->magick); (void) LogMagickEvent(CoderEvent,GetMagickModule(), image->taint ? 
" image->taint=MagickTrue": " image->taint=MagickFalse"); } if (image->storage_class == PseudoClass && (mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (mng_info->write_png_colortype != 1 && mng_info->write_png_colortype != 5))) { (void) SyncImage(image); image->storage_class = DirectClass; } if (ping_preserve_colormap == MagickFalse) { if (image->storage_class != PseudoClass && image->colormap != NULL) { /* Free the bogus colormap; it can cause trouble later */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Freeing bogus colormap"); (void) RelinquishMagickMemory(image->colormap); image->colormap=NULL; } } if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace); /* Sometimes we get PseudoClass images whose RGB values don't match the colors in the colormap. This code syncs the RGB values. */ if (image->depth <= 8 && image->taint && image->storage_class == PseudoClass) (void) SyncImage(image); #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (image->depth > 8) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reducing PNG bit depth to 8 since this is a Q8 build."); image->depth=8; } #endif /* Respect the -depth option */ if (image->depth < 4) { register PixelPacket *r; ExceptionInfo *exception; exception=(&image->exception); if (image->depth > 2) { /* Scale to 4-bit */ LBR04PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR04PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR04PacketRGBO(image->colormap[i]); } } } else if (image->depth > 1) { /* Scale to 2-bit */ LBR02PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR02PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR02PacketRGBO(image->colormap[i]); } } } else { /* Scale to 1-bit */ LBR01PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR01PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR01PacketRGBO(image->colormap[i]); } } } } /* To do: set to next higher multiple of 8 */ if (image->depth < 8) image->depth=8; #if (MAGICKCORE_QUANTUM_DEPTH > 16) /* PNG does not handle depths greater than 16 so reduce it even * if lossy */ if (image->depth > 8) image->depth=16; #endif #if (MAGICKCORE_QUANTUM_DEPTH > 8) if (image->depth > 8) { /* To do: fill low byte properly */ image->depth=16; } if (image->depth == 16 && mng_info->write_png_depth != 16) if (mng_info->write_png8 || LosslessReduceDepthOK(image) != MagickFalse) image->depth = 8; #endif image_colors = (int) 
image->colors; if (mng_info->write_png_colortype && (mng_info->write_png_colortype > 4 || (mng_info->write_png_depth >= 8 && mng_info->write_png_colortype < 4 && image->matte == MagickFalse))) { /* Avoid the expensive BUILD_PALETTE operation if we're sure that we * are not going to need the result. */ number_opaque = (int) image->colors; if (mng_info->write_png_colortype == 1 || mng_info->write_png_colortype == 5) ping_have_color=MagickFalse; else ping_have_color=MagickTrue; ping_have_non_bw=MagickFalse; if (image->matte != MagickFalse) { number_transparent = 2; number_semitransparent = 1; } else { number_transparent = 0; number_semitransparent = 0; } } if (mng_info->write_png_colortype < 7) { /* BUILD_PALETTE * * Normally we run this just once, but in the case of writing PNG8 * we reduce the transparency to binary and run again, then if there * are still too many colors we reduce to a simple 4-4-4-1, then 3-3-3-1 * RGBA palette and run again, and then to a simple 3-3-2-1 RGBA * palette. Then (To do) we take care of a final reduction that is only * needed if there are still 256 colors present and one of them has both * transparent and opaque instances. */ tried_332 = MagickFalse; tried_333 = MagickFalse; tried_444 = MagickFalse; for (j=0; j<6; j++) { /* * Sometimes we get DirectClass images that have 256 colors or fewer. * This code will build a colormap. * * Also, sometimes we get PseudoClass images with an out-of-date * colormap. This code will replace the colormap with a new one. * Sometimes we get PseudoClass images that have more than 256 colors. * This code will delete the colormap and change the image to * DirectClass. * * If image->matte is MagickFalse, we ignore the opacity channel * even though it sometimes contains left-over non-opaque values. * * Also we gather some information (number of opaque, transparent, * and semitransparent pixels, and whether the image has any non-gray * pixels or only black-and-white pixels) that we might need later. * * Even if the user wants to force GrayAlpha or RGBA (colortype 4 or 6) * we need to check for bogus non-opaque values, at least. 
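 *
 * The pass below keeps three small histograms (opaque[], transparent[],
 * and semitransparent[]) whose counters stop at 259: that is enough to
 * prove that more than 256 distinct entries exist while leaving room
 * to append the background color afterwards.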
*/ ExceptionInfo *exception; int n; PixelPacket opaque[260], semitransparent[260], transparent[260]; register IndexPacket *indexes; register const PixelPacket *s, *q; register PixelPacket *r; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Enter BUILD_PALETTE:"); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->columns=%.20g",(double) image->columns); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->rows=%.20g",(double) image->rows); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->matte=%.20g",(double) image->matte); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth=%.20g",(double) image->depth); if (image->storage_class == PseudoClass && image->colormap != NULL) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Original colormap:"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " i (red,green,blue,opacity)"); for (i=0; i < 256; i++) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } for (i=image->colors - 10; i < (ssize_t) image->colors; i++) { if (i > 255) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } } } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d",(int) image->colors); if (image->colors == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " (zero means unknown)"); if (ping_preserve_colormap == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Regenerate the colormap"); } exception=(&image->exception); image_colors=0; number_opaque = 0; number_semitransparent = 0; number_transparent = 0; for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (image->matte == MagickFalse || GetPixelOpacity(q) == OpaqueOpacity) { if (number_opaque < 259) { if (number_opaque == 0) { GetPixelRGB(q, opaque); opaque[0].opacity=OpaqueOpacity; number_opaque=1; } for (i=0; i< (ssize_t) number_opaque; i++) { if (IsColorEqual(q, opaque+i)) break; } if (i == (ssize_t) number_opaque && number_opaque < 259) { number_opaque++; GetPixelRGB(q, opaque+i); opaque[i].opacity=OpaqueOpacity; } } } else if (q->opacity == TransparentOpacity) { if (number_transparent < 259) { if (number_transparent == 0) { GetPixelRGBO(q, transparent); ping_trans_color.red= (unsigned short) GetPixelRed(q); ping_trans_color.green= (unsigned short) GetPixelGreen(q); ping_trans_color.blue= (unsigned short) GetPixelBlue(q); ping_trans_color.gray= (unsigned short) GetPixelRed(q); number_transparent = 1; } for (i=0; i< (ssize_t) number_transparent; i++) { if (IsColorEqual(q, transparent+i)) break; } if (i == (ssize_t) number_transparent && number_transparent < 259) { number_transparent++; GetPixelRGBO(q, transparent+i); } } } else { if (number_semitransparent < 259) { if (number_semitransparent == 0) { GetPixelRGBO(q, semitransparent); number_semitransparent = 1; } for (i=0; i< (ssize_t) number_semitransparent; i++) { if (IsColorEqual(q, semitransparent+i) && GetPixelOpacity(q) == semitransparent[i].opacity) break; } if (i == (ssize_t) number_semitransparent && number_semitransparent < 259) { 
number_semitransparent++; GetPixelRGBO(q, semitransparent+i); } } } q++; } } if (mng_info->write_png8 == MagickFalse && ping_exclude_bKGD == MagickFalse) { /* Add the background color to the palette, if it * isn't already there. */ if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Check colormap for background (%d,%d,%d)", (int) image->background_color.red, (int) image->background_color.green, (int) image->background_color.blue); } for (i=0; i<number_opaque; i++) { if (opaque[i].red == image->background_color.red && opaque[i].green == image->background_color.green && opaque[i].blue == image->background_color.blue) break; } if (number_opaque < 259 && i == number_opaque) { opaque[i] = image->background_color; ping_background.index = i; number_opaque++; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d",(int) i); } } else if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No room in the colormap to add background color"); } image_colors=number_opaque+number_transparent+number_semitransparent; if (logging != MagickFalse) { if (image_colors > 256) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has more than 256 colors"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has %d colors",image_colors); } if (ping_preserve_colormap != MagickFalse) break; if (mng_info->write_png_colortype != 7) /* We won't need this info */ { ping_have_color=MagickFalse; ping_have_non_bw=MagickFalse; if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "incompatible colorspace"); ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; } if(image_colors > 256) { for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; s=q; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelRed(s) != GetPixelGreen(s) || GetPixelRed(s) != GetPixelBlue(s)) { ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; break; } s++; } if (ping_have_color != MagickFalse) break; /* Worst case is black-and-white; we are looking at every * pixel twice. */ if (ping_have_non_bw == MagickFalse) { s=q; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelRed(s) != 0 && GetPixelRed(s) != QuantumRange) { ping_have_non_bw=MagickTrue; break; } s++; } } } } } if (image_colors < 257) { PixelPacket colormap[260]; /* * Initialize image colormap. 
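 *
 * Transparent entries are placed first, then semitransparent, then
 * opaque.  PNG's tRNS chunk may be shorter than PLTE, and palette
 * entries beyond the end of tRNS are implicitly opaque, so putting the
 * non-opaque entries up front keeps the tRNS chunk as short as
 * possible.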
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Sort the new colormap"); /* Sort palette, transparent first */; n = 0; for (i=0; i<number_transparent; i++) colormap[n++] = transparent[i]; for (i=0; i<number_semitransparent; i++) colormap[n++] = semitransparent[i]; for (i=0; i<number_opaque; i++) colormap[n++] = opaque[i]; ping_background.index += (number_transparent + number_semitransparent); /* image_colors < 257; search the colormap instead of the pixels * to get ping_have_color and ping_have_non_bw */ for (i=0; i<n; i++) { if (ping_have_color == MagickFalse) { if (colormap[i].red != colormap[i].green || colormap[i].red != colormap[i].blue) { ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; break; } } if (ping_have_non_bw == MagickFalse) { if (colormap[i].red != 0 && colormap[i].red != QuantumRange) ping_have_non_bw=MagickTrue; } } if ((mng_info->ping_exclude_tRNS == MagickFalse || (number_transparent == 0 && number_semitransparent == 0)) && (((mng_info->write_png_colortype-1) == PNG_COLOR_TYPE_PALETTE) || (mng_info->write_png_colortype == 0))) { if (logging != MagickFalse) { if (n != (ssize_t) image_colors) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_colors (%d) and n (%d) don't match", image_colors, n); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " AcquireImageColormap"); } image->colors = image_colors; if (AcquireImageColormap(image,image_colors) == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); for (i=0; i< (ssize_t) image_colors; i++) image->colormap[i] = colormap[i]; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d (%d)", (int) image->colors, image_colors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Update the pixel indexes"); } /* Sync the pixel indices with the new colormap */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i< (ssize_t) image_colors; i++) { if ((image->matte == MagickFalse || image->colormap[i].opacity == GetPixelOpacity(q)) && image->colormap[i].red == GetPixelRed(q) && image->colormap[i].green == GetPixelGreen(q) && image->colormap[i].blue == GetPixelBlue(q)) { SetPixelIndex(indexes+x,i); break; } } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d", (int) image->colors); if (image->colormap != NULL) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " i (red,green,blue,opacity)"); for (i=0; i < (ssize_t) image->colors; i++) { if (i < 300 || i >= (ssize_t) image->colors - 10) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } } } if (number_transparent < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_transparent = %d", number_transparent); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_transparent > 256"); if (number_opaque < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_opaque = %d", number_opaque); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_opaque > 256"); if (number_semitransparent < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), 
" number_semitransparent = %d", number_semitransparent); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_semitransparent > 256"); if (ping_have_non_bw == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " All pixels and the background are black or white"); else if (ping_have_color == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " All pixels and the background are gray"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " At least one pixel or the background is non-gray"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Exit BUILD_PALETTE:"); } if (mng_info->write_png8 == MagickFalse) break; /* Make any reductions necessary for the PNG8 format */ if (image_colors <= 256 && image_colors != 0 && image->colormap != NULL && number_semitransparent == 0 && number_transparent <= 1) break; /* PNG8 can't have semitransparent colors so we threshold the * opacity to 0 or OpaqueOpacity, and PNG8 can only have one * transparent color so if more than one is transparent we merge * them into image->background_color. */ if (number_semitransparent != 0 || number_transparent > 1) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Thresholding the alpha channel to binary"); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) > TransparentOpacity/2) { SetPixelOpacity(r,TransparentOpacity); SetPixelRgb(r,&image->background_color); } else SetPixelOpacity(r,OpaqueOpacity); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image_colors != 0 && image_colors <= 256 && image->colormap != NULL) for (i=0; i<image_colors; i++) image->colormap[i].opacity = (image->colormap[i].opacity > TransparentOpacity/2 ? TransparentOpacity : OpaqueOpacity); } continue; } /* PNG8 can't have more than 256 colors so we quantize the pixels and * background color to the 4-4-4-1, 3-3-3-1 or 3-3-2-1 palette. If the * image is mostly gray, the 4-4-4-1 palette is likely to end up with 256 * colors or less. 
*/ if (tried_444 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 4-4-4"); tried_444 = MagickTrue; LBR04PacketRGB(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 4-4-4"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR04PixelRGB(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 4-4-4"); for (i=0; i<image_colors; i++) { LBR04PacketRGB(image->colormap[i]); } } continue; } if (tried_333 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 3-3-3"); tried_333 = MagickTrue; LBR03PacketRGB(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 3-3-3-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR03PixelRGB(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 3-3-3-1"); for (i=0; i<image_colors; i++) { LBR03PacketRGB(image->colormap[i]); } } continue; } if (tried_332 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 3-3-2"); tried_332 = MagickTrue; /* Red and green were already done so we only quantize the blue * channel */ LBR02PacketBlue(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 3-3-2-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR02PixelBlue(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 3-3-2-1"); for (i=0; i<image_colors; i++) { LBR02PacketBlue(image->colormap[i]); } } continue; } if (image_colors == 0 || image_colors > 256) { /* Take care of special case with 256 opaque colors + 1 transparent * color. We don't need to quantize to 2-3-2-1; we only need to * eliminate one color, so we'll merge the two darkest red * colors (0x49, 0, 0) -> (0x24, 0, 0). 
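 *
 * Why 0x49 and 0x24: the low-bit-replication macros map each 3-bit red
 * level back over the low bits, so an 8-bit red can only take the
 * values 0x00, 0x24, 0x49, 0x6d, 0x92, 0xb6, 0xdb, and 0xff; 0x24 and
 * 0x49 are therefore the two darkest nonzero reds, and merging them
 * frees exactly one palette slot.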
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Merging two dark red background colors to 3-3-2-1"); if (ScaleQuantumToChar(image->background_color.red) == 0x49 && ScaleQuantumToChar(image->background_color.green) == 0x00 && ScaleQuantumToChar(image->background_color.blue) == 0x00) { image->background_color.red=ScaleCharToQuantum(0x24); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Merging two dark red pixel colors to 3-3-2-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (ScaleQuantumToChar(GetPixelRed(r)) == 0x49 && ScaleQuantumToChar(GetPixelGreen(r)) == 0x00 && ScaleQuantumToChar(GetPixelBlue(r)) == 0x00 && GetPixelOpacity(r) == OpaqueOpacity) { SetPixelRed(r,ScaleCharToQuantum(0x24)); } r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else { for (i=0; i<image_colors; i++) { if (ScaleQuantumToChar(image->colormap[i].red) == 0x49 && ScaleQuantumToChar(image->colormap[i].green) == 0x00 && ScaleQuantumToChar(image->colormap[i].blue) == 0x00) { image->colormap[i].red=ScaleCharToQuantum(0x24); } } } } } } /* END OF BUILD_PALETTE */ /* If we are excluding the tRNS chunk and there is transparency, * then we must write a Gray-Alpha (color-type 4) or RGBA (color-type 6) * PNG. */ if (mng_info->ping_exclude_tRNS != MagickFalse && (number_transparent != 0 || number_semitransparent != 0)) { unsigned int colortype=mng_info->write_png_colortype; if (ping_have_color == MagickFalse) mng_info->write_png_colortype = 5; else mng_info->write_png_colortype = 7; if (colortype != 0 && mng_info->write_png_colortype != colortype) ping_need_colortype_warning=MagickTrue; } /* See if cheap transparency is possible. It is only possible * when there is a single transparent color, no semitransparent * color, and no opaque color that has the same RGB components * as the transparent color. We only need this information if * we are writing a PNG with colortype 0 or 2, and we have not * excluded the tRNS chunk. */ if (number_transparent == 1 && mng_info->write_png_colortype < 4) { ping_have_cheap_transparency = MagickTrue; if (number_semitransparent != 0) ping_have_cheap_transparency = MagickFalse; else if (image_colors == 0 || image_colors > 256 || image->colormap == NULL) { ExceptionInfo *exception; register const PixelPacket *q; exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { q=GetVirtualPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity != TransparentOpacity && (unsigned short) GetPixelRed(q) == ping_trans_color.red && (unsigned short) GetPixelGreen(q) == ping_trans_color.green && (unsigned short) GetPixelBlue(q) == ping_trans_color.blue) { ping_have_cheap_transparency = MagickFalse; break; } q++; } if (ping_have_cheap_transparency == MagickFalse) break; } } else { /* Assuming that image->colormap[0] is the one transparent color * and that all others are opaque. 
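 *
 * ("Cheap" transparency means transparency conveyed by a tRNS chunk
 * naming a single RGB value instead of by a full alpha channel.  It
 * fails as soon as any opaque pixel shares that RGB value, which is
 * what the loops above and below check.  For example, an image whose
 * only transparent pixels are pure white can be written as color-type
 * 0 or 2 plus a white tRNS entry only if no opaque white pixel exists.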
*/ if (image_colors > 1) for (i=1; i<image_colors; i++) if (image->colormap[i].red == image->colormap[0].red && image->colormap[i].green == image->colormap[0].green && image->colormap[i].blue == image->colormap[0].blue) { ping_have_cheap_transparency = MagickFalse; break; } } if (logging != MagickFalse) { if (ping_have_cheap_transparency == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Cheap transparency is not possible."); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Cheap transparency is possible."); } } else ping_have_cheap_transparency = MagickFalse; image_depth=image->depth; quantum_info = (QuantumInfo *) NULL; number_colors=0; image_colors=(int) image->colors; image_matte=image->matte; if (mng_info->write_png_colortype < 5) mng_info->IsPalette=image->storage_class == PseudoClass && image_colors <= 256 && image->colormap != NULL; else mng_info->IsPalette = MagickFalse; if ((mng_info->write_png_colortype == 4 || mng_info->write_png8) && (image->colors == 0 || image->colormap == NULL)) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "Cannot write PNG8 or color-type 3; colormap is NULL", "`%s'",image->filename); return(MagickFalse); } /* Allocate the PNG structures */ #ifdef PNG_USER_MEM_SUPPORTED ping=png_create_write_struct_2(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler,(void *) NULL, (png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free); #else ping=png_create_write_struct(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler); #endif if (ping == (png_struct *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); ping_info=png_create_info_struct(ping); if (ping_info == (png_info *) NULL) { png_destroy_write_struct(&ping,(png_info **) NULL); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } png_set_write_fn(ping,image,png_put_data,png_flush_data); pixel_info=(MemoryInfo *) NULL; if (setjmp(png_jmpbuf(ping))) { /* PNG write failed. */ #ifdef PNG_DEBUG if (image_info->verbose) (void) printf("PNG write has failed.\n"); #endif png_destroy_write_struct(&ping,&ping_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); return(MagickFalse); } /* { For navigation to end of SETJMP-protected block. Within this * block, use png_error() instead of Throwing an Exception, to ensure * that libpng is able to clean up, and that the semaphore is unlocked. */ #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE LockSemaphoreInfo(ping_semaphore); #endif #ifdef PNG_BENIGN_ERRORS_SUPPORTED /* Allow benign errors */ png_set_benign_errors(ping, 1); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED /* Reject images with too many rows or columns */ png_set_user_limits(ping, (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(WidthResource)), (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(HeightResource))); #endif /* PNG_SET_USER_LIMITS_SUPPORTED */ /* Prepare PNG for writing. 
*/ #if defined(PNG_MNG_FEATURES_SUPPORTED) if (mng_info->write_mng) { (void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES); # ifdef PNG_WRITE_CHECK_FOR_INVALID_INDEX_SUPPORTED /* Disable new libpng-1.5.10 feature when writing a MNG because * zero-length PLTE is OK */ png_set_check_for_invalid_index (ping, 0); # endif } #else # ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED if (mng_info->write_mng) png_permit_empty_plte(ping,MagickTrue); # endif #endif x=0; ping_width=(png_uint_32) image->columns; ping_height=(png_uint_32) image->rows; if (mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32) image_depth=8; if (mng_info->write_png48 || mng_info->write_png64) image_depth=16; if (mng_info->write_png_depth != 0) image_depth=mng_info->write_png_depth; /* Adjust requested depth to next higher valid depth if necessary */ if (image_depth > 8) image_depth=16; if ((image_depth > 4) && (image_depth < 8)) image_depth=8; if (image_depth == 3) image_depth=4; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " width=%.20g",(double) ping_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " height=%.20g",(double) ping_height); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_matte=%.20g",(double) image->matte); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth=%.20g",(double) image->depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative ping_bit_depth=%.20g",(double) image_depth); } save_image_depth=image_depth; ping_bit_depth=(png_byte) save_image_depth; #if defined(PNG_pHYs_SUPPORTED) if (ping_exclude_pHYs == MagickFalse) { if ((image->x_resolution != 0) && (image->y_resolution != 0) && (!mng_info->write_mng || !mng_info->equal_physs)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up pHYs chunk"); if (image->units == PixelsPerInchResolution) { ping_pHYs_unit_type=PNG_RESOLUTION_METER; ping_pHYs_x_resolution= (png_uint_32) ((100.0*image->x_resolution+0.5)/2.54); ping_pHYs_y_resolution= (png_uint_32) ((100.0*image->y_resolution+0.5)/2.54); } else if (image->units == PixelsPerCentimeterResolution) { ping_pHYs_unit_type=PNG_RESOLUTION_METER; ping_pHYs_x_resolution=(png_uint_32) (100.0*image->x_resolution+0.5); ping_pHYs_y_resolution=(png_uint_32) (100.0*image->y_resolution+0.5); } else { ping_pHYs_unit_type=PNG_RESOLUTION_UNKNOWN; ping_pHYs_x_resolution=(png_uint_32) image->x_resolution; ping_pHYs_y_resolution=(png_uint_32) image->y_resolution; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Set up PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.", (double) ping_pHYs_x_resolution,(double) ping_pHYs_y_resolution, (int) ping_pHYs_unit_type); ping_have_pHYs = MagickTrue; } } #endif if (ping_exclude_bKGD == MagickFalse) { if ((!mng_info->adjoin || !mng_info->equal_backgrounds)) { unsigned int mask; mask=0xffff; if (ping_bit_depth == 8) mask=0x00ff; if (ping_bit_depth == 4) mask=0x000f; if (ping_bit_depth == 2) mask=0x0003; if (ping_bit_depth == 1) mask=0x0001; ping_background.red=(png_uint_16) (ScaleQuantumToShort(image->background_color.red) & mask); ping_background.green=(png_uint_16) (ScaleQuantumToShort(image->background_color.green) & mask); ping_background.blue=(png_uint_16) (ScaleQuantumToShort(image->background_color.blue) & mask); ping_background.gray=(png_uint_16) ping_background.green; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk (1)"); (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d", (int) ping_background.index); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_bit_depth=%d",ping_bit_depth); } ping_have_bKGD = MagickTrue; } /* Select the color type. */ matte=image_matte; old_bit_depth=0; if (mng_info->IsPalette && mng_info->write_png8) { /* To do: make this a function cause it's used twice, except for reducing the sample depth from 8. */ number_colors=image_colors; ping_have_tRNS=MagickFalse; /* Set image palette. */ ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up PLTE chunk with %d colors (%d)", number_colors, image_colors); for (i=0; i < (ssize_t) number_colors; i++) { palette[i].red=ScaleQuantumToChar(image->colormap[i].red); palette[i].green=ScaleQuantumToChar(image->colormap[i].green); palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), #if MAGICKCORE_QUANTUM_DEPTH == 8 " %3ld (%3d,%3d,%3d)", #else " %5ld (%5d,%5d,%5d)", #endif (long) i,palette[i].red,palette[i].green,palette[i].blue); } ping_have_PLTE=MagickTrue; image_depth=ping_bit_depth; ping_num_trans=0; if (matte != MagickFalse) { /* Identify which colormap entry is transparent. */ assert(number_colors <= 256); assert(image->colormap != NULL); for (i=0; i < (ssize_t) number_transparent; i++) ping_trans_alpha[i]=0; ping_num_trans=(unsigned short) (number_transparent + number_semitransparent); if (ping_num_trans == 0) ping_have_tRNS=MagickFalse; else ping_have_tRNS=MagickTrue; } if (ping_exclude_bKGD == MagickFalse) { /* * Identify which colormap entry is the background color. */ for (i=0; i < (ssize_t) MagickMax(1L*number_colors-1L,1L); i++) if (IsPNGColorEqual(ping_background,image->colormap[i])) break; ping_background.index=(png_byte) i; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d", (int) ping_background.index); } } } /* end of write_png8 */ else if (mng_info->write_png_colortype == 1) { image_matte=MagickFalse; ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY; } else if (mng_info->write_png24 || mng_info->write_png48 || mng_info->write_png_colortype == 3) { image_matte=MagickFalse; ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; } else if (mng_info->write_png32 || mng_info->write_png64 || mng_info->write_png_colortype == 7) { image_matte=MagickTrue; ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA; } else /* mng_info->write_pngNN not specified */ { image_depth=ping_bit_depth; if (mng_info->write_png_colortype != 0) { ping_color_type=(png_byte) mng_info->write_png_colortype-1; if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA || ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) image_matte=MagickTrue; else image_matte=MagickFalse; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG colortype %d was specified:",(int) ping_color_type); } else /* write_png_colortype not specified */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Selecting PNG colortype:"); ping_color_type=(png_byte) ((matte != MagickFalse)? 
PNG_COLOR_TYPE_RGB_ALPHA:PNG_COLOR_TYPE_RGB); if (image_info->type == TrueColorType) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; image_matte=MagickFalse; } if (image_info->type == TrueColorMatteType) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA; image_matte=MagickTrue; } if (image_info->type == PaletteType || image_info->type == PaletteMatteType) ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (mng_info->write_png_colortype == 0 && image_info->type == UndefinedType) { if (ping_have_color == MagickFalse) { if (image_matte == MagickFalse) { ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY; image_matte=MagickFalse; } else { ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY_ALPHA; image_matte=MagickTrue; } } else { if (image_matte == MagickFalse) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; image_matte=MagickFalse; } else { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGBA; image_matte=MagickTrue; } } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Selected PNG colortype=%d",ping_color_type); if (ping_bit_depth < 8) { if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA || ping_color_type == PNG_COLOR_TYPE_RGB || ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) ping_bit_depth=8; } old_bit_depth=ping_bit_depth; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (image->matte == MagickFalse && ping_have_non_bw == MagickFalse) ping_bit_depth=1; } if (ping_color_type == PNG_COLOR_TYPE_PALETTE) { size_t one = 1; ping_bit_depth=1; if (image->colors == 0) { /* DO SOMETHING */ png_error(ping,"image has 0 colors"); } while ((int) (one << ping_bit_depth) < (ssize_t) image_colors) ping_bit_depth <<= 1; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %.20g",(double) image_colors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative PNG bit depth: %d",ping_bit_depth); } if (ping_bit_depth < (int) mng_info->write_png_depth) ping_bit_depth = mng_info->write_png_depth; } image_depth=ping_bit_depth; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative PNG color type: %s (%.20g)", PngColorTypeToString(ping_color_type), (double) ping_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_info->type: %.20g",(double) image_info->type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_depth: %.20g",(double) image_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth: %.20g",(double) image->depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_bit_depth: %.20g",(double) ping_bit_depth); } if (matte != MagickFalse) { if (mng_info->IsPalette) { if (mng_info->write_png_colortype == 0) { ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA; if (ping_have_color != MagickFalse) ping_color_type=PNG_COLOR_TYPE_RGBA; } /* * Determine if there is any transparent color. */ if (number_transparent + number_semitransparent == 0) { /* No transparent pixels are present. Change 4 or 6 to 0 or 2. 
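 *
 * (PNG color types are 0=gray, 2=RGB, 3=indexed, 4=gray+alpha, and
 * 6=RGB+alpha, so masking with 0x03 below maps 4 -> 0 and 6 -> 2,
 * i.e., it drops the alpha plane while keeping the base type.)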
*/ image_matte=MagickFalse; if (mng_info->write_png_colortype == 0) ping_color_type&=0x03; } else { unsigned int mask; mask=0xffff; if (ping_bit_depth == 8) mask=0x00ff; if (ping_bit_depth == 4) mask=0x000f; if (ping_bit_depth == 2) mask=0x0003; if (ping_bit_depth == 1) mask=0x0001; ping_trans_color.red=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].red) & mask); ping_trans_color.green=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].green) & mask); ping_trans_color.blue=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].blue) & mask); ping_trans_color.gray=(png_uint_16) (ScaleQuantumToShort(ClampToQuantum(GetPixelLuma(image, image->colormap))) & mask); ping_trans_color.index=(png_byte) 0; ping_have_tRNS=MagickTrue; } if (ping_have_tRNS != MagickFalse) { /* * Determine if there is one and only one transparent color * and if so if it is fully transparent. */ if (ping_have_cheap_transparency == MagickFalse) ping_have_tRNS=MagickFalse; } if (ping_have_tRNS != MagickFalse) { if (mng_info->write_png_colortype == 0) ping_color_type &= 0x03; /* changes 4 or 6 to 0 or 2 */ if (image_depth == 8) { ping_trans_color.red&=0xff; ping_trans_color.green&=0xff; ping_trans_color.blue&=0xff; ping_trans_color.gray&=0xff; } } } else { if (image_depth == 8) { ping_trans_color.red&=0xff; ping_trans_color.green&=0xff; ping_trans_color.blue&=0xff; ping_trans_color.gray&=0xff; } } } matte=image_matte; if (ping_have_tRNS != MagickFalse) image_matte=MagickFalse; if ((mng_info->IsPalette) && mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE && ping_have_color == MagickFalse && (image_matte == MagickFalse || image_depth >= 8)) { size_t one=1; if (image_matte != MagickFalse) ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA; else if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_GRAY_ALPHA) { ping_color_type=PNG_COLOR_TYPE_GRAY; if (save_image_depth == 16 && image_depth == 8) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color (0)"); } ping_trans_color.gray*=0x0101; } } if (image_depth > MAGICKCORE_QUANTUM_DEPTH) image_depth=MAGICKCORE_QUANTUM_DEPTH; if ((image_colors == 0) || ((ssize_t) (image_colors-1) > (ssize_t) MaxColormapSize)) image_colors=(int) (one << image_depth); if (image_depth > 8) ping_bit_depth=16; else { ping_bit_depth=8; if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { if(!mng_info->write_png_depth) { ping_bit_depth=1; while ((int) (one << ping_bit_depth) < (ssize_t) image_colors) ping_bit_depth <<= 1; } } else if (ping_color_type == PNG_COLOR_TYPE_GRAY && image_colors < 17 && mng_info->IsPalette) { /* Check if grayscale is reducible */ int depth_4_ok=MagickTrue, depth_2_ok=MagickTrue, depth_1_ok=MagickTrue; for (i=0; i < (ssize_t) image_colors; i++) { unsigned char intensity; intensity=ScaleQuantumToChar(image->colormap[i].red); if ((intensity & 0x0f) != ((intensity & 0xf0) >> 4)) depth_4_ok=depth_2_ok=depth_1_ok=MagickFalse; else if ((intensity & 0x03) != ((intensity & 0x0c) >> 2)) depth_2_ok=depth_1_ok=MagickFalse; else if ((intensity & 0x01) != ((intensity & 0x02) >> 1)) depth_1_ok=MagickFalse; } if (depth_1_ok && mng_info->write_png_depth <= 1) ping_bit_depth=1; else if (depth_2_ok && mng_info->write_png_depth <= 2) ping_bit_depth=2; else if (depth_4_ok && mng_info->write_png_depth <= 4) ping_bit_depth=4; } } image_depth=ping_bit_depth; } else if (mng_info->IsPalette) { number_colors=image_colors; if (image_depth <= 8) { /* Set image palette. 
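 *
 * (PLTE entries are always 8 bits per sample regardless of the bit
 * depth computed below; the bit depth only controls how many bits are
 * used to store each palette index, e.g., 16 colors fit in a
 * 4-bit-deep indexed PNG.)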
*/ ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (!(mng_info->have_write_global_plte && matte == MagickFalse)) { for (i=0; i < (ssize_t) number_colors; i++) { palette[i].red=ScaleQuantumToChar(image->colormap[i].red); palette[i].green=ScaleQuantumToChar(image->colormap[i].green); palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up PLTE chunk with %d colors", number_colors); ping_have_PLTE=MagickTrue; } /* color_type is PNG_COLOR_TYPE_PALETTE */ if (mng_info->write_png_depth == 0) { size_t one; ping_bit_depth=1; one=1; while ((one << ping_bit_depth) < (size_t) number_colors) ping_bit_depth <<= 1; } ping_num_trans=0; if (matte != MagickFalse) { /* * Set up trans_colors array. */ assert(number_colors <= 256); ping_num_trans=(unsigned short) (number_transparent + number_semitransparent); if (ping_num_trans == 0) ping_have_tRNS=MagickFalse; else { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color (1)"); } ping_have_tRNS=MagickTrue; for (i=0; i < ping_num_trans; i++) { ping_trans_alpha[i]= (png_byte) (255- ScaleQuantumToChar(image->colormap[i].opacity)); } } } } } else { if (image_depth < 8) image_depth=8; if ((save_image_depth == 16) && (image_depth == 8)) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color from (%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } ping_trans_color.red*=0x0101; ping_trans_color.green*=0x0101; ping_trans_color.blue*=0x0101; ping_trans_color.gray*=0x0101; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " to (%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } } } if (ping_bit_depth < (ssize_t) mng_info->write_png_depth) ping_bit_depth = (ssize_t) mng_info->write_png_depth; /* Adjust background and transparency samples in sub-8-bit grayscale files. */ if (ping_bit_depth < 8 && ping_color_type == PNG_COLOR_TYPE_GRAY) { png_uint_16 maxval; size_t one=1; maxval=(png_uint_16) ((one << ping_bit_depth)-1); if (ping_exclude_bKGD == MagickFalse) { ping_background.gray=(png_uint_16) ((maxval/65535.)*(ScaleQuantumToShort((Quantum) GetPixelLuma(image,&image->background_color)))+.5); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk (2)"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_background.index is %d", (int) ping_background.index); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_background.gray is %d", (int) ping_background.gray); } ping_have_bKGD = MagickTrue; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color.gray from %d", (int)ping_trans_color.gray); ping_trans_color.gray=(png_uint_16) ((maxval/255.)*( ping_trans_color.gray)+.5); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " to %d", (int)ping_trans_color.gray); } if (ping_exclude_bKGD == MagickFalse) { if (mng_info->IsPalette && (int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { /* Identify which colormap entry is the background color. 
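 *
 * (A linear search over the colormap entries; if no entry matches the
 * background color exactly, the loop falls off the end with
 * i == number_colors and the bKGD chunk is suppressed just below.)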
*/ number_colors=image_colors; for (i=0; i < (ssize_t) MagickMax(1L*number_colors,1L); i++) if (IsPNGColorEqual(image->background_color,image->colormap[i])) break; ping_background.index=(png_byte) i; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk with index=%d",(int) i); } if (i < (ssize_t) number_colors) { ping_have_bKGD = MagickTrue; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background =(%d,%d,%d)", (int) ping_background.red, (int) ping_background.green, (int) ping_background.blue); } } else /* Can't happen */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No room in PLTE to add bKGD color"); ping_have_bKGD = MagickFalse; } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG color type: %s (%d)", PngColorTypeToString(ping_color_type), ping_color_type); /* Initialize compression level and filtering. */ if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up deflate compression"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression buffer size: 32768"); } png_set_compression_buffer_size(ping,32768L); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression mem level: 9"); png_set_compression_mem_level(ping, 9); /* Untangle the "-quality" setting: if it is undefined (0), the default of 75 is used. 10's digit: 0 or omitted: Use Z_HUFFMAN_ONLY strategy with the zlib default compression level 1-9: the zlib compression level 1's digit: 0-4: the PNG filter method 5: libpng adaptive filtering if compression level > 5 libpng filter type "none" if compression level <= 5 or if image is grayscale or palette 6: libpng adaptive filtering 7: "LOCO" filtering (intrapixel differencing) if writing a MNG, otherwise "none". Did not work in IM-6.7.0-9 and earlier because of a missing "else". 8: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), adaptive filtering. Unused prior to IM-6.7.0-10, was same as 6 9: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), no PNG filters Unused prior to IM-6.7.0-10, was same as 6 For example, "-quality 95" selects zlib compression level 9 and filter method 5 (adaptive filtering, since the level is > 5). Note that, using the -quality option, not all combinations of PNG filter type, zlib compression level, and zlib compression strategy are possible. This is addressed by using "-define png:compression-strategy", etc., which takes precedence over -quality. */ quality=image_info->quality == UndefinedCompressionQuality ?
75UL : image_info->quality; if (quality <= 9) { if (mng_info->write_png_compression_strategy == 0) mng_info->write_png_compression_strategy = Z_HUFFMAN_ONLY+1; } else if (mng_info->write_png_compression_level == 0) { int level; level=(int) MagickMin((ssize_t) quality/10,9); mng_info->write_png_compression_level = level+1; } if (mng_info->write_png_compression_strategy == 0) { if ((quality %10) == 8 || (quality %10) == 9) #ifdef Z_RLE /* Z_RLE was added to zlib-1.2.0 */ mng_info->write_png_compression_strategy=Z_RLE+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif } if (mng_info->write_png_compression_filter == 0) mng_info->write_png_compression_filter=((int) quality % 10) + 1; if (logging != MagickFalse) { if (mng_info->write_png_compression_level) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression level: %d", (int) mng_info->write_png_compression_level-1); if (mng_info->write_png_compression_strategy) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression strategy: %d", (int) mng_info->write_png_compression_strategy-1); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up filtering"); if (mng_info->write_png_compression_filter == 6) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: ADAPTIVE"); else if (mng_info->write_png_compression_filter == 0 || mng_info->write_png_compression_filter == 1) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: NONE"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: %d", (int) mng_info->write_png_compression_filter-1); } if (mng_info->write_png_compression_level != 0) png_set_compression_level(ping,mng_info->write_png_compression_level-1); if (mng_info->write_png_compression_filter == 6) { if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) || (quality < 50)) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); else png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS); } else if (mng_info->write_png_compression_filter == 7 || mng_info->write_png_compression_filter == 10) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS); else if (mng_info->write_png_compression_filter == 8) { #if defined(PNG_MNG_FEATURES_SUPPORTED) && defined(PNG_INTRAPIXEL_DIFFERENCING) if (mng_info->write_mng) { if (((int) ping_color_type == PNG_COLOR_TYPE_RGB) || ((int) ping_color_type == PNG_COLOR_TYPE_RGBA)) ping_filter_method=PNG_INTRAPIXEL_DIFFERENCING; } #endif png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); } else if (mng_info->write_png_compression_filter == 9) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); else if (mng_info->write_png_compression_filter != 0) png_set_filter(ping,PNG_FILTER_TYPE_BASE, mng_info->write_png_compression_filter-1); if (mng_info->write_png_compression_strategy != 0) png_set_compression_strategy(ping, mng_info->write_png_compression_strategy-1); ping_interlace_method=image_info->interlace != NoInterlace; if (mng_info->write_mng) png_set_sig_bytes(ping,8); /* Bail out if cannot meet defined png:bit-depth or png:color-type */ if (mng_info->write_png_colortype != 0) { if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY) if (ping_have_color != MagickFalse) { ping_color_type = PNG_COLOR_TYPE_RGB; if (ping_bit_depth < 8) ping_bit_depth=8; } if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY_ALPHA) if (ping_have_color != MagickFalse) ping_color_type = PNG_COLOR_TYPE_RGB_ALPHA; } if (ping_need_colortype_warning 
!= MagickFalse || ((mng_info->write_png_depth && (int) mng_info->write_png_depth != ping_bit_depth) || (mng_info->write_png_colortype && ((int) mng_info->write_png_colortype-1 != ping_color_type && mng_info->write_png_colortype != 7 && !(mng_info->write_png_colortype == 5 && ping_color_type == 0))))) { if (logging != MagickFalse) { if (ping_need_colortype_warning != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image has transparency but tRNS chunk was excluded"); } if (mng_info->write_png_depth) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:bit-depth=%u, Computed depth=%u", mng_info->write_png_depth, ping_bit_depth); } if (mng_info->write_png_colortype) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:color-type=%u, Computed color type=%u", mng_info->write_png_colortype-1, ping_color_type); } } png_warning(ping, "Cannot write image with defined png:bit-depth or png:color-type."); } if (image_matte != MagickFalse && image->matte == MagickFalse) { /* Add an opaque matte channel */ image->matte = MagickTrue; (void) SetImageOpacity(image,0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Added an opaque matte channel"); } if (number_transparent != 0 || number_semitransparent != 0) { if (ping_color_type < 4) { ping_have_tRNS=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting ping_have_tRNS=MagickTrue."); } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG header chunks"); png_set_IHDR(ping,ping_info,ping_width,ping_height, ping_bit_depth,ping_color_type, ping_interlace_method,ping_compression_method, ping_filter_method); if (ping_color_type == 3 && ping_have_PLTE != MagickFalse) { if (mng_info->have_write_global_plte && matte == MagickFalse) { png_set_PLTE(ping,ping_info,NULL,0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up empty PLTE chunk"); } else png_set_PLTE(ping,ping_info,palette,number_colors); if (logging != MagickFalse) { for (i=0; i< (ssize_t) number_colors; i++) { if (i < ping_num_trans) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PLTE[%d] = (%d,%d,%d), tRNS[%d] = (%d)", (int) i, (int) palette[i].red, (int) palette[i].green, (int) palette[i].blue, (int) i, (int) ping_trans_alpha[i]); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PLTE[%d] = (%d,%d,%d)", (int) i, (int) palette[i].red, (int) palette[i].green, (int) palette[i].blue); } } } /* Only write the iCCP chunk if we are not writing the sRGB chunk. 
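 *
 * (The PNG specification recommends that iCCP and sRGB not both appear
 * in the same datastream, since they would give a decoder two
 * competing descriptions of the color space; the checks here and in
 * the sRGB block below ensure that only one of the two is written.)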
*/ if (ping_exclude_sRGB != MagickFalse || (!png_get_valid(ping,ping_info,PNG_INFO_sRGB))) { if ((ping_exclude_tEXt == MagickFalse || ping_exclude_zTXt == MagickFalse) && (ping_exclude_iCCP == MagickFalse || ping_exclude_zCCP == MagickFalse)) { ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { #ifdef PNG_WRITE_iCCP_SUPPORTED if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0)) { if (ping_exclude_iCCP == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up iCCP chunk"); png_set_iCCP(ping,ping_info,(const png_charp) name,0, #if (PNG_LIBPNG_VER < 10500) (png_charp) GetStringInfoDatum(profile), #else (const png_byte *) GetStringInfoDatum(profile), #endif (png_uint_32) GetStringInfoLength(profile)); ping_have_iCCP = MagickTrue; } } else #endif if (ping_exclude_zCCP == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up zTXT chunk with uuencoded ICC"); Magick_png_write_raw_profile(image_info,ping,ping_info, (unsigned char *) name,(unsigned char *) name, GetStringInfoDatum(profile), (png_uint_32) GetStringInfoLength(profile)); ping_have_iCCP = MagickTrue; } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up text chunk with %s profile",name); name=GetNextImageProfile(image); } } } #if defined(PNG_WRITE_sRGB_SUPPORTED) if ((mng_info->have_write_global_srgb == 0) && ping_have_iCCP != MagickTrue && (ping_have_sRGB != MagickFalse || png_get_valid(ping,ping_info,PNG_INFO_sRGB))) { if (ping_exclude_sRGB == MagickFalse) { /* Note image rendering intent. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up sRGB chunk"); (void) png_set_sRGB(ping,ping_info,( Magick_RenderingIntent_to_PNG_RenderingIntent( image->rendering_intent))); ping_have_sRGB = MagickTrue; } } if ((!mng_info->write_mng) || (!png_get_valid(ping,ping_info,PNG_INFO_sRGB))) #endif { if (ping_exclude_gAMA == MagickFalse && ping_have_iCCP == MagickFalse && ping_have_sRGB == MagickFalse && (ping_exclude_sRGB == MagickFalse || (image->gamma < .45 || image->gamma > .46))) { if ((mng_info->have_write_global_gama == 0) && (image->gamma != 0.0)) { /* Note image gamma. To do: check for cHRM+gAMA == sRGB, and write sRGB instead. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up gAMA chunk"); png_set_gAMA(ping,ping_info,image->gamma); } } if (ping_exclude_cHRM == MagickFalse && ping_have_sRGB == MagickFalse) { if ((mng_info->have_write_global_chrm == 0) && (image->chromaticity.red_primary.x != 0.0)) { /* Note image chromaticity. Note: if cHRM+gAMA == sRGB write sRGB instead. 
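 *
 * (For reference, the sRGB values such a check would compare against
 * are approximately: white point x=0.3127 y=0.3290, red x=0.64 y=0.33,
 * green x=0.30 y=0.60, blue x=0.15 y=0.06, together with
 * gAMA 1/2.2 = 0.45455.)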
*/ PrimaryInfo bp, gp, rp, wp; wp=image->chromaticity.white_point; rp=image->chromaticity.red_primary; gp=image->chromaticity.green_primary; bp=image->chromaticity.blue_primary; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up cHRM chunk"); png_set_cHRM(ping,ping_info,wp.x,wp.y,rp.x,rp.y,gp.x,gp.y, bp.x,bp.y); } } } if (ping_exclude_bKGD == MagickFalse) { if (ping_have_bKGD != MagickFalse) { png_set_bKGD(ping,ping_info,&ping_background); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background color = (%d,%d,%d)", (int) ping_background.red, (int) ping_background.green, (int) ping_background.blue); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " index = %d, gray=%d", (int) ping_background.index, (int) ping_background.gray); } } } if (ping_exclude_pHYs == MagickFalse) { if (ping_have_pHYs != MagickFalse) { png_set_pHYs(ping,ping_info, ping_pHYs_x_resolution, ping_pHYs_y_resolution, ping_pHYs_unit_type); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up pHYs chunk"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " x_resolution=%lu", (unsigned long) ping_pHYs_x_resolution); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " y_resolution=%lu", (unsigned long) ping_pHYs_y_resolution); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " unit_type=%lu", (unsigned long) ping_pHYs_unit_type); } } } #if defined(PNG_tIME_SUPPORTED) if (ping_exclude_tIME == MagickFalse) { const char *timestamp; if (image->taint == MagickFalse) { timestamp=GetImageOption(image_info,"png:tIME"); if (timestamp == (const char *) NULL) timestamp=GetImageProperty(image,"png:tIME"); } else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reset tIME in tainted image"); timestamp=GetImageProperty(image,"date:modify"); } if (timestamp != (const char *) NULL) write_tIME_chunk(image,ping,ping_info,timestamp); } #endif if (mng_info->need_blob != MagickFalse) { if (OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception) == MagickFalse) png_error(ping,"WriteBlob Failed"); ping_have_blob=MagickTrue; (void) ping_have_blob; } png_write_info_before_PLTE(ping, ping_info); if (ping_have_tRNS != MagickFalse && ping_color_type < 4) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Calling png_set_tRNS with num_trans=%d",ping_num_trans); } if (ping_color_type == 3) (void) png_set_tRNS(ping, ping_info, ping_trans_alpha, ping_num_trans, NULL); else { (void) png_set_tRNS(ping, ping_info, NULL, 0, &ping_trans_color); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tRNS color =(%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } } } /* write any PNG-chunk-b profiles */ (void) Magick_png_write_chunk_from_profile(image,"PNG-chunk-b",logging); png_write_info(ping,ping_info); /* write any PNG-chunk-m profiles */ (void) Magick_png_write_chunk_from_profile(image,"PNG-chunk-m",logging); ping_wrote_caNv = MagickFalse; /* write caNv chunk */ if (ping_exclude_caNv == MagickFalse) { if ((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows) || image->page.x != 0 || image->page.y != 0) { unsigned char chunk[20]; (void) WriteBlobMSBULong(image,16L); /* data length=16 */ PNGType(chunk,mng_caNv); LogPNGChunk(logging,mng_caNv,16L);
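/* Layout of the caNv chunk as assembled below (all fields are
 * big-endian 32-bit values following the 4-byte "caNv" type written
 * into chunk[0..3] by PNGType):
 *
 *   chunk+4   canvas width
 *   chunk+8   canvas height
 *   chunk+12  canvas x-offset (signed)
 *   chunk+16  canvas y-offset (signed)
 *
 * The 4-byte data length (16) was written just above, and the CRC over
 * the 20 type+data bytes is appended after WriteBlob(). */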
PNGLong(chunk+4,(png_uint_32) image->page.width); PNGLong(chunk+8,(png_uint_32) image->page.height); PNGsLong(chunk+12,(png_int_32) image->page.x); PNGsLong(chunk+16,(png_int_32) image->page.y); (void) WriteBlob(image,20,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,20)); ping_wrote_caNv = MagickTrue; } } #if defined(PNG_oFFs_SUPPORTED) if (ping_exclude_oFFs == MagickFalse && ping_wrote_caNv == MagickFalse) { if (image->page.x || image->page.y) { png_set_oFFs(ping,ping_info,(png_int_32) image->page.x, (png_int_32) image->page.y, 0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up oFFs chunk with x=%d, y=%d, units=0", (int) image->page.x, (int) image->page.y); } } #endif /* write vpAg chunk (deprecated, replaced by caNv) */ if (ping_exclude_vpAg == MagickFalse && ping_wrote_caNv == MagickFalse) { if ((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows)) { unsigned char chunk[14]; (void) WriteBlobMSBULong(image,9L); /* data length=9 */ PNGType(chunk,mng_vpAg); LogPNGChunk(logging,mng_vpAg,9L); PNGLong(chunk+4,(png_uint_32) image->page.width); PNGLong(chunk+8,(png_uint_32) image->page.height); chunk[12]=0; /* unit = pixels */ (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } } #if (PNG_LIBPNG_VER == 10206) /* avoid libpng-1.2.6 bug by setting PNG_HAVE_IDAT flag */ #define PNG_HAVE_IDAT 0x04 ping->mode |= PNG_HAVE_IDAT; #undef PNG_HAVE_IDAT #endif png_set_packing(ping); /* Allocate memory. */ rowbytes=image->columns; if (image_depth > 8) rowbytes*=2; switch (ping_color_type) { case PNG_COLOR_TYPE_RGB: rowbytes*=3; break; case PNG_COLOR_TYPE_GRAY_ALPHA: rowbytes*=2; break; case PNG_COLOR_TYPE_RGBA: rowbytes*=4; break; default: break; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG image data"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocating %.20g bytes of memory for pixels",(double) rowbytes); } pixel_info=AcquireVirtualMemory(rowbytes,sizeof(*ping_pixels)); if (pixel_info == (MemoryInfo *) NULL) png_error(ping,"Allocation of memory for pixels failed"); ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); /* Initialize image scanlines. */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) png_error(ping,"Memory allocation for quantum_info failed"); quantum_info->format=UndefinedQuantumFormat; SetQuantumDepth(image,quantum_info,image_depth); (void) SetQuantumEndian(image,quantum_info,MSBEndian); num_passes=png_set_interlace_handling(ping); if ((!mng_info->write_png8 && !mng_info->write_png24 && !mng_info->write_png48 && !mng_info->write_png64 && !mng_info->write_png32) && (mng_info->IsPalette || (image_info->type == BilevelType)) && image_matte == MagickFalse && ping_have_non_bw == MagickFalse) { /* Palette, Bilevel, or Opaque Monochrome */ register const PixelPacket *p; SetQuantumDepth(image,quantum_info,8); for (pass=0; pass < num_passes; pass++) { /* Convert PseudoClass image to a PNG monochrome image.
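 *
 * (Each row is exported as 8-bit gray samples; for non-palette input
 * they are thresholded to 0 or 255 below, and the png_set_packing()
 * call made earlier lets libpng pack the bytes down to the 1-, 2-, or
 * 4-bit depth declared in IHDR.)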
*/ for (y=0; y < (ssize_t) image->rows; y++) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (0)"); p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; if (mng_info->IsPalette) { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_PALETTE && mng_info->write_png_depth && mng_info->write_png_depth != old_bit_depth) { /* Undo pixel scaling */ for (i=0; i < (ssize_t) image->columns; i++) *(ping_pixels+i)=(unsigned char) (*(ping_pixels+i) >> (8-old_bit_depth)); } } else { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); } if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE) for (i=0; i < (ssize_t) image->columns; i++) *(ping_pixels+i)=(unsigned char) ((*(ping_pixels+i) > 127) ? 255 : 0); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (1)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } else /* Not Palette, Bilevel, or Opaque Monochrome */ { if ((!mng_info->write_png8 && !mng_info->write_png24 && !mng_info->write_png48 && !mng_info->write_png64 && !mng_info->write_png32) && (image_matte != MagickFalse || (ping_bit_depth >= MAGICKCORE_QUANTUM_DEPTH)) && (mng_info->IsPalette) && ping_have_color == MagickFalse) { register const PixelPacket *p; for (pass=0; pass < num_passes; pass++) { for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (mng_info->IsPalette) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY PNG pixels (2)"); } else /* PNG_COLOR_TYPE_GRAY_ALPHA */ { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (2)"); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels,&image->exception); } if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (2)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } else { register const PixelPacket *p; for (pass=0; pass < num_passes; pass++) { if ((image_depth > 8) || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (!mng_info->write_png8 && !mng_info->IsPalette)) { for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (image->storage_class == DirectClass) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); else (void) 
ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); } else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels, &image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (3)"); } else if (image_matte != MagickFalse) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RGBAQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RGBQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (3)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } else /* not ((image_depth > 8) || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (!mng_info->write_png8 && !mng_info->IsPalette)) */ { if ((ping_color_type != PNG_COLOR_TYPE_GRAY) && (ping_color_type != PNG_COLOR_TYPE_GRAY_ALPHA)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pass %d, Image Is not GRAY or GRAY_ALPHA",pass); SetQuantumDepth(image,quantum_info,8); image_depth=8; } for (y=0; y < (ssize_t) image->rows; y++) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pass %d, Image Is RGB, 16-bit GRAY, or GRAY_ALPHA",pass); p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { SetQuantumDepth(image,quantum_info,image->depth); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); } else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (4)"); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels, &image->exception); } else { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,IndexQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y <= 2) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of non-gray pixels (4)"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_pixels[0]=%d,ping_pixels[1]=%d", (int)ping_pixels[0],(int)ping_pixels[1]); } } png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } } } if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Wrote PNG image data"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Width: %.20g",(double) ping_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Height: %.20g",(double) ping_height); if (mng_info->write_png_depth) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:bit-depth: %d",mng_info->write_png_depth); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG bit-depth written: %d",ping_bit_depth); if (mng_info->write_png_colortype) { 
(void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:color-type: %d",mng_info->write_png_colortype-1); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG color-type written: %d",ping_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG Interlace method: %d",ping_interlace_method); } /* Generate text chunks after IDAT. */ if (ping_exclude_tEXt == MagickFalse || ping_exclude_zTXt == MagickFalse) { ResetImagePropertyIterator(image); property=GetNextImageProperty(image); while (property != (const char *) NULL) { png_textp text; value=GetImageProperty(image,property); /* Don't write any "png:" or "jpeg:" properties; those are just for * "identify" or for passing through to another JPEG */ if ((LocaleNCompare(property,"png:",4) != 0 && LocaleNCompare(property,"jpeg:",5) != 0) && /* Suppress density and units if we wrote a pHYs chunk */ (ping_exclude_pHYs != MagickFalse || (LocaleCompare(property,"density") != 0 && LocaleCompare(property,"units") != 0)) && /* Suppress the IM-generated Date:create and Date:modify */ (ping_exclude_date == MagickFalse || LocaleNCompare(property, "Date:",5) != 0)) { if (value != (const char *) NULL) { #if PNG_LIBPNG_VER >= 10400 text=(png_textp) png_malloc(ping, (png_alloc_size_t) sizeof(png_text)); #else text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text)); #endif text[0].key=(char *) property; text[0].text=(char *) value; text[0].text_length=strlen(value); if (ping_exclude_tEXt != MagickFalse) text[0].compression=PNG_TEXT_COMPRESSION_zTXt; else if (ping_exclude_zTXt != MagickFalse) text[0].compression=PNG_TEXT_COMPRESSION_NONE; else { text[0].compression=image_info->compression == NoCompression || (image_info->compression == UndefinedCompression && text[0].text_length < 128) ?
PNG_TEXT_COMPRESSION_NONE : PNG_TEXT_COMPRESSION_zTXt ; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up text chunk"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " keyword: '%s'",text[0].key); } png_set_text(ping,ping_info,text,1); png_free(ping,text); } } property=GetNextImageProperty(image); } } /* write any PNG-chunk-e profiles */ (void) Magick_png_write_chunk_from_profile(image,"PNG-chunk-e",logging); /* write exIf profile */ if (ping_have_eXIf != MagickFalse && ping_exclude_eXIf == MagickFalse) { char *name; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { if (LocaleCompare(name,"exif") == 0) { const StringInfo *profile; profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { png_uint_32 length; unsigned char chunk[4], *data; StringInfo *ping_profile; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Have eXIf profile"); ping_profile=CloneStringInfo(profile); data=GetStringInfoDatum(ping_profile), length=(png_uint_32) GetStringInfoLength(ping_profile); #if 0 /* eXIf chunk is registered */ PNGType(chunk,mng_eXIf); #else /* eXIf chunk not yet registered; write exIf instead */ PNGType(chunk,mng_exIf); #endif if (length < 7) break; /* otherwise crashes */ /* skip the "Exif\0\0" Exif header ID */ length -= 6; LogPNGChunk(logging,chunk,length); (void) WriteBlobMSBULong(image,length); (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,data+6); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4), data+6, (uInt) length)); break; } } name=GetNextImageProfile(image); } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG end info"); png_write_end(ping,ping_info); if (mng_info->need_fram && (int) image->dispose == BackgroundDispose) { if (mng_info->page.x || mng_info->page.y || (ping_width != mng_info->page.width) || (ping_height != mng_info->page.height)) { unsigned char chunk[32]; /* Write FRAM 4 with clipping boundaries followed by FRAM 1. */ (void) WriteBlobMSBULong(image,27L); /* data length=27 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,27L); chunk[4]=4; chunk[5]=0; /* frame name separator (no name) */ chunk[6]=1; /* flag for changing delay, for next frame only */ chunk[7]=0; /* flag for changing frame timeout */ chunk[8]=1; /* flag for changing frame clipping for next frame */ chunk[9]=0; /* flag for changing frame sync_id */ PNGLong(chunk+10,(png_uint_32) (0L)); /* temporary 0 delay */ chunk[14]=0; /* clipping boundaries delta type */ PNGLong(chunk+15,(png_uint_32) (mng_info->page.x)); /* left cb */ PNGLong(chunk+19, (png_uint_32) (mng_info->page.x + ping_width)); PNGLong(chunk+23,(png_uint_32) (mng_info->page.y)); /* top cb */ PNGLong(chunk+27, (png_uint_32) (mng_info->page.y + ping_height)); (void) WriteBlob(image,31,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,31)); mng_info->old_framing_mode=4; mng_info->framing_mode=1; } else mng_info->framing_mode=3; } if (mng_info->write_mng && !mng_info->need_fram && ((int) image->dispose == 3)) png_error(ping, "Cannot convert GIF with disposal method 3 to MNG-LC"); /* Free PNG resources.
*/ png_destroy_write_struct(&ping,&ping_info); pixel_info=RelinquishVirtualMemory(pixel_info); /* Store bit depth actually written */ s[0]=(char) ping_bit_depth; s[1]='\0'; (void) SetImageProperty(image,"png:bit-depth-written",s); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteOnePNGImage()"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif /* } for navigation to beginning of SETJMP-protected block. Revert to * Throwing an Exception when an error occurs. */ return(MagickTrue); /* End write one PNG image */ } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePNGImage() writes a Portable Network Graphics (PNG) or % Multiple-image Network Graphics (MNG) image file. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WritePNGImage method is: % % MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % % Returns MagickTrue on success, MagickFalse on failure. % % Communicating with the PNG encoder: % % While the datastream written is always in PNG format and normally would % be given the "png" file extension, this method also writes the following % pseudo-formats which are subsets of png: % % o PNG8: An 8-bit indexed PNG datastream is written. If the image has % a depth greater than 8, the depth is reduced. If transparency % is present, the tRNS chunk must only have values 0 and 255 % (i.e., transparency is binary: fully opaque or fully % transparent). If other values are present they will be % 50%-thresholded to binary transparency. If more than 256 % colors are present, they will be quantized to the 4-4-4-1, % 3-3-3-1, or 3-3-2-1 palette. The underlying RGB color % of any resulting fully-transparent pixels is changed to % the image's background color. % % If you want better quantization or dithering of the colors % or alpha than that, you need to do it before calling the % PNG encoder. The pixels contain 8-bit indices even if % they could be represented with 1, 2, or 4 bits. Grayscale % images will be written as indexed PNG files even though the % PNG grayscale type might be slightly more efficient. Please % note that writing to the PNG8 format may result in loss % of color and alpha data. % % o PNG24: An 8-bit per sample RGB PNG datastream is written. The tRNS % chunk can be present to convey binary transparency by naming % one of the colors as transparent. The only loss incurred % is reduction of sample depth to 8. If the image has more % than one transparent color, has semitransparent pixels, or % has an opaque pixel with the same RGB components as the % transparent color, an image is not written. % % o PNG32: An 8-bit per sample RGBA PNG is written. Partial % transparency is permitted, i.e., the alpha sample for % each pixel can have any value from 0 to 255. The alpha % channel is present even if the image is fully opaque. % The only loss in data is the reduction of the sample depth % to 8. % % o PNG48: A 16-bit per sample RGB PNG datastream is written. The tRNS % chunk can be present to convey binary transparency by naming % one of the colors as transparent. 
If the image has more % than one transparent color, has semitransparent pixels, or % has an opaque pixel with the same RGB components as the % transparent color, an image is not written. % % o PNG64: A 16-bit per sample RGBA PNG is written. Partial % transparency is permitted, i.e., the alpha sample for % each pixel can have any value from 0 to 65535. The alpha % channel is present even if the image is fully opaque. % % o PNG00: A PNG that inherits its colortype and bit-depth from the input % image, if the input was a PNG, is written. If these values % cannot be found, or if the pixels have been changed in a way % that makes this impossible, then "PNG00" falls back to the % regular "PNG" format. % % o -define: For more precise control of the PNG output, you can use the % Image options "png:bit-depth" and "png:color-type". These % can be set from the commandline with "-define" and also % from the application programming interfaces. The options % are case-independent and are converted to lowercase before % being passed to this encoder. % % png:color-type can be 0, 2, 3, 4, or 6. % % When png:color-type is 0 (Grayscale), png:bit-depth can % be 1, 2, 4, 8, or 16. % % When png:color-type is 2 (RGB), png:bit-depth can % be 8 or 16. % % When png:color-type is 3 (Indexed), png:bit-depth can % be 1, 2, 4, or 8. This refers to the number of bits % used to store the index. The color samples always have % bit-depth 8 in indexed PNG files. % % When png:color-type is 4 (Gray-Matte) or 6 (RGB-Matte), % png:bit-depth can be 8 or 16. % % If the image cannot be written without loss with the % requested bit-depth and color-type, a PNG file will not % be written, a warning will be issued, and the encoder will % return MagickFalse. % % Since image encoders should not be responsible for the "heavy lifting", % the user should make sure that ImageMagick has already reduced the % image depth and number of colors and limit transparency to binary % transparency prior to attempting to write the image with depth, color, % or transparency limitations. % % To do: Enforce the previous paragraph. % % Note that another definition, "png:bit-depth-written" exists, but it % is not intended for external use. It is only used internally by the % PNG encoder to inform the JNG encoder of the depth of the alpha channel. % % It is possible to request that the PNG encoder write previously-formatted % ancillary chunks in the output PNG file, using the "-profile" commandline % option as shown below or by setting the profile via a programming % interface: % % -profile PNG-chunk-x:<file> % % where x is a location flag and <file> is a file containing the chunk % name in the first 4 bytes, then a colon (":"), followed by the chunk data. % This encoder will compute the chunk length and CRC, so those must not % be included in the file. % % "x" can be "b" (before PLTE), "m" (middle, i.e., between PLTE and IDAT), % or "e" (end, i.e., after IDAT). If you want to write multiple chunks % of the same type, then add a short unique string after the "x" to prevent % subsequent profiles from overwriting the preceding ones, e.g., % % -profile PNG-chunk-b01:file01 -profile PNG-chunk-b02:file02 % % As of version 6.6.6 the following optimizations are always done: % % o 32-bit depth is reduced to 16. % o 16-bit depth is reduced to 8 if all pixels contain samples whose % high byte and low byte are identical. % o Palette is sorted to remove unused entries and to put a % transparent color first, if BUILD_PNG_PALETTE is defined. 
%    o Opaque matte channel is removed when writing an indexed PNG.
%    o Grayscale images are reduced to 1, 2, or 4 bit depth if
%      this can be done without loss and a larger bit depth N was not
%      requested via the "-define png:bit-depth=N" option.
%    o If a matte channel is present but only one transparent color is
%      present, RGB+tRNS is written instead of RGBA.
%    o Opaque matte channel is removed (or added, if color-type 4 or 6
%      was requested when converting an opaque image).
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
*/
static MagickBooleanType WritePNGImage(const ImageInfo *image_info,
  Image *image)
{
  MagickBooleanType
    excluding,
    logging,
    status;

  MngInfo
    *mng_info;

  const char
    *value;

  int
    source;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WritePNGImage()");

  /*
    Allocate a MngInfo structure.
  */
  mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo));

  if (mng_info == (MngInfo *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");

  /*
    Initialize members of the MngInfo structure.
  */
  (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo));
  mng_info->image=image;
  mng_info->equal_backgrounds=MagickTrue;

  /* See if user has requested a specific PNG subformat */
  mng_info->write_png8=LocaleCompare(image_info->magick,"PNG8") == 0;
  mng_info->write_png24=LocaleCompare(image_info->magick,"PNG24") == 0;
  mng_info->write_png32=LocaleCompare(image_info->magick,"PNG32") == 0;
  mng_info->write_png48=LocaleCompare(image_info->magick,"PNG48") == 0;
  mng_info->write_png64=LocaleCompare(image_info->magick,"PNG64") == 0;

  value=GetImageOption(image_info,"png:format");

  if (value != (char *) NULL || LocaleCompare(image_info->magick,"PNG00") == 0)
    {
      mng_info->write_png8 = MagickFalse;
      mng_info->write_png24 = MagickFalse;
      mng_info->write_png32 = MagickFalse;
      mng_info->write_png48 = MagickFalse;
      mng_info->write_png64 = MagickFalse;

      /* "value" can be NULL when we reach this block via the "PNG00"
         magick rather than via a "png:format" define, so it must not be
         passed to "%s" unchecked. */
      if (value != (char *) NULL)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
           "  Format=%s",value);

      if (LocaleCompare(value,"png8") == 0)
        mng_info->write_png8 = MagickTrue;

      else if (LocaleCompare(value,"png24") == 0)
        mng_info->write_png24 = MagickTrue;

      else if (LocaleCompare(value,"png32") == 0)
        mng_info->write_png32 = MagickTrue;

      else if (LocaleCompare(value,"png48") == 0)
        mng_info->write_png48 = MagickTrue;

      else if (LocaleCompare(value,"png64") == 0)
        mng_info->write_png64 = MagickTrue;

      else if ((LocaleCompare(value,"png00") == 0) ||
         LocaleCompare(image_info->magick,"PNG00") == 0)
        {
          /* Retrieve png:IHDR.bit-depth-orig and png:IHDR.color-type-orig */
          value=GetImageProperty(image,"png:IHDR.bit-depth-orig");

          if (value != (char *) NULL)
            {
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                 "  png00 inherited bit depth=%s",value);

              if (LocaleCompare(value,"1") == 0)
                mng_info->write_png_depth = 1;

              else if (LocaleCompare(value,"2") == 0)
                mng_info->write_png_depth = 2;

              else if (LocaleCompare(value,"4") == 0)
                mng_info->write_png_depth = 4;

              else if (LocaleCompare(value,"8") == 0)
                mng_info->write_png_depth = 8;

              else if (LocaleCompare(value,"16") == 0)
                mng_info->write_png_depth = 16;
            }

          value=GetImageProperty(image,"png:IHDR.color-type-orig");

          if (value != (char *) NULL)
            {
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                 "  png00 inherited color type=%s",value);

              if (LocaleCompare(value,"0") == 0)
mng_info->write_png_colortype = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_colortype = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_colortype = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_colortype = 5; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_colortype = 7; } } } if (mng_info->write_png8) { mng_info->write_png_colortype = /* 3 */ 4; mng_info->write_png_depth = 8; image->depth = 8; } if (mng_info->write_png24) { mng_info->write_png_colortype = /* 2 */ 3; mng_info->write_png_depth = 8; image->depth = 8; if (image->matte != MagickFalse) (void) SetImageType(image,TrueColorMatteType); else (void) SetImageType(image,TrueColorType); (void) SyncImage(image); } if (mng_info->write_png32) { mng_info->write_png_colortype = /* 6 */ 7; mng_info->write_png_depth = 8; image->depth = 8; image->matte = MagickTrue; (void) SetImageType(image,TrueColorMatteType); (void) SyncImage(image); } if (mng_info->write_png48) { mng_info->write_png_colortype = /* 2 */ 3; mng_info->write_png_depth = 16; image->depth = 16; if (image->matte != MagickFalse) (void) SetImageType(image,TrueColorMatteType); else (void) SetImageType(image,TrueColorType); (void) SyncImage(image); } if (mng_info->write_png64) { mng_info->write_png_colortype = /* 6 */ 7; mng_info->write_png_depth = 16; image->depth = 16; image->matte = MagickTrue; (void) SetImageType(image,TrueColorMatteType); (void) SyncImage(image); } value=GetImageOption(image_info,"png:bit-depth"); if (value != (char *) NULL) { if (LocaleCompare(value,"1") == 0) mng_info->write_png_depth = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_depth = 2; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_depth = 4; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_depth = 8; else if (LocaleCompare(value,"16") == 0) mng_info->write_png_depth = 16; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:bit-depth", "=%s",value); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:bit-depth=%d was defined.\n",mng_info->write_png_depth); } value=GetImageOption(image_info,"png:color-type"); if (value != (char *) NULL) { /* We must store colortype+1 because 0 is a valid colortype */ if (LocaleCompare(value,"0") == 0) mng_info->write_png_colortype = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_colortype = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_colortype = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_colortype = 5; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_colortype = 7; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:color-type", "=%s",value); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:color-type=%d was defined.\n",mng_info->write_png_colortype-1); } /* Check for chunks to be excluded: * * The default is to not exclude any known chunks except for any * listed in the "unused_chunks" array, above. * * Chunks can be listed for exclusion via a "png:exclude-chunk" * define (in the image properties or in the image artifacts) * or via a mng_info member. For convenience, in addition * to or instead of a comma-separated list of chunks, the * "exclude-chunk" string can be simply "all" or "none". * * The exclude-chunk define takes priority over the mng_info. 
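 *
 * An illustrative command line (hypothetical file names, not taken from
 * this source): "convert in.png -define png:exclude-chunk=date,time
 * out.png" suppresses the date text chunks and tIME, while
 * "-define png:exclude-chunk=all" combined with
 * "-define png:include-chunk=gama" leaves only gAMA eligible among the
 * ancillary chunks handled below.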
* * A "png:include-chunk" define takes priority over both the * mng_info and the "png:exclude-chunk" define. Like the * "exclude-chunk" string, it can define "all" or "none" as * well as a comma-separated list. Chunks that are unknown to * ImageMagick are always excluded, regardless of their "copy-safe" * status according to the PNG specification, and even if they * appear in the "include-chunk" list. Such defines appearing among * the image options take priority over those found among the image * artifacts. * * Finally, all chunks listed in the "unused_chunks" array are * automatically excluded, regardless of the other instructions * or lack thereof. * * if you exclude sRGB but not gAMA (recommended), then sRGB chunk * will not be written and the gAMA chunk will only be written if it * is not between .45 and .46, or approximately (1.0/2.2). * * If you exclude tRNS and the image has transparency, the colortype * is forced to be 4 or 6 (GRAY_ALPHA or RGB_ALPHA). * * The -strip option causes StripImage() to set the png:include-chunk * artifact to "none,trns,gama". */ mng_info->ping_exclude_bKGD=MagickFalse; mng_info->ping_exclude_caNv=MagickFalse; mng_info->ping_exclude_cHRM=MagickFalse; mng_info->ping_exclude_date=MagickFalse; mng_info->ping_exclude_eXIf=MagickFalse; mng_info->ping_exclude_EXIF=MagickFalse; /* hex-encoded EXIF in zTXt */ mng_info->ping_exclude_gAMA=MagickFalse; mng_info->ping_exclude_iCCP=MagickFalse; /* mng_info->ping_exclude_iTXt=MagickFalse; */ mng_info->ping_exclude_oFFs=MagickFalse; mng_info->ping_exclude_pHYs=MagickFalse; mng_info->ping_exclude_sRGB=MagickFalse; mng_info->ping_exclude_tEXt=MagickFalse; mng_info->ping_exclude_tIME=MagickFalse; mng_info->ping_exclude_tRNS=MagickFalse; mng_info->ping_exclude_vpAg=MagickFalse; mng_info->ping_exclude_zCCP=MagickFalse; /* hex-encoded iCCP in zTXt */ mng_info->ping_exclude_zTXt=MagickFalse; mng_info->ping_preserve_colormap=MagickFalse; value=GetImageOption(image_info,"png:preserve-colormap"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-colormap"); if (value != NULL) mng_info->ping_preserve_colormap=MagickTrue; mng_info->ping_preserve_iCCP=MagickFalse; value=GetImageOption(image_info,"png:preserve-iCCP"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-iCCP"); if (value != NULL) mng_info->ping_preserve_iCCP=MagickTrue; /* These compression-level, compression-strategy, and compression-filter * defines take precedence over values from the -quality option. */ value=GetImageOption(image_info,"png:compression-level"); if (value == NULL) value=GetImageArtifact(image,"png:compression-level"); if (value != NULL) { /* To do: use a "LocaleInteger:()" function here. */ /* We have to add 1 to everything because 0 is a valid input, * and we want to use 0 (the default) to mean undefined. 
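 *
 * As an illustration (a hedged reading of the mapping below):
 * "-define png:compression-level=9" is stored here as the value 10 so
 * that 0 can keep meaning "unset", and the stored value is presumably
 * reduced by 1 again at the point where it is handed to zlib.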
*/ if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_level = 1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_level = 2; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_level = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_compression_level = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_compression_level = 5; else if (LocaleCompare(value,"5") == 0) mng_info->write_png_compression_level = 6; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_compression_level = 7; else if (LocaleCompare(value,"7") == 0) mng_info->write_png_compression_level = 8; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_compression_level = 9; else if (LocaleCompare(value,"9") == 0) mng_info->write_png_compression_level = 10; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-level", "=%s",value); } value=GetImageOption(image_info,"png:compression-strategy"); if (value == NULL) value=GetImageArtifact(image,"png:compression-strategy"); if (value != NULL) { if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_strategy = Z_FILTERED+1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_strategy = Z_HUFFMAN_ONLY+1; else if (LocaleCompare(value,"3") == 0) #ifdef Z_RLE /* Z_RLE was added to zlib-1.2.0 */ mng_info->write_png_compression_strategy = Z_RLE+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif else if (LocaleCompare(value,"4") == 0) #ifdef Z_FIXED /* Z_FIXED was added to zlib-1.2.2.2 */ mng_info->write_png_compression_strategy = Z_FIXED+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-strategy", "=%s",value); } value=GetImageOption(image_info,"png:compression-filter"); if (value == NULL) value=GetImageArtifact(image,"png:compression-filter"); if (value != NULL) { /* To do: combinations of filters allowed by libpng * masks 0x08 through 0xf8 * * Implement this as a comma-separated list of 0,1,2,3,4,5 * where 5 is a special case meaning PNG_ALL_FILTERS. 
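 *
 * Example (hypothetical file names): "convert in.png
 * -define png:compression-filter=5 out.png" requests PNG_ALL_FILTERS,
 * the special case noted above.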
*/ if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_filter = 1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_filter = 2; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_filter = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_compression_filter = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_compression_filter = 5; else if (LocaleCompare(value,"5") == 0) mng_info->write_png_compression_filter = 6; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-filter", "=%s",value); } for (source=0; source<8; source++) { value = NULL; if (source == 0) value=GetImageOption(image_info,"png:exclude-chunks"); if (source == 1) value=GetImageArtifact(image,"png:exclude-chunks"); if (source == 2) value=GetImageOption(image_info,"png:exclude-chunk"); if (source == 3) value=GetImageArtifact(image,"png:exclude-chunk"); if (source == 4) value=GetImageOption(image_info,"png:include-chunks"); if (source == 5) value=GetImageArtifact(image,"png:include-chunks"); if (source == 6) value=GetImageOption(image_info,"png:include-chunk"); if (source == 7) value=GetImageArtifact(image,"png:include-chunk"); if (value == NULL) continue; if (source < 4) excluding = MagickTrue; else excluding = MagickFalse; if (logging != MagickFalse) { if (source == 0 || source == 2) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:exclude-chunk=%s found in image options.\n", value); else if (source == 1 || source == 3) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:exclude-chunk=%s found in image artifacts.\n", value); else if (source == 4 || source == 6) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:include-chunk=%s found in image options.\n", value); else /* if (source == 5 || source == 7) */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:include-chunk=%s found in image artifacts.\n", value); } if (IsOptionMember("all",value) != MagickFalse) { mng_info->ping_exclude_bKGD=excluding; mng_info->ping_exclude_caNv=excluding; mng_info->ping_exclude_cHRM=excluding; mng_info->ping_exclude_date=excluding; mng_info->ping_exclude_EXIF=excluding; mng_info->ping_exclude_eXIf=excluding; mng_info->ping_exclude_gAMA=excluding; mng_info->ping_exclude_iCCP=excluding; /* mng_info->ping_exclude_iTXt=excluding; */ mng_info->ping_exclude_oFFs=excluding; mng_info->ping_exclude_pHYs=excluding; mng_info->ping_exclude_sRGB=excluding; mng_info->ping_exclude_tIME=excluding; mng_info->ping_exclude_tEXt=excluding; mng_info->ping_exclude_tRNS=excluding; mng_info->ping_exclude_vpAg=excluding; mng_info->ping_exclude_zCCP=excluding; mng_info->ping_exclude_zTXt=excluding; } if (IsOptionMember("none",value) != MagickFalse) { mng_info->ping_exclude_bKGD=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_caNv=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_cHRM=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_date=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_eXIf=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_EXIF=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_gAMA=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_iCCP=excluding != MagickFalse ? MagickFalse : MagickTrue; /* mng_info->ping_exclude_iTXt=!excluding; */ mng_info->ping_exclude_oFFs=excluding != MagickFalse ? 
MagickFalse : MagickTrue; mng_info->ping_exclude_pHYs=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_sRGB=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tEXt=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tIME=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tRNS=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_vpAg=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_zCCP=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_zTXt=excluding != MagickFalse ? MagickFalse : MagickTrue; } if (IsOptionMember("bkgd",value) != MagickFalse) mng_info->ping_exclude_bKGD=excluding; if (IsOptionMember("caNv",value) != MagickFalse) mng_info->ping_exclude_caNv=excluding; if (IsOptionMember("chrm",value) != MagickFalse) mng_info->ping_exclude_cHRM=excluding; if (IsOptionMember("date",value) != MagickFalse) mng_info->ping_exclude_date=excluding; if (IsOptionMember("exif",value) != MagickFalse) { mng_info->ping_exclude_EXIF=excluding; mng_info->ping_exclude_eXIf=excluding; } if (IsOptionMember("gama",value) != MagickFalse) mng_info->ping_exclude_gAMA=excluding; if (IsOptionMember("iccp",value) != MagickFalse) mng_info->ping_exclude_iCCP=excluding; #if 0 if (IsOptionMember("itxt",value) != MagickFalse) mng_info->ping_exclude_iTXt=excluding; #endif if (IsOptionMember("offs",value) != MagickFalse) mng_info->ping_exclude_oFFs=excluding; if (IsOptionMember("phys",value) != MagickFalse) mng_info->ping_exclude_pHYs=excluding; if (IsOptionMember("srgb",value) != MagickFalse) mng_info->ping_exclude_sRGB=excluding; if (IsOptionMember("text",value) != MagickFalse) mng_info->ping_exclude_tEXt=excluding; if (IsOptionMember("time",value) != MagickFalse) mng_info->ping_exclude_tIME=excluding; if (IsOptionMember("trns",value) != MagickFalse) mng_info->ping_exclude_tRNS=excluding; if (IsOptionMember("vpag",value) != MagickFalse) mng_info->ping_exclude_vpAg=excluding; if (IsOptionMember("zccp",value) != MagickFalse) mng_info->ping_exclude_zCCP=excluding; if (IsOptionMember("ztxt",value) != MagickFalse) mng_info->ping_exclude_zTXt=excluding; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Chunks to be excluded from the output png:"); if (mng_info->ping_exclude_bKGD != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " bKGD"); if (mng_info->ping_exclude_caNv != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " caNv"); if (mng_info->ping_exclude_cHRM != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " cHRM"); if (mng_info->ping_exclude_date != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " date"); if (mng_info->ping_exclude_EXIF != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " EXIF"); if (mng_info->ping_exclude_eXIf != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " eXIf"); if (mng_info->ping_exclude_gAMA != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " gAMA"); if (mng_info->ping_exclude_iCCP != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " iCCP"); #if 0 if (mng_info->ping_exclude_iTXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " iTXt"); #endif if (mng_info->ping_exclude_oFFs != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " oFFs"); if (mng_info->ping_exclude_pHYs != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " pHYs"); if (mng_info->ping_exclude_sRGB != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " sRGB"); if (mng_info->ping_exclude_tEXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tEXt"); if (mng_info->ping_exclude_tIME != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tIME"); if (mng_info->ping_exclude_tRNS != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tRNS"); if (mng_info->ping_exclude_vpAg != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " vpAg"); if (mng_info->ping_exclude_zCCP != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " zCCP"); if (mng_info->ping_exclude_zTXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " zTXt"); } mng_info->need_blob = MagickTrue; status=WriteOnePNGImage(mng_info,image_info,image); (void) CloseBlob(image); mng_info=MngInfoFreeStruct(mng_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit WritePNGImage()"); return(status); } #if defined(JNG_SUPPORTED) /* Write one JNG image */ static MagickBooleanType WriteOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info,Image *image) { Image *jpeg_image; ImageInfo *jpeg_image_info; int unique_filenames; MagickBooleanType logging, status; size_t length; unsigned char *blob, chunk[80], *p; unsigned int jng_alpha_compression_method, jng_alpha_sample_depth, jng_color_type, transparent; size_t jng_alpha_quality, jng_quality; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter WriteOneJNGImage()"); blob=(unsigned char *) NULL; jpeg_image=(Image *) NULL; jpeg_image_info=(ImageInfo *) NULL; length=0; unique_filenames=0; status=MagickTrue; transparent=image_info->type==GrayscaleMatteType || image_info->type==TrueColorMatteType || image->matte != MagickFalse; jng_alpha_sample_depth = 0; jng_quality=image_info->quality == 0UL ? 75UL : image_info->quality%1000; jng_alpha_compression_method=image->compression==JPEGCompression? 8 : 0; jng_alpha_quality=image_info->quality == 0UL ? 
75UL : image_info->quality; if (jng_alpha_quality >= 1000) jng_alpha_quality /= 1000; if (transparent != 0) { jng_color_type=14; /* Create JPEG blob, image, and image_info */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image_info for opacity."); jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info); if (jpeg_image_info == (ImageInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image."); jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image == (Image *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); status=SeparateImageChannel(jpeg_image,OpacityChannel); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); status=NegateImage(jpeg_image,MagickFalse); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); jpeg_image->matte=MagickFalse; jpeg_image_info->type=GrayscaleType; jpeg_image->quality=jng_alpha_quality; (void) SetImageType(jpeg_image,GrayscaleType); (void) AcquireUniqueFilename(jpeg_image->filename); unique_filenames++; (void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent, "%s",jpeg_image->filename); } else { jng_alpha_compression_method=0; jng_color_type=10; jng_alpha_sample_depth=0; } /* To do: check bit depth of PNG alpha channel */ /* Check if image is grayscale. */ if (image_info->type != TrueColorMatteType && image_info->type != TrueColorType && SetImageGray(image,&image->exception)) jng_color_type-=2; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Quality = %d",(int) jng_quality); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Color Type = %d",jng_color_type); if (transparent != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Compression = %d",jng_alpha_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Depth = %d",jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Quality = %d",(int) jng_alpha_quality); } } if (transparent != 0) { if (jng_alpha_compression_method==0) { const char *value; /* Encode opacity as a grayscale PNG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating PNG blob for alpha."); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); length=0; (void) CopyMagickString(jpeg_image_info->magick,"PNG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"PNG",MaxTextExtent); jpeg_image_info->interlace=NoInterlace; /* Exclude all ancillary chunks */ (void) SetImageArtifact(jpeg_image,"png:exclude-chunks","all"); blob=ImageToBlob(jpeg_image_info,jpeg_image,&length, &image->exception); /* Retrieve sample depth used */ value=GetImageProperty(jpeg_image,"png:bit-depth-written"); if (value != (char *) NULL) jng_alpha_sample_depth= (unsigned int) value[0]; } else { /* Encode opacity as a grayscale JPEG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating JPEG blob for alpha."); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (status == MagickFalse) 
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); jpeg_image_info->interlace=NoInterlace; blob=ImageToBlob(jpeg_image_info,jpeg_image,&length, &image->exception); jng_alpha_sample_depth=8; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Successfully read jpeg_image into a blob, length=%.20g.", (double) length); } /* Destroy JPEG image and image_info */ jpeg_image=DestroyImage(jpeg_image); (void) RelinquishUniqueFileResource(jpeg_image_info->filename); unique_filenames--; jpeg_image_info=DestroyImageInfo(jpeg_image_info); } /* Write JHDR chunk */ (void) WriteBlobMSBULong(image,16L); /* chunk data length=16 */ PNGType(chunk,mng_JHDR); LogPNGChunk(logging,mng_JHDR,16L); PNGLong(chunk+4,(png_uint_32) image->columns); PNGLong(chunk+8,(png_uint_32) image->rows); chunk[12]=jng_color_type; chunk[13]=8; /* sample depth */ chunk[14]=8; /*jng_image_compression_method */ chunk[15]=(unsigned char) (image_info->interlace == NoInterlace ? 0 : 8); chunk[16]=jng_alpha_sample_depth; chunk[17]=jng_alpha_compression_method; chunk[18]=0; /*jng_alpha_filter_method */ chunk[19]=0; /*jng_alpha_interlace_method */ (void) WriteBlob(image,20,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,20)); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG width:%15lu",(unsigned long) image->columns); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG height:%14lu",(unsigned long) image->rows); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG color type:%10d",jng_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG sample depth:%8d",8); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG compression:%9d",8); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG interlace:%11d",0); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha depth:%9d",jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha compression:%3d",jng_alpha_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha filter:%8d",0); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha interlace:%5d",0); } /* Write any JNG-chunk-b profiles */ (void) Magick_png_write_chunk_from_profile(image,"JNG-chunk-b",logging); /* Write leading ancillary chunks */ if (transparent != 0) { /* Write JNG bKGD chunk */ unsigned char blue, green, red; ssize_t num_bytes; if (jng_color_type == 8 || jng_color_type == 12) num_bytes=6L; else num_bytes=10L; (void) WriteBlobMSBULong(image,(size_t) (num_bytes-4L)); PNGType(chunk,mng_bKGD); LogPNGChunk(logging,mng_bKGD,(size_t) (num_bytes-4L)); red=ScaleQuantumToChar(image->background_color.red); green=ScaleQuantumToChar(image->background_color.green); blue=ScaleQuantumToChar(image->background_color.blue); *(chunk+4)=0; *(chunk+5)=red; *(chunk+6)=0; *(chunk+7)=green; *(chunk+8)=0; *(chunk+9)=blue; (void) WriteBlob(image,(size_t) num_bytes,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) num_bytes)); } if ((image->colorspace == sRGBColorspace || image->rendering_intent)) { /* Write JNG sRGB chunk */ (void) WriteBlobMSBULong(image,1L); PNGType(chunk,mng_sRGB); LogPNGChunk(logging,mng_sRGB,1L); if (image->rendering_intent != UndefinedIntent) chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (image->rendering_intent)); else chunk[4]=(unsigned char) 
Magick_RenderingIntent_to_PNG_RenderingIntent( (PerceptualIntent)); (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); } else { if (image->gamma != 0.0) { /* Write JNG gAMA chunk */ (void) WriteBlobMSBULong(image,4L); PNGType(chunk,mng_gAMA); LogPNGChunk(logging,mng_gAMA,4L); PNGLong(chunk+4,(png_uint_32) (100000*image->gamma+0.5)); (void) WriteBlob(image,8,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,8)); } if ((mng_info->equal_chrms == MagickFalse) && (image->chromaticity.red_primary.x != 0.0)) { PrimaryInfo primary; /* Write JNG cHRM chunk */ (void) WriteBlobMSBULong(image,32L); PNGType(chunk,mng_cHRM); LogPNGChunk(logging,mng_cHRM,32L); primary=image->chromaticity.white_point; PNGLong(chunk+4,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+8,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.red_primary; PNGLong(chunk+12,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+16,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.green_primary; PNGLong(chunk+20,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+24,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.blue_primary; PNGLong(chunk+28,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+32,(png_uint_32) (100000*primary.y+0.5)); (void) WriteBlob(image,36,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,36)); } } if (image->x_resolution && image->y_resolution && !mng_info->equal_physs) { /* Write JNG pHYs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_pHYs); LogPNGChunk(logging,mng_pHYs,9L); if (image->units == PixelsPerInchResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0/2.54+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0/2.54+0.5)); chunk[12]=1; } else { if (image->units == PixelsPerCentimeterResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0+0.5)); chunk[12]=1; } else { PNGLong(chunk+4,(png_uint_32) (image->x_resolution+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution+0.5)); chunk[12]=0; } } (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } if (mng_info->write_mng == 0 && (image->page.x || image->page.y)) { /* Write JNG oFFs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_oFFs); LogPNGChunk(logging,mng_oFFs,9L); PNGsLong(chunk+4,(ssize_t) (image->page.x)); PNGsLong(chunk+8,(ssize_t) (image->page.y)); chunk[12]=0; (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } if (mng_info->write_mng == 0 && (image->page.width || image->page.height)) { (void) WriteBlobMSBULong(image,9L); /* data length=8 */ PNGType(chunk,mng_vpAg); LogPNGChunk(logging,mng_vpAg,9L); PNGLong(chunk+4,(png_uint_32) image->page.width); PNGLong(chunk+8,(png_uint_32) image->page.height); chunk[12]=0; /* unit = pixels */ (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } if (transparent != 0) { if (jng_alpha_compression_method==0) { register ssize_t i; size_t len; /* Write IDAT chunk header */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write IDAT chunks from blob, length=%.20g.",(double) length); /* Copy IDAT chunks */ len=0; p=blob+8; for (i=8; i<(ssize_t) length; i+=len+12) { len=(size_t) (*p) << 24; len|=(size_t) (*(p+1)) << 16; len|=(size_t) (*(p+2)) << 8; len|=(size_t) (*(p+3)); p+=4; if (*(p)==73 && *(p+1)==68 && *(p+2)==65 && *(p+3)==84) /* IDAT */ { /* 
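                    Each PNG chunk in the blob is a 4-byte big-endian
                    length, a 4-byte type, the data, and a 4-byte CRC;
                    the shifts above reassemble the length, and the
                    bytes 73 68 65 84 are ASCII "IDAT".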
Found an IDAT chunk. */ (void) WriteBlobMSBULong(image,len); LogPNGChunk(logging,mng_IDAT,len); (void) WriteBlob(image,len+4,p); (void) WriteBlobMSBULong(image,crc32(0,p,(uInt) len+4)); } else { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping %c%c%c%c chunk, length=%.20g.", *(p),*(p+1),*(p+2),*(p+3),(double) len); } p+=(8+len); } } else if (length != 0) { /* Write JDAA chunk header */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write JDAA chunk, length=%.20g.",(double) length); (void) WriteBlobMSBULong(image,(size_t) length); PNGType(chunk,mng_JDAA); LogPNGChunk(logging,mng_JDAA,length); /* Write JDAT chunk(s) data */ (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,blob); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob, (uInt) length)); } blob=(unsigned char *) RelinquishMagickMemory(blob); } /* Encode image as a JPEG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image_info."); jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info); if (jpeg_image_info == (ImageInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image."); jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image == (Image *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); (void) AcquireUniqueFilename(jpeg_image->filename); unique_filenames++; (void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent,"%s", jpeg_image->filename); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Created jpeg_image, %.20g x %.20g.",(double) jpeg_image->columns, (double) jpeg_image->rows); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (jng_color_type == 8 || jng_color_type == 12) jpeg_image_info->type=GrayscaleType; jpeg_image_info->quality=jng_quality; jpeg_image->quality=jng_quality; (void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating blob."); blob=ImageToBlob(jpeg_image_info,jpeg_image,&length,&image->exception); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Successfully read jpeg_image into a blob, length=%.20g.", (double) length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write JDAT chunk, length=%.20g.",(double) length); } /* Write JDAT chunk(s) */ (void) WriteBlobMSBULong(image,(size_t) length); PNGType(chunk,mng_JDAT); LogPNGChunk(logging,mng_JDAT,length); (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,blob); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob,(uInt) length)); jpeg_image=DestroyImage(jpeg_image); (void) RelinquishUniqueFileResource(jpeg_image_info->filename); unique_filenames--; jpeg_image_info=DestroyImageInfo(jpeg_image_info); blob=(unsigned char *) RelinquishMagickMemory(blob); /* Write any JNG-chunk-e profiles */ (void) Magick_png_write_chunk_from_profile(image,"JNG-chunk-e",logging); /* Write IEND chunk */ (void) WriteBlobMSBULong(image,0L); PNGType(chunk,mng_IEND); LogPNGChunk(logging,mng_IEND,0); 
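 /* IEND carries no data: only the 4-byte chunk type is written here,
    followed by the CRC of those 4 bytes (0xae426082 for "IEND"). */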
(void) WriteBlob(image,4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,4)); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteOneJNGImage(); unique_filenames=%d",unique_filenames); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e J N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteJNGImage() writes a JPEG Network Graphics (JNG) image file. % % JNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WriteJNGImage method is: % % MagickBooleanType WriteJNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% */ static MagickBooleanType WriteJNGImage(const ImageInfo *image_info,Image *image) { MagickBooleanType logging, status; MngInfo *mng_info; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WriteJNGImage()"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); if ((image->columns > 65535UL) || (image->rows > 65535UL)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; (void) WriteBlob(image,8,(const unsigned char *) "\213JNG\r\n\032\n"); status=WriteOneJNGImage(mng_info,image_info,image); mng_info=MngInfoFreeStruct(mng_info); (void) CloseBlob(image); (void) CatchImageException(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteJNGImage()"); return(status); } #endif static MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) { const char *option; Image *next_image; MagickBooleanType status; volatile MagickBooleanType logging; MngInfo *mng_info; int image_count, need_iterations, need_matte; volatile int #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) need_local_plte, #endif all_images_are_gray, need_defi, use_global_plte; register ssize_t i; unsigned char chunk[800]; volatile unsigned int write_jng, write_mng; volatile size_t scene; size_t final_delay=0, initial_delay; #if (PNG_LIBPNG_VER < 10200) if (image_info->verbose) printf("Your PNG library (libpng-%s) is rather old.\n", PNG_LIBPNG_VER_STRING); #endif /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WriteMNGImage()"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); /* Allocate a MngInfo structure. 
*/ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; write_mng=LocaleCompare(image_info->magick,"MNG") == 0; /* * See if user has requested a specific PNG subformat to be used * for all of the PNGs in the MNG being written, e.g., * * convert *.png png8:animation.mng * * To do: check -define png:bit_depth and png:color_type as well, * or perhaps use mng:bit_depth and mng:color_type instead for * global settings. */ mng_info->write_png8=LocaleCompare(image_info->magick,"PNG8") == 0; mng_info->write_png24=LocaleCompare(image_info->magick,"PNG24") == 0; mng_info->write_png32=LocaleCompare(image_info->magick,"PNG32") == 0; write_jng=MagickFalse; if (image_info->compression == JPEGCompression) write_jng=MagickTrue; mng_info->adjoin=image_info->adjoin && (GetNextImageInList(image) != (Image *) NULL) && write_mng; if (logging != MagickFalse) { /* Log some info about the input */ Image *p; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Checking input image(s)\n" " Image_info depth: %.20g, Type: %d", (double) image_info->depth, image_info->type); scene=0; for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scene: %.20g\n, Image depth: %.20g", (double) scene++, (double) p->depth); if (p->matte) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Matte: True"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Matte: False"); if (p->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Storage class: PseudoClass"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Storage class: DirectClass"); if (p->colors) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %.20g",(double) p->colors); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: unspecified"); if (mng_info->adjoin == MagickFalse) break; } } use_global_plte=MagickFalse; all_images_are_gray=MagickFalse; #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED need_local_plte=MagickTrue; #endif need_defi=MagickFalse; need_matte=MagickFalse; mng_info->framing_mode=1; mng_info->old_framing_mode=1; if (write_mng) if (image_info->page != (char *) NULL) { /* Determine image bounding box. */ SetGeometry(image,&mng_info->page); (void) ParseMetaGeometry(image_info->page,&mng_info->page.x, &mng_info->page.y,&mng_info->page.width,&mng_info->page.height); } if (write_mng) { unsigned int need_geom; unsigned short red, green, blue; mng_info->page=image->page; need_geom=MagickTrue; if (mng_info->page.width || mng_info->page.height) need_geom=MagickFalse; /* Check all the scenes. 
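       This pass over the image list decides which MNG facilities are
       needed: the overall frame geometry for the MHDR, DEFI for page
       offsets, FRAM for per-frame delays, and whether the gamma, cHRM,
       pHYs, and background values are equal across frames and can be
       hoisted into global chunks.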
*/ initial_delay=image->delay; need_iterations=MagickFalse; mng_info->equal_chrms=image->chromaticity.red_primary.x != 0.0; mng_info->equal_physs=MagickTrue, mng_info->equal_gammas=MagickTrue; mng_info->equal_srgbs=MagickTrue; mng_info->equal_backgrounds=MagickTrue; image_count=0; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) all_images_are_gray=MagickTrue; mng_info->equal_palettes=MagickFalse; need_local_plte=MagickFalse; #endif for (next_image=image; next_image != (Image *) NULL; ) { if (need_geom) { if ((next_image->columns+next_image->page.x) > mng_info->page.width) mng_info->page.width=next_image->columns+next_image->page.x; if ((next_image->rows+next_image->page.y) > mng_info->page.height) mng_info->page.height=next_image->rows+next_image->page.y; } if (next_image->page.x || next_image->page.y) need_defi=MagickTrue; if (next_image->matte) need_matte=MagickTrue; if ((int) next_image->dispose >= BackgroundDispose) if (next_image->matte || next_image->page.x || next_image->page.y || ((next_image->columns < mng_info->page.width) && (next_image->rows < mng_info->page.height))) mng_info->need_fram=MagickTrue; if (next_image->iterations) need_iterations=MagickTrue; final_delay=next_image->delay; if (final_delay != initial_delay || final_delay > 1UL* next_image->ticks_per_second) mng_info->need_fram=1; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) /* check for global palette possibility. */ if (image->matte != MagickFalse) need_local_plte=MagickTrue; if (need_local_plte == 0) { if (SetImageGray(image,&image->exception) == MagickFalse) all_images_are_gray=MagickFalse; mng_info->equal_palettes=PalettesAreEqual(image,next_image); if (use_global_plte == 0) use_global_plte=mng_info->equal_palettes; need_local_plte=!mng_info->equal_palettes; } #endif if (GetNextImageInList(next_image) != (Image *) NULL) { if (next_image->background_color.red != next_image->next->background_color.red || next_image->background_color.green != next_image->next->background_color.green || next_image->background_color.blue != next_image->next->background_color.blue) mng_info->equal_backgrounds=MagickFalse; if (next_image->gamma != next_image->next->gamma) mng_info->equal_gammas=MagickFalse; if (next_image->rendering_intent != next_image->next->rendering_intent) mng_info->equal_srgbs=MagickFalse; if ((next_image->units != next_image->next->units) || (next_image->x_resolution != next_image->next->x_resolution) || (next_image->y_resolution != next_image->next->y_resolution)) mng_info->equal_physs=MagickFalse; if (mng_info->equal_chrms) { if (next_image->chromaticity.red_primary.x != next_image->next->chromaticity.red_primary.x || next_image->chromaticity.red_primary.y != next_image->next->chromaticity.red_primary.y || next_image->chromaticity.green_primary.x != next_image->next->chromaticity.green_primary.x || next_image->chromaticity.green_primary.y != next_image->next->chromaticity.green_primary.y || next_image->chromaticity.blue_primary.x != next_image->next->chromaticity.blue_primary.x || next_image->chromaticity.blue_primary.y != next_image->next->chromaticity.blue_primary.y || next_image->chromaticity.white_point.x != next_image->next->chromaticity.white_point.x || next_image->chromaticity.white_point.y != next_image->next->chromaticity.white_point.y) mng_info->equal_chrms=MagickFalse; } } image_count++; next_image=GetNextImageInList(next_image); } if (image_count < 2) { mng_info->equal_backgrounds=MagickFalse; 
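 /* With fewer than two images there is nothing to share globally, so
    the equal_* shortcuts are all switched off. */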
mng_info->equal_chrms=MagickFalse; mng_info->equal_gammas=MagickFalse; mng_info->equal_srgbs=MagickFalse; mng_info->equal_physs=MagickFalse; use_global_plte=MagickFalse; #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED need_local_plte=MagickTrue; #endif need_iterations=MagickFalse; } if (mng_info->need_fram == MagickFalse) { /* Only certain framing rates 100/n are exactly representable without the FRAM chunk but we'll allow some slop in VLC files */ if (final_delay == 0) { if (need_iterations != MagickFalse) { /* It's probably a GIF with loop; don't run it *too* fast. */ if (mng_info->adjoin) { final_delay=10; (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "input has zero delay between all frames; assuming", " 10 cs `%s'",""); } } else mng_info->ticks_per_second=0; } if (final_delay != 0) mng_info->ticks_per_second=(png_uint_32) (image->ticks_per_second/final_delay); if (final_delay > 50) mng_info->ticks_per_second=2; if (final_delay > 75) mng_info->ticks_per_second=1; if (final_delay > 125) mng_info->need_fram=MagickTrue; if (need_defi && final_delay > 2 && (final_delay != 4) && (final_delay != 5) && (final_delay != 10) && (final_delay != 20) && (final_delay != 25) && (final_delay != 50) && (final_delay != (size_t) image->ticks_per_second)) mng_info->need_fram=MagickTrue; /* make it exact; cannot be VLC */ } if (mng_info->need_fram != MagickFalse) mng_info->ticks_per_second=1UL*image->ticks_per_second; /* If pseudocolor, we should also check to see if all the palettes are identical and write a global PLTE if they are. ../glennrp Feb 99. */ /* Write the MNG version 1.0 signature and MHDR chunk. */ (void) WriteBlob(image,8,(const unsigned char *) "\212MNG\r\n\032\n"); (void) WriteBlobMSBULong(image,28L); /* chunk data length=28 */ PNGType(chunk,mng_MHDR); LogPNGChunk(logging,mng_MHDR,28L); PNGLong(chunk+4,(png_uint_32) mng_info->page.width); PNGLong(chunk+8,(png_uint_32) mng_info->page.height); PNGLong(chunk+12,mng_info->ticks_per_second); PNGLong(chunk+16,0L); /* layer count=unknown */ PNGLong(chunk+20,0L); /* frame count=unknown */ PNGLong(chunk+24,0L); /* play time=unknown */ if (write_jng) { if (need_matte) { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,27L); /* simplicity=LC+JNG */ else PNGLong(chunk+28,25L); /* simplicity=VLC+JNG */ } else { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,19L); /* simplicity=LC+JNG, no transparency */ else PNGLong(chunk+28,17L); /* simplicity=VLC+JNG, no transparency */ } } else { if (need_matte) { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,11L); /* simplicity=LC */ else PNGLong(chunk+28,9L); /* simplicity=VLC */ } else { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,3L); /* simplicity=LC, no transparency */ else PNGLong(chunk+28,1L); /* simplicity=VLC, no transparency */ } } (void) WriteBlob(image,32,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,32)); option=GetImageOption(image_info,"mng:need-cacheoff"); if (option != (const char *) NULL) { size_t length; /* Write "nEED CACHEOFF" to turn playback caching off for streaming MNG. 
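       A hypothetical invocation: "convert frames*.png -define
       mng:need-cacheoff out.mng".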
*/ PNGType(chunk,mng_nEED); length=CopyMagickString((char *) chunk+4,"CACHEOFF",20); (void) WriteBlobMSBULong(image,(size_t) length); LogPNGChunk(logging,mng_nEED,(size_t) length); length+=4; (void) WriteBlob(image,length,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) length)); } if ((GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) != (Image *) NULL) && (image->iterations != 1)) { /* Write MNG TERM chunk */ (void) WriteBlobMSBULong(image,10L); /* data length=10 */ PNGType(chunk,mng_TERM); LogPNGChunk(logging,mng_TERM,10L); chunk[4]=3; /* repeat animation */ chunk[5]=0; /* show last frame when done */ PNGLong(chunk+6,(png_uint_32) (mng_info->ticks_per_second* final_delay/MagickMax(image->ticks_per_second,1))); if (image->iterations == 0) PNGLong(chunk+10,PNG_UINT_31_MAX); else PNGLong(chunk+10,(png_uint_32) image->iterations); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " TERM delay: %.20g",(double) (mng_info->ticks_per_second* final_delay/MagickMax(image->ticks_per_second,1))); if (image->iterations == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " TERM iterations: %.20g",(double) PNG_UINT_31_MAX); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image iterations: %.20g",(double) image->iterations); } (void) WriteBlob(image,14,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,14)); } /* To do: check for cHRM+gAMA == sRGB, and write sRGB instead. */ if ((image->colorspace == sRGBColorspace || image->rendering_intent) && mng_info->equal_srgbs) { /* Write MNG sRGB chunk */ (void) WriteBlobMSBULong(image,1L); PNGType(chunk,mng_sRGB); LogPNGChunk(logging,mng_sRGB,1L); if (image->rendering_intent != UndefinedIntent) chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (image->rendering_intent)); else chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (PerceptualIntent)); (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); mng_info->have_write_global_srgb=MagickTrue; } else { if (image->gamma && mng_info->equal_gammas) { /* Write MNG gAMA chunk */ (void) WriteBlobMSBULong(image,4L); PNGType(chunk,mng_gAMA); LogPNGChunk(logging,mng_gAMA,4L); PNGLong(chunk+4,(png_uint_32) (100000*image->gamma+0.5)); (void) WriteBlob(image,8,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,8)); mng_info->have_write_global_gama=MagickTrue; } if (mng_info->equal_chrms) { PrimaryInfo primary; /* Write MNG cHRM chunk */ (void) WriteBlobMSBULong(image,32L); PNGType(chunk,mng_cHRM); LogPNGChunk(logging,mng_cHRM,32L); primary=image->chromaticity.white_point; PNGLong(chunk+4,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+8,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.red_primary; PNGLong(chunk+12,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+16,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.green_primary; PNGLong(chunk+20,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+24,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.blue_primary; PNGLong(chunk+28,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+32,(png_uint_32) (100000*primary.y+0.5)); (void) WriteBlob(image,36,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,36)); mng_info->have_write_global_chrm=MagickTrue; } } if (image->x_resolution && image->y_resolution && mng_info->equal_physs) { /* Write MNG pHYs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_pHYs); LogPNGChunk(logging,mng_pHYs,9L); if 
(image->units == PixelsPerInchResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0/2.54+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0/2.54+0.5)); chunk[12]=1; } else { if (image->units == PixelsPerCentimeterResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0+0.5)); chunk[12]=1; } else { PNGLong(chunk+4,(png_uint_32) (image->x_resolution+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution+0.5)); chunk[12]=0; } } (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } /* Write MNG BACK chunk and global bKGD chunk, if the image is transparent or does not cover the entire frame. */ if (write_mng && (image->matte || image->page.x > 0 || image->page.y > 0 || (image->page.width && (image->page.width+image->page.x < mng_info->page.width)) || (image->page.height && (image->page.height+image->page.y < mng_info->page.height)))) { (void) WriteBlobMSBULong(image,6L); PNGType(chunk,mng_BACK); LogPNGChunk(logging,mng_BACK,6L); red=ScaleQuantumToShort(image->background_color.red); green=ScaleQuantumToShort(image->background_color.green); blue=ScaleQuantumToShort(image->background_color.blue); PNGShort(chunk+4,red); PNGShort(chunk+6,green); PNGShort(chunk+8,blue); (void) WriteBlob(image,10,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,10)); if (mng_info->equal_backgrounds) { (void) WriteBlobMSBULong(image,6L); PNGType(chunk,mng_bKGD); LogPNGChunk(logging,mng_bKGD,6L); (void) WriteBlob(image,10,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,10)); } } #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED if ((need_local_plte == MagickFalse) && (image->storage_class == PseudoClass) && (all_images_are_gray == MagickFalse)) { size_t data_length; /* Write MNG PLTE chunk */ data_length=3*image->colors; (void) WriteBlobMSBULong(image,data_length); PNGType(chunk,mng_PLTE); LogPNGChunk(logging,mng_PLTE,data_length); for (i=0; i < (ssize_t) image->colors; i++) { chunk[4+i*3]=ScaleQuantumToChar(image->colormap[i].red) & 0xff; chunk[5+i*3]=ScaleQuantumToChar(image->colormap[i].green) & 0xff; chunk[6+i*3]=ScaleQuantumToChar(image->colormap[i].blue) & 0xff; } (void) WriteBlob(image,data_length+4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) (data_length+4))); mng_info->have_write_global_plte=MagickTrue; } #endif } scene=0; mng_info->delay=0; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) mng_info->equal_palettes=MagickFalse; #endif do { if (mng_info->adjoin) { #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) /* If we aren't using a global palette for the entire MNG, check to see if we can use one for two or more consecutive images. 
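         PalettesAreEqual() below performs that comparison; when it
         holds and no global PLTE has been written yet, a single PLTE
         chunk is emitted for the run of matching frames.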
*/ if (need_local_plte && use_global_plte && !all_images_are_gray) { if (mng_info->IsPalette) { /* When equal_palettes is true, this image has the same palette as the previous PseudoClass image */ mng_info->have_write_global_plte=mng_info->equal_palettes; mng_info->equal_palettes=PalettesAreEqual(image,image->next); if (mng_info->equal_palettes && !mng_info->have_write_global_plte) { /* Write MNG PLTE chunk */ size_t data_length; data_length=3*image->colors; (void) WriteBlobMSBULong(image,data_length); PNGType(chunk,mng_PLTE); LogPNGChunk(logging,mng_PLTE,data_length); for (i=0; i < (ssize_t) image->colors; i++) { chunk[4+i*3]=ScaleQuantumToChar(image->colormap[i].red); chunk[5+i*3]=ScaleQuantumToChar(image->colormap[i].green); chunk[6+i*3]=ScaleQuantumToChar(image->colormap[i].blue); } (void) WriteBlob(image,data_length+4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk, (uInt) (data_length+4))); mng_info->have_write_global_plte=MagickTrue; } } else mng_info->have_write_global_plte=MagickFalse; } #endif if (need_defi) { ssize_t previous_x, previous_y; if (scene != 0) { previous_x=mng_info->page.x; previous_y=mng_info->page.y; } else { previous_x=0; previous_y=0; } mng_info->page=image->page; if ((mng_info->page.x != previous_x) || (mng_info->page.y != previous_y)) { (void) WriteBlobMSBULong(image,12L); /* data length=12 */ PNGType(chunk,mng_DEFI); LogPNGChunk(logging,mng_DEFI,12L); chunk[4]=0; /* object 0 MSB */ chunk[5]=0; /* object 0 LSB */ chunk[6]=0; /* visible */ chunk[7]=0; /* abstract */ PNGLong(chunk+8,(png_uint_32) mng_info->page.x); PNGLong(chunk+12,(png_uint_32) mng_info->page.y); (void) WriteBlob(image,16,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,16)); } } } mng_info->write_mng=write_mng; if ((int) image->dispose >= 3) mng_info->framing_mode=3; if (mng_info->need_fram && mng_info->adjoin && ((image->delay != mng_info->delay) || (mng_info->framing_mode != mng_info->old_framing_mode))) { if (image->delay == mng_info->delay) { /* Write a MNG FRAM chunk with the new framing mode. */ (void) WriteBlobMSBULong(image,1L); /* data length=1 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,1L); chunk[4]=(unsigned char) mng_info->framing_mode; (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); } else { /* Write a MNG FRAM chunk with the delay. */ (void) WriteBlobMSBULong(image,10L); /* data length=10 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,10L); chunk[4]=(unsigned char) mng_info->framing_mode; chunk[5]=0; /* frame name separator (no name) */ chunk[6]=2; /* flag for changing default delay */ chunk[7]=0; /* flag for changing frame timeout */ chunk[8]=0; /* flag for changing frame clipping */ chunk[9]=0; /* flag for changing frame sync_id */ PNGLong(chunk+10,(png_uint_32) ((mng_info->ticks_per_second* image->delay)/MagickMax(image->ticks_per_second,1))); (void) WriteBlob(image,14,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,14)); mng_info->delay=(png_uint_32) image->delay; } mng_info->old_framing_mode=mng_info->framing_mode; } #if defined(JNG_SUPPORTED) if (image_info->compression == JPEGCompression) { ImageInfo *write_info; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing JNG object."); /* To do: specify the desired alpha compression method. 
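         (WriteOneJNGImage() currently decides by itself: the alpha
         channel is JPEG-compressed when image->compression is
         JPEGCompression and is otherwise written as a deflate-compressed
         grayscale PNG; see jng_alpha_compression_method there.)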
*/ write_info=CloneImageInfo(image_info); write_info->compression=UndefinedCompression; status=WriteOneJNGImage(mng_info,write_info,image); write_info=DestroyImageInfo(write_info); } else #endif { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG object."); mng_info->need_blob = MagickFalse; mng_info->ping_preserve_colormap = MagickFalse; /* We don't want any ancillary chunks written */ mng_info->ping_exclude_bKGD=MagickTrue; mng_info->ping_exclude_caNv=MagickTrue; mng_info->ping_exclude_cHRM=MagickTrue; mng_info->ping_exclude_date=MagickTrue; mng_info->ping_exclude_EXIF=MagickTrue; mng_info->ping_exclude_eXIf=MagickTrue; mng_info->ping_exclude_gAMA=MagickTrue; mng_info->ping_exclude_iCCP=MagickTrue; /* mng_info->ping_exclude_iTXt=MagickTrue; */ mng_info->ping_exclude_oFFs=MagickTrue; mng_info->ping_exclude_pHYs=MagickTrue; mng_info->ping_exclude_sRGB=MagickTrue; mng_info->ping_exclude_tEXt=MagickTrue; mng_info->ping_exclude_tRNS=MagickTrue; mng_info->ping_exclude_vpAg=MagickTrue; mng_info->ping_exclude_zCCP=MagickTrue; mng_info->ping_exclude_zTXt=MagickTrue; status=WriteOnePNGImage(mng_info,image_info,image); } if (status == MagickFalse) { mng_info=MngInfoFreeStruct(mng_info); (void) CloseBlob(image); return(MagickFalse); } (void) CatchImageException(image); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++, GetImageListLength(image)); if (status == MagickFalse) break; } while (mng_info->adjoin); if (write_mng) { while (GetPreviousImageInList(image) != (Image *) NULL) image=GetPreviousImageInList(image); /* Write the MEND chunk. */ (void) WriteBlobMSBULong(image,0x00000000L); PNGType(chunk,mng_MEND); LogPNGChunk(logging,mng_MEND,0L); (void) WriteBlob(image,4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,4)); } /* Relinquish resources. */ (void) CloseBlob(image); mng_info=MngInfoFreeStruct(mng_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit WriteMNGImage()"); return(MagickTrue); } #else /* PNG_LIBPNG_VER > 10011 */ static MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) { (void) image; printf("Your PNG library is too old: You have libpng-%s\n", PNG_LIBPNG_VER_STRING); ThrowBinaryException(CoderError,"PNG library is too old", image_info->filename); } static MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) { return(WritePNGImage(image_info,image)); } #endif /* PNG_LIBPNG_VER > 10011 */ #endif
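/*
 * Illustrative aside (not part of the original coder): every MNG/PNG chunk
 * emitted above follows the same wire pattern -- a 4-byte big-endian data
 * length, the 4-byte chunk type, the data bytes, then a CRC-32 computed over
 * the type and data (the length field is excluded), which is why the code
 * repeatedly pairs WriteBlob() with WriteBlobMSBULong(crc32(0,chunk,len)).
 * The standalone sketch below shows that pattern using zlib's crc32() and
 * plain stdio instead of the coder's WriteBlob helpers; the names
 * put_u32_be() and write_chunk() are hypothetical, not ImageMagick APIs.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

static void
put_u32_be(unsigned char *p, uint32_t v)
{
    p[0] = (unsigned char) (v >> 24);
    p[1] = (unsigned char) (v >> 16);
    p[2] = (unsigned char) (v >> 8);
    p[3] = (unsigned char) v;
}

/* Write one chunk: length, type, data, then CRC-32 over type+data. */
static int
write_chunk(FILE *fp, const char type[4], const unsigned char *data,
    uint32_t length)
{
    unsigned char hdr[8], crcbuf[4];
    uLong crc;

    put_u32_be(hdr, length);            /* data length, big-endian */
    memcpy(hdr + 4, type, 4);           /* chunk type */

    crc = crc32(0L, hdr + 4, 4);        /* CRC starts at the type bytes... */
    if (length != 0)
        crc = crc32(crc, data, length); /* ...and covers the data */
    put_u32_be(crcbuf, (uint32_t) crc);

    if (fwrite(hdr, 1, 8, fp) != 8 ||
        (length != 0 && fwrite(data, 1, length, fp) != length) ||
        fwrite(crcbuf, 1, 4, fp) != 4)
        return -1;
    return 0;
}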
./CrossVul/dataset_final_sorted/CWE-617/c/good_3363_0
crossvul-cpp_data_good_1219_0
/*- * Copyright (c) 2011 NetApp, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include <errno.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <strings.h> #include <assert.h> #include <stdbool.h> #include "dm.h" #include "vmmapi.h" #include "acpi.h" #include "inout.h" #include "ioapic.h" #include "mem.h" #include "pci_core.h" #include "irq.h" #include "lpc.h" #include "sw_load.h" #define CONF1_ADDR_PORT 0x0cf8 #define CONF1_DATA_PORT 0x0cfc #define CONF1_ENABLE 0x80000000ul #define MAXBUSES (PCI_BUSMAX + 1) #define MAXSLOTS (PCI_SLOTMAX + 1) #define MAXFUNCS (PCI_FUNCMAX + 1) struct funcinfo { char *fi_name; char *fi_param; char *fi_param_saved; /* save for reboot */ struct pci_vdev *fi_devi; }; struct intxinfo { int ii_count; int ii_pirq_pin; int ii_ioapic_irq; }; struct slotinfo { struct intxinfo si_intpins[4]; struct funcinfo si_funcs[MAXFUNCS]; }; struct businfo { uint16_t iobase, iolimit; /* I/O window */ uint32_t membase32, memlimit32; /* mmio window below 4GB */ uint64_t membase64, memlimit64; /* mmio window above 4GB */ struct slotinfo slotinfo[MAXSLOTS]; }; static struct businfo *pci_businfo[MAXBUSES]; SET_DECLARE(pci_vdev_ops_set, struct pci_vdev_ops); static uint64_t pci_emul_iobase; static uint64_t pci_emul_membase32; static uint64_t pci_emul_membase64; extern bool skip_pci_mem64bar_workaround; #define PCI_EMUL_IOBASE 0x2000 #define PCI_EMUL_IOLIMIT 0x10000 #define PCI_EMUL_ECFG_SIZE (MAXBUSES * 1024 * 1024) /* 1MB per bus */ SYSRES_MEM(PCI_EMUL_ECFG_BASE, PCI_EMUL_ECFG_SIZE); #define PCI_EMUL_MEMLIMIT32 PCI_EMUL_ECFG_BASE static struct pci_vdev_ops *pci_emul_finddev(char *name); static void pci_lintr_route(struct pci_vdev *dev); static void pci_lintr_update(struct pci_vdev *dev); static void pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func, int coff, int bytes, uint32_t *val); static void pci_emul_free_msixcap(struct pci_vdev *pdi); static inline void CFGWRITE(struct pci_vdev *dev, int coff, uint32_t val, int bytes) { if (bytes == 1) pci_set_cfgdata8(dev, coff, val); else if (bytes == 2) pci_set_cfgdata16(dev, coff, val); else pci_set_cfgdata32(dev, coff, val); } static inline uint32_t CFGREAD(struct pci_vdev *dev, int coff, int bytes) { if (bytes == 1) return pci_get_cfgdata8(dev, coff); 
else if (bytes == 2) return pci_get_cfgdata16(dev, coff); else return pci_get_cfgdata32(dev, coff); } /* * I/O access */ /* * Slot options are in the form: * * <bus>:<slot>:<func>,<emul>[,<config>] * <slot>[:<func>],<emul>[,<config>] * * slot is 0..31 * func is 0..7 * emul is a string describing the type of PCI device e.g. virtio-net * config is an optional string, depending on the device, that can be * used for configuration. * Examples are: * 1,virtio-net,tap0 * 3:0,dummy */ static void pci_parse_slot_usage(char *aopt) { fprintf(stderr, "Invalid PCI slot info field \"%s\"\n", aopt); } int parse_bdf(char *s, int *bus, int *dev, int *func, int base) { char *s_bus, *s_dev, *s_func; char *str, *cp; int ret = 0; str = cp = strdup(s); bus ? *bus = 0 : 0; dev ? *dev = 0 : 0; func ? *func = 0 : 0; s_bus = s_dev = s_func = NULL; s_dev = strsep(&cp, ":/."); if (cp) { s_func = strsep(&cp, ":/."); if (cp) { s_bus = s_dev; s_dev = s_func; s_func = strsep(&cp, ":/."); } } if (s_dev && dev) ret |= dm_strtoi(s_dev, &s_dev, base, dev); if (s_func && func) ret |= dm_strtoi(s_func, &s_func, base, func); if (s_bus && bus) ret |= dm_strtoi(s_bus, &s_bus, base, bus); free(str); return ret; } int pci_parse_slot(char *opt) { struct businfo *bi; struct slotinfo *si; char *emul, *config, *str, *cp, *b = NULL; int error, bnum, snum, fnum; error = -1; str = strdup(opt); if (!str) { fprintf(stderr, "%s: strdup returns NULL\n", __func__); return -1; } emul = config = NULL; cp = str; str = strsep(&cp, ","); if (cp) { emul = strsep(&cp, ","); /* for boot device */ if (cp && *cp == 'b' && *(cp+1) == ',') b = strsep(&cp, ","); config = cp; } else { pci_parse_slot_usage(opt); goto done; } /* <bus>:<slot>:<func> */ if (parse_bdf(str, &bnum, &snum, &fnum, 10) != 0) snum = -1; if (bnum < 0 || bnum >= MAXBUSES || snum < 0 || snum >= MAXSLOTS || fnum < 0 || fnum >= MAXFUNCS) { pci_parse_slot_usage(opt); goto done; } if (pci_businfo[bnum] == NULL) pci_businfo[bnum] = calloc(1, sizeof(struct businfo)); bi = pci_businfo[bnum]; si = &bi->slotinfo[snum]; if (si->si_funcs[fnum].fi_name != NULL) { fprintf(stderr, "pci slot %d:%d already occupied!\n", snum, fnum); goto done; } if (pci_emul_finddev(emul) == NULL) { fprintf(stderr, "pci slot %d:%d: unknown device \"%s\"\n", snum, fnum, emul); goto done; } error = 0; si->si_funcs[fnum].fi_name = emul; /* saved fi param in case reboot */ si->si_funcs[fnum].fi_param_saved = config; if (b != NULL) { if ((strcmp("virtio-blk", emul) == 0) && (b != NULL) && (strchr(b, 'b') != NULL)) { vsbl_set_bdf(bnum, snum, fnum); } } done: if (error) free(str); return error; } static int pci_valid_pba_offset(struct pci_vdev *dev, uint64_t offset) { if (offset < dev->msix.pba_offset) return 0; if (offset >= dev->msix.pba_offset + dev->msix.pba_size) return 0; return 1; } int pci_emul_msix_twrite(struct pci_vdev *dev, uint64_t offset, int size, uint64_t value) { int msix_entry_offset; int tab_index; char *dest; /* support only 4 or 8 byte writes */ if (size != 4 && size != 8) return -1; /* * Return if table index is beyond what device supports */ tab_index = offset / MSIX_TABLE_ENTRY_SIZE; if (tab_index >= dev->msix.table_count) return -1; msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; /* support only aligned writes */ if ((msix_entry_offset % size) != 0) return -1; dest = (char *)(dev->msix.table + tab_index); dest += msix_entry_offset; if (size == 4) *((uint32_t *)dest) = value; else *((uint64_t *)dest) = value; return 0; } uint64_t pci_emul_msix_tread(struct pci_vdev *dev, uint64_t offset, int size) { 
char *dest; int msix_entry_offset; int tab_index; uint64_t retval = ~0; /* * The PCI standard only allows 4 and 8 byte accesses to the MSI-X * table but we also allow 1 byte access to accommodate reads from * ddb. */ if (size != 1 && size != 4 && size != 8) return retval; msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; /* support only aligned reads */ if ((msix_entry_offset % size) != 0) return retval; tab_index = offset / MSIX_TABLE_ENTRY_SIZE; if (tab_index < dev->msix.table_count) { /* valid MSI-X Table access */ dest = (char *)(dev->msix.table + tab_index); dest += msix_entry_offset; if (size == 1) retval = *((uint8_t *)dest); else if (size == 4) retval = *((uint32_t *)dest); else retval = *((uint64_t *)dest); } else if (pci_valid_pba_offset(dev, offset)) { /* return 0 for PBA access */ retval = 0; } return retval; } int pci_msix_table_bar(struct pci_vdev *dev) { if (dev->msix.table != NULL) return dev->msix.table_bar; else return -1; } int pci_msix_pba_bar(struct pci_vdev *dev) { if (dev->msix.table != NULL) return dev->msix.pba_bar; else return -1; } static inline uint64_t bar_value(int size, uint64_t val) { uint64_t mask; assert(size == 1 || size == 2 || size == 4 || size == 8); mask = (size < 8 ? 1UL << (size * 8) : 0UL) - 1; return val & mask; } static int pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes, uint32_t *eax, void *arg) { struct pci_vdev *pdi = arg; struct pci_vdev_ops *ops = pdi->dev_ops; uint64_t offset; int i; for (i = 0; i <= PCI_BARMAX; i++) { if (pdi->bar[i].type == PCIBAR_IO && port >= pdi->bar[i].addr && port + bytes <= pdi->bar[i].addr + pdi->bar[i].size) { offset = port - pdi->bar[i].addr; if (in) { *eax = (*ops->vdev_barread)(ctx, vcpu, pdi, i, offset, bytes); *eax = bar_value(bytes, *eax); } else (*ops->vdev_barwrite)(ctx, vcpu, pdi, i, offset, bytes, bar_value(bytes, *eax)); return 0; } } return -1; } static int pci_emul_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, int size, uint64_t *val, void *arg1, long arg2) { struct pci_vdev *pdi = arg1; struct pci_vdev_ops *ops = pdi->dev_ops; uint64_t offset; int bidx = (int) arg2; if (addr + size > pdi->bar[bidx].addr + pdi->bar[bidx].size) { printf("%s, Out of emulated memory range.\n", __func__); return -ESRCH; } offset = addr - pdi->bar[bidx].addr; if (dir == MEM_F_WRITE) { if (size == 8) { (*ops->vdev_barwrite)(ctx, vcpu, pdi, bidx, offset, 4, *val & 0xffffffff); (*ops->vdev_barwrite)(ctx, vcpu, pdi, bidx, offset + 4, 4, *val >> 32); } else { (*ops->vdev_barwrite)(ctx, vcpu, pdi, bidx, offset, size, bar_value(size, *val)); } } else { if (size == 8) { uint64_t val_lo, val_hi; val_lo = (*ops->vdev_barread)(ctx, vcpu, pdi, bidx, offset, 4); val_lo = bar_value(4, val_lo); val_hi = (*ops->vdev_barread)(ctx, vcpu, pdi, bidx, offset + 4, 4); *val = val_lo | (val_hi << 32); } else { *val = (*ops->vdev_barread)(ctx, vcpu, pdi, bidx, offset, size); *val = bar_value(size, *val); } } return 0; } static int pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size, uint64_t *addr) { uint64_t base; assert((size & (size - 1)) == 0); /* must be a power of 2 */ base = roundup2(*baseptr, size); if (base + size <= limit) { *addr = base; *baseptr = base + size; return 0; } else return -1; } int pci_emul_alloc_bar(struct pci_vdev *pdi, int idx, enum pcibar_type type, uint64_t size) { return pci_emul_alloc_pbar(pdi, idx, 0, type, size); } /* * Register (or unregister) the MMIO or I/O region associated with the BAR * register 'idx' of an emulated pci device. 
*/ static void modify_bar_registration(struct pci_vdev *dev, int idx, int registration) { int error; struct inout_port iop; struct mem_range mr; switch (dev->bar[idx].type) { case PCIBAR_IO: bzero(&iop, sizeof(struct inout_port)); iop.name = dev->name; iop.port = dev->bar[idx].addr; iop.size = dev->bar[idx].size; if (registration) { iop.flags = IOPORT_F_INOUT; iop.handler = pci_emul_io_handler; iop.arg = dev; error = register_inout(&iop); } else error = unregister_inout(&iop); break; case PCIBAR_MEM32: case PCIBAR_MEM64: bzero(&mr, sizeof(struct mem_range)); mr.name = dev->name; mr.base = dev->bar[idx].addr; mr.size = dev->bar[idx].size; if (registration) { mr.flags = MEM_F_RW; mr.handler = pci_emul_mem_handler; mr.arg1 = dev; mr.arg2 = idx; error = register_mem(&mr); } else error = unregister_mem(&mr); break; default: error = EINVAL; break; } assert(error == 0); } static void unregister_bar(struct pci_vdev *dev, int idx) { modify_bar_registration(dev, idx, 0); } static void register_bar(struct pci_vdev *dev, int idx) { modify_bar_registration(dev, idx, 1); } /* Are we decoding i/o port accesses for the emulated pci device? */ static int porten(struct pci_vdev *dev) { uint16_t cmd; cmd = pci_get_cfgdata16(dev, PCIR_COMMAND); return (cmd & PCIM_CMD_PORTEN); } /* Are we decoding memory accesses for the emulated pci device? */ static int memen(struct pci_vdev *dev) { uint16_t cmd; cmd = pci_get_cfgdata16(dev, PCIR_COMMAND); return (cmd & PCIM_CMD_MEMEN); } /* * Update the MMIO or I/O address that is decoded by the BAR register. * * If the pci device has enabled the address space decoding then intercept * the address range decoded by the BAR register. */ static void update_bar_address(struct pci_vdev *dev, uint64_t addr, int idx, int type) { int decode; if (dev->bar[idx].type == PCIBAR_IO) decode = porten(dev); else decode = memen(dev); if (decode) unregister_bar(dev, idx); switch (type) { case PCIBAR_IO: case PCIBAR_MEM32: dev->bar[idx].addr = addr; break; case PCIBAR_MEM64: dev->bar[idx].addr &= ~0xffffffffUL; dev->bar[idx].addr |= addr; break; case PCIBAR_MEMHI64: dev->bar[idx].addr &= 0xffffffff; dev->bar[idx].addr |= addr; break; default: assert(0); } if (decode) register_bar(dev, idx); } int pci_emul_alloc_pbar(struct pci_vdev *pdi, int idx, uint64_t hostbase, enum pcibar_type type, uint64_t size) { int error; uint64_t *baseptr, limit, addr, mask, lobits, bar; assert(idx >= 0 && idx <= PCI_BARMAX); if ((size & (size - 1)) != 0) size = 1UL << flsl(size); /* round up to a power of 2 */ /* Enforce minimum BAR sizes required by the PCI standard */ if (type == PCIBAR_IO) { if (size < 4) size = 4; } else { if (size < 16) size = 16; } switch (type) { case PCIBAR_NONE: baseptr = NULL; addr = mask = lobits = 0; break; case PCIBAR_IO: baseptr = &pci_emul_iobase; limit = PCI_EMUL_IOLIMIT; mask = PCIM_BAR_IO_BASE; lobits = PCIM_BAR_IO_SPACE; break; case PCIBAR_MEM64: /* * FIXME * Some drivers do not work well if the 64-bit BAR is allocated * above 4GB. Allow for this by allocating small requests under * 4GB unless then allocation size is larger than some arbitrary * number (32MB currently). If guest booted by ovmf, then skip the * workaround. 
*/ if (!skip_pci_mem64bar_workaround && (size <= 32 * 1024 * 1024)) { baseptr = &pci_emul_membase32; limit = PCI_EMUL_MEMLIMIT32; mask = PCIM_BAR_MEM_BASE; lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64; break; } /* * XXX special case for device requiring peer-peer DMA */ if (size == 0x100000000UL) baseptr = &hostbase; else baseptr = &pci_emul_membase64; limit = PCI_EMUL_MEMLIMIT64; mask = PCIM_BAR_MEM_BASE; lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | PCIM_BAR_MEM_PREFETCH; break; case PCIBAR_MEM32: baseptr = &pci_emul_membase32; limit = PCI_EMUL_MEMLIMIT32; mask = PCIM_BAR_MEM_BASE; lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; break; default: printf("pci_emul_alloc_base: invalid bar type %d\n", type); assert(0); } if (baseptr != NULL) { error = pci_emul_alloc_resource(baseptr, limit, size, &addr); if (error != 0) return error; } pdi->bar[idx].type = type; pdi->bar[idx].addr = addr; pdi->bar[idx].size = size; /* Initialize the BAR register in config space */ bar = (addr & mask) | lobits; pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar); if (type == PCIBAR_MEM64) { assert(idx + 1 <= PCI_BARMAX); pdi->bar[idx + 1].type = PCIBAR_MEMHI64; pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32); } register_bar(pdi, idx); return 0; } void pci_emul_free_bars(struct pci_vdev *pdi) { int i, enabled; for (i = 0; i < PCI_BARMAX; i++) { if ((pdi->bar[i].type != PCIBAR_NONE) && (pdi->bar[i].type != PCIBAR_MEMHI64)){ /* * Check whether the bar is enabled or not, * if it is disabled then it should have been * unregistered in pci_emul_cmdsts_write. */ if (pdi->bar[i].type == PCIBAR_IO) enabled = porten(pdi); else enabled = memen(pdi); if (enabled) unregister_bar(pdi, i); pdi->bar[i].type = PCIBAR_NONE; } } } #define CAP_START_OFFSET 0x40 int pci_emul_add_capability(struct pci_vdev *dev, u_char *capdata, int caplen) { int i, capoff, reallen; uint16_t sts; assert(caplen > 0); reallen = roundup2(caplen, 4); /* dword aligned */ sts = pci_get_cfgdata16(dev, PCIR_STATUS); if ((sts & PCIM_STATUS_CAPPRESENT) == 0) capoff = CAP_START_OFFSET; else capoff = dev->capend + 1; /* Check if we have enough space */ if (capoff + reallen > PCI_REGMAX + 1) return -1; /* Set the previous capability pointer */ if ((sts & PCIM_STATUS_CAPPRESENT) == 0) { pci_set_cfgdata8(dev, PCIR_CAP_PTR, capoff); pci_set_cfgdata16(dev, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT); } else pci_set_cfgdata8(dev, dev->prevcap + 1, capoff); /* Copy the capability */ for (i = 0; i < caplen; i++) pci_set_cfgdata8(dev, capoff + i, capdata[i]); /* Set the next capability pointer */ pci_set_cfgdata8(dev, capoff + 1, 0); dev->prevcap = capoff; dev->capend = capoff + reallen - 1; return 0; } /* * p_capoff is used as both input and output. Set *p_capoff to 0 when this * function is called for the first time, it will return offset of the first * matched one in p_capoff. To find the next matched one, please use the * returned *p_capoff from last call as the input, in this case the offset of * the next matched one will be returned in *p_capoff. * Please check the returned value first before touch p_capoff. 
*/ int pci_emul_find_capability(struct pci_vdev *dev, uint8_t capid, int *p_capoff) { int coff; uint16_t sts; sts = pci_get_cfgdata16(dev, PCIR_STATUS); if ((sts & PCIM_STATUS_CAPPRESENT) == 0) return -1; if (!p_capoff) return -1; if (*p_capoff == 0) coff = pci_get_cfgdata8(dev, PCIR_CAP_PTR); else if (*p_capoff >= CAP_START_OFFSET && *p_capoff <= dev->prevcap) coff = pci_get_cfgdata8(dev, *p_capoff + 1); else return -1; while (coff >= CAP_START_OFFSET && coff <= dev->prevcap) { if (pci_get_cfgdata8(dev, coff) == capid) { *p_capoff = coff; return 0; } coff = pci_get_cfgdata8(dev, coff + 1); } return -1; } static struct pci_vdev_ops * pci_emul_finddev(char *name) { struct pci_vdev_ops **pdpp, *pdp; SET_FOREACH(pdpp, pci_vdev_ops_set) { pdp = *pdpp; if (!strcmp(pdp->class_name, name)) return pdp; } return NULL; } static int pci_emul_init(struct vmctx *ctx, struct pci_vdev_ops *ops, int bus, int slot, int func, struct funcinfo *fi) { struct pci_vdev *pdi; int err; pdi = calloc(1, sizeof(struct pci_vdev)); if (!pdi) { fprintf(stderr, "%s: calloc returns NULL\n", __func__); return -1; } pdi->vmctx = ctx; pdi->bus = bus; pdi->slot = slot; pdi->func = func; pthread_mutex_init(&pdi->lintr.lock, NULL); pdi->lintr.pin = 0; pdi->lintr.state = IDLE; pdi->lintr.pirq_pin = 0; pdi->lintr.ioapic_irq = 0; pdi->dev_ops = ops; snprintf(pdi->name, PI_NAMESZ, "%s-pci-%d", ops->class_name, slot); /* Disable legacy interrupts */ pci_set_cfgdata8(pdi, PCIR_INTLINE, 255); pci_set_cfgdata8(pdi, PCIR_INTPIN, 0); pci_set_cfgdata8(pdi, PCIR_COMMAND, PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); if (fi->fi_param_saved) fi->fi_param = strdup(fi->fi_param_saved); else fi->fi_param = NULL; err = (*ops->vdev_init)(ctx, pdi, fi->fi_param); if (err == 0) fi->fi_devi = pdi; else free(pdi); return err; } static void pci_emul_deinit(struct vmctx *ctx, struct pci_vdev_ops *ops, int bus, int slot, int func, struct funcinfo *fi) { if (ops->vdev_deinit && fi->fi_devi) (*ops->vdev_deinit)(ctx, fi->fi_devi, fi->fi_param); if (fi->fi_param) free(fi->fi_param); if (fi->fi_devi) { pci_lintr_release(fi->fi_devi); pci_emul_free_bars(fi->fi_devi); pci_emul_free_msixcap(fi->fi_devi); free(fi->fi_devi); } } void pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr) { int mmc; /* Number of msi messages must be a power of 2 between 1 and 32 */ assert((msgnum & (msgnum - 1)) == 0 && msgnum >= 1 && msgnum <= 32); mmc = ffs(msgnum) - 1; bzero(msicap, sizeof(struct msicap)); msicap->capid = PCIY_MSI; msicap->nextptr = nextptr; msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1); } int pci_emul_add_msicap(struct pci_vdev *dev, int msgnum) { struct msicap msicap; pci_populate_msicap(&msicap, msgnum, 0); return pci_emul_add_capability(dev, (u_char *)&msicap, sizeof(msicap)); } static void pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum, uint32_t msix_tab_size) { assert(msix_tab_size % 4096 == 0); bzero(msixcap, sizeof(struct msixcap)); msixcap->capid = PCIY_MSIX; /* * Message Control Register, all fields set to * zero except for the Table Size. 
* Note: Table size N is encoded as N-1 */ msixcap->msgctrl = msgnum - 1; /* * MSI-X BAR setup: * - MSI-X table start at offset 0 * - PBA table starts at a 4K aligned offset after the MSI-X table */ msixcap->table_info = barnum & PCIM_MSIX_BIR_MASK; msixcap->pba_info = msix_tab_size | (barnum & PCIM_MSIX_BIR_MASK); } static void pci_msix_table_init(struct pci_vdev *dev, int table_entries) { int i, table_size; assert(table_entries > 0); assert(table_entries <= MAX_MSIX_TABLE_ENTRIES); table_size = table_entries * MSIX_TABLE_ENTRY_SIZE; dev->msix.table = calloc(1, table_size); assert(dev->msix.table != NULL); /* set mask bit of vector control register */ for (i = 0; i < table_entries; i++) dev->msix.table[i].vector_control |= PCIM_MSIX_VCTRL_MASK; } int pci_emul_add_msixcap(struct pci_vdev *dev, int msgnum, int barnum) { uint32_t tab_size; struct msixcap msixcap; assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES); assert(barnum >= 0 && barnum <= PCIR_MAX_BAR_0); tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE; /* Align table size to nearest 4K */ tab_size = roundup2(tab_size, 4096); dev->msix.table_bar = barnum; dev->msix.pba_bar = barnum; dev->msix.table_offset = 0; dev->msix.table_count = msgnum; dev->msix.pba_offset = tab_size; dev->msix.pba_size = PBA_SIZE(msgnum); pci_msix_table_init(dev, msgnum); pci_populate_msixcap(&msixcap, msgnum, barnum, tab_size); /* allocate memory for MSI-X Table and PBA */ pci_emul_alloc_bar(dev, barnum, PCIBAR_MEM32, tab_size + dev->msix.pba_size); return (pci_emul_add_capability(dev, (u_char *)&msixcap, sizeof(msixcap))); } static void pci_emul_free_msixcap(struct pci_vdev *pdi) { if (pdi->msix.table) { free(pdi->msix.table); pdi->msix.table = NULL; } } void msixcap_cfgwrite(struct pci_vdev *dev, int capoff, int offset, int bytes, uint32_t val) { uint16_t msgctrl, rwmask; int off; off = offset - capoff; /* Message Control Register */ if (off == 2 && bytes == 2) { rwmask = PCIM_MSIXCTRL_MSIX_ENABLE | PCIM_MSIXCTRL_FUNCTION_MASK; msgctrl = pci_get_cfgdata16(dev, offset); msgctrl &= ~rwmask; msgctrl |= val & rwmask; val = msgctrl; dev->msix.enabled = val & PCIM_MSIXCTRL_MSIX_ENABLE; dev->msix.function_mask = val & PCIM_MSIXCTRL_FUNCTION_MASK; pci_lintr_update(dev); } CFGWRITE(dev, offset, val, bytes); } void msicap_cfgwrite(struct pci_vdev *dev, int capoff, int offset, int bytes, uint32_t val) { uint16_t msgctrl, rwmask, msgdata, mme; uint32_t addrlo; /* * If guest is writing to the message control register make sure * we do not overwrite read-only fields. */ if ((offset - capoff) == 2 && bytes == 2) { rwmask = PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE; msgctrl = pci_get_cfgdata16(dev, offset); msgctrl &= ~rwmask; msgctrl |= val & rwmask; val = msgctrl; addrlo = pci_get_cfgdata32(dev, capoff + 4); if (msgctrl & PCIM_MSICTRL_64BIT) msgdata = pci_get_cfgdata16(dev, capoff + 12); else msgdata = pci_get_cfgdata16(dev, capoff + 8); mme = msgctrl & PCIM_MSICTRL_MME_MASK; dev->msi.enabled = msgctrl & PCIM_MSICTRL_MSI_ENABLE ? 
1 : 0; if (dev->msi.enabled) { dev->msi.addr = addrlo; dev->msi.msg_data = msgdata; dev->msi.maxmsgnum = 1 << (mme >> 4); } else { dev->msi.maxmsgnum = 0; } pci_lintr_update(dev); } CFGWRITE(dev, offset, val, bytes); } void pciecap_cfgwrite(struct pci_vdev *dev, int capoff, int offset, int bytes, uint32_t val) { /* XXX don't write to the readonly parts */ CFGWRITE(dev, offset, val, bytes); } #define PCIECAP_VERSION 0x2 int pci_emul_add_pciecap(struct pci_vdev *dev, int type) { int err; struct pciecap pciecap; if (type != PCIEM_TYPE_ROOT_PORT) return -1; bzero(&pciecap, sizeof(pciecap)); pciecap.capid = PCIY_EXPRESS; pciecap.pcie_capabilities = PCIECAP_VERSION | PCIEM_TYPE_ROOT_PORT; pciecap.link_capabilities = 0x411; /* gen1, x1 */ pciecap.link_status = 0x11; /* gen1, x1 */ err = pci_emul_add_capability(dev, (u_char *)&pciecap, sizeof(pciecap)); return err; } /* * This function assumes that 'coff' is in the capabilities region of the * config space. */ static void pci_emul_capwrite(struct pci_vdev *dev, int offset, int bytes, uint32_t val) { int capid; uint8_t capoff, nextoff; /* Do not allow un-aligned writes */ if ((offset & (bytes - 1)) != 0) return; /* Find the capability that we want to update */ capoff = CAP_START_OFFSET; while (1) { nextoff = pci_get_cfgdata8(dev, capoff + 1); if (nextoff == 0) break; if (offset >= capoff && offset < nextoff) break; capoff = nextoff; } assert(offset >= capoff); /* * Capability ID and Next Capability Pointer are readonly. * However, some o/s's do 4-byte writes that include these. * For this case, trim the write back to 2 bytes and adjust * the data. */ if (offset == capoff || offset == capoff + 1) { if (offset == capoff && bytes == 4) { bytes = 2; offset += 2; val >>= 16; } else return; } capid = pci_get_cfgdata8(dev, capoff); switch (capid) { case PCIY_MSI: msicap_cfgwrite(dev, capoff, offset, bytes, val); break; case PCIY_MSIX: msixcap_cfgwrite(dev, capoff, offset, bytes, val); break; case PCIY_EXPRESS: pciecap_cfgwrite(dev, capoff, offset, bytes, val); break; default: CFGWRITE(dev, offset, val, bytes); break; } } static int pci_emul_iscap(struct pci_vdev *dev, int offset) { uint16_t sts; sts = pci_get_cfgdata16(dev, PCIR_STATUS); if ((sts & PCIM_STATUS_CAPPRESENT) != 0) { if (offset >= CAP_START_OFFSET && offset <= dev->capend) return 1; } return 0; } static int pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, int size, uint64_t *val, void *arg1, long arg2) { /* * Ignore writes; return 0xff's for reads. The mem read code * will take care of truncating to the correct size. 
*/ if (dir == MEM_F_READ) *val = 0xffffffffffffffff; return 0; } static int pci_emul_ecfg_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, int bytes, uint64_t *val, void *arg1, long arg2) { int bus, slot, func, coff, in; coff = addr & 0xfff; func = (addr >> 12) & 0x7; slot = (addr >> 15) & 0x1f; bus = (addr >> 20) & 0xff; in = (dir == MEM_F_READ); if (in) *val = ~0UL; pci_cfgrw(ctx, vcpu, in, bus, slot, func, coff, bytes, (uint32_t *)val); return 0; } #define BUSIO_ROUNDUP 32 #define BUSMEM_ROUNDUP (1024 * 1024) int init_pci(struct vmctx *ctx) { struct mem_range mr; struct pci_vdev_ops *ops; struct businfo *bi; struct slotinfo *si; struct funcinfo *fi; size_t lowmem; int bus, slot, func; int success_cnt = 0; int error; pci_emul_iobase = PCI_EMUL_IOBASE; pci_emul_membase32 = vm_get_lowmem_limit(ctx); pci_emul_membase64 = PCI_EMUL_MEMBASE64; create_gsi_sharing_groups(); for (bus = 0; bus < MAXBUSES; bus++) { bi = pci_businfo[bus]; if (bi == NULL) continue; /* * Keep track of the i/o and memory resources allocated to * this bus. */ bi->iobase = pci_emul_iobase; bi->membase32 = pci_emul_membase32; bi->membase64 = pci_emul_membase64; for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { fi = &si->si_funcs[func]; if (fi->fi_name == NULL) continue; ops = pci_emul_finddev(fi->fi_name); assert(ops != NULL); error = pci_emul_init(ctx, ops, bus, slot, func, fi); if (error) goto pci_emul_init_fail; success_cnt++; } } /* * Add some slop to the I/O and memory resources decoded by * this bus to give a guest some flexibility if it wants to * reprogram the BARs. */ pci_emul_iobase += BUSIO_ROUNDUP; pci_emul_iobase = roundup2(pci_emul_iobase, BUSIO_ROUNDUP); bi->iolimit = pci_emul_iobase; pci_emul_membase32 += BUSMEM_ROUNDUP; pci_emul_membase32 = roundup2(pci_emul_membase32, BUSMEM_ROUNDUP); bi->memlimit32 = pci_emul_membase32; pci_emul_membase64 += BUSMEM_ROUNDUP; pci_emul_membase64 = roundup2(pci_emul_membase64, BUSMEM_ROUNDUP); bi->memlimit64 = pci_emul_membase64; } error = check_gsi_sharing_violation(); if (error < 0) goto pci_emul_init_fail; /* * PCI backends are initialized before routing INTx interrupts * so that LPC devices are able to reserve ISA IRQs before * routing PIRQ pins. */ for (bus = 0; bus < MAXBUSES; bus++) { bi = pci_businfo[bus]; if (bi == NULL) continue; for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { fi = &si->si_funcs[func]; if (fi->fi_devi == NULL) continue; pci_lintr_route(fi->fi_devi); ops = fi->fi_devi->dev_ops; if (ops && ops->vdev_phys_access) ops->vdev_phys_access(ctx, fi->fi_devi); } } } lpc_pirq_routed(); /* * The guest physical memory map looks like the following: * [0, lowmem) guest system memory * [lowmem, lowmem_limit) memory hole (may be absent) * [lowmem_limit, 0xE0000000) PCI hole (32-bit BAR allocation) * [0xE0000000, 0xF0000000) PCI extended config window * [0xF0000000, 4GB) LAPIC, IOAPIC, HPET, firmware * [4GB, 5GB) PCI hole (64-bit BAR allocation) * [5GB, 5GB + highmem) guest system memory */ /* * Accesses to memory addresses that are not allocated to system * memory or PCI devices return 0xff's. 
*/ lowmem = vm_get_lowmem_size(ctx); bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI hole (32-bit)"; mr.flags = MEM_F_RW; mr.base = lowmem; mr.size = (4ULL * 1024 * 1024 * 1024) - lowmem; mr.handler = pci_emul_fallback_handler; error = register_mem_fallback(&mr); assert(error == 0); /* ditto for the 64-bit PCI host aperture */ bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI hole (64-bit)"; mr.flags = MEM_F_RW; mr.base = PCI_EMUL_MEMBASE64; mr.size = PCI_EMUL_MEMLIMIT64 - PCI_EMUL_MEMBASE64; mr.handler = pci_emul_fallback_handler; error = register_mem_fallback(&mr); assert(error == 0); /* PCI extended config space */ bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI ECFG"; mr.flags = MEM_F_RW; mr.base = PCI_EMUL_ECFG_BASE; mr.size = PCI_EMUL_ECFG_SIZE; mr.handler = pci_emul_ecfg_handler; error = register_mem(&mr); assert(error == 0); return 0; pci_emul_init_fail: for (bus = 0; bus < MAXBUSES && success_cnt > 0; bus++) { bi = pci_businfo[bus]; if (bi == NULL) continue; for (slot = 0; slot < MAXSLOTS && success_cnt > 0; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { fi = &si->si_funcs[func]; if (fi->fi_name == NULL) continue; if (success_cnt-- <= 0) break; ops = pci_emul_finddev(fi->fi_name); assert(ops != NULL); pci_emul_deinit(ctx, ops, bus, slot, func, fi); } } } return error; } void deinit_pci(struct vmctx *ctx) { struct pci_vdev_ops *ops; struct businfo *bi; struct slotinfo *si; struct funcinfo *fi; int bus, slot, func; size_t lowmem; struct mem_range mr; /* Release PCI extended config space */ bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI ECFG"; mr.base = PCI_EMUL_ECFG_BASE; mr.size = PCI_EMUL_ECFG_SIZE; unregister_mem(&mr); /* Release PCI hole space */ lowmem = vm_get_lowmem_size(ctx); bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI hole (32-bit)"; mr.base = lowmem; mr.size = (4ULL * 1024 * 1024 * 1024) - lowmem; unregister_mem_fallback(&mr); /* ditto for the 64-bit PCI host aperture */ bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI hole (64-bit)"; mr.base = PCI_EMUL_MEMBASE64; mr.size = PCI_EMUL_MEMLIMIT64 - PCI_EMUL_MEMBASE64; unregister_mem_fallback(&mr); for (bus = 0; bus < MAXBUSES; bus++) { bi = pci_businfo[bus]; if (bi == NULL) continue; for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { fi = &si->si_funcs[func]; if (fi->fi_name == NULL) continue; ops = pci_emul_finddev(fi->fi_name); assert(ops != NULL); pci_emul_deinit(ctx, ops, bus, slot, func, fi); } } } } static void pci_apic_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq, void *arg) { dsdt_line(" Package ()"); dsdt_line(" {"); dsdt_line(" 0x%X,", slot << 16 | 0xffff); dsdt_line(" 0x%02X,", pin - 1); dsdt_line(" Zero,"); dsdt_line(" 0x%X", ioapic_irq); dsdt_line(" },"); } static void pci_pirq_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq, void *arg) { char *name; name = lpc_pirq_name(pirq_pin); if (name == NULL) return; dsdt_line(" Package ()"); dsdt_line(" {"); dsdt_line(" 0x%X,", slot << 16 | 0xffff); dsdt_line(" 0x%02X,", pin - 1); dsdt_line(" %s,", name); dsdt_line(" 0x00"); dsdt_line(" },"); free(name); } /* * A acrn-dm virtual machine has a flat PCI hierarchy with a root port * corresponding to each PCI bus. */ static void pci_bus_write_dsdt(int bus) { struct businfo *bi; struct slotinfo *si; struct pci_vdev *dev; int count, func, slot; /* * If there are no devices on this 'bus' then just return. 
*/ bi = pci_businfo[bus]; if (bi == NULL) { /* * Bus 0 is special because it decodes the I/O ports used * for PCI config space access even if there are no devices * on it. */ if (bus != 0) return; } dsdt_line(" Device (PCI%01X)", bus); dsdt_line(" {"); dsdt_line(" Name (_HID, EisaId (\"PNP0A03\"))"); dsdt_line(" Name (_ADR, Zero)"); dsdt_line(" Method (_BBN, 0, NotSerialized)"); dsdt_line(" {"); dsdt_line(" Return (0x%08X)", bus); dsdt_line(" }"); dsdt_line(" Name (_CRS, ResourceTemplate ()"); dsdt_line(" {"); dsdt_line(" WordBusNumber (ResourceProducer, MinFixed, " "MaxFixed, PosDecode,"); dsdt_line(" 0x0000, // Granularity"); dsdt_line(" 0x%04X, // Range Minimum", bus); dsdt_line(" 0x%04X, // Range Maximum", bus); dsdt_line(" 0x0000, // Translation Offset"); dsdt_line(" 0x0001, // Length"); dsdt_line(" ,, )"); if (bus == 0) { dsdt_indent(3); dsdt_fixed_ioport(0xCF8, 8); dsdt_unindent(3); dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " "PosDecode, EntireRange,"); dsdt_line(" 0x0000, // Granularity"); dsdt_line(" 0x0000, // Range Minimum"); dsdt_line(" 0x0CF7, // Range Maximum"); dsdt_line(" 0x0000, // Translation Offset"); dsdt_line(" 0x0CF8, // Length"); dsdt_line(" ,, , TypeStatic)"); dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " "PosDecode, EntireRange,"); dsdt_line(" 0x0000, // Granularity"); dsdt_line(" 0x0D00, // Range Minimum"); dsdt_line(" 0x%04X, // Range Maximum", PCI_EMUL_IOBASE - 1); dsdt_line(" 0x0000, // Translation Offset"); dsdt_line(" 0x%04X, // Length", PCI_EMUL_IOBASE - 0x0D00); dsdt_line(" ,, , TypeStatic)"); if (bi == NULL) { dsdt_line(" })"); goto done; } } assert(bi != NULL); /* i/o window */ dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " "PosDecode, EntireRange,"); dsdt_line(" 0x0000, // Granularity"); dsdt_line(" 0x%04X, // Range Minimum", bi->iobase); dsdt_line(" 0x%04X, // Range Maximum", bi->iolimit - 1); dsdt_line(" 0x0000, // Translation Offset"); dsdt_line(" 0x%04X, // Length", bi->iolimit - bi->iobase); dsdt_line(" ,, , TypeStatic)"); /* mmio window (32-bit) */ dsdt_line(" DWordMemory (ResourceProducer, PosDecode, " "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); dsdt_line(" 0x00000000, // Granularity"); dsdt_line(" 0x%08X, // Range Minimum\n", bi->membase32); dsdt_line(" 0x%08X, // Range Maximum\n", bi->memlimit32 - 1); dsdt_line(" 0x00000000, // Translation Offset"); dsdt_line(" 0x%08X, // Length\n", bi->memlimit32 - bi->membase32); dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); /* mmio window (64-bit) */ dsdt_line(" QWordMemory (ResourceProducer, PosDecode, " "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); dsdt_line(" 0x0000000000000000, // Granularity"); dsdt_line(" 0x%016lX, // Range Minimum\n", bi->membase64); dsdt_line(" 0x%016lX, // Range Maximum\n", bi->memlimit64 - 1); dsdt_line(" 0x0000000000000000, // Translation Offset"); dsdt_line(" 0x%016lX, // Length\n", bi->memlimit64 - bi->membase64); dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); dsdt_line(" })"); count = pci_count_lintr(bus); if (count != 0) { dsdt_indent(2); dsdt_line("Name (PPRT, Package ()"); dsdt_line("{"); pci_walk_lintr(bus, pci_pirq_prt_entry, NULL); dsdt_line("})"); dsdt_line("Name (APRT, Package ()"); dsdt_line("{"); pci_walk_lintr(bus, pci_apic_prt_entry, NULL); dsdt_line("})"); dsdt_line("Method (_PRT, 0, NotSerialized)"); dsdt_line("{"); dsdt_line(" If (PICM)"); dsdt_line(" {"); dsdt_line(" Return (APRT)"); dsdt_line(" }"); dsdt_line(" Else"); dsdt_line(" {"); dsdt_line(" Return (PPRT)"); dsdt_line(" }"); dsdt_line("}"); 
dsdt_unindent(2); } dsdt_indent(2); for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { dev = si->si_funcs[func].fi_devi; if (dev != NULL && dev->dev_ops->vdev_write_dsdt != NULL) dev->dev_ops->vdev_write_dsdt(dev); } } dsdt_unindent(2); done: dsdt_line(" }"); } void pci_write_dsdt(void) { int bus; dsdt_indent(1); dsdt_line("Name (PICM, 0x00)"); dsdt_line("Method (_PIC, 1, NotSerialized)"); dsdt_line("{"); dsdt_line(" Store (Arg0, PICM)"); dsdt_line("}"); dsdt_line(""); dsdt_line("Scope (_SB)"); dsdt_line("{"); for (bus = 0; bus < MAXBUSES; bus++) pci_bus_write_dsdt(bus); dsdt_line("}"); dsdt_unindent(1); } int pci_bus_configured(int bus) { assert(bus >= 0 && bus < MAXBUSES); return (pci_businfo[bus] != NULL); } int pci_msi_enabled(struct pci_vdev *dev) { return dev->msi.enabled; } int pci_msi_maxmsgnum(struct pci_vdev *dev) { if (dev->msi.enabled) return dev->msi.maxmsgnum; else return 0; } int pci_msix_enabled(struct pci_vdev *dev) { return (dev->msix.enabled && !dev->msi.enabled); } /** * @brief Generate a MSI-X interrupt to guest * * @param dev Pointer to struct pci_vdev representing virtual PCI device. * @param index MSIx table entry index. * * @return None */ void pci_generate_msix(struct pci_vdev *dev, int index) { struct msix_table_entry *mte; if (!pci_msix_enabled(dev)) return; if (dev->msix.function_mask) return; if (index >= dev->msix.table_count) return; mte = &dev->msix.table[index]; if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) { /* XXX Set PBA bit if interrupt is disabled */ vm_lapic_msi(dev->vmctx, mte->addr, mte->msg_data); } } /** * @brief Generate a MSI interrupt to guest * * @param dev Pointer to struct pci_vdev representing virtual PCI device. * @param index Message data index. * * @return None */ void pci_generate_msi(struct pci_vdev *dev, int index) { if (pci_msi_enabled(dev) && index < pci_msi_maxmsgnum(dev)) { vm_lapic_msi(dev->vmctx, dev->msi.addr, dev->msi.msg_data + index); } } static bool pci_lintr_permitted(struct pci_vdev *dev) { uint16_t cmd; cmd = pci_get_cfgdata16(dev, PCIR_COMMAND); return (!(dev->msi.enabled || dev->msix.enabled || (cmd & PCIM_CMD_INTxDIS))); } void pci_lintr_request(struct pci_vdev *dev) { struct businfo *bi; struct slotinfo *si; int bestpin, bestcount, pin; bi = pci_businfo[dev->bus]; assert(bi != NULL); /* * Just allocate a pin from our slot. The pin will be * assigned IRQs later when interrupts are routed. */ si = &bi->slotinfo[dev->slot]; bestpin = 0; bestcount = si->si_intpins[0].ii_count; for (pin = 1; pin < 4; pin++) { if (si->si_intpins[pin].ii_count < bestcount) { bestpin = pin; bestcount = si->si_intpins[pin].ii_count; } } si->si_intpins[bestpin].ii_count++; dev->lintr.pin = bestpin + 1; pci_set_cfgdata8(dev, PCIR_INTPIN, bestpin + 1); } void pci_lintr_release(struct pci_vdev *dev) { struct businfo *bi; struct slotinfo *si; int pin; bi = pci_businfo[dev->bus]; assert(bi != NULL); si = &bi->slotinfo[dev->slot]; for (pin = 1; pin < 4; pin++) { si->si_intpins[pin].ii_count = 0; si->si_intpins[pin].ii_pirq_pin = 0; si->si_intpins[pin].ii_ioapic_irq = 0; } } static void pci_lintr_route(struct pci_vdev *dev) { struct businfo *bi; struct intxinfo *ii; if (dev->lintr.pin == 0) return; bi = pci_businfo[dev->bus]; assert(bi != NULL); ii = &bi->slotinfo[dev->slot].si_intpins[dev->lintr.pin - 1]; /* * Attempt to allocate an I/O APIC pin for this intpin if one * is not yet assigned. 
*/ if (ii->ii_ioapic_irq == 0) ii->ii_ioapic_irq = ioapic_pci_alloc_irq(dev); assert(ii->ii_ioapic_irq > 0); /* * Attempt to allocate a PIRQ pin for this intpin if one is * not yet assigned. */ if (ii->ii_pirq_pin == 0) ii->ii_pirq_pin = pirq_alloc_pin(dev); assert(ii->ii_pirq_pin > 0); dev->lintr.ioapic_irq = ii->ii_ioapic_irq; dev->lintr.pirq_pin = ii->ii_pirq_pin; pci_set_cfgdata8(dev, PCIR_INTLINE, pirq_irq(ii->ii_pirq_pin)); } /** * @brief Assert INTx pin of virtual PCI device * * @param dev Pointer to struct pci_vdev representing virtual PCI device. * * @return None */ void pci_lintr_assert(struct pci_vdev *dev) { assert(dev->lintr.pin > 0); pthread_mutex_lock(&dev->lintr.lock); if (dev->lintr.state == IDLE) { if (pci_lintr_permitted(dev)) { dev->lintr.state = ASSERTED; pci_irq_assert(dev); } else dev->lintr.state = PENDING; } pthread_mutex_unlock(&dev->lintr.lock); } /** * @brief Deassert INTx pin of virtual PCI device * * @param dev Pointer to struct pci_vdev representing virtual PCI device. * * @return None */ void pci_lintr_deassert(struct pci_vdev *dev) { assert(dev->lintr.pin > 0); pthread_mutex_lock(&dev->lintr.lock); if (dev->lintr.state == ASSERTED) { dev->lintr.state = IDLE; pci_irq_deassert(dev); } else if (dev->lintr.state == PENDING) dev->lintr.state = IDLE; pthread_mutex_unlock(&dev->lintr.lock); } static void pci_lintr_update(struct pci_vdev *dev) { pthread_mutex_lock(&dev->lintr.lock); if (dev->lintr.state == ASSERTED && !pci_lintr_permitted(dev)) { pci_irq_deassert(dev); dev->lintr.state = PENDING; } else if (dev->lintr.state == PENDING && pci_lintr_permitted(dev)) { dev->lintr.state = ASSERTED; pci_irq_assert(dev); } pthread_mutex_unlock(&dev->lintr.lock); } int pci_count_lintr(int bus) { int count, slot, pin; struct slotinfo *slotinfo; count = 0; if (pci_businfo[bus] != NULL) { for (slot = 0; slot < MAXSLOTS; slot++) { slotinfo = &pci_businfo[bus]->slotinfo[slot]; for (pin = 0; pin < 4; pin++) { if (slotinfo->si_intpins[pin].ii_count != 0) count++; } } } return count; } void pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg) { struct businfo *bi; struct slotinfo *si; struct intxinfo *ii; int slot, pin; bi = pci_businfo[bus]; if (bi == NULL) return; for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (pin = 0; pin < 4; pin++) { ii = &si->si_intpins[pin]; if (ii->ii_count != 0) cb(bus, slot, pin + 1, ii->ii_pirq_pin, ii->ii_ioapic_irq, arg); } } } /* * Return 1 if the emulated device in 'slot' is a multi-function device. * Return 0 otherwise. */ static int pci_emul_is_mfdev(int bus, int slot) { struct businfo *bi; struct slotinfo *si; int f, numfuncs; numfuncs = 0; bi = pci_businfo[bus]; if (bi != NULL) { si = &bi->slotinfo[slot]; for (f = 0; f < MAXFUNCS; f++) { if (si->si_funcs[f].fi_devi != NULL) numfuncs++; } } return (numfuncs > 1); } /* * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on * whether or not is a multi-function being emulated in the pci 'slot'. 
*/ static void pci_emul_hdrtype_fixup(int bus, int slot, int off, int bytes, uint32_t *rv) { int mfdev; if (off <= PCIR_HDRTYPE && off + bytes > PCIR_HDRTYPE) { mfdev = pci_emul_is_mfdev(bus, slot); switch (bytes) { case 1: case 2: *rv &= ~PCIM_MFDEV; if (mfdev) *rv |= PCIM_MFDEV; break; case 4: *rv &= ~(PCIM_MFDEV << 16); if (mfdev) *rv |= (PCIM_MFDEV << 16); break; } } } static void pci_emul_cmdsts_write(struct pci_vdev *dev, int coff, uint32_t new, int bytes) { int i, rshift; uint32_t cmd, cmd2, changed, old, readonly; cmd = pci_get_cfgdata16(dev, PCIR_COMMAND); /* stash old value */ /* * From PCI Local Bus Specification 3.0 sections 6.2.2 and 6.2.3. * * XXX Bits 8, 11, 12, 13, 14 and 15 in the status register are * 'write 1 to clear'. However these bits are not set to '1' by * any device emulation so it is simpler to treat them as readonly. */ rshift = (coff & 0x3) * 8; readonly = 0xFFFFF880 >> rshift; old = CFGREAD(dev, coff, bytes); new &= ~readonly; new |= (old & readonly); CFGWRITE(dev, coff, new, bytes); /* update config */ cmd2 = pci_get_cfgdata16(dev, PCIR_COMMAND); /* get updated value */ changed = cmd ^ cmd2; /* * If the MMIO or I/O address space decoding has changed then * register/unregister all BARs that decode that address space. */ for (i = 0; i <= PCI_BARMAX; i++) { switch (dev->bar[i].type) { case PCIBAR_NONE: case PCIBAR_MEMHI64: break; case PCIBAR_IO: /* I/O address space decoding changed? */ if (changed & PCIM_CMD_PORTEN) { if (porten(dev)) register_bar(dev, i); else unregister_bar(dev, i); } break; case PCIBAR_MEM32: case PCIBAR_MEM64: /* MMIO address space decoding changed? */ if (changed & PCIM_CMD_MEMEN) { if (memen(dev)) register_bar(dev, i); else unregister_bar(dev, i); } break; default: assert(0); } } /* * If INTx has been unmasked and is pending, assert the * interrupt. */ pci_lintr_update(dev); } static void pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func, int coff, int bytes, uint32_t *eax) { struct businfo *bi; struct slotinfo *si; struct pci_vdev *dev; struct pci_vdev_ops *ops; int idx, needcfg; uint64_t addr, bar, mask; bi = pci_businfo[bus]; if (bi != NULL) { si = &bi->slotinfo[slot]; dev = si->si_funcs[func].fi_devi; } else dev = NULL; /* * Just return if there is no device at this slot:func or if the * the guest is doing an un-aligned access. */ if (dev == NULL || (bytes != 1 && bytes != 2 && bytes != 4) || (coff & (bytes - 1)) != 0) { if (in) *eax = 0xffffffff; return; } ops = dev->dev_ops; /* * For non-passthru device, extended config space is NOT supported. * Ignore all writes beyond the standard config space and return all * ones on reads. * * For passthru device, extended config space is supported. * Access to extended config space is implemented via libpciaccess. */ if (strcmp("passthru", ops->class_name)) { if (coff >= PCI_REGMAX + 1) { if (in) { *eax = 0xffffffff; /* * Extended capabilities begin at offset 256 in * config space. * Absence of extended capabilities is signaled * with all 0s in the extended capability header * at offset 256. 
*/ if (coff <= PCI_REGMAX + 4) *eax = 0x00000000; } return; } } /* * Config read */ if (in) { /* Let the device emulation override the default handler */ if (ops->vdev_cfgread != NULL) { needcfg = ops->vdev_cfgread(ctx, vcpu, dev, coff, bytes, eax); } else { needcfg = 1; } if (needcfg) *eax = CFGREAD(dev, coff, bytes); pci_emul_hdrtype_fixup(bus, slot, coff, bytes, eax); } else { /* Let the device emulation override the default handler */ if (ops->vdev_cfgwrite != NULL && (*ops->vdev_cfgwrite)(ctx, vcpu, dev, coff, bytes, *eax) == 0) return; /* * Special handling for write to BAR registers */ if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)) { /* * Ignore writes to BAR registers that are not * 4-byte aligned. */ if (bytes != 4 || (coff & 0x3) != 0) return; idx = (coff - PCIR_BAR(0)) / 4; mask = ~(dev->bar[idx].size - 1); switch (dev->bar[idx].type) { case PCIBAR_NONE: dev->bar[idx].addr = bar = 0; break; case PCIBAR_IO: addr = *eax & mask; addr &= 0xffff; bar = addr | PCIM_BAR_IO_SPACE; /* * Register the new BAR value for interception */ if (addr != dev->bar[idx].addr) { update_bar_address(dev, addr, idx, PCIBAR_IO); } break; case PCIBAR_MEM32: addr = bar = *eax & mask; bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; if (addr != dev->bar[idx].addr) { update_bar_address(dev, addr, idx, PCIBAR_MEM32); } break; case PCIBAR_MEM64: addr = bar = *eax & mask; bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | PCIM_BAR_MEM_PREFETCH; if (addr != (uint32_t)dev->bar[idx].addr) { update_bar_address(dev, addr, idx, PCIBAR_MEM64); } break; case PCIBAR_MEMHI64: assert(idx >= 1); mask = ~(dev->bar[idx - 1].size - 1); addr = ((uint64_t)*eax << 32) & mask; bar = addr >> 32; if (bar != dev->bar[idx - 1].addr >> 32) { update_bar_address(dev, addr, idx - 1, PCIBAR_MEMHI64); } break; default: assert(0); } pci_set_cfgdata32(dev, coff, bar); } else if (coff == PCIR_BIOS) { /* ignore ROM BAR length request */ } else if (pci_emul_iscap(dev, coff)) { pci_emul_capwrite(dev, coff, bytes, *eax); } else if (coff >= PCIR_COMMAND && coff < PCIR_REVID) { pci_emul_cmdsts_write(dev, coff, *eax, bytes); } else { CFGWRITE(dev, coff, *eax, bytes); } } } static int cfgenable, cfgbus, cfgslot, cfgfunc, cfgoff; static int pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes, uint32_t *eax, void *arg) { uint32_t x; if (bytes != 4) { if (in) *eax = (bytes == 2) ? 
	    0xffff : 0xff;
		return 0;
	}

	if (in) {
		x = (cfgbus << 16) | (cfgslot << 11) | (cfgfunc << 8) | cfgoff;
		if (cfgenable)
			x |= CONF1_ENABLE;
		*eax = x;
	} else {
		x = *eax;
		cfgenable = (x & CONF1_ENABLE) == CONF1_ENABLE;
		cfgoff = x & PCI_REGMAX;
		cfgfunc = (x >> 8) & PCI_FUNCMAX;
		cfgslot = (x >> 11) & PCI_SLOTMAX;
		cfgbus = (x >> 16) & PCI_BUSMAX;
	}

	return 0;
}
INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_INOUT, pci_emul_cfgaddr);

static int
pci_emul_cfgdata(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
		 uint32_t *eax, void *arg)
{
	int coff;

	assert(bytes == 1 || bytes == 2 || bytes == 4);

	coff = cfgoff + (port - CONF1_DATA_PORT);
	if (cfgenable) {
		pci_cfgrw(ctx, vcpu, in, cfgbus, cfgslot, cfgfunc, coff, bytes,
		    eax);
	} else {
		/* Ignore accesses to cfgdata if not enabled by cfgaddr */
		if (in)
			*eax = 0xffffffff;
	}
	return 0;
}

INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata);

int
emulate_pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot,
		  int func, int reg, int bytes, int *value)
{
	pci_cfgrw(ctx, vcpu, in, bus, slot, func, reg, bytes,
		  (uint32_t *)value);
	return 0;
}

#define PCI_EMUL_TEST
#ifdef PCI_EMUL_TEST
/*
 * Define a dummy test device
 */
#define DIOSZ	8
#define DMEMSZ	4096
struct pci_emul_dummy {
	uint8_t   ioregs[DIOSZ];
	uint8_t   memregs[2][DMEMSZ];
};

#define PCI_EMUL_MSI_MSGS	4
#define PCI_EMUL_MSIX_MSGS	16

static int
pci_emul_dinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	int error;
	struct pci_emul_dummy *dummy;

	dummy = calloc(1, sizeof(struct pci_emul_dummy));

	dev->arg = dummy;

	pci_set_cfgdata16(dev, PCIR_DEVICE, 0x0001);
	pci_set_cfgdata16(dev, PCIR_VENDOR, 0x10DD);
	pci_set_cfgdata8(dev, PCIR_CLASS, 0x02);

	error = pci_emul_add_msicap(dev, PCI_EMUL_MSI_MSGS);
	assert(error == 0);

	error = pci_emul_alloc_bar(dev, 0, PCIBAR_IO, DIOSZ);
	assert(error == 0);

	error = pci_emul_alloc_bar(dev, 1, PCIBAR_MEM32, DMEMSZ);
	assert(error == 0);

	error = pci_emul_alloc_bar(dev, 2, PCIBAR_MEM32, DMEMSZ);
	assert(error == 0);

	return 0;
}

static void
pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_vdev *dev, int baridx,
	      uint64_t offset, int size, uint64_t value)
{
	int i;
	void *offset_ptr;
	struct pci_emul_dummy *dummy = dev->arg;

	if (baridx == 0) {
		if (offset + size > DIOSZ) {
			printf("diow: iow too large, offset %ld size %d\n",
			       offset, size);
			return;
		}

		offset_ptr = (void *) &dummy->ioregs[offset];
		if (size == 1)
			*(uint8_t *)offset_ptr = value & 0xff;
		else if (size == 2)
			*(uint16_t *)offset_ptr = value & 0xffff;
		else if (size == 4)
			*(uint32_t *)offset_ptr = value;
		else
			printf("diow: iow unknown size %d\n", size);

		/*
		 * Special magic value to generate an interrupt
		 */
		if (offset == 4 && size == 4 && pci_msi_enabled(dev))
			pci_generate_msi(dev, value % pci_msi_maxmsgnum(dev));

		if (value == 0xabcdef) {
			for (i = 0; i < pci_msi_maxmsgnum(dev); i++)
				pci_generate_msi(dev, i);
		}
	}

	if (baridx == 1 || baridx == 2) {
		if (offset + size > DMEMSZ) {
			printf("diow: memw too large, offset %ld size %d\n",
			       offset, size);
			return;
		}

		i = baridx - 1;		/* 'memregs' index */

		offset_ptr = (void *) &dummy->memregs[i][offset];
		if (size == 1)
			*(uint8_t *)offset_ptr = value;
		else if (size == 2)
			*(uint16_t *)offset_ptr = value;
		else if (size == 4)
			*(uint32_t *)offset_ptr = value;
		else if (size == 8)
			*(uint64_t *)offset_ptr = value;
		else
			printf("diow: memw
unknown size %d\n", size); /* * magic interrupt ?? */ } if (baridx > 2 || baridx < 0) printf("diow: unknown bar idx %d\n", baridx); } static uint64_t pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_vdev *dev, int baridx, uint64_t offset, int size) { struct pci_emul_dummy *dummy = dev->arg; uint32_t value = 0; int i; void *offset_ptr; if (baridx == 0) { if (offset + size > DIOSZ) { printf("dior: ior too large, offset %ld size %d\n", offset, size); return 0; } value = 0; offset_ptr = (void *) &dummy->ioregs[offset]; if (size == 1) value = *(uint8_t *)offset_ptr; else if (size == 2) value = *(uint16_t *)offset_ptr; else if (size == 4) value = *(uint32_t *)offset_ptr; else printf("dior: ior unknown size %d\n", size); } if (baridx == 1 || baridx == 2) { if (offset + size > DMEMSZ) { printf("dior: memr too large, offset %ld size %d\n", offset, size); return 0; } i = baridx - 1; /* 'memregs' index */ offset_ptr = (void *) &dummy->memregs[i][offset]; if (size == 1) value = *(uint8_t *)offset_ptr; else if (size == 2) value = *(uint16_t *)offset_ptr; else if (size == 4) value = *(uint32_t *)offset_ptr; else if (size == 8) value = *(uint64_t *)offset_ptr; else printf("dior: ior unknown size %d\n", size); } if (baridx > 2 || baridx < 0) { printf("dior: unknown bar idx %d\n", baridx); return 0; } return value; } struct pci_vdev_ops pci_dummy = { .class_name = "dummy", .vdev_init = pci_emul_dinit, .vdev_barwrite = pci_emul_diow, .vdev_barread = pci_emul_dior }; DEFINE_PCI_DEVTYPE(pci_dummy); #endif /* PCI_EMUL_TEST */
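/*
 * Illustrative aside (not part of the original file): the cf8/cfc "config
 * mechanism #1" address handled by pci_emul_cfgaddr() above packs the target
 * into one 32-bit register -- bit 31 enable, bits 16-23 bus, bits 11-15
 * slot, bits 8-10 function, bits 0-7 register offset (dword aligned on real
 * hardware). The self-contained sketch below shows the same pack/unpack;
 * the conf1_* names are hypothetical and only standard C is used.
 */
#include <stdint.h>
#include <stdio.h>

#define CONF1_ENABLE_BIT 0x80000000u

static uint32_t
conf1_pack(int bus, int slot, int func, int reg)
{
    return CONF1_ENABLE_BIT |
        ((uint32_t) (bus & 0xff) << 16) |
        ((uint32_t) (slot & 0x1f) << 11) |
        ((uint32_t) (func & 0x07) << 8) |
        (uint32_t) (reg & 0xfc);            /* dword-aligned offset */
}

static void
conf1_unpack(uint32_t x, int *bus, int *slot, int *func, int *reg)
{
    *bus = (x >> 16) & 0xff;                /* same masks as PCI_BUSMAX etc. */
    *slot = (x >> 11) & 0x1f;
    *func = (x >> 8) & 0x07;
    *reg = x & 0xfc;
}

int
main(void)
{
    int bus, slot, func, reg;
    uint32_t x = conf1_pack(0, 3, 0, 0x10); /* e.g. BAR0 of device 0:3.0 */

    conf1_unpack(x, &bus, &slot, &func, &reg);
    printf("cf8=0x%08x -> %d:%d.%d reg 0x%02x\n",
        (unsigned) x, bus, slot, func, (unsigned) reg);
    return 0;
}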
./CrossVul/dataset_final_sorted/CWE-617/c/good_1219_0
crossvul-cpp_data_bad_1771_0
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "trace.h" #include <stdio.h> #include <unistd.h> #include <errno.h> #include <signal.h> #include <sys/stat.h> #include <fcntl.h> #include <time.h> #include <libgen.h> #include <assert.h> #include <sys/utsname.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/socket.h> #include <sys/select.h> #include <sys/time.h> #include <sys/ioctl.h> #include <arpa/inet.h> #include <netinet/if_ether.h> #include <pwd.h> #include <grp.h> static void usage(void); static struct protocol protos[] = { { LLDPD_MODE_LLDP, 1, "LLDP", 'l', lldp_send, lldp_decode, NULL, LLDP_MULTICAST_ADDR }, #ifdef ENABLE_CDP { LLDPD_MODE_CDPV1, 0, "CDPv1", 'c', cdpv1_send, cdp_decode, cdpv1_guess, CDP_MULTICAST_ADDR }, { LLDPD_MODE_CDPV2, 0, "CDPv2", 'c', cdpv2_send, cdp_decode, cdpv2_guess, CDP_MULTICAST_ADDR }, #endif #ifdef ENABLE_SONMP { LLDPD_MODE_SONMP, 0, "SONMP", 's', sonmp_send, sonmp_decode, NULL, SONMP_MULTICAST_ADDR }, #endif #ifdef ENABLE_EDP { LLDPD_MODE_EDP, 0, "EDP", 'e', edp_send, edp_decode, NULL, EDP_MULTICAST_ADDR }, #endif #ifdef ENABLE_FDP { LLDPD_MODE_FDP, 0, "FDP", 'f', fdp_send, cdp_decode, NULL, FDP_MULTICAST_ADDR }, #endif { 0, 0, "any", ' ', NULL, NULL, NULL, {0,0,0,0,0,0} } }; static char **saved_argv; #ifdef HAVE___PROGNAME extern const char *__progname; #else # define __progname "lldpd" #endif static void usage(void) { fprintf(stderr, "Usage: %s [OPTIONS ...]\n", __progname); fprintf(stderr, "Version: %s\n", PACKAGE_STRING); fprintf(stderr, "\n"); fprintf(stderr, "-d Do not daemonize.\n"); fprintf(stderr, "-r Receive-only mode\n"); fprintf(stderr, "-i Disable LLDP-MED inventory TLV transmission.\n"); fprintf(stderr, "-k Disable advertising of kernel release, version, machine.\n"); fprintf(stderr, "-S descr Override the default system description.\n"); fprintf(stderr, "-P name Override the default hardware platform.\n"); fprintf(stderr, "-m IP Specify the IPv4 management addresses of this system.\n"); fprintf(stderr, "-u file Specify the Unix-domain socket used for communication with lldpctl(8).\n"); fprintf(stderr, "-H mode Specify the behaviour when detecting multiple neighbors.\n"); fprintf(stderr, "-I iface Limit interfaces to use.\n"); #ifdef ENABLE_LLDPMED fprintf(stderr, "-M class Enable emission of LLDP-MED frame. 
'class' should be one of:\n"); fprintf(stderr, " 1 Generic Endpoint (Class I)\n"); fprintf(stderr, " 2 Media Endpoint (Class II)\n"); fprintf(stderr, " 3 Communication Device Endpoints (Class III)\n"); fprintf(stderr, " 4 Network Connectivity Device\n"); #endif #ifdef USE_SNMP fprintf(stderr, "-x Enable SNMP subagent.\n"); #endif fprintf(stderr, "\n"); #if defined ENABLE_CDP || defined ENABLE_EDP || defined ENABLE_FDP || defined ENABLE_SONMP fprintf(stderr, "Additional protocol support.\n"); #ifdef ENABLE_CDP fprintf(stderr, "-c Enable the support of CDP protocol. (Cisco)\n"); #endif #ifdef ENABLE_EDP fprintf(stderr, "-e Enable the support of EDP protocol. (Extreme)\n"); #endif #ifdef ENABLE_FDP fprintf(stderr, "-f Enable the support of FDP protocol. (Foundry)\n"); #endif #ifdef ENABLE_SONMP fprintf(stderr, "-s Enable the support of SONMP protocol. (Nortel)\n"); #endif fprintf(stderr, "\n"); #endif fprintf(stderr, "see manual page lldpd(8) for more information\n"); exit(1); } struct lldpd_hardware * lldpd_get_hardware(struct lldpd *cfg, char *name, int index, struct lldpd_ops *ops) { struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { if ((strcmp(hardware->h_ifname, name) == 0) && (hardware->h_ifindex == index) && ((!ops) || (ops == hardware->h_ops))) break; } return hardware; } /** * Allocate the default local port. This port will be cloned each time we need a * new local port. */ static void lldpd_alloc_default_local_port(struct lldpd *cfg) { struct lldpd_port *port; if ((port = (struct lldpd_port *) calloc(1, sizeof(struct lldpd_port))) == NULL) fatal("main", NULL); #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); TAILQ_INIT(&port->p_ppvids); TAILQ_INIT(&port->p_pids); #endif #ifdef ENABLE_CUSTOM TAILQ_INIT(&port->p_custom_list); #endif cfg->g_default_local_port = port; } /** * Clone a given port. The destination needs to be already allocated. 
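* The clone is produced by marshaling the source port and unmarshaling it into a deep copy; the embedded TAILQ heads are then repaired, because the flat memcpy into the destination would otherwise leave them pointing into the freed intermediate copy.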
*/ static int lldpd_clone_port(struct lldpd_port *destination, struct lldpd_port *source) { u_int8_t *output = NULL; ssize_t output_len; struct lldpd_port *cloned = NULL; output_len = lldpd_port_serialize(source, (void**)&output); if (output_len == -1 || lldpd_port_unserialize(output, output_len, &cloned) <= 0) { log_warnx("alloc", "unable to clone default port"); free(output); return -1; } memcpy(destination, cloned, sizeof(struct lldpd_port)); free(cloned); free(output); #ifdef ENABLE_DOT1 marshal_repair_tailq(lldpd_vlan, &destination->p_vlans, v_entries); marshal_repair_tailq(lldpd_ppvid, &destination->p_ppvids, p_entries); marshal_repair_tailq(lldpd_pi, &destination->p_pids, p_entries); #endif #ifdef ENABLE_CUSTOM marshal_repair_tailq(lldpd_custom, &destination->p_custom_list, next); #endif return 0; } struct lldpd_hardware * lldpd_alloc_hardware(struct lldpd *cfg, char *name, int index) { struct lldpd_hardware *hardware; log_debug("alloc", "allocate a new local port (%s)", name); if ((hardware = (struct lldpd_hardware *) calloc(1, sizeof(struct lldpd_hardware))) == NULL) return NULL; /* Clone default local port */ if (lldpd_clone_port(&hardware->h_lport, cfg->g_default_local_port) == -1) { log_warnx("alloc", "unable to clone default port"); free(hardware); return NULL; } hardware->h_cfg = cfg; strlcpy(hardware->h_ifname, name, sizeof(hardware->h_ifname)); hardware->h_ifindex = index; hardware->h_lport.p_chassis = LOCAL_CHASSIS(cfg); hardware->h_lport.p_chassis->c_refcount++; TAILQ_INIT(&hardware->h_rports); #ifdef ENABLE_LLDPMED if (LOCAL_CHASSIS(cfg)->c_med_cap_available) { hardware->h_lport.p_med_cap_enabled = LLDP_MED_CAP_CAP; if (!cfg->g_config.c_noinventory) hardware->h_lport.p_med_cap_enabled |= LLDP_MED_CAP_IV; } #endif levent_hardware_init(hardware); return hardware; } struct lldpd_mgmt * lldpd_alloc_mgmt(int family, void *addrptr, size_t addrsize, u_int32_t iface) { struct lldpd_mgmt *mgmt; log_debug("alloc", "allocate a new management address (family: %d)", family); if (family <= LLDPD_AF_UNSPEC || family >= LLDPD_AF_LAST) { errno = EAFNOSUPPORT; return NULL; } if (addrsize > LLDPD_MGMT_MAXADDRSIZE) { errno = EOVERFLOW; return NULL; } mgmt = calloc(1, sizeof(struct lldpd_mgmt)); if (mgmt == NULL) { errno = ENOMEM; return NULL; } mgmt->m_family = family; assert(addrsize <= LLDPD_MGMT_MAXADDRSIZE); memcpy(&mgmt->m_addr, addrptr, addrsize); mgmt->m_addrsize = addrsize; mgmt->m_iface = iface; return mgmt; } void lldpd_hardware_cleanup(struct lldpd *cfg, struct lldpd_hardware *hardware) { log_debug("alloc", "cleanup hardware port %s", hardware->h_ifname); free(hardware->h_lport_previous); free(hardware->h_lchassis_previous_id); free(hardware->h_lport_previous_id); lldpd_port_cleanup(&hardware->h_lport, 1); if (hardware->h_ops && hardware->h_ops->cleanup) hardware->h_ops->cleanup(cfg, hardware); levent_hardware_release(hardware); free(hardware); } static void lldpd_display_neighbors(struct lldpd *cfg) { if (!cfg->g_config.c_set_ifdescr) return; struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { struct lldpd_port *port; char *description; const char *neighbor = NULL; unsigned neighbors = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (SMART_HIDDEN(port)) continue; neighbors++; neighbor = port->p_chassis->c_name; } if (neighbors == 0) priv_iface_description(hardware->h_ifname, ""); else if (neighbors == 1 && neighbor && *neighbor != '\0') { if (asprintf(&description, "%s", neighbor) != -1) { priv_iface_description(hardware->h_ifname, 
description); free(description); } } else { if (asprintf(&description, "%d neighbor%s", neighbors, (neighbors > 1)?"s":"") != -1) { priv_iface_description(hardware->h_ifname, description); free(description); } } } } static void lldpd_count_neighbors(struct lldpd *cfg) { #if HAVE_SETPROCTITLE struct lldpd_chassis *chassis; const char *neighbor; unsigned neighbors = 0; TAILQ_FOREACH(chassis, &cfg->g_chassis, c_entries) { neighbors++; neighbor = chassis->c_name; } neighbors--; if (neighbors == 0) setproctitle("no neighbor"); else if (neighbors == 1 && neighbor && *neighbor != '\0') setproctitle("connected to %s", neighbor); else setproctitle("%d neighbor%s", neighbors, (neighbors > 1)?"s":""); #endif lldpd_display_neighbors(cfg); } static void notify_clients_deletion(struct lldpd_hardware *hardware, struct lldpd_port *rport) { TRACE(LLDPD_NEIGHBOR_DELETE(hardware->h_ifname, rport->p_chassis->c_name, rport->p_descr)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_DELETED, rport); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_DELETED, rport); #endif } static void lldpd_reset_timer(struct lldpd *cfg) { /* Reset timer for ports that have been changed. */ struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { /* We keep a flat copy of the local port to see if there is any * change. To do this, we zero out fields that are not * significant, marshal the port, then restore. */ struct lldpd_port *port = &hardware->h_lport; /* Take the current flags into account to detect a change. */ port->_p_hardware_flags = hardware->h_flags; u_int8_t *output = NULL; ssize_t output_len; char save[LLDPD_PORT_START_MARKER]; memcpy(save, port, sizeof(save)); /* coverity[suspicious_sizeof] We intentionally partially memset port */ memset(port, 0, sizeof(save)); output_len = lldpd_port_serialize(port, (void**)&output); memcpy(port, save, sizeof(save)); if (output_len == -1) { log_warnx("localchassis", "unable to serialize local port %s to check for differences", hardware->h_ifname); continue; } /* Compare with the previous value */ if (hardware->h_lport_previous && output_len == hardware->h_lport_previous_len && !memcmp(output, hardware->h_lport_previous, output_len)) { log_debug("localchassis", "no change detected for port %s", hardware->h_ifname); } else { log_debug("localchassis", "change detected for port %s, resetting its timer", hardware->h_ifname); levent_schedule_pdu(hardware); } /* Update the value */ free(hardware->h_lport_previous); hardware->h_lport_previous = output; hardware->h_lport_previous_len = output_len; } } void lldpd_cleanup(struct lldpd *cfg) { struct lldpd_hardware *hardware, *hardware_next; struct lldpd_chassis *chassis, *chassis_next; log_debug("localchassis", "cleanup all ports"); for (hardware = TAILQ_FIRST(&cfg->g_hardware); hardware != NULL; hardware = hardware_next) { hardware_next = TAILQ_NEXT(hardware, h_entries); if (!hardware->h_flags) { TRACE(LLDPD_INTERFACES_DELETE(hardware->h_ifname)); TAILQ_REMOVE(&cfg->g_hardware, hardware, h_entries); lldpd_remote_cleanup(hardware, notify_clients_deletion, 1); lldpd_hardware_cleanup(cfg, hardware); } else lldpd_remote_cleanup(hardware, notify_clients_deletion, !(hardware->h_flags & IFF_RUNNING)); } log_debug("localchassis", "cleanup all chassis"); for (chassis = TAILQ_FIRST(&cfg->g_chassis); chassis; chassis = chassis_next) { chassis_next = TAILQ_NEXT(chassis, c_entries); if (chassis->c_refcount == 0) { TAILQ_REMOVE(&cfg->g_chassis, chassis, c_entries); lldpd_chassis_cleanup(chassis, 1); } } 
lldpd_count_neighbors(cfg); levent_schedule_cleanup(cfg); } /* Update chassis `ochassis' with values from `chassis'. The later one is not expected to be part of a list! It will also be wiped from memory. */ static void lldpd_move_chassis(struct lldpd_chassis *ochassis, struct lldpd_chassis *chassis) { struct lldpd_mgmt *mgmt, *mgmt_next; /* We want to keep refcount, index and list stuff from the current * chassis */ TAILQ_ENTRY(lldpd_chassis) entries; int refcount = ochassis->c_refcount; int index = ochassis->c_index; memcpy(&entries, &ochassis->c_entries, sizeof(entries)); lldpd_chassis_cleanup(ochassis, 0); /* Make the copy. */ /* WARNING: this is a kludgy hack, we need in-place copy and cannot use * marshaling. */ memcpy(ochassis, chassis, sizeof(struct lldpd_chassis)); TAILQ_INIT(&ochassis->c_mgmt); /* Copy of management addresses */ for (mgmt = TAILQ_FIRST(&chassis->c_mgmt); mgmt != NULL; mgmt = mgmt_next) { mgmt_next = TAILQ_NEXT(mgmt, m_entries); TAILQ_REMOVE(&chassis->c_mgmt, mgmt, m_entries); TAILQ_INSERT_TAIL(&ochassis->c_mgmt, mgmt, m_entries); } /* Restore saved values */ ochassis->c_refcount = refcount; ochassis->c_index = index; memcpy(&ochassis->c_entries, &entries, sizeof(entries)); /* Get rid of the new chassis */ free(chassis); } static int lldpd_guess_type(struct lldpd *cfg, char *frame, int s) { int i; if (s < ETHER_ADDR_LEN) return -1; for (i=0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; if (cfg->g_protocols[i].guess == NULL) { if (memcmp(frame, cfg->g_protocols[i].mac, ETHER_ADDR_LEN) == 0) { log_debug("decode", "guessed protocol is %s (from MAC address)", cfg->g_protocols[i].name); return cfg->g_protocols[i].mode; } } else { if (cfg->g_protocols[i].guess(frame, s)) { log_debug("decode", "guessed protocol is %s (from detector function)", cfg->g_protocols[i].name); return cfg->g_protocols[i].mode; } } } return -1; } static void lldpd_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware) { int i; struct lldpd_chassis *chassis, *ochassis = NULL; struct lldpd_port *port, *oport = NULL, *aport; int guess = LLDPD_MODE_LLDP; log_debug("decode", "decode a received frame on %s", hardware->h_ifname); if (s < sizeof(struct ether_header) + 4) /* Too short, just discard it */ return; /* Decapsulate VLAN frames */ struct ether_header eheader; memcpy(&eheader, frame, sizeof(struct ether_header)); if (eheader.ether_type == htons(ETHERTYPE_VLAN)) { /* VLAN decapsulation means to shift 4 bytes left the frame from * offset 2*ETHER_ADDR_LEN */ memmove(frame + 2*ETHER_ADDR_LEN, frame + 2*ETHER_ADDR_LEN + 4, s - 2*ETHER_ADDR_LEN); s -= 4; } TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if ((oport->p_lastframe != NULL) && (oport->p_lastframe->size == s) && (memcmp(oport->p_lastframe->frame, frame, s) == 0)) { /* Already received the same frame */ log_debug("decode", "duplicate frame, no need to decode"); oport->p_lastupdate = time(NULL); return; } } guess = lldpd_guess_type(cfg, frame, s); for (i=0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; if (cfg->g_protocols[i].mode == guess) { log_debug("decode", "using decode function for %s protocol", cfg->g_protocols[i].name); if (cfg->g_protocols[i].decode(cfg, frame, s, hardware, &chassis, &port) == -1) { log_debug("decode", "function for %s protocol did not decode this frame", cfg->g_protocols[i].name); return; } chassis->c_protocol = port->p_protocol = cfg->g_protocols[i].mode; break; } } if (cfg->g_protocols[i].mode == 0) { 
log_debug("decode", "unable to guess frame type on %s", hardware->h_ifname); return; } TRACE(LLDPD_FRAME_DECODED( hardware->h_ifname, cfg->g_protocols[i].name, chassis->c_name, port->p_descr)); /* Do we already have the same MSAP somewhere? */ int count = 0; log_debug("decode", "search for the same MSAP"); TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if (port->p_protocol == oport->p_protocol) { count++; if ((port->p_id_subtype == oport->p_id_subtype) && (port->p_id_len == oport->p_id_len) && (memcmp(port->p_id, oport->p_id, port->p_id_len) == 0) && (chassis->c_id_subtype == oport->p_chassis->c_id_subtype) && (chassis->c_id_len == oport->p_chassis->c_id_len) && (memcmp(chassis->c_id, oport->p_chassis->c_id, chassis->c_id_len) == 0)) { ochassis = oport->p_chassis; log_debug("decode", "MSAP is already known"); break; } } } /* Do we have room for a new MSAP? */ if (!oport && cfg->g_config.c_max_neighbors) { if (count == (cfg->g_config.c_max_neighbors - 1)) { log_debug("decode", "max neighbors %d reached for port %s, " "dropping any new ones silently", cfg->g_config.c_max_neighbors, hardware->h_ifname); } else if (count > cfg->g_config.c_max_neighbors - 1) { log_debug("decode", "too many neighbors for port %s, drop this new one", hardware->h_ifname); lldpd_port_cleanup(port, 1); lldpd_chassis_cleanup(chassis, 1); free(port); return; } } /* No, but do we already know the system? */ if (!oport) { log_debug("decode", "MSAP is unknown, search for the chassis"); TAILQ_FOREACH(ochassis, &cfg->g_chassis, c_entries) { if ((chassis->c_protocol == ochassis->c_protocol) && (chassis->c_id_subtype == ochassis->c_id_subtype) && (chassis->c_id_len == ochassis->c_id_len) && (memcmp(chassis->c_id, ochassis->c_id, chassis->c_id_len) == 0)) break; } } if (oport) { /* The port is known, remove it before adding it back */ TAILQ_REMOVE(&hardware->h_rports, oport, p_entries); lldpd_port_cleanup(oport, 1); free(oport); } if (ochassis) { lldpd_move_chassis(ochassis, chassis); chassis = ochassis; } else { /* Chassis not known, add it */ log_debug("decode", "unknown chassis, add it to the list"); chassis->c_index = ++cfg->g_lastrid; chassis->c_refcount = 0; TAILQ_INSERT_TAIL(&cfg->g_chassis, chassis, c_entries); i = 0; TAILQ_FOREACH(ochassis, &cfg->g_chassis, c_entries) i++; log_debug("decode", "%d different systems are known", i); } /* Add port */ port->p_lastchange = port->p_lastupdate = time(NULL); if ((port->p_lastframe = (struct lldpd_frame *)malloc(s + sizeof(struct lldpd_frame))) != NULL) { port->p_lastframe->size = s; memcpy(port->p_lastframe->frame, frame, s); } TAILQ_INSERT_TAIL(&hardware->h_rports, port, p_entries); port->p_chassis = chassis; port->p_chassis->c_refcount++; /* Several cases are possible : 1. chassis is new, its refcount was 0. It is now attached to this port, its refcount is 1. 2. chassis already exists and was attached to another port, we increase its refcount accordingly. 3. chassis already exists and was attached to the same port, its refcount was decreased with lldpd_port_cleanup() and is now increased again. In all cases, if the port already existed, it has been freed with lldpd_port_cleanup() and therefore, the refcount of the chassis that was attached to it is decreased. 
*/ /* coverity[use_after_free] TAILQ_REMOVE does the right thing */ i = 0; TAILQ_FOREACH(aport, &hardware->h_rports, p_entries) i++; log_debug("decode", "%d neighbors for %s", i, hardware->h_ifname); if (!oport) hardware->h_insert_cnt++; /* Notify */ log_debug("decode", "send notifications for changes on %s", hardware->h_ifname); if (oport) { TRACE(LLDPD_NEIGHBOR_UPDATE(hardware->h_ifname, chassis->c_name, port->p_descr, i)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_UPDATED, port); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_UPDATED, port); #endif } else { TRACE(LLDPD_NEIGHBOR_NEW(hardware->h_ifname, chassis->c_name, port->p_descr, i)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_ADDED, port); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_ADDED, port); #endif } #ifdef ENABLE_LLDPMED if (!oport && port->p_chassis->c_med_type) { /* New neighbor, fast start */ if (hardware->h_cfg->g_config.c_enable_fast_start && !hardware->h_tx_fast) { log_debug("decode", "%s: entering fast start due to " "new neighbor", hardware->h_ifname); hardware->h_tx_fast = hardware->h_cfg->g_config.c_tx_fast_init; } levent_schedule_pdu(hardware); } #endif return; } /* Get the output of lsb_release -s -d. This is a slow function. It should be called once. It return NULL if any problem happens. Otherwise, this is a statically allocated buffer. The result includes the trailing \n */ static char * lldpd_get_lsb_release() { static char release[1024]; char *const command[] = { "lsb_release", "-s", "-d", NULL }; int pid, status, devnull, count; int pipefd[2]; log_debug("localchassis", "grab LSB release"); if (pipe(pipefd)) { log_warn("localchassis", "unable to get a pair of pipes"); return NULL; } pid = vfork(); switch (pid) { case -1: log_warn("localchassis", "unable to fork"); return NULL; case 0: /* Child, exec lsb_release */ close(pipefd[0]); if ((devnull = open("/dev/null", O_RDWR, 0)) != -1) { dup2(devnull, STDIN_FILENO); dup2(devnull, STDERR_FILENO); dup2(pipefd[1], STDOUT_FILENO); if (devnull > 2) close(devnull); if (pipefd[1] > 2) close(pipefd[1]); execvp("lsb_release", command); } _exit(127); break; default: /* Father, read the output from the children */ close(pipefd[1]); count = 0; do { status = read(pipefd[0], release+count, sizeof(release)-count); if ((status == -1) && (errno == EINTR)) continue; if (status > 0) count += status; } while (count < sizeof(release) && (status > 0)); if (status < 0) { log_info("localchassis", "unable to read from lsb_release"); close(pipefd[0]); waitpid(pid, &status, 0); return NULL; } close(pipefd[0]); if (count >= sizeof(release)) { log_info("localchassis", "output of lsb_release is too large"); waitpid(pid, &status, 0); return NULL; } status = -1; if (waitpid(pid, &status, 0) != pid) return NULL; if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) { log_info("localchassis", "lsb_release information not available"); return NULL; } if (!count) { log_info("localchassis", "lsb_release returned an empty string"); return NULL; } release[count] = '\0'; return release; } /* Should not be here */ return NULL; } /* Same like lldpd_get_lsb_release but reads /etc/os-release for PRETTY_NAME=. 
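/etc/os-release is tried first, then /usr/lib/os-release; surrounding double quotes and the trailing newline are stripped from the value. Like lldpd_get_lsb_release, the result is a statically allocated buffer.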
*/ static char * lldpd_get_os_release() { static char release[1024]; char line[1024]; char *key, *val; char *ptr1 = release; log_debug("localchassis", "grab OS release"); FILE *fp = fopen("/etc/os-release", "r"); if (!fp) { log_debug("localchassis", "could not open /etc/os-release"); fp = fopen("/usr/lib/os-release", "r"); } if (!fp) { log_info("localchassis", "could not open either /etc/os-release or /usr/lib/os-release"); return NULL; } while ((fgets(line, sizeof(line), fp) != NULL)) { key = strtok(line, "="); val = strtok(NULL, "="); if (strncmp(key, "PRETTY_NAME", sizeof(line)) == 0) { strlcpy(release, val, sizeof(line)); break; } } fclose(fp); /* Remove trailing newline and all " in the string. */ ptr1 = release + strlen(release) - 1; while (ptr1 != release && ((*ptr1 == '"') || (*ptr1 == '\n'))) { *ptr1 = '\0'; ptr1--; } if (release[0] == '"') return release+1; return release; } static void lldpd_hide_ports(struct lldpd *cfg, struct lldpd_hardware *hardware, int mask) { struct lldpd_port *port; int protocols[LLDPD_MODE_MAX+1]; char buffer[256]; int i, j, k, found; unsigned int min; log_debug("smartfilter", "apply smart filter for port %s", hardware->h_ifname); /* Compute the number of occurrences of each protocol */ for (i = 0; i <= LLDPD_MODE_MAX; i++) protocols[i] = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) protocols[port->p_protocol]++; /* Turn the protocols[] array into an array of enabled/disabled protocols. 1 means enabled, 0 means disabled. */ min = (unsigned int)-1; for (i = 0; i <= LLDPD_MODE_MAX; i++) if (protocols[i] && (protocols[i] < min)) min = protocols[i]; found = 0; for (i = 0; i <= LLDPD_MODE_MAX; i++) if ((protocols[i] == min) && !found) { /* If we need a tie breaker, we take the first protocol only */ if (cfg->g_config.c_smart & mask & (SMART_OUTGOING_ONE_PROTO | SMART_INCOMING_ONE_PROTO)) found = 1; protocols[i] = 1; } else protocols[i] = 0; /* We set the p_hidden flag to 1 if the protocol is disabled */ TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (mask == SMART_OUTGOING) port->p_hidden_out = protocols[port->p_protocol]?0:1; else port->p_hidden_in = protocols[port->p_protocol]?0:1; } /* If we want only one neighbor, we take the first one */ if (cfg->g_config.c_smart & mask & (SMART_OUTGOING_ONE_NEIGH | SMART_INCOMING_ONE_NEIGH)) { found = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (mask == SMART_OUTGOING) { if (found) port->p_hidden_out = 1; if (!port->p_hidden_out) found = 1; } if (mask == SMART_INCOMING) { if (found) port->p_hidden_in = 1; if (!port->p_hidden_in) found = 1; } } } /* Print a debug message summarizing the operation */ for (i = 0; i <= LLDPD_MODE_MAX; i++) protocols[i] = 0; k = j = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (!(((mask == SMART_OUTGOING) && port->p_hidden_out) || ((mask == SMART_INCOMING) && port->p_hidden_in))) { k++; protocols[port->p_protocol] = 1; } j++; } buffer[0] = '\0'; for (i=0; cfg->g_protocols[i].mode != 0; i++) { if (cfg->g_protocols[i].enabled && protocols[cfg->g_protocols[i].mode]) { if (strlen(buffer) + strlen(cfg->g_protocols[i].name) + 3 > sizeof(buffer)) { /* Unlikely, our buffer is too small */ memcpy(buffer + sizeof(buffer) - 4, "...", 4); break; } if (buffer[0]) strncat(buffer, ", ", 2); strncat(buffer, cfg->g_protocols[i].name, strlen(cfg->g_protocols[i].name)); } } log_debug("smartfilter", "%s: %s: %d visible neighbors (out of %d)", hardware->h_ifname, (mask == SMART_OUTGOING)?"out filter":"in filter", k, j); log_debug("smartfilter", "%s: protocols: 
%s", hardware->h_ifname, buffer[0]?buffer:"(none)"); } /* Hide unwanted ports depending on smart mode set by the user */ static void lldpd_hide_all(struct lldpd *cfg) { struct lldpd_hardware *hardware; if (!cfg->g_config.c_smart) return; log_debug("smartfilter", "apply smart filter results on all ports"); TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { if (cfg->g_config.c_smart & SMART_INCOMING_FILTER) lldpd_hide_ports(cfg, hardware, SMART_INCOMING); if (cfg->g_config.c_smart & SMART_OUTGOING_FILTER) lldpd_hide_ports(cfg, hardware, SMART_OUTGOING); } } void lldpd_recv(struct lldpd *cfg, struct lldpd_hardware *hardware, int fd) { char *buffer = NULL; int n; log_debug("receive", "receive a frame on %s", hardware->h_ifname); if ((buffer = (char *)malloc(hardware->h_mtu)) == NULL) { log_warn("receive", "failed to alloc reception buffer"); return; } if ((n = hardware->h_ops->recv(cfg, hardware, fd, buffer, hardware->h_mtu)) == -1) { log_debug("receive", "discard frame received on %s", hardware->h_ifname); free(buffer); return; } if (hardware->h_lport.p_disable_rx) { log_debug("receive", "RX disabled, ignore the frame on %s", hardware->h_ifname); free(buffer); return; } if (cfg->g_config.c_paused) { log_debug("receive", "paused, ignore the frame on %s", hardware->h_ifname); free(buffer); return; } hardware->h_rx_cnt++; log_debug("receive", "decode received frame on %s", hardware->h_ifname); TRACE(LLDPD_FRAME_RECEIVED(hardware->h_ifname, buffer, (size_t)n)); lldpd_decode(cfg, buffer, n, hardware); lldpd_hide_all(cfg); /* Immediatly hide */ lldpd_count_neighbors(cfg); free(buffer); } static void lldpd_send_shutdown(struct lldpd_hardware *hardware) { struct lldpd *cfg = hardware->h_cfg; if (cfg->g_config.c_receiveonly || cfg->g_config.c_paused) return; if (hardware->h_lport.p_disable_tx) return; if ((hardware->h_flags & IFF_RUNNING) == 0) return; /* It's safe to call `lldp_send_shutdown()` because shutdown LLDPU will * only be emitted if LLDP was sent on that port. */ if (lldp_send_shutdown(hardware->h_cfg, hardware) != 0) log_warnx("send", "unable to send shutdown LLDPDU on %s", hardware->h_ifname); } void lldpd_send(struct lldpd_hardware *hardware) { struct lldpd *cfg = hardware->h_cfg; struct lldpd_port *port; int i, sent; if (cfg->g_config.c_receiveonly || cfg->g_config.c_paused) return; if (hardware->h_lport.p_disable_tx) return; if ((hardware->h_flags & IFF_RUNNING) == 0) return; log_debug("send", "send PDU on %s", hardware->h_ifname); sent = 0; for (i=0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; /* We send only if we have at least one remote system * speaking this protocol or if the protocol is forced */ if (cfg->g_protocols[i].enabled > 1) { cfg->g_protocols[i].send(cfg, hardware); sent++; continue; } TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { /* If this remote port is disabled, we don't * consider it */ if (port->p_hidden_out) continue; if (port->p_protocol == cfg->g_protocols[i].mode) { TRACE(LLDPD_FRAME_SEND(hardware->h_ifname, cfg->g_protocols[i].name)); log_debug("send", "send PDU on %s with protocol %s", hardware->h_ifname, cfg->g_protocols[i].name); cfg->g_protocols[i].send(cfg, hardware); sent++; break; } } } if (!sent) { /* Nothing was sent for this port, let's speak the first * available protocol. 
*/ for (i = 0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; TRACE(LLDPD_FRAME_SEND(hardware->h_ifname, cfg->g_protocols[i].name)); log_debug("send", "fallback to protocol %s for %s", cfg->g_protocols[i].name, hardware->h_ifname); cfg->g_protocols[i].send(cfg, hardware); break; } if (cfg->g_protocols[i].mode == 0) log_warnx("send", "no protocol enabled, dunno what to send"); } } #ifdef ENABLE_LLDPMED static void lldpd_med(struct lldpd_chassis *chassis) { static short int once = 0; if (!once) { chassis->c_med_hw = dmi_hw(); chassis->c_med_fw = dmi_fw(); chassis->c_med_sn = dmi_sn(); chassis->c_med_manuf = dmi_manuf(); chassis->c_med_model = dmi_model(); chassis->c_med_asset = dmi_asset(); once = 1; } } #endif static int lldpd_routing_enabled(struct lldpd *cfg) { int routing; if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_ROUTER) == 0) return 0; if ((routing = interfaces_routing_enabled(cfg)) == -1) { log_debug("localchassis", "unable to check if routing is enabled"); return 0; } return routing; } static void lldpd_update_localchassis(struct lldpd *cfg) { struct utsname un; char *hp; log_debug("localchassis", "update information for local chassis"); assert(LOCAL_CHASSIS(cfg) != NULL); /* Set system name and description */ if (uname(&un) < 0) fatal("localchassis", "failed to get system information"); if (cfg->g_config.c_hostname) { log_debug("localchassis", "use overridden system name `%s`", cfg->g_config.c_hostname); hp = cfg->g_config.c_hostname; } else { if ((hp = priv_gethostname()) == NULL) fatal("localchassis", "failed to get system name"); } free(LOCAL_CHASSIS(cfg)->c_name); free(LOCAL_CHASSIS(cfg)->c_descr); if ((LOCAL_CHASSIS(cfg)->c_name = strdup(hp)) == NULL) fatal("localchassis", NULL); if (cfg->g_config.c_description) { log_debug("localchassis", "use overridden description `%s`", cfg->g_config.c_description); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s", cfg->g_config.c_description) == -1) fatal("localchassis", "failed to set full system description"); } else { if (cfg->g_config.c_advertise_version) { log_debug("localchassis", "advertise system version"); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s %s %s %s %s", cfg->g_lsb_release?cfg->g_lsb_release:"", un.sysname, un.release, un.version, un.machine) == -1) fatal("localchassis", "failed to set full system description"); } else { log_debug("localchassis", "do not advertise system version"); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s", cfg->g_lsb_release?cfg->g_lsb_release:un.sysname) == -1) fatal("localchassis", "failed to set minimal system description"); } } if (cfg->g_config.c_platform == NULL) cfg->g_config.c_platform = strdup(un.sysname); /* Check routing */ if (lldpd_routing_enabled(cfg)) { log_debug("localchassis", "routing is enabled, enable router capability"); LOCAL_CHASSIS(cfg)->c_cap_enabled |= LLDP_CAP_ROUTER; } else LOCAL_CHASSIS(cfg)->c_cap_enabled &= ~LLDP_CAP_ROUTER; #ifdef ENABLE_LLDPMED if (LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_TELEPHONE) LOCAL_CHASSIS(cfg)->c_cap_enabled |= LLDP_CAP_TELEPHONE; lldpd_med(LOCAL_CHASSIS(cfg)); free(LOCAL_CHASSIS(cfg)->c_med_sw); if (cfg->g_config.c_advertise_version) LOCAL_CHASSIS(cfg)->c_med_sw = strdup(un.release); else LOCAL_CHASSIS(cfg)->c_med_sw = strdup("Unknown"); #endif if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_STATION) && (LOCAL_CHASSIS(cfg)->c_cap_enabled == 0)) LOCAL_CHASSIS(cfg)->c_cap_enabled = LLDP_CAP_STATION; /* Set chassis ID if needed. 
This is only done if chassis ID has not been set previously (with the MAC address of an interface for example) */ if (LOCAL_CHASSIS(cfg)->c_id == NULL) { log_debug("localchassis", "no chassis ID is currently set, use chassis name"); if (!(LOCAL_CHASSIS(cfg)->c_id = strdup(LOCAL_CHASSIS(cfg)->c_name))) fatal("localchassis", NULL); LOCAL_CHASSIS(cfg)->c_id_len = strlen(LOCAL_CHASSIS(cfg)->c_name); LOCAL_CHASSIS(cfg)->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LOCAL; } } void lldpd_update_localports(struct lldpd *cfg) { struct lldpd_hardware *hardware; log_debug("localchassis", "update information for local ports"); /* h_flags is set to 0 for each port. If the port is updated, h_flags * will be set to a non-zero value. This will allow us to clean up any * non up-to-date port */ TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) hardware->h_flags = 0; TRACE(LLDPD_INTERFACES_UPDATE()); interfaces_update(cfg); lldpd_cleanup(cfg); lldpd_reset_timer(cfg); } void lldpd_loop(struct lldpd *cfg) { /* Main loop. 1. Update local ports information 2. Update local chassis information */ log_debug("loop", "start new loop"); LOCAL_CHASSIS(cfg)->c_cap_enabled = 0; /* Information for local ports is triggered even when it is possible to * update them on some other event because we want to refresh them if we * missed something. */ log_debug("loop", "update information for local ports"); lldpd_update_localports(cfg); log_debug("loop", "update information for local chassis"); lldpd_update_localchassis(cfg); lldpd_count_neighbors(cfg); } static void lldpd_exit(struct lldpd *cfg) { struct lldpd_hardware *hardware, *hardware_next; log_debug("main", "exit lldpd"); TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) lldpd_send_shutdown(hardware); close(cfg->g_ctl); priv_ctl_cleanup(cfg->g_ctlname); log_debug("main", "cleanup hardware information"); for (hardware = TAILQ_FIRST(&cfg->g_hardware); hardware != NULL; hardware = hardware_next) { hardware_next = TAILQ_NEXT(hardware, h_entries); log_debug("main", "cleanup interface %s", hardware->h_ifname); lldpd_remote_cleanup(hardware, NULL, 1); lldpd_hardware_cleanup(cfg, hardware); } interfaces_cleanup(cfg); free(cfg->g_config.c_platform); } /** * Run lldpcli to configure lldpd. * * @return PID of running lldpcli or -1 if error. 
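* The child execs "lldpcli resume" against the daemon's control socket and configuration files; lldpd starts paused when lldpcli is used, and the resume command unpauses it once the configuration has been applied.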
*/ static pid_t lldpd_configure(int debug, const char *path, const char *ctlname) { pid_t lldpcli = vfork(); int devnull; char sdebug[debug + 3]; memset(sdebug, 'd', debug + 3); sdebug[debug + 2] = '\0'; sdebug[0] = '-'; sdebug[1] = 's'; log_debug("main", "invoke %s %s", path, sdebug); switch (lldpcli) { case -1: log_warn("main", "unable to fork"); return -1; case 0: /* Child, exec lldpcli */ if ((devnull = open("/dev/null", O_RDWR, 0)) != -1) { dup2(devnull, STDIN_FILENO); dup2(devnull, STDOUT_FILENO); if (devnull > 2) close(devnull); execl(path, "lldpcli", sdebug, "-u", ctlname, "-c", SYSCONFDIR "/lldpd.conf", "-c", SYSCONFDIR "/lldpd.d", "resume", (char *)NULL); log_warn("main", "unable to execute %s", path); log_warnx("main", "configuration is incomplete, lldpd needs to be unpaused"); } _exit(127); break; default: /* Father, don't do anything stupid */ return lldpcli; } /* Should not be here */ return -1; } struct intint { int a; int b; }; static const struct intint filters[] = { { 0, 0 }, { 1, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 2, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO }, { 3, SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 4, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER }, { 5, SMART_INCOMING_FILTER }, { 6, SMART_OUTGOING_FILTER }, { 7, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 8, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH }, { 9, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 10, SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 11, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH }, { 12, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 13, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER }, { 14, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 15, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER }, { 16, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 17, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER }, { 18, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 19, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { -1, 0 } }; #ifndef HOST_OS_OSX /** * Tell if we have been started by upstart. */ static int lldpd_started_by_upstart() { #ifdef HOST_OS_LINUX const char *upstartjob = getenv("UPSTART_JOB"); if (!(upstartjob && !strcmp(upstartjob, "lldpd"))) return 0; log_debug("main", "running with upstart, don't fork but stop"); raise(SIGSTOP); unsetenv("UPSTART_JOB"); return 1; #else return 0; #endif } /** * Tell if we have been started by systemd. 
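* Detection relies on the NOTIFY_SOCKET environment variable: when it is set, lldpd does not fork and reports READY=1 on that socket instead.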
*/ static int lldpd_started_by_systemd() { #ifdef HOST_OS_LINUX int fd = -1; const char *notifysocket = getenv("NOTIFY_SOCKET"); if (!notifysocket || !strchr("@/", notifysocket[0]) || strlen(notifysocket) < 2) return 0; log_debug("main", "running with systemd, don't fork but signal ready"); if ((fd = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) { log_warn("main", "unable to open systemd notification socket %s", notifysocket); return 0; } struct sockaddr_un su = { .sun_family = AF_UNIX }; strlcpy(su.sun_path, notifysocket, sizeof(su.sun_path)); if (notifysocket[0] == '@') su.sun_path[0] = 0; struct iovec iov = { .iov_base = "READY=1", .iov_len = strlen("READY=1") }; struct msghdr hdr = { .msg_name = &su, .msg_namelen = offsetof(struct sockaddr_un, sun_path) + strlen(notifysocket), .msg_iov = &iov, .msg_iovlen = 1 }; unsetenv("NOTIFY_SOCKET"); if (sendmsg(fd, &hdr, MSG_NOSIGNAL) < 0) { log_warn("main", "unable to send notification to systemd"); close(fd); return 0; } close(fd); return 1; #else return 0; #endif } #endif #ifdef HOST_OS_LINUX static void version_convert(const char *sversion, unsigned iversion[], size_t n) { const char *p = sversion; char *end; for (size_t i = 0; i < n; i++) { iversion[i] = strtol(p, &end, 10); if (*end != '.') break; p = end + 1; } } static void version_check(void) { struct utsname uts; if (uname(&uts) == -1) return; unsigned version_min[3] = {}; unsigned version_cur[3] = {}; version_convert(uts.release, version_cur, 3); version_convert(MIN_LINUX_KERNEL_VERSION, version_min, 3); if (version_min[0] > version_cur[0] || (version_min[0] == version_cur[0] && version_min[1] > version_cur[1]) || (version_min[1] == version_cur[1] && version_min[2] > version_cur[2])) { log_warnx("lldpd", "minimal kernel version required is %s, got %s", MIN_LINUX_KERNEL_VERSION, uts.release); log_warnx("lldpd", "lldpd may be unable to detect bonds and bridges correctly"); #ifndef ENABLE_OLDIES log_warnx("lldpd", "consider recompiling with --enable-oldies option"); #endif } } #else static void version_check(void) {} #endif int lldpd_main(int argc, char *argv[], char *envp[]) { struct lldpd *cfg; struct lldpd_chassis *lchassis; int ch, debug = 0; #ifdef USE_SNMP int snmp = 0; const char *agentx = NULL; /* AgentX socket */ #endif const char *ctlname = NULL; char *mgmtp = NULL; char *cidp = NULL; char *interfaces = NULL; /* We do not want more options here. Please add them in lldpcli instead * unless there is a very good reason. Most command-line options will * get deprecated at some point. 
*/ char *popt, opts[] = "H:vhkrdD:xX:m:u:4:6:I:C:p:M:P:S:iL:@ "; int i, found, advertise_version = 1; #ifdef ENABLE_LLDPMED int lldpmed = 0, noinventory = 0; int enable_fast_start = 1; #endif char *descr_override = NULL; char *platform_override = NULL; char *lsb_release = NULL; const char *lldpcli = LLDPCLI_PATH; int smart = 15; int receiveonly = 0; int ctl; #ifdef ENABLE_PRIVSEP /* Non privileged user */ struct passwd *user; struct group *group; uid_t uid; gid_t gid; #endif saved_argv = argv; #if HAVE_SETPROCTITLE_INIT setproctitle_init(argc, argv, envp); #endif /* * Get and parse command line options */ if ((popt = strchr(opts, '@')) != NULL) { for (i=0; protos[i].mode != 0 && *popt != '\0'; i++) *(popt++) = protos[i].arg; *popt = '\0'; } while ((ch = getopt(argc, argv, opts)) != -1) { switch (ch) { case 'h': usage(); break; case 'v': fprintf(stdout, "%s\n", PACKAGE_VERSION); exit(0); break; case 'd': debug++; break; case 'D': log_accept(optarg); break; case 'r': receiveonly = 1; break; case 'm': if (mgmtp) { fprintf(stderr, "-m can only be used once\n"); usage(); } mgmtp = strdup(optarg); break; case 'u': if (ctlname) { fprintf(stderr, "-u can only be used once\n"); usage(); } ctlname = optarg; break; case 'I': if (interfaces) { fprintf(stderr, "-I can only be used once\n"); usage(); } interfaces = strdup(optarg); break; case 'C': if (cidp) { fprintf(stderr, "-C can only be used once\n"); usage(); } cidp = strdup(optarg); break; case 'L': if (strlen(optarg)) lldpcli = optarg; else lldpcli = NULL; break; case 'k': advertise_version = 0; break; #ifdef ENABLE_LLDPMED case 'M': lldpmed = atoi(optarg); if ((lldpmed < 1) || (lldpmed > 4)) { fprintf(stderr, "-M requires an argument between 1 and 4\n"); usage(); } break; case 'i': noinventory = 1; break; #else case 'M': case 'i': fprintf(stderr, "LLDP-MED support is not built-in\n"); usage(); break; #endif #ifdef USE_SNMP case 'x': snmp = 1; break; case 'X': if (agentx) { fprintf(stderr, "-X can only be used once\n"); usage(); } snmp = 1; agentx = optarg; break; #else case 'x': case 'X': fprintf(stderr, "SNMP support is not built-in\n"); usage(); #endif break; case 'S': if (descr_override) { fprintf(stderr, "-S can only be used once\n"); usage(); } descr_override = strdup(optarg); break; case 'P': if (platform_override) { fprintf(stderr, "-P can only be used once\n"); usage(); } platform_override = strdup(optarg); break; case 'H': smart = atoi(optarg); break; default: found = 0; for (i=0; protos[i].mode != 0; i++) { if (ch == protos[i].arg) { found = 1; protos[i].enabled++; } } if (!found) usage(); } } if (ctlname == NULL) ctlname = LLDPD_CTL_SOCKET; /* Set correct smart mode */ for (i=0; (filters[i].a != -1) && (filters[i].a != smart); i++); if (filters[i].a == -1) { fprintf(stderr, "Incorrect mode for -H\n"); usage(); } smart = filters[i].b; log_init(debug, __progname); tzset(); /* Get timezone info before chroot */ log_debug("main", "lldpd " PACKAGE_VERSION " starting..."); version_check(); /* Grab uid and gid to use for priv sep */ #ifdef ENABLE_PRIVSEP if ((user = getpwnam(PRIVSEP_USER)) == NULL) fatal("main", "no " PRIVSEP_USER " user for privilege separation"); uid = user->pw_uid; if ((group = getgrnam(PRIVSEP_GROUP)) == NULL) fatal("main", "no " PRIVSEP_GROUP " group for privilege separation"); gid = group->gr_gid; #endif /* Create and setup socket */ int retry = 1; log_debug("main", "creating control socket"); while ((ctl = ctl_create(ctlname)) == -1) { if (retry-- && errno == EADDRINUSE) { /* Check if a daemon is really listening */ 
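/* A successful ctl_connect() means a live daemon still owns the socket; ECONNREFUSED means the socket file is stale and can be cleaned up so that creation is retried. */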
int tfd; log_info("main", "unable to create control socket because it already exists"); log_info("main", "check if another instance is running"); if ((tfd = ctl_connect(ctlname)) != -1) { /* Another instance is running */ close(tfd); log_warnx("main", "another instance is running, please stop it"); fatalx("main", "giving up"); } else if (errno == ECONNREFUSED) { /* Nobody is listening */ log_info("main", "old control socket is present, clean it"); ctl_cleanup(ctlname); continue; } log_warn("main", "cannot determine if another daemon is already running"); fatalx("main", "giving up"); } log_warn("main", "unable to create control socket"); fatalx("main", "giving up"); } #ifdef ENABLE_PRIVSEP if (chown(ctlname, uid, gid) == -1) log_warn("main", "unable to chown control socket"); if (chmod(ctlname, S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | S_IWGRP | S_IXGRP) == -1) log_warn("main", "unable to chmod control socket"); #endif /* Disable SIGPIPE */ signal(SIGPIPE, SIG_IGN); /* Disable SIGHUP, until handlers are installed */ signal(SIGHUP, SIG_IGN); /* Configuration with lldpcli */ if (lldpcli) { log_debug("main", "invoking lldpcli for configuration"); if (lldpd_configure(debug, lldpcli, ctlname) == -1) fatal("main", "unable to spawn lldpcli"); } /* Daemonization, unless started by upstart, systemd or launchd or debug */ #ifndef HOST_OS_OSX if (!lldpd_started_by_upstart() && !lldpd_started_by_systemd() && !debug) { int pid; char *spid; log_debug("main", "daemonize"); if (daemon(0, 0) != 0) fatal("main", "failed to detach daemon"); if ((pid = open(LLDPD_PID_FILE, O_TRUNC | O_CREAT | O_WRONLY, 0666)) == -1) fatal("main", "unable to open pid file " LLDPD_PID_FILE); if (asprintf(&spid, "%d\n", getpid()) == -1) fatal("main", "unable to create pid file " LLDPD_PID_FILE); if (write(pid, spid, strlen(spid)) == -1) fatal("main", "unable to write pid file " LLDPD_PID_FILE); free(spid); close(pid); } #endif /* Try to read system information from /etc/os-release if possible. Fall back to lsb_release for compatibility. 
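The resulting string feeds the default chassis description unless -S supplies an override.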
*/ log_debug("main", "get OS/LSB release information"); lsb_release = lldpd_get_os_release(); if (!lsb_release) { lsb_release = lldpd_get_lsb_release(); } log_debug("main", "initialize privilege separation"); #ifdef ENABLE_PRIVSEP priv_init(PRIVSEP_CHROOT, ctl, uid, gid); #else priv_init(PRIVSEP_CHROOT, ctl, 0, 0); #endif /* Initialization of global configuration */ if ((cfg = (struct lldpd *) calloc(1, sizeof(struct lldpd))) == NULL) fatal("main", NULL); lldpd_alloc_default_local_port(cfg); cfg->g_ctlname = ctlname; cfg->g_ctl = ctl; cfg->g_config.c_mgmt_pattern = mgmtp; cfg->g_config.c_cid_pattern = cidp; cfg->g_config.c_iface_pattern = interfaces; cfg->g_config.c_smart = smart; if (lldpcli) cfg->g_config.c_paused = 1; cfg->g_config.c_receiveonly = receiveonly; cfg->g_config.c_tx_interval = LLDPD_TX_INTERVAL; cfg->g_config.c_tx_hold = LLDPD_TX_HOLD; cfg->g_config.c_max_neighbors = LLDPD_MAX_NEIGHBORS; #ifdef ENABLE_LLDPMED cfg->g_config.c_enable_fast_start = enable_fast_start; cfg->g_config.c_tx_fast_init = LLDPD_FAST_INIT; cfg->g_config.c_tx_fast_interval = LLDPD_FAST_TX_INTERVAL; #endif #ifdef USE_SNMP cfg->g_snmp = snmp; cfg->g_snmp_agentx = agentx; #endif /* USE_SNMP */ cfg->g_config.c_bond_slave_src_mac_type = \ LLDP_BOND_SLAVE_SRC_MAC_TYPE_LOCALLY_ADMINISTERED; /* Get ioctl socket */ log_debug("main", "get an ioctl socket"); if ((cfg->g_sock = socket(AF_INET, SOCK_DGRAM, 0)) == -1) fatal("main", "failed to get ioctl socket"); /* Description */ if (!(cfg->g_config.c_advertise_version = advertise_version) && lsb_release && lsb_release[strlen(lsb_release) - 1] == '\n') lsb_release[strlen(lsb_release) - 1] = '\0'; cfg->g_lsb_release = lsb_release; if (descr_override) cfg->g_config.c_description = descr_override; if (platform_override) cfg->g_config.c_platform = platform_override; /* Set system capabilities */ log_debug("main", "set system capabilities"); if ((lchassis = (struct lldpd_chassis*) calloc(1, sizeof(struct lldpd_chassis))) == NULL) fatal("localchassis", NULL); cfg->g_config.c_cap_advertise = 1; lchassis->c_cap_available = LLDP_CAP_BRIDGE | LLDP_CAP_WLAN | LLDP_CAP_ROUTER | LLDP_CAP_STATION; cfg->g_config.c_mgmt_advertise = 1; TAILQ_INIT(&lchassis->c_mgmt); #ifdef ENABLE_LLDPMED if (lldpmed > 0) { if (lldpmed == LLDP_MED_CLASS_III) lchassis->c_cap_available |= LLDP_CAP_TELEPHONE; lchassis->c_med_type = lldpmed; lchassis->c_med_cap_available = LLDP_MED_CAP_CAP | LLDP_MED_CAP_IV | LLDP_MED_CAP_LOCATION | LLDP_MED_CAP_POLICY | LLDP_MED_CAP_MDI_PSE | LLDP_MED_CAP_MDI_PD; cfg->g_config.c_noinventory = noinventory; } else cfg->g_config.c_noinventory = 1; #endif /* Set TTL */ lchassis->c_ttl = cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold; log_debug("main", "initialize protocols"); cfg->g_protocols = protos; for (i=0; protos[i].mode != 0; i++) { /* With -ll, disable LLDP */ if (protos[i].mode == LLDPD_MODE_LLDP) protos[i].enabled %= 3; /* With -ccc force CDPV2, enable CDPV1 */ if (protos[i].mode == LLDPD_MODE_CDPV1 && protos[i].enabled == 3) { protos[i].enabled = 1; } /* With -cc force CDPV1, enable CDPV2 */ if (protos[i].mode == LLDPD_MODE_CDPV2 && protos[i].enabled == 2) { protos[i].enabled = 1; } /* With -cccc disable CDPV1, enable CDPV2 */ if (protos[i].mode == LLDPD_MODE_CDPV1 && protos[i].enabled >= 4) { protos[i].enabled = 0; } /* With -cccc disable CDPV1, enable CDPV2; -ccccc will force CDPv2 */ if (protos[i].mode == LLDPD_MODE_CDPV2 && protos[i].enabled == 4) { protos[i].enabled = 1; } if (protos[i].enabled > 1) log_info("main", "protocol %s enabled and forced", 
protos[i].name); else if (protos[i].enabled) log_info("main", "protocol %s enabled", protos[i].name); else log_info("main", "protocol %s disabled", protos[i].name); } TAILQ_INIT(&cfg->g_hardware); TAILQ_INIT(&cfg->g_chassis); TAILQ_INSERT_TAIL(&cfg->g_chassis, lchassis, c_entries); lchassis->c_refcount++; /* We should always keep a reference to local chassis */ /* Main loop */ log_debug("main", "start main loop"); levent_loop(cfg); lldpd_exit(cfg); free(cfg); return (0); }
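/*
 * A minimal, self-contained sketch of the dotted-version comparison done by
 * version_check() above. Note that the in-tree condition compares patch
 * levels whenever the minors tie, even if the majors differ, so a pair like
 * minimum 4.2.5 vs current 5.2.1 triggers the warning although 5.2.1 is
 * newer; the lexicographic loop below avoids that. parse_version() and
 * version_cmp() are illustrative names, not the in-tree API.
 */
#include <stdio.h>
#include <stdlib.h>

static void
parse_version(const char *s, unsigned v[], size_t n)
{
	char *end;

	for (size_t i = 0; i < n; i++) {
		v[i] = strtol(s, &end, 10);
		if (*end != '.')
			break;
		s = end + 1;
	}
}

/* Return <0, 0 or >0 like strcmp, most significant component first. */
static int
version_cmp(const unsigned a[], const unsigned b[], size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (a[i] != b[i])
			return a[i] < b[i] ? -1 : 1;
	}
	return 0;
}

int
main(void)
{
	unsigned min[3] = {0}, cur[3] = {0};

	parse_version("4.2.5", min, 3);
	parse_version("5.2.1", cur, 3);
	/* Prints "current at least minimum": majors decide before patch. */
	printf("current %s minimum\n",
	    version_cmp(cur, min, 3) < 0 ? "older than" : "at least");
	return 0;
}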
./CrossVul/dataset_final_sorted/CWE-617/c/bad_1771_0
crossvul-cpp_data_good_3362_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/module.h" #include "magick/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(Image *, DDSInfo *, ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 
29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 
21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } 
}, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } 
}, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, 
{ { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *,DDSVector4 *,unsigned char *,size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(Image *,DDSInfo *,ExceptionInfo *), ReadDXT3(Image *,DDSInfo *,ExceptionInfo *), ReadDXT5(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *), WriteMipmaps(Image *,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteIndices(Image *,const DDSVector3,const DDSVector3, unsigned char *), WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *), WriteUncompressed(Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); value->w = MagickMin(1.0f,MagickMax(0.0f,value->w)); } static inline void VectorClamp3(DDSVector3 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); } static inline void VectorCopy43(const DDSVector4 source, DDSVector3 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; } static inline void VectorCopy44(const DDSVector4 source, DDSVector4 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; destination->w = source.w; } static inline void VectorNegativeMultiplySubtract(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = c.x - (a.x * b.x); destination->y = c.y - (a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline void VectorMultiply3(const DDSVector3 left, 
const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; } static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
floor(value->z) : ceil(value->z); } static void CalculateColors(unsigned short c0, unsigned short c1, DDSColors *c, MagickBooleanType ignoreAlpha) { c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0; c->r[0] = (unsigned char) C565_red(c0); c->g[0] = (unsigned char) C565_green(c0); c->b[0] = (unsigned char) C565_blue(c0); c->r[1] = (unsigned char) C565_red(c1); c->g[1] = (unsigned char) C565_green(c1); c->b[1] = (unsigned char) C565_blue(c1); if (ignoreAlpha != MagickFalse || c0 > c1) { c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3); c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3); c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3); c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3); c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3); c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3); } else { c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2); c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2); c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2); c->r[3] = c->g[3] = c->b[3] = 0; c->a[3] = 255; } } static size_t CompressAlpha(const size_t min, const size_t max, const size_t steps, const ssize_t *alphas, unsigned char* indices) { unsigned char codes[8]; register ssize_t i; size_t error, index, j, least, value; codes[0] = (unsigned char) min; codes[1] = (unsigned char) max; codes[6] = 0; codes[7] = 255; for (i=1; i < (ssize_t) steps; i++) codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps); error = 0; for (i=0; i<16; i++) { if (alphas[i] == -1) { indices[i] = 0; continue; } value = alphas[i]; least = SIZE_MAX; index = 0; for (j=0; j<8; j++) { size_t dist; dist = value - (size_t)codes[j]; dist *= dist; if (dist < least) { least = dist; index = j; } } indices[i] = (unsigned char)index; error += least; } return error; } static void CompressClusterFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); 
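          /*
            Added explanatory note (not in the original source): part0..part3
            hold the weighted sums of the points currently assigned the
            interpolation weights 1, 2/3, 1/3 and 0.  The statements below
            solve the weighted least-squares system for the endpoints a
            (start) and b (end),

              [ alpha2_sum    alphabeta_sum ] [a] = [alphax_sum]
              [ alphabeta_sum beta2_sum     ] [b]   [betax_sum ]

            by Cramer's rule, with 'factor' the reciprocal determinant; the
            candidate endpoints are then snapped to the RGB565 grid and the
            resulting error e2 is accumulated for the best-fit search.
          */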
VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for (i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; register ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); 
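  /*
    Added note: the clamp/scale/truncate sequence around this point snaps
    both endpoints onto the RGB565 lattice (31/63/31 steps per channel);
    e.g. a 5-bit channel value of 0.5 becomes floor(31*0.5 + 0.5)/31 = 16/31.
  */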
VectorTruncate3(end); VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const DDSSingleColourLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { register ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z)); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, 
DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType status, cubemap = MagickFalse, volume = MagickFalse, matte; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; size_t n, num_images; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. 
*/
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  (void) SeekBlob(image, 128, SEEK_SET);
  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          matte = MagickTrue;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          matte = MagickFalse;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          matte = MagickFalse;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          matte = MagickFalse;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          matte = MagickTrue;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          matte = MagickTrue;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  if (num_images < 1)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        /* Start a new image */
        AcquireNextImage(image_info,image);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->matte = matte;
    image->compression = compression;
    image->columns = dds_info.width;
    image->rows = dds_info.height;
    image->storage_class = DirectClass;
    image->endian = LSBEndian;
    image->depth = 8;
    if (image_info->ping != MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows);
    if (status == MagickFalse)
      {
        InheritException(exception,&image->exception);
        return(DestroyImageList(image));
      }
    if ((decoder)(image, &dds_info, exception) != MagickTrue)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);
  /* Check header field */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;
  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);
  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if
((dds_info->flags & required) != required) return MagickFalse; dds_info->height = ReadBlobLSBLong(image); dds_info->width = ReadBlobLSBLong(image); dds_info->pitchOrLinearSize = ReadBlobLSBLong(image); dds_info->depth = ReadBlobLSBLong(image); dds_info->mipmapcount = ReadBlobLSBLong(image); (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */ /* Read pixel format structure */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 32) return MagickFalse; dds_info->pixelformat.flags = ReadBlobLSBLong(image); dds_info->pixelformat.fourcc = ReadBlobLSBLong(image); dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image); dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image); dds_info->ddscaps1 = ReadBlobLSBLong(image); dds_info->ddscaps2 = ReadBlobLSBLong(image); (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */ return MagickTrue; } static MagickBooleanType ReadDXT1(Image *image,DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; PixelPacket *q; register ssize_t i, x; size_t bits; ssize_t j, y; unsigned char code; unsigned short c0, c1; for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if (((x + i) < (ssize_t) image->columns) && ((y + j) < (ssize_t) image->rows)) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code])); if ((colors.a[code] != 0) && (image->matte == MagickFalse)) image->matte=MagickTrue; /* Correct matte */ q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; PixelPacket *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); 
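              /* Added note: 'code' is this texel's 2-bit palette index;
                 CalculateColors() was invoked with ignoreAlpha=MagickTrue
                 because DXT3 always decodes in four-color mode and carries
                 alpha explicitly in a0/a1 below. */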
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; MagickSizeType alpha_bits; PixelPacket *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value */ alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t x, y; unsigned short color; if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image))); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(q,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else 
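        /* Added note: 24- and 32-bit uncompressed DDS stores components in
           BGR order; the 32-bit variant has a fourth (padding or alpha)
           byte which this no-alpha path reads and discards. */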
{ SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); if (dds_info->pixelformat.rgb_bitcount == 32) (void) ReadBlobByte(image); } SetPixelAlpha(q,QuantumRange); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleMatteType); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else if (alphaBits == 2) { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (color >> 8))); SetPixelGray(q,ScaleCharToQuantum((unsigned char)color)); } else { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255))); } } else { SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
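%
%  A minimal usage sketch (added illustration; "input.dds" is a hypothetical
%  file name, and MagickCore is assumed to be initialized):
%
%      ExceptionInfo *exception = AcquireExceptionInfo();
%      ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
%      (void) CopyMagickString(image_info->filename,"input.dds",
%        MaxTextExtent);
%      Image *image = ReadImage(image_info,exception);
%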
% % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = SetMagickInfo("DDS"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT1"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT5"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { register ssize_t i; MagickOffsetType offset; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w = DIV2(w); h = DIV2(h); } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { MagickOffsetType offset; register ssize_t i; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) w * h * pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w = DIV2(w); h = DIV2(h); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
UnregisterDDSImage() removes format registrations made by the
%  DDS module from the list of supported formats.
%
%  The format of the UnregisterDDSImage method is:
%
%      UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
  (void) UnregisterMagickInfo("DDS");
  (void) UnregisterMagickInfo("DXT1");
  (void) UnregisterMagickInfo("DXT5");
}

static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  register ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  /* Use the 7-step alpha mode when it quantizes with less error */
  if (err7 < err5)
  {
    for (i=0; i < 16; i++)
    {
      unsigned char
        index;

      index = indices7[i];
      if( index == 0 )
        indices5[i] = 1;
      else if (index == 1)
        indices5[i] = 0;
      else
        indices5[i] = 9 - index;
    }
    min5 = max7;
    max5 = min7;
  }
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  /* Pack sixteen 3-bit indices into two 24-bit little-endian groups */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}

static void WriteCompressed(Image *image, const size_t count,
  DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);
  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);
  if (clusterFit == MagickFalse || count == 0)
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
  WriteIndices(image,start,end,indices);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e D D S I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteDDSImage() writes a DirectDraw Surface image file in the DXT1, DXT5,
%  or uncompressed RGB(A) format.
%
%  The format of the WriteDDSImage method is:
%
%      MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
%        Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
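%
%  A minimal usage sketch (added illustration; "output.dds" is a hypothetical
%  file name): the "dds:compression" option parsed below selects DXT1
%  explicitly (the default is DXT5, or DXT1 when the image has no alpha):
%
%      ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
%      (void) SetImageOption(image_info,"dds:compression","dxt1");
%      (void) CopyMagickString(image->filename,"output.dds",MaxTextExtent);
%      (void) WriteImage(image_info,image);
%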
% */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (!image->matte) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } maxMipmaps=SIZE_MAX; mipmaps=0; if ((image->columns & (image->columns - 1)) == 0 && (image->rows & (image->rows - 1)) == 0) { option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, &image->exception); if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps, clusterFit,weightByAlpha,&image->exception) == MagickFalse) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MaxTextExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->matte) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ 
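      /* Added note: the pitch is bytes per scanline of the base image --
         4 bytes/pixel (BGRA) with an alpha channel, 3 (BGR) without,
         matching the layout emitted by WriteUncompressed(). */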
if (image->matte != MagickFalse) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) ResetMagickMemory(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) // bitcount / masks (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->matte != MagickFalse) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) // ddscaps2 + reserved region (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t i, y, bx, by; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const PixelPacket *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f; point.w = weightByAlpha ? 
(float)(alpha + 1) / 256.0f : 1.0f;
          p++;

          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }

          if (match != MagickFalse)
            continue;

          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }

      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);

      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);

      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}

static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression, const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  if (pixelFormat == DDPF_FOURCC)
    WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
  else
    WriteUncompressed(image,exception);
}

static inline size_t ClampToLimit(const float value, const size_t limit)
{
  /* clamp to [0,limit]; a signed intermediate keeps negative inputs from
     wrapping to a huge value when converted to size_t */
  ssize_t result = (ssize_t) (value + 0.5f);

  if (result < 0)
    return(0);
  if (result > (ssize_t) limit)
    return(limit);
  return((size_t) result);
}

static inline size_t ColorTo565(const DDSVector3 point)
{
  size_t r = ClampToLimit(31.0f*point.x,31);
  size_t g = ClampToLimit(63.0f*point.y,63);
  size_t b = ClampToLimit(31.0f*point.z,31);

  return (r << 11) | (g << 5) | b;
}

static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char* indices)
{
  register ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  a = ColorTo565(start);
  b = ColorTo565(end);

  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }

  if( a < b )
    Swap(a,b);

  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));

  for (i=0; i<4; i++)
  {
    ind = remapped + 4*i;
    (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
      (ind[3] << 6));
  }
}

static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  Image*
    resize_image;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  columns = image->columns;
  rows = image->rows;

  for (i=0; i< (ssize_t) mipmaps; i++)
  {
    resize_image = ResizeImage(image,DIV2(columns),DIV2(rows),TriangleFilter,
      1.0,exception);

    if (resize_image == (Image *) NULL)
      return(MagickFalse);

    DestroyBlob(resize_image);
    resize_image->blob=ReferenceBlob(image->blob);

    /* argument order matches WriteImageData(image,pixelFormat,compression,
       clusterFit,weightByAlpha,exception) */
    WriteImageData(resize_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);

    resize_image=DestroyImage(resize_image);

    columns = DIV2(columns);
    rows = DIV2(rows);
  }

  return(MagickTrue);
}

static void WriteSingleColorFit(Image *image, const DDSVector4* points,
  const ssize_t* map)
{
  DDSVector3
    start,
    end;

  register ssize_t
    i;

  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];

  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);

  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);

  for (i=0; i< 16; i++)
indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p))); if (image->matte) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p))); p++; } } }
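/*
 * A minimal, self-contained sketch (not part of the coder; the names
 * pack565 and dxt_linear_size are illustrative only) of two pieces of
 * arithmetic the writer above relies on: packing an RGB endpoint into
 * 5:6:5 form as ColorTo565() does, and the linear-size computation that
 * WriteDDSInfo() stores in the header -- one 4x4 block per cell, 8 bytes
 * for DXT1 and 16 for DXT5, with both dimensions rounded up.
 */
#include <stdio.h>

static unsigned pack565(unsigned r8, unsigned g8, unsigned b8)
{
  /* scale 8-bit channels down to 5/6/5 bits with rounding, pack high-to-low */
  unsigned r = (r8 * 31 + 127) / 255;
  unsigned g = (g8 * 63 + 127) / 255;
  unsigned b = (b8 * 31 + 127) / 255;
  return (r << 11) | (g << 5) | b;
}

static unsigned long dxt_linear_size(unsigned long w, unsigned long h,
  unsigned block_bytes) /* 8 for DXT1, 16 for DXT5 */
{
  /* each 4x4 cell becomes one fixed-size block; partial cells count whole */
  return ((w + 3) / 4) * ((h + 3) / 4) * (unsigned long) block_bytes;
}

int main(void)
{
  printf("white -> 0x%04x\n", pack565(255, 255, 255));         /* 0xffff */
  printf("70x50 DXT1 -> %lu bytes\n", dxt_linear_size(70, 50, 8));
  return 0;
}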
/* ./CrossVul/dataset_final_sorted/CWE-617/c/good_3362_0 */
/* crossvul-cpp_data_bad_2602_0 */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
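 * (Concretely: a 32-bit guest issues MCRR with the SGI value split across
 * two GPRs; kvm_handle_cp_64() further down packs Rt/Rt2 into a single
 * 64-bit regval before this handler runs, so one code path serves both.)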
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
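 *
 * Worked example (illustrative values): with a debug register holding
 * 0x1122334455667788, a 32-bit guest write of 0xdeadbeef stores
 * 0x11223344deadbeef (upper half preserved), while a 32-bit read of the
 * original value returns just 0x55667788.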
*/ static void reg_to_dbg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, u64 *dbg_reg) { u64 val = p->regval; if (p->is_32bit) { val &= 0xffffffffUL; val |= ((*dbg_reg >> 32) << 32); } *dbg_reg = val; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } static void dbg_to_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, u64 *dbg_reg) { p->regval = *dbg_reg; if (p->is_32bit) p->regval &= 0xffffffffUL; } static bool trap_bvr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; if (p->is_write) reg_to_dbg(vcpu, p, dbg_reg); else dbg_to_reg(vcpu, p, dbg_reg); trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); return true; } static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static void reset_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val; } static bool trap_bcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; if (p->is_write) reg_to_dbg(vcpu, p, dbg_reg); else dbg_to_reg(vcpu, p, dbg_reg); trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); return true; } static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static void reset_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val; } static bool trap_wvr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; if (p->is_write) reg_to_dbg(vcpu, p, dbg_reg); else dbg_to_reg(vcpu, p, dbg_reg); trace_trap_reg(__func__, rd->reg, p->is_write, vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]); return true; } static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static void reset_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; } static bool trap_wcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = 
&vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; if (p->is_write) reg_to_dbg(vcpu, p, dbg_reg); else dbg_to_reg(vcpu, p, dbg_reg); trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); return true; } static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static void reset_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val; } static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1); } static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 mpidr; /* * Map the vcpu_id into the first three affinity level fields of * the MPIDR. We limit the number of VCPUs in level 0 due to a * limitation to 16 CPUs in that level in the ICC_SGIxR registers * of the GICv3 to be able to address each CPU directly when * sending IPIs. */ mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr; } static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 pmcr, val; pmcr = read_sysreg(pmcr_el0); /* * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN * except PMCR.E resetting to zero. 
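 *
 * (The 0xdecafbad constant below is merely an arbitrary bit pattern
 * standing in for that UNKNOWN value: writable bits are taken from it,
 * read-only bits keep what the hardware reports, and PMCR.E is then
 * cleared explicitly.)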
*/ val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); vcpu_sys_reg(vcpu, PMCR_EL0) = val; } static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu) { u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu)); } static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu) { u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN)) || vcpu_mode_priv(vcpu)); } static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu) { u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN)) || vcpu_mode_priv(vcpu)); } static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu) { u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN)) || vcpu_mode_priv(vcpu)); } static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 val; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_el0_disabled(vcpu)) return false; if (p->is_write) { /* Only update writeable bits of PMCR */ val = vcpu_sys_reg(vcpu, PMCR_EL0); val &= ~ARMV8_PMU_PMCR_MASK; val |= p->regval & ARMV8_PMU_PMCR_MASK; vcpu_sys_reg(vcpu, PMCR_EL0) = val; kvm_pmu_handle_pmcr(vcpu, val); } else { /* PMCR.P & PMCR.C are RAZ */ val = vcpu_sys_reg(vcpu, PMCR_EL0) & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); p->regval = val; } return true; } static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_event_counter_el0_disabled(vcpu)) return false; if (p->is_write) vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; else /* return PMSELR.SEL field */ p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; return true; } static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 pmceid; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); BUG_ON(p->is_write); if (pmu_access_el0_disabled(vcpu)) return false; if (!(p->Op2 & 1)) pmceid = read_sysreg(pmceid0_el0); else pmceid = read_sysreg(pmceid1_el0); p->regval = pmceid; return true; } static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) { u64 pmcr, val; pmcr = vcpu_sys_reg(vcpu, PMCR_EL0); val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) return false; return true; } static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 idx; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (r->CRn == 9 && r->CRm == 13) { if (r->Op2 == 2) { /* PMXEVCNTR_EL0 */ if (pmu_access_event_counter_el0_disabled(vcpu)) return false; idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; } else if (r->Op2 == 0) { /* PMCCNTR_EL0 */ if (pmu_access_cycle_counter_el0_disabled(vcpu)) return false; idx = ARMV8_PMU_CYCLE_IDX; } else { BUG(); } } else if (r->CRn == 14 && (r->CRm & 12) == 8) { /* PMEVCNTRn_EL0 */ if (pmu_access_event_counter_el0_disabled(vcpu)) return false; idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); } else { BUG(); } if (!pmu_counter_idx_valid(vcpu, idx)) return false; if (p->is_write) { if (pmu_access_el0_disabled(vcpu)) return false; kvm_pmu_set_counter_value(vcpu, idx, p->regval); } else { p->regval = kvm_pmu_get_counter_value(vcpu, 
idx); } return true; } static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 idx, reg; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_el0_disabled(vcpu)) return false; if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { /* PMXEVTYPER_EL0 */ idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; reg = PMEVTYPER0_EL0 + idx; } else if (r->CRn == 14 && (r->CRm & 12) == 12) { idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); if (idx == ARMV8_PMU_CYCLE_IDX) reg = PMCCFILTR_EL0; else /* PMEVTYPERn_EL0 */ reg = PMEVTYPER0_EL0 + idx; } else { BUG(); } if (!pmu_counter_idx_valid(vcpu, idx)) return false; if (p->is_write) { kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; } else { p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; } return true; } static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 val, mask; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_el0_disabled(vcpu)) return false; mask = kvm_pmu_valid_counter_mask(vcpu); if (p->is_write) { val = p->regval & mask; if (r->Op2 & 0x1) { /* accessing PMCNTENSET_EL0 */ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; kvm_pmu_enable_counter(vcpu, val); } else { /* accessing PMCNTENCLR_EL0 */ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; kvm_pmu_disable_counter(vcpu, val); } } else { p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask; } return true; } static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 mask = kvm_pmu_valid_counter_mask(vcpu); if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (!vcpu_mode_priv(vcpu)) return false; if (p->is_write) { u64 val = p->regval & mask; if (r->Op2 & 0x1) /* accessing PMINTENSET_EL1 */ vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val; else /* accessing PMINTENCLR_EL1 */ vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val; } else { p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask; } return true; } static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 mask = kvm_pmu_valid_counter_mask(vcpu); if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_el0_disabled(vcpu)) return false; if (p->is_write) { if (r->CRm & 0x2) /* accessing PMOVSSET_EL0 */ kvm_pmu_overflow_set(vcpu, p->regval & mask); else /* accessing PMOVSCLR_EL0 */ vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); } else { p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask; } return true; } static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 mask; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_write_swinc_el0_disabled(vcpu)) return false; if (p->is_write) { mask = kvm_pmu_valid_counter_mask(vcpu); kvm_pmu_software_increment(vcpu, p->regval & mask); return true; } return false; } static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (p->is_write) { if (!vcpu_mode_priv(vcpu)) return false; vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval & ARMV8_PMU_USERENR_MASK; } else { p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0) & ARMV8_PMU_USERENR_MASK; } return true; } /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define 
DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \ trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \ /* DBGBCRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \ trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \ /* DBGWVRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \ trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \ /* DBGWCRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \ trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } /* Macro to expand the PMEVCNTRn_EL0 register */ #define PMU_PMEVCNTR_EL0(n) \ /* PMEVCNTRn_EL0 */ \ { Op0(0b11), Op1(0b011), CRn(0b1110), \ CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), } /* Macro to expand the PMEVTYPERn_EL0 register */ #define PMU_PMEVTYPER_EL0(n) \ /* PMEVTYPERn_EL0 */ \ { Op0(0b11), Op1(0b011), CRn(0b1110), \ CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } /* * Architected system registers. * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 * * Debug handling: We do trap most, if not all debug related system * registers. The implementation is good enough to ensure that a guest * can use these with minimal performance degradation. The drawback is * that we don't implement any of the external debug, none of the * OSlock protocol. This should be revisited if we ever encounter a * more demanding guest... */ static const struct sys_reg_desc sys_reg_descs[] = { /* DC ISW */ { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010), access_dcsw }, /* DC CSW */ { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010), access_dcsw }, /* DC CISW */ { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), access_dcsw }, DBG_BCR_BVR_WCR_WVR_EL1(0), DBG_BCR_BVR_WCR_WVR_EL1(1), /* MDCCINT_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, /* MDSCR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), trap_debug_regs, reset_val, MDSCR_EL1, 0 }, DBG_BCR_BVR_WCR_WVR_EL1(2), DBG_BCR_BVR_WCR_WVR_EL1(3), DBG_BCR_BVR_WCR_WVR_EL1(4), DBG_BCR_BVR_WCR_WVR_EL1(5), DBG_BCR_BVR_WCR_WVR_EL1(6), DBG_BCR_BVR_WCR_WVR_EL1(7), DBG_BCR_BVR_WCR_WVR_EL1(8), DBG_BCR_BVR_WCR_WVR_EL1(9), DBG_BCR_BVR_WCR_WVR_EL1(10), DBG_BCR_BVR_WCR_WVR_EL1(11), DBG_BCR_BVR_WCR_WVR_EL1(12), DBG_BCR_BVR_WCR_WVR_EL1(13), DBG_BCR_BVR_WCR_WVR_EL1(14), DBG_BCR_BVR_WCR_WVR_EL1(15), /* MDRAR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), trap_raz_wi }, /* OSLAR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100), trap_raz_wi }, /* OSLSR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100), trap_oslsr_el1 }, /* OSDLR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100), trap_raz_wi }, /* DBGPRCR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100), trap_raz_wi }, /* DBGCLAIMSET_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110), trap_raz_wi }, /* DBGCLAIMCLR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110), trap_raz_wi }, /* DBGAUTHSTATUS_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), trap_dbgauthstatus_el1 }, /* MDCCSR_EL1 */ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), trap_raz_wi }, /* DBGDTR_EL0 */ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000), trap_raz_wi }, /* 
DBGDTR[TR]X_EL0 */ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000), trap_raz_wi }, /* DBGVCR32_EL2 */ { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), NULL, reset_val, DBGVCR32_EL2, 0 }, /* MPIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101), NULL, reset_mpidr, MPIDR_EL1 }, /* SCTLR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, /* CPACR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), NULL, reset_val, CPACR_EL1, 0 }, /* TTBR0_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000), access_vm_reg, reset_unknown, TTBR0_EL1 }, /* TTBR1_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001), access_vm_reg, reset_unknown, TTBR1_EL1 }, /* TCR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010), access_vm_reg, reset_val, TCR_EL1, 0 }, /* AFSR0_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000), access_vm_reg, reset_unknown, AFSR0_EL1 }, /* AFSR1_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001), access_vm_reg, reset_unknown, AFSR1_EL1 }, /* ESR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000), access_vm_reg, reset_unknown, ESR_EL1 }, /* FAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), access_vm_reg, reset_unknown, FAR_EL1 }, /* PAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), NULL, reset_unknown, PAR_EL1 }, /* PMINTENSET_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), access_pminten, reset_unknown, PMINTENSET_EL1 }, /* PMINTENCLR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), access_pminten, NULL, PMINTENSET_EL1 }, /* MAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), access_vm_reg, reset_unknown, MAIR_EL1 }, /* AMAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, /* VBAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), NULL, reset_val, VBAR_EL1, 0 }, /* ICC_SGI1R_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101), access_gic_sgi }, /* ICC_SRE_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), access_gic_sre }, /* CONTEXTIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, /* TPIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100), NULL, reset_unknown, TPIDR_EL1 }, /* CNTKCTL_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000), NULL, reset_val, CNTKCTL_EL1, 0}, /* CSSELR_EL1 */ { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), NULL, reset_unknown, CSSELR_EL1 }, /* PMCR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), access_pmcr, reset_pmcr, }, /* PMCNTENSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, /* PMCNTENCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), access_pmcnten, NULL, PMCNTENSET_EL0 }, /* PMOVSCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), access_pmovs, NULL, PMOVSSET_EL0 }, /* PMSWINC_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), access_pmswinc, reset_unknown, PMSWINC_EL0 }, /* PMSELR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), access_pmselr, reset_unknown, PMSELR_EL0 }, /* PMCEID0_EL0 */ { 
Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), access_pmceid }, /* PMCEID1_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), access_pmceid }, /* PMCCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 }, /* PMXEVTYPER_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), access_pmu_evtyper }, /* PMXEVCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), access_pmu_evcntr }, /* PMUSERENR_EL0 * This register resets as unknown in 64bit mode while it resets as zero * in 32bit mode. Here we choose to reset it as zero for consistency. */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 }, /* PMOVSSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), access_pmovs, reset_unknown, PMOVSSET_EL0 }, /* TPIDR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), NULL, reset_unknown, TPIDR_EL0 }, /* TPIDRRO_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), NULL, reset_unknown, TPIDRRO_EL0 }, /* PMEVCNTRn_EL0 */ PMU_PMEVCNTR_EL0(0), PMU_PMEVCNTR_EL0(1), PMU_PMEVCNTR_EL0(2), PMU_PMEVCNTR_EL0(3), PMU_PMEVCNTR_EL0(4), PMU_PMEVCNTR_EL0(5), PMU_PMEVCNTR_EL0(6), PMU_PMEVCNTR_EL0(7), PMU_PMEVCNTR_EL0(8), PMU_PMEVCNTR_EL0(9), PMU_PMEVCNTR_EL0(10), PMU_PMEVCNTR_EL0(11), PMU_PMEVCNTR_EL0(12), PMU_PMEVCNTR_EL0(13), PMU_PMEVCNTR_EL0(14), PMU_PMEVCNTR_EL0(15), PMU_PMEVCNTR_EL0(16), PMU_PMEVCNTR_EL0(17), PMU_PMEVCNTR_EL0(18), PMU_PMEVCNTR_EL0(19), PMU_PMEVCNTR_EL0(20), PMU_PMEVCNTR_EL0(21), PMU_PMEVCNTR_EL0(22), PMU_PMEVCNTR_EL0(23), PMU_PMEVCNTR_EL0(24), PMU_PMEVCNTR_EL0(25), PMU_PMEVCNTR_EL0(26), PMU_PMEVCNTR_EL0(27), PMU_PMEVCNTR_EL0(28), PMU_PMEVCNTR_EL0(29), PMU_PMEVCNTR_EL0(30), /* PMEVTYPERn_EL0 */ PMU_PMEVTYPER_EL0(0), PMU_PMEVTYPER_EL0(1), PMU_PMEVTYPER_EL0(2), PMU_PMEVTYPER_EL0(3), PMU_PMEVTYPER_EL0(4), PMU_PMEVTYPER_EL0(5), PMU_PMEVTYPER_EL0(6), PMU_PMEVTYPER_EL0(7), PMU_PMEVTYPER_EL0(8), PMU_PMEVTYPER_EL0(9), PMU_PMEVTYPER_EL0(10), PMU_PMEVTYPER_EL0(11), PMU_PMEVTYPER_EL0(12), PMU_PMEVTYPER_EL0(13), PMU_PMEVTYPER_EL0(14), PMU_PMEVTYPER_EL0(15), PMU_PMEVTYPER_EL0(16), PMU_PMEVTYPER_EL0(17), PMU_PMEVTYPER_EL0(18), PMU_PMEVTYPER_EL0(19), PMU_PMEVTYPER_EL0(20), PMU_PMEVTYPER_EL0(21), PMU_PMEVTYPER_EL0(22), PMU_PMEVTYPER_EL0(23), PMU_PMEVTYPER_EL0(24), PMU_PMEVTYPER_EL0(25), PMU_PMEVTYPER_EL0(26), PMU_PMEVTYPER_EL0(27), PMU_PMEVTYPER_EL0(28), PMU_PMEVTYPER_EL0(29), PMU_PMEVTYPER_EL0(30), /* PMCCFILTR_EL0 * This register resets as unknown in 64bit mode while it resets as zero * in 32bit mode. Here we choose to reset it as zero for consistency. 
*/ { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 }, /* DACR32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), NULL, reset_unknown, DACR32_EL2 }, /* IFSR32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001), NULL, reset_unknown, IFSR32_EL2 }, /* FPEXC32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000), NULL, reset_val, FPEXC32_EL2, 0x70 }, }; static bool trap_dbgidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { return ignore_write(vcpu, p); } else { u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1); u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1); u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) | (6 << 16) | (el3 << 14) | (el3 << 12)); return true; } } static bool trap_debug32(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { vcpu_cp14(vcpu, r->reg) = p->regval; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { p->regval = vcpu_cp14(vcpu, r->reg); } return true; } /* AArch32 debug register mappings * * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0] * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32] * * All control registers and watchpoint value registers are mapped to * the lower 32 bits of their AArch64 equivalents. We share the trap * handlers with the above AArch64 code which checks what mode the * system is in. */ static bool trap_xvr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; if (p->is_write) { u64 val = *dbg_reg; val &= 0xffffffffUL; val |= p->regval << 32; *dbg_reg = val; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { p->regval = *dbg_reg >> 32; } trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); return true; } #define DBG_BCR_BVR_WCR_WVR(n) \ /* DBGBVRn */ \ { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \ /* DBGBCRn */ \ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \ /* DBGWVRn */ \ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \ /* DBGWCRn */ \ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n } #define DBGBXVR(n) \ { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n } /* * Trapped cp14 registers. We generally ignore most of the external * debug, on the principle that they don't really make sense to a * guest. Revisit this one day, would this principle change. 
*/ static const struct sys_reg_desc cp14_regs[] = { /* DBGIDR */ { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, /* DBGDTRRXext */ { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(0), /* DBGDSCRint */ { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(1), /* DBGDCCINT */ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, /* DBGDSCRext */ { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, DBG_BCR_BVR_WCR_WVR(2), /* DBGDTR[RT]Xint */ { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, /* DBGDTR[RT]Xext */ { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(3), DBG_BCR_BVR_WCR_WVR(4), DBG_BCR_BVR_WCR_WVR(5), /* DBGWFAR */ { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, /* DBGOSECCR */ { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(6), /* DBGVCR */ { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, DBG_BCR_BVR_WCR_WVR(7), DBG_BCR_BVR_WCR_WVR(8), DBG_BCR_BVR_WCR_WVR(9), DBG_BCR_BVR_WCR_WVR(10), DBG_BCR_BVR_WCR_WVR(11), DBG_BCR_BVR_WCR_WVR(12), DBG_BCR_BVR_WCR_WVR(13), DBG_BCR_BVR_WCR_WVR(14), DBG_BCR_BVR_WCR_WVR(15), /* DBGDRAR (32bit) */ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, DBGBXVR(0), /* DBGOSLAR */ { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, DBGBXVR(1), /* DBGOSLSR */ { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 }, DBGBXVR(2), DBGBXVR(3), /* DBGOSDLR */ { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, DBGBXVR(4), /* DBGPRCR */ { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, DBGBXVR(5), DBGBXVR(6), DBGBXVR(7), DBGBXVR(8), DBGBXVR(9), DBGBXVR(10), DBGBXVR(11), DBGBXVR(12), DBGBXVR(13), DBGBXVR(14), DBGBXVR(15), /* DBGDSAR (32bit) */ { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, /* DBGDEVID2 */ { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, /* DBGDEVID1 */ { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, /* DBGDEVID */ { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, /* DBGCLAIMSET */ { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, /* DBGCLAIMCLR */ { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, /* DBGAUTHSTATUS */ { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, }; /* Trapped cp14 64bit registers */ static const struct sys_reg_desc cp14_64_regs[] = { /* DBGDRAR (64bit) */ { Op1( 0), CRm( 1), .access = trap_raz_wi }, /* DBGDSAR (64bit) */ { Op1( 0), CRm( 2), .access = trap_raz_wi }, }; /* Macro to expand the PMEVCNTRn register */ #define PMU_PMEVCNTR(n) \ /* PMEVCNTRn */ \ { Op1(0), CRn(0b1110), \ CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evcntr } /* Macro to expand the PMEVTYPERn register */ #define PMU_PMEVTYPER(n) \ /* PMEVTYPERn */ \ { Op1(0), CRn(0b1110), \ CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evtyper } /* * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, * depending on the way they are accessed (as a 32bit or a 64bit * register). 
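 * (The 32-bit MCR forms are matched by the CRn(2) entries in this table;
 * the 64-bit MCRR forms live in cp15_64_regs further down, where Op1
 * alone distinguishes TTBR0 from TTBR1.)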
*/ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR }, { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR }, { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR }, { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR }, /* * DC{C,I,CI}SW operations: */ { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, /* PMU */ { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, /* ICC_SRE */ { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre }, { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, /* PMEVCNTRn */ PMU_PMEVCNTR(0), PMU_PMEVCNTR(1), PMU_PMEVCNTR(2), PMU_PMEVCNTR(3), PMU_PMEVCNTR(4), PMU_PMEVCNTR(5), PMU_PMEVCNTR(6), PMU_PMEVCNTR(7), PMU_PMEVCNTR(8), PMU_PMEVCNTR(9), PMU_PMEVCNTR(10), PMU_PMEVCNTR(11), PMU_PMEVCNTR(12), PMU_PMEVCNTR(13), PMU_PMEVCNTR(14), PMU_PMEVCNTR(15), PMU_PMEVCNTR(16), PMU_PMEVCNTR(17), PMU_PMEVCNTR(18), PMU_PMEVCNTR(19), PMU_PMEVCNTR(20), PMU_PMEVCNTR(21), PMU_PMEVCNTR(22), PMU_PMEVCNTR(23), PMU_PMEVCNTR(24), PMU_PMEVCNTR(25), PMU_PMEVCNTR(26), PMU_PMEVCNTR(27), PMU_PMEVCNTR(28), PMU_PMEVCNTR(29), PMU_PMEVCNTR(30), /* PMEVTYPERn */ PMU_PMEVTYPER(0), PMU_PMEVTYPER(1), PMU_PMEVTYPER(2), PMU_PMEVTYPER(3), PMU_PMEVTYPER(4), PMU_PMEVTYPER(5), PMU_PMEVTYPER(6), PMU_PMEVTYPER(7), PMU_PMEVTYPER(8), PMU_PMEVTYPER(9), PMU_PMEVTYPER(10), PMU_PMEVTYPER(11), PMU_PMEVTYPER(12), PMU_PMEVTYPER(13), PMU_PMEVTYPER(14), PMU_PMEVTYPER(15), PMU_PMEVTYPER(16), PMU_PMEVTYPER(17), PMU_PMEVTYPER(18), PMU_PMEVTYPER(19), PMU_PMEVTYPER(20), PMU_PMEVTYPER(21), PMU_PMEVTYPER(22), PMU_PMEVTYPER(23), PMU_PMEVTYPER(24), PMU_PMEVTYPER(25), PMU_PMEVTYPER(26), PMU_PMEVTYPER(27), PMU_PMEVTYPER(28), PMU_PMEVTYPER(29), PMU_PMEVTYPER(30), /* PMCCFILTR */ { Op1(0), CRn(14), 
CRm(15), Op2(7), access_pmu_evtyper }, }; static const struct sys_reg_desc cp15_64_regs[] = { { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, }; /* Target specific emulation tables */ static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS]; void kvm_register_target_sys_reg_table(unsigned int target, struct kvm_sys_reg_target_table *table) { target_tables[target] = table; } /* Get specific register table for this target. */ static const struct sys_reg_desc *get_target_table(unsigned target, bool mode_is_64, size_t *num) { struct kvm_sys_reg_target_table *table; table = target_tables[target]; if (mode_is_64) { *num = table->table64.num; return table->table64.table; } else { *num = table->table32.num; return table->table32.table; } } #define reg_to_match_value(x) \ ({ \ unsigned long val; \ val = (x)->Op0 << 14; \ val |= (x)->Op1 << 11; \ val |= (x)->CRn << 7; \ val |= (x)->CRm << 3; \ val |= (x)->Op2; \ val; \ }) static int match_sys_reg(const void *key, const void *elt) { const unsigned long pval = (unsigned long)key; const struct sys_reg_desc *r = elt; return pval - reg_to_match_value(r); } static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[], unsigned int num) { unsigned long pval = reg_to_match_value(params); return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg); } int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) { kvm_inject_undefined(vcpu); return 1; } /* * emulate_cp -- tries to match a sys_reg access in a handling table, and * call the corresponding trap handler. * * @params: pointer to the descriptor of the access * @table: array of trap descriptors * @num: size of the trap descriptor array * * Return 0 if the access has been handled, and -1 if not. */ static int emulate_cp(struct kvm_vcpu *vcpu, struct sys_reg_params *params, const struct sys_reg_desc *table, size_t num) { const struct sys_reg_desc *r; if (!table) return -1; /* Not handled */ r = find_reg(params, table, num); if (r) { /* * Not having an accessor means that we have * configured a trap that we don't know how to * handle. This certainly qualifies as a gross bug * that should be fixed right away. 
*/ BUG_ON(!r->access); if (likely(r->access(vcpu, params, r))) { /* Skip instruction, since it was emulated */ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); /* Handled */ return 0; } } /* Not handled */ return -1; } static void unhandled_cp_access(struct kvm_vcpu *vcpu, struct sys_reg_params *params) { u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); int cp = -1; switch(hsr_ec) { case ESR_ELx_EC_CP15_32: case ESR_ELx_EC_CP15_64: cp = 15; break; case ESR_ELx_EC_CP14_MR: case ESR_ELx_EC_CP14_64: cp = 14; break; default: WARN_ON(1); } kvm_err("Unsupported guest CP%d access at: %08lx\n", cp, *vcpu_pc(vcpu)); print_sys_reg_instr(params); kvm_inject_undefined(vcpu); } /** * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, const struct sys_reg_desc *global, size_t nr_global, const struct sys_reg_desc *target_specific, size_t nr_specific) { struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); int Rt = (hsr >> 5) & 0xf; int Rt2 = (hsr >> 10) & 0xf; params.is_aarch32 = true; params.is_32bit = false; params.CRm = (hsr >> 1) & 0xf; params.is_write = ((hsr & 1) == 0); params.Op0 = 0; params.Op1 = (hsr >> 16) & 0xf; params.Op2 = 0; params.CRn = 0; /* * Make a 64-bit value out of Rt and Rt2. As we use the same trap * backends between AArch32 and AArch64, we get away with it. */ if (params.is_write) { params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; } if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) goto out; if (!emulate_cp(vcpu, &params, global, nr_global)) goto out; unhandled_cp_access(vcpu, &params); out: /* Split up the value between registers for the read side */ if (!params.is_write) { vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); } return 1; } /** * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, const struct sys_reg_desc *global, size_t nr_global, const struct sys_reg_desc *target_specific, size_t nr_specific) { struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); int Rt = (hsr >> 5) & 0xf; params.is_aarch32 = true; params.is_32bit = true; params.CRm = (hsr >> 1) & 0xf; params.regval = vcpu_get_reg(vcpu, Rt); params.is_write = ((hsr & 1) == 0); params.CRn = (hsr >> 10) & 0xf; params.Op0 = 0; params.Op1 = (hsr >> 14) & 0x7; params.Op2 = (hsr >> 17) & 0x7; if (!emulate_cp(vcpu, &params, target_specific, nr_specific) || !emulate_cp(vcpu, &params, global, nr_global)) { if (!params.is_write) vcpu_set_reg(vcpu, Rt, params.regval); return 1; } unhandled_cp_access(vcpu, &params); return 1; } int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) { const struct sys_reg_desc *target_specific; size_t num; target_specific = get_target_table(vcpu->arch.target, false, &num); return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs), target_specific, num); } int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) { const struct sys_reg_desc *target_specific; size_t num; target_specific = get_target_table(vcpu->arch.target, false, &num); return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), target_specific, num); } int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) { return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs), NULL, 0); } 
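/*
 * A small standalone sketch, separate from the kernel sources, of the
 * lookup scheme used above: each descriptor's (Op0,Op1,CRn,CRm,Op2) tuple
 * is packed into one integer key so a sorted table can be probed with a
 * single bsearch(), as reg_to_match_value()/match_sys_reg()/find_reg() do.
 * All demo_* names are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_desc { unsigned op0, op1, crn, crm, op2; const char *name; };

static unsigned long demo_key(const struct demo_desc *d)
{
	/* same field layout as reg_to_match_value(): Op0 above Op1 above
	 * CRn above CRm above Op2 */
	return ((unsigned long)d->op0 << 14) | (d->op1 << 11) |
	       (d->crn << 7) | (d->crm << 3) | d->op2;
}

static int demo_cmp(const void *key, const void *elt)
{
	/* avoid the subtraction idiom so large keys cannot overflow int */
	unsigned long k = (unsigned long)key;
	unsigned long e = demo_key(elt);

	return (k > e) - (k < e);
}

int main(void)
{
	/* must be sorted ascending by packed key, like sys_reg_descs[] */
	static const struct demo_desc table[] = {
		{ 3, 0, 1,  0, 0, "SCTLR_EL1" },
		{ 3, 0, 2,  0, 0, "TTBR0_EL1" },
		{ 3, 3, 9, 12, 0, "PMCR_EL0"  },
	};
	struct demo_desc probe = { 3, 0, 2, 0, 0, NULL };
	const struct demo_desc *hit;

	/* the key travels as a pointer-sized value, mirroring find_reg() */
	hit = bsearch((void *)demo_key(&probe), table,
		      sizeof(table) / sizeof(table[0]), sizeof(table[0]),
		      demo_cmp);
	printf("%s\n", hit ? hit->name : "not found");
	return 0;
}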
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) { return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs), NULL, 0); } static int emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params) { size_t num; const struct sys_reg_desc *table, *r; table = get_target_table(vcpu->arch.target, true, &num); /* Search target-specific then generic table. */ r = find_reg(params, table, num); if (!r) r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); if (likely(r)) { /* * Not having an accessor means that we have * configured a trap that we don't know how to * handle. This certainly qualifies as a gross bug * that should be fixed right away. */ BUG_ON(!r->access); if (likely(r->access(vcpu, params, r))) { /* Skip instruction, since it was emulated */ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } /* If access function fails, it should complain. */ } else { kvm_err("Unsupported guest sys_reg access at: %lx\n", *vcpu_pc(vcpu)); print_sys_reg_instr(params); } kvm_inject_undefined(vcpu); return 1; } static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, const struct sys_reg_desc *table, size_t num) { unsigned long i; for (i = 0; i < num; i++) if (table[i].reset) table[i].reset(vcpu, &table[i]); } /** * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct sys_reg_params params; unsigned long esr = kvm_vcpu_get_hsr(vcpu); int Rt = (esr >> 5) & 0x1f; int ret; trace_kvm_handle_sys_reg(esr); params.is_aarch32 = false; params.is_32bit = false; params.Op0 = (esr >> 20) & 3; params.Op1 = (esr >> 14) & 0x7; params.CRn = (esr >> 10) & 0xf; params.CRm = (esr >> 1) & 0xf; params.Op2 = (esr >> 17) & 0x7; params.regval = vcpu_get_reg(vcpu, Rt); params.is_write = !(esr & 1); ret = emulate_sys_reg(vcpu, &params); if (!params.is_write) vcpu_set_reg(vcpu, Rt, params.regval); return ret; } /****************************************************************************** * Userspace API *****************************************************************************/ static bool index_to_params(u64 id, struct sys_reg_params *params) { switch (id & KVM_REG_SIZE_MASK) { case KVM_REG_SIZE_U64: /* Any unused index bits means it's not valid. */ if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK | KVM_REG_ARM64_SYSREG_OP0_MASK | KVM_REG_ARM64_SYSREG_OP1_MASK | KVM_REG_ARM64_SYSREG_CRN_MASK | KVM_REG_ARM64_SYSREG_CRM_MASK | KVM_REG_ARM64_SYSREG_OP2_MASK)) return false; params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); return true; default: return false; } } /* Decode an index value, and find the sys_reg_desc entry. */ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id) { size_t num; const struct sys_reg_desc *table, *r; struct sys_reg_params params; /* We only do sys_reg for now. 
*/ if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) return NULL; if (!index_to_params(id, &params)) return NULL; table = get_target_table(vcpu->arch.target, true, &num); r = find_reg(&params, table, num); if (!r) r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); /* Not saved in the sys_reg array? */ if (r && !r->reg) r = NULL; return r; } /* * These are the invariant sys_reg registers: we let the guest see the * host versions of these, so they're part of the guest state. * * A future CPU may provide a mechanism to present different values to * the guest, or a future kvm may trap them. */ #define FUNCTION_INVARIANT(reg) \ static void get_##reg(struct kvm_vcpu *v, \ const struct sys_reg_desc *r) \ { \ ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \ } FUNCTION_INVARIANT(midr_el1) FUNCTION_INVARIANT(ctr_el0) FUNCTION_INVARIANT(revidr_el1) FUNCTION_INVARIANT(id_pfr0_el1) FUNCTION_INVARIANT(id_pfr1_el1) FUNCTION_INVARIANT(id_dfr0_el1) FUNCTION_INVARIANT(id_afr0_el1) FUNCTION_INVARIANT(id_mmfr0_el1) FUNCTION_INVARIANT(id_mmfr1_el1) FUNCTION_INVARIANT(id_mmfr2_el1) FUNCTION_INVARIANT(id_mmfr3_el1) FUNCTION_INVARIANT(id_isar0_el1) FUNCTION_INVARIANT(id_isar1_el1) FUNCTION_INVARIANT(id_isar2_el1) FUNCTION_INVARIANT(id_isar3_el1) FUNCTION_INVARIANT(id_isar4_el1) FUNCTION_INVARIANT(id_isar5_el1) FUNCTION_INVARIANT(clidr_el1) FUNCTION_INVARIANT(aidr_el1) /* ->val is filled in by kvm_sys_reg_table_init() */ static struct sys_reg_desc invariant_sys_regs[] = { { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000), NULL, get_midr_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110), NULL, get_revidr_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000), NULL, get_id_pfr0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001), NULL, get_id_pfr1_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010), NULL, get_id_dfr0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011), NULL, get_id_afr0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100), NULL, get_id_mmfr0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101), NULL, get_id_mmfr1_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110), NULL, get_id_mmfr2_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111), NULL, get_id_mmfr3_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), NULL, get_id_isar0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001), NULL, get_id_isar1_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), NULL, get_id_isar2_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011), NULL, get_id_isar3_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100), NULL, get_id_isar4_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101), NULL, get_id_isar5_el1 }, { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001), NULL, get_clidr_el1 }, { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111), NULL, get_aidr_el1 }, { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001), NULL, get_ctr_el0 }, }; static int reg_from_user(u64 *val, const void __user *uaddr, u64 id) { if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) return -EFAULT; return 0; } static int reg_to_user(void __user *uaddr, const u64 *val, u64 id) { if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) return -EFAULT; return 0; } static int get_invariant_sys_reg(u64 id, void __user *uaddr) { struct 
sys_reg_params params; const struct sys_reg_desc *r; if (!index_to_params(id, &params)) return -ENOENT; r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); if (!r) return -ENOENT; return reg_to_user(uaddr, &r->val, id); } static int set_invariant_sys_reg(u64 id, void __user *uaddr) { struct sys_reg_params params; const struct sys_reg_desc *r; int err; u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ if (!index_to_params(id, &params)) return -ENOENT; r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); if (!r) return -ENOENT; err = reg_from_user(&val, uaddr, id); if (err) return err; /* This is what we mean by invariant: you can't change it. */ if (r->val != val) return -EINVAL; return 0; } static bool is_valid_cache(u32 val) { u32 level, ctype; if (val >= CSSELR_MAX) return false; /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ level = (val >> 1); ctype = (cache_levels >> (level * 3)) & 7; switch (ctype) { case 0: /* No cache */ return false; case 1: /* Instruction cache only */ return (val & 1); case 2: /* Data cache only */ case 4: /* Unified cache */ return !(val & 1); case 3: /* Separate instruction and data caches */ return true; default: /* Reserved: we can't know instruction or data. */ return false; } } static int demux_c15_get(u64 id, void __user *uaddr) { u32 val; u32 __user *uval = uaddr; /* Fail if we have unknown bits set. */ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) return -ENOENT; switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { case KVM_REG_ARM_DEMUX_ID_CCSIDR: if (KVM_REG_SIZE(id) != 4) return -ENOENT; val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) >> KVM_REG_ARM_DEMUX_VAL_SHIFT; if (!is_valid_cache(val)) return -ENOENT; return put_user(get_ccsidr(val), uval); default: return -ENOENT; } } static int demux_c15_set(u64 id, void __user *uaddr) { u32 val, newval; u32 __user *uval = uaddr; /* Fail if we have unknown bits set. */ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) return -ENOENT; switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { case KVM_REG_ARM_DEMUX_ID_CCSIDR: if (KVM_REG_SIZE(id) != 4) return -ENOENT; val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) >> KVM_REG_ARM_DEMUX_VAL_SHIFT; if (!is_valid_cache(val)) return -ENOENT; if (get_user(newval, uval)) return -EFAULT; /* This is also invariant: you can't change it. 
*/ if (newval != get_ccsidr(val)) return -EINVAL; return 0; default: return -ENOENT; } } int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { const struct sys_reg_desc *r; void __user *uaddr = (void __user *)(unsigned long)reg->addr; if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) return demux_c15_get(reg->id, uaddr); if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) return -ENOENT; r = index_to_sys_reg_desc(vcpu, reg->id); if (!r) return get_invariant_sys_reg(reg->id, uaddr); if (r->get_user) return (r->get_user)(vcpu, r, reg, uaddr); return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id); } int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { const struct sys_reg_desc *r; void __user *uaddr = (void __user *)(unsigned long)reg->addr; if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) return demux_c15_set(reg->id, uaddr); if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) return -ENOENT; r = index_to_sys_reg_desc(vcpu, reg->id); if (!r) return set_invariant_sys_reg(reg->id, uaddr); if (r->set_user) return (r->set_user)(vcpu, r, reg, uaddr); return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id); } static unsigned int num_demux_regs(void) { unsigned int i, count = 0; for (i = 0; i < CSSELR_MAX; i++) if (is_valid_cache(i)) count++; return count; } static int write_demux_regids(u64 __user *uindices) { u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; unsigned int i; val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; for (i = 0; i < CSSELR_MAX; i++) { if (!is_valid_cache(i)) continue; if (put_user(val | i, uindices)) return -EFAULT; uindices++; } return 0; } static u64 sys_reg_to_index(const struct sys_reg_desc *reg) { return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG | (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); } static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) { if (!*uind) return true; if (put_user(sys_reg_to_index(reg), *uind)) return false; (*uind)++; return true; } /* Assumed ordered tables, see kvm_sys_reg_table_init. */ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) { const struct sys_reg_desc *i1, *i2, *end1, *end2; unsigned int total = 0; size_t num; /* We check for duplicates here, to allow arch-specific overrides. */ i1 = get_target_table(vcpu->arch.target, true, &num); end1 = i1 + num; i2 = sys_reg_descs; end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); BUG_ON(i1 == end1 || i2 == end2); /* Walk carefully, as both tables may refer to the same register. */ while (i1 || i2) { int cmp = cmp_sys_reg(i1, i2); /* target-specific overrides generic entry. */ if (cmp <= 0) { /* Ignore registers we trap but don't save. */ if (i1->reg) { if (!copy_reg_to_user(i1, &uind)) return -EFAULT; total++; } } else { /* Ignore registers we trap but don't save. */ if (i2->reg) { if (!copy_reg_to_user(i2, &uind)) return -EFAULT; total++; } } if (cmp <= 0 && ++i1 == end1) i1 = NULL; if (cmp >= 0 && ++i2 == end2) i2 = NULL; } return total; } unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) { return ARRAY_SIZE(invariant_sys_regs) + num_demux_regs() + walk_sys_regs(vcpu, (u64 __user *)NULL); } int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) { unsigned int i; int err; /* Then give them all the invariant registers' indices. 
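 * The complete list handed to userspace is therefore: the invariant
 * registers first, then the trapped/saved system registers from
 * walk_sys_regs(), and finally the demuxed cache-geometry (CCSIDR)
 * registers from write_demux_regids(); kvm_arm_num_sys_reg_descs()
 * computes its total from the same three sources.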
*/ for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) return -EFAULT; uindices++; } err = walk_sys_regs(vcpu, uindices); if (err < 0) return err; uindices += err; return write_demux_regids(uindices); } static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n) { unsigned int i; for (i = 1; i < n; i++) { if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) { kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1); return 1; } } return 0; } void kvm_sys_reg_table_init(void) { unsigned int i; struct sys_reg_desc clidr; /* Make sure tables are unique and in order. */ BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs))); BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs))); BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs))); BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs))); BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs))); BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs))); /* We abuse the reset function to overwrite the table itself. */ for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]); /* * CLIDR format is awkward, so clean it up. See ARM B4.1.20: * * If software reads the Cache Type fields from Ctype1 * upwards, once it has seen a value of 0b000, no caches * exist at further-out levels of the hierarchy. So, for * example, if Ctype3 is the first Cache Type field with a * value of 0b000, the values of Ctype4 to Ctype7 must be * ignored. */ get_clidr_el1(NULL, &clidr); /* Ugly... */ cache_levels = clidr.val; for (i = 0; i < 7; i++) if (((cache_levels >> (i*3)) & 7) == 0) break; /* Clear all higher bits. */ cache_levels &= (1 << (i*3))-1; } /** * kvm_reset_sys_regs - sets system registers to reset value * @vcpu: The VCPU pointer * * This function finds the right table above and sets the registers on the * virtual CPU struct to their architecturally defined reset values. */ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) { size_t num; const struct sys_reg_desc *table; /* Catch someone adding a register without putting in reset entry. */ memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); /* Generic chip reset first (so target could override). */ reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); table = get_target_table(vcpu->arch.target, true, &num); reset_sys_reg_descs(vcpu, table, num); for (num = 1; num < NR_SYS_REGS; num++) if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242) panic("Didn't reset vcpu_sys_reg(%zi)", num); }
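
/*
 * Userspace view (an illustrative sketch, not part of this file): the
 * indices enumerated above are the ones a VMM obtains through the
 * standard KVM_GET_REG_LIST ioctl and then reads back one at a time
 * with KVM_GET_ONE_REG, which is serviced by kvm_arm_sys_reg_get_reg():
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = index,			// an entry from KVM_GET_REG_LIST
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");	// -ENOENT: no such register
 */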
/* Copyright (c) 2001 Matej Pfajfar. * Copyright (c) 2001-2004, Roger Dingledine. * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. * Copyright (c) 2007-2013, The Tor Project, Inc. */ /* See LICENSE for licensing information */ /** * \file relay.c * \brief Handle relay cell encryption/decryption, plus packaging and * receiving from circuits, plus queuing on circuits. **/ #define RELAY_PRIVATE #include "or.h" #include "addressmap.h" #include "buffers.h" #include "channel.h" #include "circuitbuild.h" #include "circuitlist.h" #include "circuituse.h" #include "config.h" #include "connection.h" #include "connection_edge.h" #include "connection_or.h" #include "control.h" #include "geoip.h" #include "main.h" #include "mempool.h" #include "networkstatus.h" #include "nodelist.h" #include "onion.h" #include "policies.h" #include "reasons.h" #include "relay.h" #include "rendcommon.h" #include "router.h" #include "routerlist.h" #include "routerparse.h" static edge_connection_t *relay_lookup_conn(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction, crypt_path_t *layer_hint); static int connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ, edge_connection_t *conn, crypt_path_t *layer_hint); static void circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint); static void circuit_resume_edge_reading(circuit_t *circ, crypt_path_t *layer_hint); static int circuit_resume_edge_reading_helper(edge_connection_t *conn, circuit_t *circ, crypt_path_t *layer_hint); static int circuit_consider_stop_edge_reading(circuit_t *circ, crypt_path_t *layer_hint); static int circuit_queue_streams_are_blocked(circuit_t *circ); static void adjust_exit_policy_from_exitpolicy_failure(origin_circuit_t *circ, entry_connection_t *conn, node_t *node, const tor_addr_t *addr); /** Stop reading on edge connections when we have this many cells * waiting on the appropriate queue. */ #define CELL_QUEUE_HIGHWATER_SIZE 256 /** Start reading from edge connections again when we get down to this many * cells. */ #define CELL_QUEUE_LOWWATER_SIZE 64 /** Stats: how many relay cells have originated at this hop, or have * been relayed onward (not recognized at this hop)? */ uint64_t stats_n_relay_cells_relayed = 0; /** Stats: how many relay cells have been delivered to streams at this * hop? */ uint64_t stats_n_relay_cells_delivered = 0; /** Used to tell which stream to read from first on a circuit. */ static tor_weak_rng_t stream_choice_rng = TOR_WEAK_RNG_INIT; /** Update digest from the payload of cell. Assign integrity part to * cell. */ static void relay_set_digest(crypto_digest_t *digest, cell_t *cell) { char integrity[4]; relay_header_t rh; crypto_digest_add_bytes(digest, (char*)cell->payload, CELL_PAYLOAD_SIZE); crypto_digest_get_digest(digest, integrity, 4); // log_fn(LOG_DEBUG,"Putting digest of %u %u %u %u into relay cell.", // integrity[0], integrity[1], integrity[2], integrity[3]); relay_header_unpack(&rh, cell->payload); memcpy(rh.integrity, integrity, 4); relay_header_pack(cell->payload, &rh); } /** Does the digest for this circuit indicate that this cell is for us? * * Update digest from the payload of cell (with the integrity part set * to 0). If the integrity part is valid, return 1, else restore digest * and cell to their original state and return 0. 
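 *
 * Only 4 bytes of the running digest are compared, so "recognized" is a
 * probabilistic test; restoring the digest on a mismatch matters because
 * the cell may simply belong to a later hop, and this hop's running
 * digest must keep covering exactly the cells it has accepted.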
*/ static int relay_digest_matches(crypto_digest_t *digest, cell_t *cell) { char received_integrity[4], calculated_integrity[4]; relay_header_t rh; crypto_digest_t *backup_digest=NULL; backup_digest = crypto_digest_dup(digest); relay_header_unpack(&rh, cell->payload); memcpy(received_integrity, rh.integrity, 4); memset(rh.integrity, 0, 4); relay_header_pack(cell->payload, &rh); // log_fn(LOG_DEBUG,"Reading digest of %u %u %u %u from relay cell.", // received_integrity[0], received_integrity[1], // received_integrity[2], received_integrity[3]); crypto_digest_add_bytes(digest, (char*) cell->payload, CELL_PAYLOAD_SIZE); crypto_digest_get_digest(digest, calculated_integrity, 4); if (tor_memneq(received_integrity, calculated_integrity, 4)) { // log_fn(LOG_INFO,"Recognized=0 but bad digest. Not recognizing."); // (%d vs %d).", received_integrity, calculated_integrity); /* restore digest to its old form */ crypto_digest_assign(digest, backup_digest); /* restore the relay header */ memcpy(rh.integrity, received_integrity, 4); relay_header_pack(cell->payload, &rh); crypto_digest_free(backup_digest); return 0; } crypto_digest_free(backup_digest); return 1; } /** Apply <b>cipher</b> to CELL_PAYLOAD_SIZE bytes of <b>in</b> * (in place). * * If <b>encrypt_mode</b> is 1 then encrypt, else decrypt. * * Return -1 if the crypto fails, else return 0. */ static int relay_crypt_one_payload(crypto_cipher_t *cipher, uint8_t *in, int encrypt_mode) { int r; (void)encrypt_mode; r = crypto_cipher_crypt_inplace(cipher, (char*) in, CELL_PAYLOAD_SIZE); if (r) { log_warn(LD_BUG,"Error during relay encryption"); return -1; } return 0; } /** Receive a relay cell: * - Crypt it (encrypt if headed toward the origin or if we <b>are</b> the * origin; decrypt if we're headed toward the exit). * - Check if recognized (if exitward). * - If recognized and the digest checks out, then find if there's a stream * that the cell is intended for, and deliver it to the right * connection_edge. * - If not recognized, then we need to relay it: append it to the appropriate * cell_queue on <b>circ</b>. * * Return -<b>reason</b> on failure. */ int circuit_receive_relay_cell(cell_t *cell, circuit_t *circ, cell_direction_t cell_direction) { channel_t *chan = NULL; crypt_path_t *layer_hint=NULL; char recognized=0; int reason; tor_assert(cell); tor_assert(circ); tor_assert(cell_direction == CELL_DIRECTION_OUT || cell_direction == CELL_DIRECTION_IN); if (circ->marked_for_close) return 0; if (relay_crypt(circ, cell, cell_direction, &layer_hint, &recognized) < 0) { log_warn(LD_BUG,"relay crypt failed. Dropping connection."); return -END_CIRC_REASON_INTERNAL; } if (recognized) { edge_connection_t *conn = NULL; if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) { pathbias_check_probe_response(circ, cell); /* We need to drop this cell no matter what to avoid code that expects * a certain purpose (such as the hidserv code). 
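 * Path-bias probes exist only to test circuit liveness, so their
 * responses must never be surfaced as application data.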
*/ return 0; } conn = relay_lookup_conn(circ, cell, cell_direction, layer_hint); if (cell_direction == CELL_DIRECTION_OUT) { ++stats_n_relay_cells_delivered; log_debug(LD_OR,"Sending away from origin."); if ((reason=connection_edge_process_relay_cell(cell, circ, conn, NULL)) < 0) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "connection_edge_process_relay_cell (away from origin) " "failed."); return reason; } } if (cell_direction == CELL_DIRECTION_IN) { ++stats_n_relay_cells_delivered; log_debug(LD_OR,"Sending to origin."); if ((reason = connection_edge_process_relay_cell(cell, circ, conn, layer_hint)) < 0) { log_warn(LD_OR, "connection_edge_process_relay_cell (at origin) failed."); return reason; } } return 0; } /* not recognized. pass it on. */ if (cell_direction == CELL_DIRECTION_OUT) { cell->circ_id = circ->n_circ_id; /* switch it */ chan = circ->n_chan; } else if (! CIRCUIT_IS_ORIGIN(circ)) { cell->circ_id = TO_OR_CIRCUIT(circ)->p_circ_id; /* switch it */ chan = TO_OR_CIRCUIT(circ)->p_chan; } else { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Dropping unrecognized inbound cell on origin circuit."); /* If we see unrecognized cells on path bias testing circs, * it's bad mojo. Those circuits need to die. * XXX: Shouldn't they always die? */ if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) { TO_ORIGIN_CIRCUIT(circ)->path_state = PATH_STATE_USE_FAILED; return -END_CIRC_REASON_TORPROTOCOL; } else { return 0; } } if (!chan) { // XXXX Can this splice stuff be done more cleanly? if (! CIRCUIT_IS_ORIGIN(circ) && TO_OR_CIRCUIT(circ)->rend_splice && cell_direction == CELL_DIRECTION_OUT) { or_circuit_t *splice = TO_OR_CIRCUIT(circ)->rend_splice; tor_assert(circ->purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED); tor_assert(splice->base_.purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED); cell->circ_id = splice->p_circ_id; cell->command = CELL_RELAY; /* can't be relay_early anyway */ if ((reason = circuit_receive_relay_cell(cell, TO_CIRCUIT(splice), CELL_DIRECTION_IN)) < 0) { log_warn(LD_REND, "Error relaying cell across rendezvous; closing " "circuits"); /* XXXX Do this here, or just return -1? */ circuit_mark_for_close(circ, -reason); return reason; } return 0; } log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Didn't recognize cell, but circ stops here! Closing circ."); return -END_CIRC_REASON_TORPROTOCOL; } log_debug(LD_OR,"Passing on unrecognized cell."); ++stats_n_relay_cells_relayed; /* XXXX no longer quite accurate {cells} * we might kill the circ before we relay * the cells. */ append_cell_to_circuit_queue(circ, chan, cell, cell_direction, 0); return 0; } /** Do the appropriate en/decryptions for <b>cell</b> arriving on * <b>circ</b> in direction <b>cell_direction</b>. * * If cell_direction == CELL_DIRECTION_IN: * - If we're at the origin (we're the OP), for hops 1..N, * decrypt cell. If recognized, stop. * - Else (we're not the OP), encrypt one hop. Cell is not recognized. * * If cell_direction == CELL_DIRECTION_OUT: * - decrypt one hop. Check if recognized. * * If cell is recognized, set *recognized to 1, and set * *layer_hint to the hop that recognized it. * * Return -1 to indicate that we should mark the circuit for close, * else return 0. 
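 *
 * For example, on a three-hop origin circuit an inbound cell is
 * decrypted with hop 1's b_crypto, then hop 2's, then hop 3's; the
 * first hop where rh.recognized is zero *and* the digest check passes
 * is reported through *layer_hint, and decryption stops there.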
*/ int relay_crypt(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction, crypt_path_t **layer_hint, char *recognized) { relay_header_t rh; tor_assert(circ); tor_assert(cell); tor_assert(recognized); tor_assert(cell_direction == CELL_DIRECTION_IN || cell_direction == CELL_DIRECTION_OUT); if (cell_direction == CELL_DIRECTION_IN) { if (CIRCUIT_IS_ORIGIN(circ)) { /* We're at the beginning of the circuit. * We'll want to do layered decrypts. */ crypt_path_t *thishop, *cpath = TO_ORIGIN_CIRCUIT(circ)->cpath; thishop = cpath; if (thishop->state != CPATH_STATE_OPEN) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay cell before first created cell? Closing."); return -1; } do { /* Remember: cpath is in forward order, that is, first hop first. */ tor_assert(thishop); if (relay_crypt_one_payload(thishop->b_crypto, cell->payload, 0) < 0) return -1; relay_header_unpack(&rh, cell->payload); if (rh.recognized == 0) { /* it's possibly recognized. have to check digest to be sure. */ if (relay_digest_matches(thishop->b_digest, cell)) { *recognized = 1; *layer_hint = thishop; return 0; } } thishop = thishop->next; } while (thishop != cpath && thishop->state == CPATH_STATE_OPEN); log_fn(LOG_PROTOCOL_WARN, LD_OR, "Incoming cell at client not recognized. Closing."); return -1; } else { /* we're in the middle. Just one crypt. */ if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->p_crypto, cell->payload, 1) < 0) return -1; // log_fn(LOG_DEBUG,"Skipping recognized check, because we're not " // "the client."); } } else /* cell_direction == CELL_DIRECTION_OUT */ { /* we're in the middle. Just one crypt. */ if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->n_crypto, cell->payload, 0) < 0) return -1; relay_header_unpack(&rh, cell->payload); if (rh.recognized == 0) { /* it's possibly recognized. have to check digest to be sure. */ if (relay_digest_matches(TO_OR_CIRCUIT(circ)->n_digest, cell)) { *recognized = 1; return 0; } } } return 0; } /** Package a relay cell from an edge: * - Encrypt it to the right layer * - Append it to the appropriate cell_queue on <b>circ</b>. */ static int circuit_package_relay_cell(cell_t *cell, circuit_t *circ, cell_direction_t cell_direction, crypt_path_t *layer_hint, streamid_t on_stream, const char *filename, int lineno) { channel_t *chan; /* where to send the cell */ if (cell_direction == CELL_DIRECTION_OUT) { crypt_path_t *thishop; /* counter for repeated crypts */ chan = circ->n_chan; if (!chan) { log_warn(LD_BUG,"outgoing relay cell sent from %s:%d has n_chan==NULL." " Dropping.", filename, lineno); return 0; /* just drop it */ } if (!CIRCUIT_IS_ORIGIN(circ)) { log_warn(LD_BUG,"outgoing relay cell sent from %s:%d on non-origin " "circ. Dropping.", filename, lineno); return 0; /* just drop it */ } relay_set_digest(layer_hint->f_digest, cell); thishop = layer_hint; /* moving from farthest to nearest hop */ do { tor_assert(thishop); /* XXXX RD This is a bug, right? */ log_debug(LD_OR,"crypting a layer of the relay cell."); if (relay_crypt_one_payload(thishop->f_crypto, cell->payload, 1) < 0) { return -1; } thishop = thishop->prev; } while (thishop != TO_ORIGIN_CIRCUIT(circ)->cpath->prev); } else { /* incoming cell */ or_circuit_t *or_circ; if (CIRCUIT_IS_ORIGIN(circ)) { /* We should never package an _incoming_ cell from the circuit * origin; that means we messed up somewhere. */ log_warn(LD_BUG,"incoming relay cell at origin circuit. 
Dropping."); assert_circuit_ok(circ); return 0; /* just drop it */ } or_circ = TO_OR_CIRCUIT(circ); chan = or_circ->p_chan; relay_set_digest(or_circ->p_digest, cell); if (relay_crypt_one_payload(or_circ->p_crypto, cell->payload, 1) < 0) return -1; } ++stats_n_relay_cells_relayed; append_cell_to_circuit_queue(circ, chan, cell, cell_direction, on_stream); return 0; } /** If cell's stream_id matches the stream_id of any conn that's * attached to circ, return that conn, else return NULL. */ static edge_connection_t * relay_lookup_conn(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction, crypt_path_t *layer_hint) { edge_connection_t *tmpconn; relay_header_t rh; relay_header_unpack(&rh, cell->payload); if (!rh.stream_id) return NULL; /* IN or OUT cells could have come from either direction, now * that we allow rendezvous *to* an OP. */ if (CIRCUIT_IS_ORIGIN(circ)) { for (tmpconn = TO_ORIGIN_CIRCUIT(circ)->p_streams; tmpconn; tmpconn=tmpconn->next_stream) { if (rh.stream_id == tmpconn->stream_id && !tmpconn->base_.marked_for_close && tmpconn->cpath_layer == layer_hint) { log_debug(LD_APP,"found conn for stream %d.", rh.stream_id); return tmpconn; } } } else { for (tmpconn = TO_OR_CIRCUIT(circ)->n_streams; tmpconn; tmpconn=tmpconn->next_stream) { if (rh.stream_id == tmpconn->stream_id && !tmpconn->base_.marked_for_close) { log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id); if (cell_direction == CELL_DIRECTION_OUT || connection_edge_is_rendezvous_stream(tmpconn)) return tmpconn; } } for (tmpconn = TO_OR_CIRCUIT(circ)->resolving_streams; tmpconn; tmpconn=tmpconn->next_stream) { if (rh.stream_id == tmpconn->stream_id && !tmpconn->base_.marked_for_close) { log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id); return tmpconn; } } } return NULL; /* probably a begin relay cell */ } /** Pack the relay_header_t host-order structure <b>src</b> into * network-order in the buffer <b>dest</b>. See tor-spec.txt for details * about the wire format. */ void relay_header_pack(uint8_t *dest, const relay_header_t *src) { set_uint8(dest, src->command); set_uint16(dest+1, htons(src->recognized)); set_uint16(dest+3, htons(src->stream_id)); memcpy(dest+5, src->integrity, 4); set_uint16(dest+9, htons(src->length)); } /** Unpack the network-order buffer <b>src</b> into a host-order * relay_header_t structure <b>dest</b>. */ void relay_header_unpack(relay_header_t *dest, const uint8_t *src) { dest->command = get_uint8(src); dest->recognized = ntohs(get_uint16(src+1)); dest->stream_id = ntohs(get_uint16(src+3)); memcpy(dest->integrity, src+5, 4); dest->length = ntohs(get_uint16(src+9)); } /** Convert the relay <b>command</b> into a human-readable string. 
*/ static const char * relay_command_to_string(uint8_t command) { switch (command) { case RELAY_COMMAND_BEGIN: return "BEGIN"; case RELAY_COMMAND_DATA: return "DATA"; case RELAY_COMMAND_END: return "END"; case RELAY_COMMAND_CONNECTED: return "CONNECTED"; case RELAY_COMMAND_SENDME: return "SENDME"; case RELAY_COMMAND_EXTEND: return "EXTEND"; case RELAY_COMMAND_EXTENDED: return "EXTENDED"; case RELAY_COMMAND_TRUNCATE: return "TRUNCATE"; case RELAY_COMMAND_TRUNCATED: return "TRUNCATED"; case RELAY_COMMAND_DROP: return "DROP"; case RELAY_COMMAND_RESOLVE: return "RESOLVE"; case RELAY_COMMAND_RESOLVED: return "RESOLVED"; case RELAY_COMMAND_BEGIN_DIR: return "BEGIN_DIR"; case RELAY_COMMAND_ESTABLISH_INTRO: return "ESTABLISH_INTRO"; case RELAY_COMMAND_ESTABLISH_RENDEZVOUS: return "ESTABLISH_RENDEZVOUS"; case RELAY_COMMAND_INTRODUCE1: return "INTRODUCE1"; case RELAY_COMMAND_INTRODUCE2: return "INTRODUCE2"; case RELAY_COMMAND_RENDEZVOUS1: return "RENDEZVOUS1"; case RELAY_COMMAND_RENDEZVOUS2: return "RENDEZVOUS2"; case RELAY_COMMAND_INTRO_ESTABLISHED: return "INTRO_ESTABLISHED"; case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED: return "RENDEZVOUS_ESTABLISHED"; case RELAY_COMMAND_INTRODUCE_ACK: return "INTRODUCE_ACK"; default: return "(unrecognized)"; } } /** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and send * it onto the open circuit <b>circ</b>. <b>stream_id</b> is the ID on * <b>circ</b> for the stream that's sending the relay cell, or 0 if it's a * control cell. <b>cpath_layer</b> is NULL for OR->OP cells, or the * destination hop for OP->OR cells. * * If you can't send the cell, mark the circuit for close and return -1. Else * return 0. */ int relay_send_command_from_edge_(streamid_t stream_id, circuit_t *circ, uint8_t relay_command, const char *payload, size_t payload_len, crypt_path_t *cpath_layer, const char *filename, int lineno) { cell_t cell; relay_header_t rh; cell_direction_t cell_direction; /* XXXX NM Split this function into a separate versions per circuit type? */ tor_assert(circ); tor_assert(payload_len <= RELAY_PAYLOAD_SIZE); memset(&cell, 0, sizeof(cell_t)); cell.command = CELL_RELAY; if (cpath_layer) { cell.circ_id = circ->n_circ_id; cell_direction = CELL_DIRECTION_OUT; } else if (! CIRCUIT_IS_ORIGIN(circ)) { cell.circ_id = TO_OR_CIRCUIT(circ)->p_circ_id; cell_direction = CELL_DIRECTION_IN; } else { return -1; } memset(&rh, 0, sizeof(rh)); rh.command = relay_command; rh.stream_id = stream_id; rh.length = payload_len; relay_header_pack(cell.payload, &rh); if (payload_len) memcpy(cell.payload+RELAY_HEADER_SIZE, payload, payload_len); log_debug(LD_OR,"delivering %d cell %s.", relay_command, cell_direction == CELL_DIRECTION_OUT ? "forward" : "backward"); /* If we are sending an END cell and this circuit is used for a tunneled * directory request, advance its state. 
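 * (DIRREQ_END_CELL_SENT only feeds the geoip-based directory-request
 * statistics; it has no effect on how the cell itself is routed.)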
*/ if (relay_command == RELAY_COMMAND_END && circ->dirreq_id) geoip_change_dirreq_state(circ->dirreq_id, DIRREQ_TUNNELED, DIRREQ_END_CELL_SENT); if (cell_direction == CELL_DIRECTION_OUT && circ->n_chan) { /* if we're using relaybandwidthrate, this conn wants priority */ channel_timestamp_client(circ->n_chan); } if (cell_direction == CELL_DIRECTION_OUT) { origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ); if (origin_circ->remaining_relay_early_cells > 0 && (relay_command == RELAY_COMMAND_EXTEND || relay_command == RELAY_COMMAND_EXTEND2 || cpath_layer != origin_circ->cpath)) { /* If we've got any relay_early cells left and (we're sending * an extend cell or we're not talking to the first hop), use * one of them. Don't worry about the conn protocol version: * append_cell_to_circuit_queue will fix it up. */ cell.command = CELL_RELAY_EARLY; --origin_circ->remaining_relay_early_cells; log_debug(LD_OR, "Sending a RELAY_EARLY cell; %d remaining.", (int)origin_circ->remaining_relay_early_cells); /* Memorize the command that is sent as RELAY_EARLY cell; helps debug * task 878. */ origin_circ->relay_early_commands[ origin_circ->relay_early_cells_sent++] = relay_command; } else if (relay_command == RELAY_COMMAND_EXTEND || relay_command == RELAY_COMMAND_EXTEND2) { /* If no RELAY_EARLY cells can be sent over this circuit, log which * commands have been sent as RELAY_EARLY cells before; helps debug * task 878. */ smartlist_t *commands_list = smartlist_new(); int i = 0; char *commands = NULL; for (; i < origin_circ->relay_early_cells_sent; i++) smartlist_add(commands_list, (char *) relay_command_to_string(origin_circ->relay_early_commands[i])); commands = smartlist_join_strings(commands_list, ",", 0, NULL); log_warn(LD_BUG, "Uh-oh. We're sending a RELAY_COMMAND_EXTEND cell, " "but we have run out of RELAY_EARLY cells on that circuit. " "Commands sent before: %s", commands); tor_free(commands); smartlist_free(commands_list); } } if (circuit_package_relay_cell(&cell, circ, cell_direction, cpath_layer, stream_id, filename, lineno) < 0) { log_warn(LD_BUG,"circuit_package_relay_cell failed. Closing."); circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL); return -1; } return 0; } /** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and * send it onto the open circuit <b>circ</b>. <b>fromconn</b> is the stream * that's sending the relay cell, or NULL if it's a control cell. * <b>cpath_layer</b> is NULL for OR->OP cells, or the destination hop * for OP->OR cells. * * If you can't send the cell, mark the circuit for close and * return -1. Else return 0. */ int connection_edge_send_command(edge_connection_t *fromconn, uint8_t relay_command, const char *payload, size_t payload_len) { /* XXXX NM Split this function into a separate versions per circuit type? */ circuit_t *circ; crypt_path_t *cpath_layer = fromconn->cpath_layer; tor_assert(fromconn); circ = fromconn->on_circuit; if (fromconn->base_.marked_for_close) { log_warn(LD_BUG, "called on conn that's already marked for close at %s:%d.", fromconn->base_.marked_for_close_file, fromconn->base_.marked_for_close); return 0; } if (!circ) { if (fromconn->base_.type == CONN_TYPE_AP) { log_info(LD_APP,"no circ. Closing conn."); connection_mark_unattached_ap(EDGE_TO_ENTRY_CONN(fromconn), END_STREAM_REASON_INTERNAL); } else { log_info(LD_EXIT,"no circ. 
Closing conn."); fromconn->edge_has_sent_end = 1; /* no circ to send to */ fromconn->end_reason = END_STREAM_REASON_INTERNAL; connection_mark_for_close(TO_CONN(fromconn)); } return -1; } return relay_send_command_from_edge(fromconn->stream_id, circ, relay_command, payload, payload_len, cpath_layer); } /** How many times will I retry a stream that fails due to DNS * resolve failure or misc error? */ #define MAX_RESOLVE_FAILURES 3 /** Return 1 if reason is something that you should retry if you * get the end cell before you've connected; else return 0. */ static int edge_reason_is_retriable(int reason) { return reason == END_STREAM_REASON_HIBERNATING || reason == END_STREAM_REASON_RESOURCELIMIT || reason == END_STREAM_REASON_EXITPOLICY || reason == END_STREAM_REASON_RESOLVEFAILED || reason == END_STREAM_REASON_MISC || reason == END_STREAM_REASON_NOROUTE; } /** Called when we receive an END cell on a stream that isn't open yet, * from the client side. * Arguments are as for connection_edge_process_relay_cell(). */ static int connection_ap_process_end_not_open( relay_header_t *rh, cell_t *cell, origin_circuit_t *circ, entry_connection_t *conn, crypt_path_t *layer_hint) { node_t *exitrouter; int reason = *(cell->payload+RELAY_HEADER_SIZE); int control_reason; edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn); (void) layer_hint; /* unused */ if (rh->length > 0) { if (reason == END_STREAM_REASON_TORPROTOCOL || reason == END_STREAM_REASON_DESTROY) { /* Both of these reasons could mean a failed tag * hit the exit and it complained. Do not probe. * Fail the circuit. */ circ->path_state = PATH_STATE_USE_FAILED; return -END_CIRC_REASON_TORPROTOCOL; } else if (reason == END_STREAM_REASON_INTERNAL) { /* We can't infer success or failure, since older Tors report * ENETUNREACH as END_STREAM_REASON_INTERNAL. */ } else { /* Path bias: If we get a valid reason code from the exit, * it wasn't due to tagging. * * We rely on recognized+digest being strong enough to make * tags unlikely to allow us to get tagged, yet 'recognized' * reason codes here. */ pathbias_mark_use_success(circ); } } if (rh->length == 0) { reason = END_STREAM_REASON_MISC; } control_reason = reason | END_STREAM_REASON_FLAG_REMOTE; if (edge_reason_is_retriable(reason) && /* avoid retry if rend */ !connection_edge_is_rendezvous_stream(edge_conn)) { const char *chosen_exit_digest = circ->build_state->chosen_exit->identity_digest; log_info(LD_APP,"Address '%s' refused due to '%s'. Considering retrying.", safe_str(conn->socks_request->address), stream_end_reason_to_string(reason)); exitrouter = node_get_mutable_by_id(chosen_exit_digest); switch (reason) { case END_STREAM_REASON_EXITPOLICY: { tor_addr_t addr; tor_addr_make_unspec(&addr); if (rh->length >= 5) { int ttl = -1; tor_addr_make_unspec(&addr); if (rh->length == 5 || rh->length == 9) { tor_addr_from_ipv4n(&addr, get_uint32(cell->payload+RELAY_HEADER_SIZE+1)); if (rh->length == 9) ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+5)); } else if (rh->length == 17 || rh->length == 21) { tor_addr_from_ipv6_bytes(&addr, (char*)(cell->payload+RELAY_HEADER_SIZE+1)); if (rh->length == 21) ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+17)); } if (tor_addr_is_null(&addr)) { log_info(LD_APP,"Address '%s' resolved to 0.0.0.0. 
Closing,", safe_str(conn->socks_request->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return 0; } if ((tor_addr_family(&addr) == AF_INET && !conn->ipv4_traffic_ok) || (tor_addr_family(&addr) == AF_INET6 && !conn->ipv6_traffic_ok)) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got an EXITPOLICY failure on a connection with a " "mismatched family. Closing."); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return 0; } if (get_options()->ClientDNSRejectInternalAddresses && tor_addr_is_internal(&addr, 0)) { log_info(LD_APP,"Address '%s' resolved to internal. Closing,", safe_str(conn->socks_request->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return 0; } client_dns_set_addressmap(conn, conn->socks_request->address, &addr, conn->chosen_exit_name, ttl); { char new_addr[TOR_ADDR_BUF_LEN]; tor_addr_to_str(new_addr, &addr, sizeof(new_addr), 1); if (strcmp(conn->socks_request->address, new_addr)) { strlcpy(conn->socks_request->address, new_addr, sizeof(conn->socks_request->address)); control_event_stream_status(conn, STREAM_EVENT_REMAP, 0); } } } /* check if he *ought* to have allowed it */ adjust_exit_policy_from_exitpolicy_failure(circ, conn, exitrouter, &addr); if (conn->chosen_exit_optional || conn->chosen_exit_retries) { /* stop wanting a specific exit */ conn->chosen_exit_optional = 0; /* A non-zero chosen_exit_retries can happen if we set a * TrackHostExits for this address under a port that the exit * relay allows, but then try the same address with a different * port that it doesn't allow to exit. We shouldn't unregister * the mapping, since it is probably still wanted on the * original port. But now we give away to the exit relay that * we probably have a TrackHostExits on it. So be it. */ conn->chosen_exit_retries = 0; tor_free(conn->chosen_exit_name); /* clears it */ } if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0) return 0; /* else, conn will get closed below */ break; } case END_STREAM_REASON_CONNECTREFUSED: if (!conn->chosen_exit_optional) break; /* break means it'll close, below */ /* Else fall through: expire this circuit, clear the * chosen_exit_name field, and try again. */ case END_STREAM_REASON_RESOLVEFAILED: case END_STREAM_REASON_TIMEOUT: case END_STREAM_REASON_MISC: case END_STREAM_REASON_NOROUTE: if (client_dns_incr_failures(conn->socks_request->address) < MAX_RESOLVE_FAILURES) { /* We haven't retried too many times; reattach the connection. */ circuit_log_path(LOG_INFO,LD_APP,circ); /* Mark this circuit "unusable for new streams". */ mark_circuit_unusable_for_new_conns(circ); if (conn->chosen_exit_optional) { /* stop wanting a specific exit */ conn->chosen_exit_optional = 0; tor_free(conn->chosen_exit_name); /* clears it */ } if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0) return 0; /* else, conn will get closed below */ } else { log_notice(LD_APP, "Have tried resolving or connecting to address '%s' " "at %d different places. 
Giving up.", safe_str(conn->socks_request->address), MAX_RESOLVE_FAILURES); /* clear the failures, so it will have a full try next time */ client_dns_clear_failures(conn->socks_request->address); } break; case END_STREAM_REASON_HIBERNATING: case END_STREAM_REASON_RESOURCELIMIT: if (exitrouter) { policies_set_node_exitpolicy_to_reject_all(exitrouter); } if (conn->chosen_exit_optional) { /* stop wanting a specific exit */ conn->chosen_exit_optional = 0; tor_free(conn->chosen_exit_name); /* clears it */ } if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0) return 0; /* else, will close below */ break; } /* end switch */ log_info(LD_APP,"Giving up on retrying; conn can't be handled."); } log_info(LD_APP, "Edge got end (%s) before we're connected. Marking for close.", stream_end_reason_to_string(rh->length > 0 ? reason : -1)); circuit_log_path(LOG_INFO,LD_APP,circ); /* need to test because of detach_retriable */ if (!ENTRY_TO_CONN(conn)->marked_for_close) connection_mark_unattached_ap(conn, control_reason); return 0; } /** Called when we have gotten an END_REASON_EXITPOLICY failure on <b>circ</b> * for <b>conn</b>, while attempting to connect via <b>node</b>. If the node * told us which address it rejected, then <b>addr</b> is that address; * otherwise it is AF_UNSPEC. * * If we are sure the node should have allowed this address, mark the node as * having a reject *:* exit policy. Otherwise, mark the circuit as unusable * for this particular address. **/ static void adjust_exit_policy_from_exitpolicy_failure(origin_circuit_t *circ, entry_connection_t *conn, node_t *node, const tor_addr_t *addr) { int make_reject_all = 0; const sa_family_t family = tor_addr_family(addr); if (node) { tor_addr_t tmp; int asked_for_family = tor_addr_parse(&tmp, conn->socks_request->address); if (family == AF_UNSPEC) { make_reject_all = 1; } else if (node_exit_policy_is_exact(node, family) && asked_for_family != -1 && !conn->chosen_exit_name) { make_reject_all = 1; } if (make_reject_all) { log_info(LD_APP, "Exitrouter %s seems to be more restrictive than its exit " "policy. Not using this router as exit for now.", node_describe(node)); policies_set_node_exitpolicy_to_reject_all(node); } } if (family != AF_UNSPEC) addr_policy_append_reject_addr(&circ->prepend_policy, addr); } /** Helper: change the socks_request-&gt;address field on conn to the * dotted-quad representation of <b>new_addr</b>, * and send an appropriate REMAP event. */ static void remap_event_helper(entry_connection_t *conn, const tor_addr_t *new_addr) { tor_addr_to_str(conn->socks_request->address, new_addr, sizeof(conn->socks_request->address), 1); control_event_stream_status(conn, STREAM_EVENT_REMAP, REMAP_STREAM_SOURCE_EXIT); } /** Extract the contents of a connected cell in <b>cell</b>, whose relay * header has already been parsed into <b>rh</b>. On success, set * <b>addr_out</b> to the address we're connected to, and <b>ttl_out</b> to * the ttl of that address, in seconds, and return 0. On failure, return * -1. */ int connected_cell_parse(const relay_header_t *rh, const cell_t *cell, tor_addr_t *addr_out, int *ttl_out) { uint32_t bytes; const uint8_t *payload = cell->payload + RELAY_HEADER_SIZE; tor_addr_make_unspec(addr_out); *ttl_out = -1; if (rh->length == 0) return 0; if (rh->length < 4) return -1; bytes = ntohl(get_uint32(payload)); /* If bytes is 0, this is maybe a v6 address. 
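 * (Layouts, as parsed below: a v4 answer is a nonzero 4-byte address
 * optionally followed by a 4-byte TTL; a v6 answer is 4 zero bytes, an
 * address-type octet of 6, 16 address bytes, then a 4-byte TTL.)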
Otherwise it's a v4 address */ if (bytes != 0) { /* v4 address */ tor_addr_from_ipv4h(addr_out, bytes); if (rh->length >= 8) { bytes = ntohl(get_uint32(payload + 4)); if (bytes <= INT32_MAX) *ttl_out = bytes; } } else { if (rh->length < 25) /* 4 bytes of 0s, 1 addr, 16 ipv4, 4 ttl. */ return -1; if (get_uint8(payload + 4) != 6) return -1; tor_addr_from_ipv6_bytes(addr_out, (char*)(payload + 5)); bytes = ntohl(get_uint32(payload + 21)); if (bytes <= INT32_MAX) *ttl_out = (int) bytes; } return 0; } /** An incoming relay cell has arrived from circuit <b>circ</b> to * stream <b>conn</b>. * * The arguments here are the same as in * connection_edge_process_relay_cell() below; this function is called * from there when <b>conn</b> is defined and not in an open state. */ static int connection_edge_process_relay_cell_not_open( relay_header_t *rh, cell_t *cell, circuit_t *circ, edge_connection_t *conn, crypt_path_t *layer_hint) { if (rh->command == RELAY_COMMAND_END) { if (CIRCUIT_IS_ORIGIN(circ) && conn->base_.type == CONN_TYPE_AP) { return connection_ap_process_end_not_open(rh, cell, TO_ORIGIN_CIRCUIT(circ), EDGE_TO_ENTRY_CONN(conn), layer_hint); } else { /* we just got an 'end', don't need to send one */ conn->edge_has_sent_end = 1; conn->end_reason = *(cell->payload+RELAY_HEADER_SIZE) | END_STREAM_REASON_FLAG_REMOTE; connection_mark_for_close(TO_CONN(conn)); return 0; } } if (conn->base_.type == CONN_TYPE_AP && rh->command == RELAY_COMMAND_CONNECTED) { tor_addr_t addr; int ttl; entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn); tor_assert(CIRCUIT_IS_ORIGIN(circ)); if (conn->base_.state != AP_CONN_STATE_CONNECT_WAIT) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got 'connected' while not in state connect_wait. Dropping."); return 0; } conn->base_.state = AP_CONN_STATE_OPEN; log_info(LD_APP,"'connected' received after %d seconds.", (int)(time(NULL) - conn->base_.timestamp_lastread)); if (connected_cell_parse(rh, cell, &addr, &ttl) < 0) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a badly formatted connected cell. Closing."); connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); } if (tor_addr_family(&addr) != AF_UNSPEC) { const sa_family_t family = tor_addr_family(&addr); if (tor_addr_is_null(&addr) || (get_options()->ClientDNSRejectInternalAddresses && tor_addr_is_internal(&addr, 0))) { log_info(LD_APP, "...but it claims the IP address was %s. Closing.", fmt_addr(&addr)); connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); return 0; } if ((family == AF_INET && ! entry_conn->ipv4_traffic_ok) || (family == AF_INET6 && ! entry_conn->ipv6_traffic_ok)) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a connected cell to %s with unsupported address family." " Closing.", fmt_addr(&addr)); connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); return 0; } client_dns_set_addressmap(entry_conn, entry_conn->socks_request->address, &addr, entry_conn->chosen_exit_name, ttl); remap_event_helper(entry_conn, &addr); } circuit_log_path(LOG_INFO,LD_APP,TO_ORIGIN_CIRCUIT(circ)); /* don't send a socks reply to transparent conns */ tor_assert(entry_conn->socks_request != NULL); if (!entry_conn->socks_request->has_finished) connection_ap_handshake_socks_reply(entry_conn, NULL, 0, 0); /* Was it a linked dir conn? 
If so, a dir request just started to * fetch something; this could be a bootstrap status milestone. */ log_debug(LD_APP, "considering"); if (TO_CONN(conn)->linked_conn && TO_CONN(conn)->linked_conn->type == CONN_TYPE_DIR) { connection_t *dirconn = TO_CONN(conn)->linked_conn; log_debug(LD_APP, "it is! %d", dirconn->purpose); switch (dirconn->purpose) { case DIR_PURPOSE_FETCH_CERTIFICATE: if (consensus_is_waiting_for_certs()) control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_KEYS, 0); break; case DIR_PURPOSE_FETCH_CONSENSUS: control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_STATUS, 0); break; case DIR_PURPOSE_FETCH_SERVERDESC: case DIR_PURPOSE_FETCH_MICRODESC: control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS, count_loading_descriptors_progress()); break; } } /* This is definitely a success, so forget about any pending data we * had sent. */ if (entry_conn->pending_optimistic_data) { generic_buffer_free(entry_conn->pending_optimistic_data); entry_conn->pending_optimistic_data = NULL; } /* handle anything that might have queued */ if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) { /* (We already sent an end cell if possible) */ connection_mark_for_close(TO_CONN(conn)); return 0; } return 0; } if (conn->base_.type == CONN_TYPE_AP && rh->command == RELAY_COMMAND_RESOLVED) { int ttl; int answer_len; uint8_t answer_type; entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn); if (conn->base_.state != AP_CONN_STATE_RESOLVE_WAIT) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a 'resolved' cell while " "not in state resolve_wait. Dropping."); return 0; } tor_assert(SOCKS_COMMAND_IS_RESOLVE(entry_conn->socks_request->command)); answer_len = cell->payload[RELAY_HEADER_SIZE+1]; if (rh->length < 2 || answer_len+2>rh->length) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Dropping malformed 'resolved' cell"); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); return 0; } answer_type = cell->payload[RELAY_HEADER_SIZE]; if (rh->length >= answer_len+6) ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+ 2+answer_len)); else ttl = -1; if (answer_type == RESOLVED_TYPE_IPV4 || answer_type == RESOLVED_TYPE_IPV6) { tor_addr_t addr; if (decode_address_from_payload(&addr, cell->payload+RELAY_HEADER_SIZE, rh->length) && tor_addr_is_internal(&addr, 0) && get_options()->ClientDNSRejectInternalAddresses) { log_info(LD_APP,"Got a resolve with answer %s. Rejecting.", fmt_addr(&addr)); connection_ap_handshake_socks_resolved(entry_conn, RESOLVED_TYPE_ERROR_TRANSIENT, 0, NULL, 0, TIME_MAX); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); return 0; } } connection_ap_handshake_socks_resolved(entry_conn, answer_type, cell->payload[RELAY_HEADER_SIZE+1], /*answer_len*/ cell->payload+RELAY_HEADER_SIZE+2, /*answer*/ ttl, -1); if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) { tor_addr_t addr; tor_addr_from_ipv4n(&addr, get_uint32(cell->payload+RELAY_HEADER_SIZE+2)); remap_event_helper(entry_conn, &addr); } else if (answer_type == RESOLVED_TYPE_IPV6 && answer_len == 16) { tor_addr_t addr; tor_addr_from_ipv6_bytes(&addr, (char*)(cell->payload+RELAY_HEADER_SIZE+2)); remap_event_helper(entry_conn, &addr); } connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_DONE | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED); return 0; } log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Got an unexpected relay command %d, in state %d (%s). 
Dropping.", rh->command, conn->base_.state, conn_state_to_string(conn->base_.type, conn->base_.state)); return 0; /* for forward compatibility, don't kill the circuit */ // connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); // connection_mark_for_close(conn); // return -1; } /** An incoming relay cell has arrived on circuit <b>circ</b>. If * <b>conn</b> is NULL this is a control cell, else <b>cell</b> is * destined for <b>conn</b>. * * If <b>layer_hint</b> is defined, then we're the origin of the * circuit, and it specifies the hop that packaged <b>cell</b>. * * Return -reason if you want to warn and tear down the circuit, else 0. */ static int connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ, edge_connection_t *conn, crypt_path_t *layer_hint) { static int num_seen=0; relay_header_t rh; unsigned domain = layer_hint?LD_APP:LD_EXIT; int reason; int optimistic_data = 0; /* Set to 1 if we receive data on a stream * that's in the EXIT_CONN_STATE_RESOLVING * or EXIT_CONN_STATE_CONNECTING states. */ tor_assert(cell); tor_assert(circ); relay_header_unpack(&rh, cell->payload); // log_fn(LOG_DEBUG,"command %d stream %d", rh.command, rh.stream_id); num_seen++; log_debug(domain, "Now seen %d relay cells here (command %d, stream %d).", num_seen, rh.command, rh.stream_id); if (rh.length > RELAY_PAYLOAD_SIZE) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay cell length field too long. Closing circuit."); return - END_CIRC_REASON_TORPROTOCOL; } if (rh.stream_id == 0) { switch (rh.command) { case RELAY_COMMAND_BEGIN: case RELAY_COMMAND_CONNECTED: case RELAY_COMMAND_DATA: case RELAY_COMMAND_END: case RELAY_COMMAND_RESOLVE: case RELAY_COMMAND_RESOLVED: case RELAY_COMMAND_BEGIN_DIR: log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay command %d with zero " "stream_id. Dropping.", (int)rh.command); return 0; default: ; } } /* either conn is NULL, in which case we've got a control cell, or else * conn points to the recognized stream. */ if (conn && !connection_state_is_open(TO_CONN(conn))) { if (conn->base_.type == CONN_TYPE_EXIT && (conn->base_.state == EXIT_CONN_STATE_CONNECTING || conn->base_.state == EXIT_CONN_STATE_RESOLVING) && rh.command == RELAY_COMMAND_DATA) { /* Allow DATA cells to be delivered to an exit node in state * EXIT_CONN_STATE_CONNECTING or EXIT_CONN_STATE_RESOLVING. * This speeds up HTTP, for example. */ optimistic_data = 1; } else { return connection_edge_process_relay_cell_not_open( &rh, cell, circ, conn, layer_hint); } } switch (rh.command) { case RELAY_COMMAND_DROP: // log_info(domain,"Got a relay-level padding cell. Dropping."); return 0; case RELAY_COMMAND_BEGIN: case RELAY_COMMAND_BEGIN_DIR: if (layer_hint && circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Relay begin request unsupported at AP. Dropping."); return 0; } if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED && layer_hint != TO_ORIGIN_CIRCUIT(circ)->cpath->prev) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Relay begin request to Hidden Service " "from intermediary node. Dropping."); return 0; } if (conn) { log_fn(LOG_PROTOCOL_WARN, domain, "Begin cell for known stream. Dropping."); return 0; } if (rh.command == RELAY_COMMAND_BEGIN_DIR && circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) { /* Assign this circuit and its app-ward OR connection a unique ID, * so that we can measure download times. The local edge and dir * connection will be assigned the same ID when they are created * and linked. 
*/ static uint64_t next_id = 0; circ->dirreq_id = ++next_id; TO_OR_CIRCUIT(circ)->p_chan->dirreq_id = circ->dirreq_id; } return connection_exit_begin_conn(cell, circ); case RELAY_COMMAND_DATA: ++stats_n_data_cells_received; if (( layer_hint && --layer_hint->deliver_window < 0) || (!layer_hint && --circ->deliver_window < 0)) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "(relay data) circ deliver_window below 0. Killing."); if (conn) { /* XXXX Do we actually need to do this? Will killing the circuit * not send an END and mark the stream for close as appropriate? */ connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); connection_mark_for_close(TO_CONN(conn)); } return -END_CIRC_REASON_TORPROTOCOL; } log_debug(domain,"circ deliver_window now %d.", layer_hint ? layer_hint->deliver_window : circ->deliver_window); circuit_consider_sending_sendme(circ, layer_hint); if (!conn) { log_info(domain,"data cell dropped, unknown stream (streamid %d).", rh.stream_id); return 0; } if (--conn->deliver_window < 0) { /* is it below 0 after decrement? */ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "(relay data) conn deliver_window below 0. Killing."); return -END_CIRC_REASON_TORPROTOCOL; } stats_n_data_bytes_received += rh.length; connection_write_to_buf((char*)(cell->payload + RELAY_HEADER_SIZE), rh.length, TO_CONN(conn)); if (!optimistic_data) { /* Only send a SENDME if we're not getting optimistic data; otherwise * a SENDME could arrive before the CONNECTED. */ connection_edge_consider_sending_sendme(conn); } return 0; case RELAY_COMMAND_END: reason = rh.length > 0 ? get_uint8(cell->payload+RELAY_HEADER_SIZE) : END_STREAM_REASON_MISC; if (!conn) { log_info(domain,"end cell (%s) dropped, unknown stream.", stream_end_reason_to_string(reason)); return 0; } /* XXX add to this log_fn the exit node's nickname? */ log_info(domain,TOR_SOCKET_T_FORMAT": end cell (%s) for stream %d. " "Removing stream.", conn->base_.s, stream_end_reason_to_string(reason), conn->stream_id); if (conn->base_.type == CONN_TYPE_AP) { entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn); if (entry_conn->socks_request && !entry_conn->socks_request->has_finished) log_warn(LD_BUG, "open stream hasn't sent socks answer yet? Closing."); } /* We just *got* an end; no reason to send one. */ conn->edge_has_sent_end = 1; if (!conn->end_reason) conn->end_reason = reason | END_STREAM_REASON_FLAG_REMOTE; if (!conn->base_.marked_for_close) { /* only mark it if not already marked. it's possible to * get the 'end' right around when the client hangs up on us. */ connection_mark_and_flush(TO_CONN(conn)); } return 0; case RELAY_COMMAND_EXTEND: case RELAY_COMMAND_EXTEND2: { static uint64_t total_n_extend=0, total_nonearly=0; total_n_extend++; if (rh.stream_id) { log_fn(LOG_PROTOCOL_WARN, domain, "'extend' cell received for non-zero stream. Dropping."); return 0; } if (cell->command != CELL_RELAY_EARLY && !networkstatus_get_param(NULL,"AllowNonearlyExtend",0,0,1)) { #define EARLY_WARNING_INTERVAL 3600 static ratelim_t early_warning_limit = RATELIM_INIT(EARLY_WARNING_INTERVAL); char *m; if (cell->command == CELL_RELAY) { ++total_nonearly; if ((m = rate_limit_log(&early_warning_limit, approx_time()))) { double percentage = ((double)total_nonearly)/total_n_extend; percentage *= 100; log_fn(LOG_PROTOCOL_WARN, domain, "EXTEND cell received, " "but not via RELAY_EARLY. 
Dropping.%s", m); log_fn(LOG_PROTOCOL_WARN, domain, " (We have dropped %.02f%% of " "all EXTEND cells for this reason)", percentage); tor_free(m); } } else { log_fn(LOG_WARN, domain, "EXTEND cell received, in a cell with type %d! Dropping.", cell->command); } return 0; } return circuit_extend(cell, circ); } case RELAY_COMMAND_EXTENDED: case RELAY_COMMAND_EXTENDED2: if (!layer_hint) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "'extended' unsupported at non-origin. Dropping."); return 0; } log_debug(domain,"Got an extended cell! Yay."); { extended_cell_t extended_cell; if (extended_cell_parse(&extended_cell, rh.command, (const uint8_t*)cell->payload+RELAY_HEADER_SIZE, rh.length)<0) { log_warn(LD_PROTOCOL, "Can't parse EXTENDED cell; killing circuit."); return -END_CIRC_REASON_TORPROTOCOL; } if ((reason = circuit_finish_handshake(TO_ORIGIN_CIRCUIT(circ), &extended_cell.created_cell)) < 0) { log_warn(domain,"circuit_finish_handshake failed."); return reason; } } if ((reason=circuit_send_next_onion_skin(TO_ORIGIN_CIRCUIT(circ)))<0) { log_info(domain,"circuit_send_next_onion_skin() failed."); return reason; } return 0; case RELAY_COMMAND_TRUNCATE: if (layer_hint) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "'truncate' unsupported at origin. Dropping."); return 0; } if (circ->n_hop) { if (circ->n_chan) log_warn(LD_BUG, "n_chan and n_hop set on the same circuit!"); extend_info_free(circ->n_hop); circ->n_hop = NULL; tor_free(circ->n_chan_create_cell); circuit_set_state(circ, CIRCUIT_STATE_OPEN); } if (circ->n_chan) { uint8_t trunc_reason = get_uint8(cell->payload + RELAY_HEADER_SIZE); circuit_clear_cell_queue(circ, circ->n_chan); channel_send_destroy(circ->n_circ_id, circ->n_chan, trunc_reason); circuit_set_n_circid_chan(circ, 0, NULL); } log_debug(LD_EXIT, "Processed 'truncate', replying."); { char payload[1]; payload[0] = (char)END_CIRC_REASON_REQUESTED; relay_send_command_from_edge(0, circ, RELAY_COMMAND_TRUNCATED, payload, sizeof(payload), NULL); } return 0; case RELAY_COMMAND_TRUNCATED: if (!layer_hint) { log_fn(LOG_PROTOCOL_WARN, LD_EXIT, "'truncated' unsupported at non-origin. Dropping."); return 0; } circuit_truncated(TO_ORIGIN_CIRCUIT(circ), layer_hint, get_uint8(cell->payload + RELAY_HEADER_SIZE)); return 0; case RELAY_COMMAND_CONNECTED: if (conn) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "'connected' unsupported while open. Closing circ."); return -END_CIRC_REASON_TORPROTOCOL; } log_info(domain, "'connected' received, no conn attached anymore. Ignoring."); return 0; case RELAY_COMMAND_SENDME: if (!rh.stream_id) { if (layer_hint) { if (layer_hint->package_window + CIRCWINDOW_INCREMENT > CIRCWINDOW_START_MAX) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Unexpected sendme cell from exit relay. " "Closing circ."); return -END_CIRC_REASON_TORPROTOCOL; } layer_hint->package_window += CIRCWINDOW_INCREMENT; log_debug(LD_APP,"circ-level sendme at origin, packagewindow %d.", layer_hint->package_window); circuit_resume_edge_reading(circ, layer_hint); } else { if (circ->package_window + CIRCWINDOW_INCREMENT > CIRCWINDOW_START_MAX) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Unexpected sendme cell from client. 
" "Closing circ (window %d).", circ->package_window); return -END_CIRC_REASON_TORPROTOCOL; } circ->package_window += CIRCWINDOW_INCREMENT; log_debug(LD_APP, "circ-level sendme at non-origin, packagewindow %d.", circ->package_window); circuit_resume_edge_reading(circ, layer_hint); } return 0; } if (!conn) { log_info(domain,"sendme cell dropped, unknown stream (streamid %d).", rh.stream_id); return 0; } conn->package_window += STREAMWINDOW_INCREMENT; log_debug(domain,"stream-level sendme, packagewindow now %d.", conn->package_window); if (circuit_queue_streams_are_blocked(circ)) { /* Still waiting for queue to flush; don't touch conn */ return 0; } connection_start_reading(TO_CONN(conn)); /* handle whatever might still be on the inbuf */ if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) { /* (We already sent an end cell if possible) */ connection_mark_for_close(TO_CONN(conn)); return 0; } return 0; case RELAY_COMMAND_RESOLVE: if (layer_hint) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "resolve request unsupported at AP; dropping."); return 0; } else if (conn) { log_fn(LOG_PROTOCOL_WARN, domain, "resolve request for known stream; dropping."); return 0; } else if (circ->purpose != CIRCUIT_PURPOSE_OR) { log_fn(LOG_PROTOCOL_WARN, domain, "resolve request on circ with purpose %d; dropping", circ->purpose); return 0; } connection_exit_begin_resolve(cell, TO_OR_CIRCUIT(circ)); return 0; case RELAY_COMMAND_RESOLVED: if (conn) { log_fn(LOG_PROTOCOL_WARN, domain, "'resolved' unsupported while open. Closing circ."); return -END_CIRC_REASON_TORPROTOCOL; } log_info(domain, "'resolved' received, no conn attached anymore. Ignoring."); return 0; case RELAY_COMMAND_ESTABLISH_INTRO: case RELAY_COMMAND_ESTABLISH_RENDEZVOUS: case RELAY_COMMAND_INTRODUCE1: case RELAY_COMMAND_INTRODUCE2: case RELAY_COMMAND_INTRODUCE_ACK: case RELAY_COMMAND_RENDEZVOUS1: case RELAY_COMMAND_RENDEZVOUS2: case RELAY_COMMAND_INTRO_ESTABLISHED: case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED: rend_process_relay_cell(circ, layer_hint, rh.command, rh.length, cell->payload+RELAY_HEADER_SIZE); return 0; } log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Received unknown relay command %d. Perhaps the other side is using " "a newer version of Tor? Dropping.", rh.command); return 0; /* for forward compatibility, don't kill the circuit */ } /** How many relay_data cells have we built, ever? */ uint64_t stats_n_data_cells_packaged = 0; /** How many bytes of data have we put in relay_data cells have we built, * ever? This would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_packaged if * every relay cell we ever sent were completely full of data. */ uint64_t stats_n_data_bytes_packaged = 0; /** How many relay_data cells have we received, ever? */ uint64_t stats_n_data_cells_received = 0; /** How many bytes of data have we received relay_data cells, ever? This would * be RELAY_PAYLOAD_SIZE*stats_n_data_cells_packaged if every relay cell we * ever received were completely full of data. */ uint64_t stats_n_data_bytes_received = 0; /** If <b>conn</b> has an entire relay payload of bytes on its inbuf (or * <b>package_partial</b> is true), and the appropriate package windows aren't * empty, grab a cell and send it down the circuit. * * If *<b>max_cells</b> is given, package no more than max_cells. Decrement * *<b>max_cells</b> by the number of cells packaged. * * Return -1 (and send a RELAY_COMMAND_END cell if necessary) if conn should * be marked for close, else return 0. 
*/ int connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial, int *max_cells) { size_t bytes_to_process, length; char payload[CELL_PAYLOAD_SIZE]; circuit_t *circ; const unsigned domain = conn->base_.type == CONN_TYPE_AP ? LD_APP : LD_EXIT; int sending_from_optimistic = 0; entry_connection_t *entry_conn = conn->base_.type == CONN_TYPE_AP ? EDGE_TO_ENTRY_CONN(conn) : NULL; const int sending_optimistically = entry_conn && conn->base_.type == CONN_TYPE_AP && conn->base_.state != AP_CONN_STATE_OPEN; crypt_path_t *cpath_layer = conn->cpath_layer; tor_assert(conn); if (conn->base_.marked_for_close) { log_warn(LD_BUG, "called on conn that's already marked for close at %s:%d.", conn->base_.marked_for_close_file, conn->base_.marked_for_close); return 0; } if (max_cells && *max_cells <= 0) return 0; repeat_connection_edge_package_raw_inbuf: circ = circuit_get_by_edge_conn(conn); if (!circ) { log_info(domain,"conn has no circuit! Closing."); conn->end_reason = END_STREAM_REASON_CANT_ATTACH; return -1; } if (circuit_consider_stop_edge_reading(circ, cpath_layer)) return 0; if (conn->package_window <= 0) { log_info(domain,"called with package_window %d. Skipping.", conn->package_window); connection_stop_reading(TO_CONN(conn)); return 0; } sending_from_optimistic = entry_conn && entry_conn->sending_optimistic_data != NULL; if (PREDICT_UNLIKELY(sending_from_optimistic)) { bytes_to_process = generic_buffer_len(entry_conn->sending_optimistic_data); if (PREDICT_UNLIKELY(!bytes_to_process)) { log_warn(LD_BUG, "sending_optimistic_data was non-NULL but empty"); bytes_to_process = connection_get_inbuf_len(TO_CONN(conn)); sending_from_optimistic = 0; } } else { bytes_to_process = connection_get_inbuf_len(TO_CONN(conn)); } if (!bytes_to_process) return 0; if (!package_partial && bytes_to_process < RELAY_PAYLOAD_SIZE) return 0; if (bytes_to_process > RELAY_PAYLOAD_SIZE) { length = RELAY_PAYLOAD_SIZE; } else { length = bytes_to_process; } stats_n_data_bytes_packaged += length; stats_n_data_cells_packaged += 1; if (PREDICT_UNLIKELY(sending_from_optimistic)) { /* XXXX We could be more efficient here by sometimes packing * previously-sent optimistic data in the same cell with data * from the inbuf. */ generic_buffer_get(entry_conn->sending_optimistic_data, payload, length); if (!generic_buffer_len(entry_conn->sending_optimistic_data)) { generic_buffer_free(entry_conn->sending_optimistic_data); entry_conn->sending_optimistic_data = NULL; } } else { connection_fetch_from_buf(payload, length, TO_CONN(conn)); } log_debug(domain,TOR_SOCKET_T_FORMAT": Packaging %d bytes (%d waiting).", conn->base_.s, (int)length, (int)connection_get_inbuf_len(TO_CONN(conn))); if (sending_optimistically && !sending_from_optimistic) { /* This is new optimistic data; remember it in case we need to detach and retry */ if (!entry_conn->pending_optimistic_data) entry_conn->pending_optimistic_data = generic_buffer_new(); generic_buffer_add(entry_conn->pending_optimistic_data, payload, length); } if (connection_edge_send_command(conn, RELAY_COMMAND_DATA, payload, length) < 0 ) /* circuit got marked for close, don't continue, don't need to mark conn */ return 0; if (!cpath_layer) { /* non-rendezvous exit */ tor_assert(circ->package_window > 0); circ->package_window--; } else { /* we're an AP, or an exit on a rendezvous circ */ tor_assert(cpath_layer->package_window > 0); cpath_layer->package_window--; } if (--conn->package_window <= 0) { /* is it 0 after decrement? 
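      If so, this stream's send window is exhausted: stop reading from the
      edge socket until a stream-level SENDME (handled above) refills
      package_window.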
*/ connection_stop_reading(TO_CONN(conn)); log_debug(domain,"conn->package_window reached 0."); circuit_consider_stop_edge_reading(circ, cpath_layer); return 0; /* don't process the inbuf any more */ } log_debug(domain,"conn->package_window is now %d",conn->package_window); if (max_cells) { *max_cells -= 1; if (*max_cells <= 0) return 0; } /* handle more if there's more, or return 0 if there isn't */ goto repeat_connection_edge_package_raw_inbuf; } /** Called when we've just received a relay data cell, when * we've just finished flushing all bytes to stream <b>conn</b>, * or when we've flushed *some* bytes to the stream <b>conn</b>. * * If conn->outbuf is not too full, and our deliver window is * low, send back a suitable number of stream-level sendme cells. */ void connection_edge_consider_sending_sendme(edge_connection_t *conn) { circuit_t *circ; if (connection_outbuf_too_full(TO_CONN(conn))) return; circ = circuit_get_by_edge_conn(conn); if (!circ) { /* this can legitimately happen if the destroy has already * arrived and torn down the circuit */ log_info(LD_APP,"No circuit associated with conn. Skipping."); return; } while (conn->deliver_window <= STREAMWINDOW_START - STREAMWINDOW_INCREMENT) { log_debug(conn->base_.type == CONN_TYPE_AP ?LD_APP:LD_EXIT, "Outbuf %d, Queuing stream sendme.", (int)conn->base_.outbuf_flushlen); conn->deliver_window += STREAMWINDOW_INCREMENT; if (connection_edge_send_command(conn, RELAY_COMMAND_SENDME, NULL, 0) < 0) { log_warn(LD_APP,"connection_edge_send_command failed. Skipping."); return; /* the circuit's closed, don't continue */ } } } /** The circuit <b>circ</b> has received a circuit-level sendme * (on hop <b>layer_hint</b>, if we're the OP). Go through all the * attached streams and let them resume reading and packaging, if * their stream windows allow it. */ static void circuit_resume_edge_reading(circuit_t *circ, crypt_path_t *layer_hint) { if (circuit_queue_streams_are_blocked(circ)) { log_debug(layer_hint?LD_APP:LD_EXIT,"Too big queue, no resuming"); return; } log_debug(layer_hint?LD_APP:LD_EXIT,"resuming"); if (CIRCUIT_IS_ORIGIN(circ)) circuit_resume_edge_reading_helper(TO_ORIGIN_CIRCUIT(circ)->p_streams, circ, layer_hint); else circuit_resume_edge_reading_helper(TO_OR_CIRCUIT(circ)->n_streams, circ, layer_hint); } void stream_choice_seed_weak_rng(void) { crypto_seed_weak_rng(&stream_choice_rng); } /** A helper function for circuit_resume_edge_reading() above. * The arguments are the same, except that <b>conn</b> is the head * of a linked list of edge streams that should each be considered. */ static int circuit_resume_edge_reading_helper(edge_connection_t *first_conn, circuit_t *circ, crypt_path_t *layer_hint) { edge_connection_t *conn; int n_packaging_streams, n_streams_left; int packaged_this_round; int cells_on_queue; int cells_per_conn; edge_connection_t *chosen_stream = NULL; int max_to_package; if (first_conn == NULL) { /* Don't bother to try to do the rest of this if there are no connections * to resume. */ return 0; } /* How many cells do we have space for? It will be the minimum of * the number needed to exhaust the package window, and the minimum * needed to fill the cell queue. 
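   * Concretely, the lines below compute
   *
   *     max_to_package = MIN(circ->package_window,
   *                          CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue)
   *
   * where cells_on_queue counts the cells already queued toward the
   * appropriate channel.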
*/ max_to_package = circ->package_window; if (CIRCUIT_IS_ORIGIN(circ)) { cells_on_queue = circ->n_chan_cells.n; } else { or_circuit_t *or_circ = TO_OR_CIRCUIT(circ); cells_on_queue = or_circ->p_chan_cells.n; } if (CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue < max_to_package) max_to_package = CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue; /* Once we used to start listening on the streams in the order they * appeared in the linked list. That leads to starvation on the * streams that appeared later on the list, since the first streams * would always get to read first. Instead, we just pick a random * stream on the list, and enable reading for streams starting at that * point (and wrapping around as if the list were circular). It would * probably be better to actually remember which streams we've * serviced in the past, but this is simple and effective. */ /* Select a stream uniformly at random from the linked list. We * don't need cryptographic randomness here. */ { int num_streams = 0; for (conn = first_conn; conn; conn = conn->next_stream) { num_streams++; if (tor_weak_random_one_in_n(&stream_choice_rng, num_streams)) { chosen_stream = conn; } /* Invariant: chosen_stream has been chosen uniformly at random from * among the first num_streams streams on first_conn. * * (Note that we iterate over every stream on the circuit, so that after * we've considered the first stream, we've chosen it with P=1; and * after we consider the second stream, we've switched to it with P=1/2 * and stayed with the first stream with P=1/2; and after we've * considered the third stream, we've switched to it with P=1/3 and * remained with one of the first two streams with P=(2/3), giving each * one P=(1/2)(2/3) )=(1/3).) */ } } /* Count how many non-marked streams there are that have anything on * their inbuf, and enable reading on all of the connections. */ n_packaging_streams = 0; /* Activate reading starting from the chosen stream */ for (conn=chosen_stream; conn; conn = conn->next_stream) { /* Start reading for the streams starting from here */ if (conn->base_.marked_for_close || conn->package_window <= 0) continue; if (!layer_hint || conn->cpath_layer == layer_hint) { connection_start_reading(TO_CONN(conn)); if (connection_get_inbuf_len(TO_CONN(conn)) > 0) ++n_packaging_streams; } } /* Go back and do the ones we skipped, circular-style */ for (conn = first_conn; conn != chosen_stream; conn = conn->next_stream) { if (conn->base_.marked_for_close || conn->package_window <= 0) continue; if (!layer_hint || conn->cpath_layer == layer_hint) { connection_start_reading(TO_CONN(conn)); if (connection_get_inbuf_len(TO_CONN(conn)) > 0) ++n_packaging_streams; } } if (n_packaging_streams == 0) /* avoid divide-by-zero */ return 0; again: cells_per_conn = CEIL_DIV(max_to_package, n_packaging_streams); packaged_this_round = 0; n_streams_left = 0; /* Iterate over all connections. Package up to cells_per_conn cells on * each. Update packaged_this_round with the total number of cells * packaged, and n_streams_left with the number that still have data to * package. */ for (conn=first_conn; conn; conn=conn->next_stream) { if (conn->base_.marked_for_close || conn->package_window <= 0) continue; if (!layer_hint || conn->cpath_layer == layer_hint) { int n = cells_per_conn, r; /* handle whatever might still be on the inbuf */ r = connection_edge_package_raw_inbuf(conn, 1, &n); /* Note how many we packaged */ packaged_this_round += (cells_per_conn-n); if (r<0) { /* Problem while packaging. 
(We already sent an end cell if * possible) */ connection_mark_for_close(TO_CONN(conn)); continue; } /* If there's still data to read, we'll be coming back to this stream. */ if (connection_get_inbuf_len(TO_CONN(conn))) ++n_streams_left; /* If the circuit won't accept any more data, return without looking * at any more of the streams. Any connections that should be stopped * have already been stopped by connection_edge_package_raw_inbuf. */ if (circuit_consider_stop_edge_reading(circ, layer_hint)) return -1; /* XXXX should we also stop immediately if we fill up the cell queue? * Probably. */ } } /* If we made progress, and we are willing to package more, and there are * any streams left that want to package stuff... try again! */ if (packaged_this_round && packaged_this_round < max_to_package && n_streams_left) { max_to_package -= packaged_this_round; n_packaging_streams = n_streams_left; goto again; } return 0; } /** Check if the package window for <b>circ</b> is empty (at * hop <b>layer_hint</b> if it's defined). * * If yes, tell edge streams to stop reading and return 1. * Else return 0. */ static int circuit_consider_stop_edge_reading(circuit_t *circ, crypt_path_t *layer_hint) { edge_connection_t *conn = NULL; unsigned domain = layer_hint ? LD_APP : LD_EXIT; if (!layer_hint) { or_circuit_t *or_circ = TO_OR_CIRCUIT(circ); log_debug(domain,"considering circ->package_window %d", circ->package_window); if (circ->package_window <= 0) { log_debug(domain,"yes, not-at-origin. stopped."); for (conn = or_circ->n_streams; conn; conn=conn->next_stream) connection_stop_reading(TO_CONN(conn)); return 1; } return 0; } /* else, layer hint is defined, use it */ log_debug(domain,"considering layer_hint->package_window %d", layer_hint->package_window); if (layer_hint->package_window <= 0) { log_debug(domain,"yes, at-origin. stopped."); for (conn = TO_ORIGIN_CIRCUIT(circ)->p_streams; conn; conn=conn->next_stream) { if (conn->cpath_layer == layer_hint) connection_stop_reading(TO_CONN(conn)); } return 1; } return 0; } /** Check if the deliver_window for circuit <b>circ</b> (at hop * <b>layer_hint</b> if it's defined) is low enough that we should * send a circuit-level sendme back down the circuit. If so, send * enough sendmes that the window would be overfull if we sent any * more. */ static void circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint) { // log_fn(LOG_INFO,"Considering: layer_hint is %s", // layer_hint ? "defined" : "null"); while ((layer_hint ? layer_hint->deliver_window : circ->deliver_window) <= CIRCWINDOW_START - CIRCWINDOW_INCREMENT) { log_debug(LD_CIRC,"Queuing circuit sendme."); if (layer_hint) layer_hint->deliver_window += CIRCWINDOW_INCREMENT; else circ->deliver_window += CIRCWINDOW_INCREMENT; if (relay_send_command_from_edge(0, circ, RELAY_COMMAND_SENDME, NULL, 0, layer_hint) < 0) { log_warn(LD_CIRC, "relay_send_command_from_edge failed. Circuit's closed."); return; /* the circuit's closed, don't continue */ } } } #ifdef ACTIVE_CIRCUITS_PARANOIA #define assert_cmux_ok_paranoid(chan) \ assert_circuit_mux_okay(chan) #else #define assert_cmux_ok_paranoid(chan) #endif /** The total number of cells we have allocated from the memory pool. */ static size_t total_cells_allocated = 0; /** A memory pool to allocate packed_cell_t objects. */ static mp_pool_t *cell_pool = NULL; /** Allocate structures to hold cells. 
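 * (This sets up a memory pool of packed_cell_t objects; the 128*1024
 * argument below sizes the pool's allocation chunks, approximately, in
 * bytes.)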
*/ void init_cell_pool(void) { tor_assert(!cell_pool); cell_pool = mp_pool_new(sizeof(packed_cell_t), 128*1024); } /** Free all storage used to hold cells (and insertion times if we measure * cell statistics). */ void free_cell_pool(void) { /* Maybe we haven't called init_cell_pool yet; need to check for it. */ if (cell_pool) { mp_pool_destroy(cell_pool); cell_pool = NULL; } } /** Free excess storage in cell pool. */ void clean_cell_pool(void) { tor_assert(cell_pool); mp_pool_clean(cell_pool, 0, 1); } /** Release storage held by <b>cell</b>. */ static INLINE void packed_cell_free_unchecked(packed_cell_t *cell) { --total_cells_allocated; mp_pool_release(cell); } /** Allocate and return a new packed_cell_t. */ static INLINE packed_cell_t * packed_cell_new(void) { ++total_cells_allocated; return mp_pool_get(cell_pool); } /** Return a packed cell used outside by channel_t lower layer */ void packed_cell_free(packed_cell_t *cell) { packed_cell_free_unchecked(cell); } /** Log current statistics for cell pool allocation at log level * <b>severity</b>. */ void dump_cell_pool_usage(int severity) { circuit_t *c; int n_circs = 0; int n_cells = 0; for (c = circuit_get_global_list_(); c; c = c->next) { n_cells += c->n_chan_cells.n; if (!CIRCUIT_IS_ORIGIN(c)) n_cells += TO_OR_CIRCUIT(c)->p_chan_cells.n; ++n_circs; } tor_log(severity, LD_MM, "%d cells allocated on %d circuits. %d cells leaked.", n_cells, n_circs, (int)total_cells_allocated - n_cells); mp_pool_log_status(cell_pool, severity); } /** Allocate a new copy of packed <b>cell</b>. */ static INLINE packed_cell_t * packed_cell_copy(const cell_t *cell, int wide_circ_ids) { packed_cell_t *c = packed_cell_new(); cell_pack(c, cell, wide_circ_ids); c->next = NULL; return c; } /** Append <b>cell</b> to the end of <b>queue</b>. */ void cell_queue_append(cell_queue_t *queue, packed_cell_t *cell) { if (queue->tail) { tor_assert(!queue->tail->next); queue->tail->next = cell; } else { queue->head = cell; } queue->tail = cell; cell->next = NULL; ++queue->n; } /** Append a newly allocated copy of <b>cell</b> to the end of <b>queue</b> */ void cell_queue_append_packed_copy(cell_queue_t *queue, const cell_t *cell, int wide_circ_ids) { struct timeval now; packed_cell_t *copy = packed_cell_copy(cell, wide_circ_ids); tor_gettimeofday_cached(&now); copy->inserted_time = (uint32_t)tv_to_msec(&now); cell_queue_append(queue, copy); } /** Remove and free every cell in <b>queue</b>. */ void cell_queue_clear(cell_queue_t *queue) { packed_cell_t *cell, *next; cell = queue->head; while (cell) { next = cell->next; packed_cell_free_unchecked(cell); cell = next; } queue->head = queue->tail = NULL; queue->n = 0; } /** Extract and return the cell at the head of <b>queue</b>; return NULL if * <b>queue</b> is empty. */ static INLINE packed_cell_t * cell_queue_pop(cell_queue_t *queue) { packed_cell_t *cell = queue->head; if (!cell) return NULL; queue->head = cell->next; if (cell == queue->tail) { tor_assert(!queue->head); queue->tail = NULL; } --queue->n; return cell; } /** Return the total number of bytes used for each packed_cell in a queue. * Approximate. */ size_t packed_cell_mem_cost(void) { return sizeof(packed_cell_t) + MP_POOL_ITEM_OVERHEAD; } /** Check whether we've got too much space used for cells. If so, * call the OOM handler and return 1. Otherwise, return 0. 
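 * The estimate used is simply
 *
 *     total_cells_allocated * packed_cell_mem_cost()
 *
 * compared against the MaxMemInCellQueues option.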
*/ static int cell_queues_check_size(void) { size_t alloc = total_cells_allocated * packed_cell_mem_cost(); if (alloc >= get_options()->MaxMemInCellQueues) { circuits_handle_oom(alloc); return 1; } return 0; } /** * Update the number of cells available on the circuit's n_chan or p_chan's * circuit mux. */ void update_circuit_on_cmux_(circuit_t *circ, cell_direction_t direction, const char *file, int lineno) { channel_t *chan = NULL; or_circuit_t *or_circ = NULL; circuitmux_t *cmux = NULL; tor_assert(circ); /* Okay, get the channel */ if (direction == CELL_DIRECTION_OUT) { chan = circ->n_chan; } else { or_circ = TO_OR_CIRCUIT(circ); chan = or_circ->p_chan; } tor_assert(chan); tor_assert(chan->cmux); /* Now get the cmux */ cmux = chan->cmux; /* Cmux sanity check */ if (! circuitmux_is_circuit_attached(cmux, circ)) { log_warn(LD_BUG, "called on non-attachd circuit from %s:%d", file, lineno); return; } tor_assert(circuitmux_attached_circuit_direction(cmux, circ) == direction); assert_cmux_ok_paranoid(chan); /* Update the number of cells we have for the circuit mux */ if (direction == CELL_DIRECTION_OUT) { circuitmux_set_num_cells(cmux, circ, circ->n_chan_cells.n); } else { circuitmux_set_num_cells(cmux, circ, or_circ->p_chan_cells.n); } assert_cmux_ok_paranoid(chan); } /** Remove all circuits from the cmux on <b>chan</b>. */ void channel_unlink_all_circuits(channel_t *chan) { tor_assert(chan); tor_assert(chan->cmux); circuitmux_detach_all_circuits(chan->cmux); chan->num_n_circuits = 0; chan->num_p_circuits = 0; } /** Block (if <b>block</b> is true) or unblock (if <b>block</b> is false) * every edge connection that is using <b>circ</b> to write to <b>chan</b>, * and start or stop reading as appropriate. * * If <b>stream_id</b> is nonzero, block only the edge connection whose * stream_id matches it. * * Returns the number of streams whose status we changed. */ static int set_streams_blocked_on_circ(circuit_t *circ, channel_t *chan, int block, streamid_t stream_id) { edge_connection_t *edge = NULL; int n = 0; if (circ->n_chan == chan) { circ->streams_blocked_on_n_chan = block; if (CIRCUIT_IS_ORIGIN(circ)) edge = TO_ORIGIN_CIRCUIT(circ)->p_streams; } else { circ->streams_blocked_on_p_chan = block; tor_assert(!CIRCUIT_IS_ORIGIN(circ)); edge = TO_OR_CIRCUIT(circ)->n_streams; } for (; edge; edge = edge->next_stream) { connection_t *conn = TO_CONN(edge); if (stream_id && edge->stream_id != stream_id) continue; if (edge->edge_blocked_on_circ != block) { ++n; edge->edge_blocked_on_circ = block; } if (!conn->read_event && !HAS_BUFFEREVENT(conn)) { /* This connection is a placeholder for something; probably a DNS * request. It can't actually stop or start reading.*/ continue; } if (block) { if (connection_is_reading(conn)) connection_stop_reading(conn); } else { /* Is this right? */ if (!connection_is_reading(conn)) connection_start_reading(conn); } } return n; } /** Pull as many cells as possible (but no more than <b>max</b>) from the * queue of the first active circuit on <b>chan</b>, and write them to * <b>chan</b>-&gt;outbuf. Return the number of cells written. Advance * the active circuit pointer to the next active circuit in the ring. 
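 *
 * Each pass of the loop below asks the cmux for the first active circuit,
 * pops exactly one cell from that circuit's queue, writes it to the
 * channel, and then updates the cmux cell counts so the next pick stays
 * fair; streams blocked on a now-short queue are unblocked along the way.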
*/ int channel_flush_from_first_active_circuit(channel_t *chan, int max) { circuitmux_t *cmux = NULL; int n_flushed = 0; cell_queue_t *queue; circuit_t *circ; or_circuit_t *or_circ; int streams_blocked; packed_cell_t *cell; /* Get the cmux */ tor_assert(chan); tor_assert(chan->cmux); cmux = chan->cmux; /* Main loop: pick a circuit, send a cell, update the cmux */ while (n_flushed < max) { circ = circuitmux_get_first_active_circuit(cmux); /* If it returns NULL, no cells left to send */ if (!circ) break; assert_cmux_ok_paranoid(chan); if (circ->n_chan == chan) { queue = &circ->n_chan_cells; streams_blocked = circ->streams_blocked_on_n_chan; } else { or_circ = TO_OR_CIRCUIT(circ); tor_assert(or_circ->p_chan == chan); queue = &TO_OR_CIRCUIT(circ)->p_chan_cells; streams_blocked = circ->streams_blocked_on_p_chan; } /* Circuitmux told us this was active, so it should have cells */ tor_assert(queue->n > 0); /* * Get just one cell here; once we've sent it, that can change the circuit * selection, so we have to loop around for another even if this circuit * has more than one. */ cell = cell_queue_pop(queue); /* Calculate the exact time that this cell has spent in the queue. */ if (get_options()->CellStatistics && !CIRCUIT_IS_ORIGIN(circ)) { uint32_t msec_waiting; struct timeval tvnow; or_circ = TO_OR_CIRCUIT(circ); tor_gettimeofday_cached(&tvnow); msec_waiting = ((uint32_t)tv_to_msec(&tvnow)) - cell->inserted_time; or_circ->total_cell_waiting_time += msec_waiting; or_circ->processed_cells++; } /* If we just flushed our queue and this circuit is used for a * tunneled directory request, possibly advance its state. */ if (queue->n == 0 && chan->dirreq_id) geoip_change_dirreq_state(chan->dirreq_id, DIRREQ_TUNNELED, DIRREQ_CIRC_QUEUE_FLUSHED); /* Now send the cell */ channel_write_packed_cell(chan, cell); cell = NULL; /* * Don't packed_cell_free_unchecked(cell) here because the channel will * do so when it gets out of the channel queue (probably already did, in * which case that was an immediate double-free bug). */ /* Update the counter */ ++n_flushed; /* * Now update the cmux; tell it we've just sent a cell, and how many * we have left. */ circuitmux_notify_xmit_cells(cmux, circ, 1); circuitmux_set_num_cells(cmux, circ, queue->n); if (queue->n == 0) log_debug(LD_GENERAL, "Made a circuit inactive."); /* Is the cell queue low enough to unblock all the streams that are waiting * to write to this circuit? */ if (streams_blocked && queue->n <= CELL_QUEUE_LOWWATER_SIZE) set_streams_blocked_on_circ(circ, chan, 0, 0); /* unblock streams */ /* If n_flushed < max still, loop around and pick another circuit */ } /* Okay, we're done sending now */ assert_cmux_ok_paranoid(chan); return n_flushed; } /** Add <b>cell</b> to the queue of <b>circ</b> writing to <b>chan</b> * transmitting in <b>direction</b>. */ void append_cell_to_circuit_queue(circuit_t *circ, channel_t *chan, cell_t *cell, cell_direction_t direction, streamid_t fromstream) { or_circuit_t *orcirc = NULL; cell_queue_t *queue; int streams_blocked; if (circ->marked_for_close) return; if (direction == CELL_DIRECTION_OUT) { queue = &circ->n_chan_cells; streams_blocked = circ->streams_blocked_on_n_chan; } else { orcirc = TO_OR_CIRCUIT(circ); queue = &orcirc->p_chan_cells; streams_blocked = circ->streams_blocked_on_p_chan; } /* * Disabling this for now because of a possible guard discovery attack */ #if 0 /* Are we a middle circuit about to exceed ORCIRC_MAX_MIDDLE_CELLS? 
*/ if ((circ->n_chan != NULL) && CIRCUIT_IS_ORCIRC(circ)) { orcirc = TO_OR_CIRCUIT(circ); if (orcirc->p_chan) { if (queue->n + 1 >= ORCIRC_MAX_MIDDLE_CELLS) { /* Queueing this cell would put queue over the cap */ log_warn(LD_CIRC, "Got a cell exceeding the cap of %u in the %s direction " "on middle circ ID %u on chan ID " U64_FORMAT "; killing the circuit.", ORCIRC_MAX_MIDDLE_CELLS, (direction == CELL_DIRECTION_OUT) ? "n" : "p", (direction == CELL_DIRECTION_OUT) ? circ->n_circ_id : orcirc->p_circ_id, U64_PRINTF_ARG( (direction == CELL_DIRECTION_OUT) ? circ->n_chan->global_identifier : orcirc->p_chan->global_identifier)); circuit_mark_for_close(circ, END_CIRC_REASON_RESOURCELIMIT); return; } } } #endif cell_queue_append_packed_copy(queue, cell, chan->wide_circ_ids); if (PREDICT_UNLIKELY(cell_queues_check_size())) { /* We ran the OOM handler */ if (circ->marked_for_close) return; } /* If we have too many cells on the circuit, we should stop reading from * the edge streams for a while. */ if (!streams_blocked && queue->n >= CELL_QUEUE_HIGHWATER_SIZE) set_streams_blocked_on_circ(circ, chan, 1, 0); /* block streams */ if (streams_blocked && fromstream) { /* This edge connection is apparently not blocked; block it. */ set_streams_blocked_on_circ(circ, chan, 1, fromstream); } update_circuit_on_cmux(circ, direction); if (queue->n == 1) { /* This was the first cell added to the queue. We just made this * circuit active. */ log_debug(LD_GENERAL, "Made a circuit active."); } if (!channel_has_queued_writes(chan)) { /* There is no data at all waiting to be sent on the outbuf. Add a * cell, so that we can notice when it gets flushed, flushed_some can * get called, and we can start putting more data onto the buffer then. */ log_debug(LD_GENERAL, "Primed a buffer."); channel_flush_from_first_active_circuit(chan, 1); } } /** Append an encoded value of <b>addr</b> to <b>payload_out</b>, which must * have at least 18 bytes of free space. The encoding is, as specified in * tor-spec.txt: * RESOLVED_TYPE_IPV4 or RESOLVED_TYPE_IPV6 [1 byte] * LENGTH [1 byte] * ADDRESS [length bytes] * Return the number of bytes added, or -1 on error */ int append_address_to_payload(uint8_t *payload_out, const tor_addr_t *addr) { uint32_t a; switch (tor_addr_family(addr)) { case AF_INET: payload_out[0] = RESOLVED_TYPE_IPV4; payload_out[1] = 4; a = tor_addr_to_ipv4n(addr); memcpy(payload_out+2, &a, 4); return 6; case AF_INET6: payload_out[0] = RESOLVED_TYPE_IPV6; payload_out[1] = 16; memcpy(payload_out+2, tor_addr_to_in6_addr8(addr), 16); return 18; case AF_UNSPEC: default: return -1; } } /** Given <b>payload_len</b> bytes at <b>payload</b>, starting with an address * encoded as by append_address_to_payload(), try to decode the address into * *<b>addr_out</b>. Return the next byte in the payload after the address on * success, or NULL on failure. */ const uint8_t * decode_address_from_payload(tor_addr_t *addr_out, const uint8_t *payload, int payload_len) { if (payload_len < 2) return NULL; if (payload_len < 2+payload[1]) return NULL; switch (payload[0]) { case RESOLVED_TYPE_IPV4: if (payload[1] != 4) return NULL; tor_addr_from_ipv4n(addr_out, get_uint32(payload+2)); break; case RESOLVED_TYPE_IPV6: if (payload[1] != 16) return NULL; tor_addr_from_ipv6_bytes(addr_out, (char*)(payload+2)); break; default: tor_addr_make_unspec(addr_out); break; } return payload + 2 + payload[1]; } /** Remove all the cells queued on <b>circ</b> for <b>chan</b>. 
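 * (Used, for example, by the RELAY_COMMAND_TRUNCATE handler above before
 * sending a destroy toward the next hop.)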
*/ void circuit_clear_cell_queue(circuit_t *circ, channel_t *chan) { cell_queue_t *queue; cell_direction_t direction; if (circ->n_chan == chan) { queue = &circ->n_chan_cells; direction = CELL_DIRECTION_OUT; } else { or_circuit_t *orcirc = TO_OR_CIRCUIT(circ); tor_assert(orcirc->p_chan == chan); queue = &orcirc->p_chan_cells; direction = CELL_DIRECTION_IN; } /* Clear the queue */ cell_queue_clear(queue); /* Update the cell counter in the cmux */ if (chan->cmux && circuitmux_is_circuit_attached(chan->cmux, circ)) update_circuit_on_cmux(circ, direction); } /** Fail with an assert if the circuit mux on chan is corrupt */ void assert_circuit_mux_okay(channel_t *chan) { tor_assert(chan); tor_assert(chan->cmux); circuitmux_assert_okay(chan->cmux); } /** Return 1 if we shouldn't restart reading on this circuit, even if * we get a SENDME. Else return 0. */ static int circuit_queue_streams_are_blocked(circuit_t *circ) { if (CIRCUIT_IS_ORIGIN(circ)) { return circ->streams_blocked_on_n_chan; } else { return circ->streams_blocked_on_p_chan; } }
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* kdc/kdc_util.c - Utility functions for the KDC implementation */ /* * Copyright 1990,1991,2007,2008,2009 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ /* * Copyright (c) 2006-2008, Novell, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The copyright holder's name is not used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "k5-int.h" #include "kdc_util.h" #include "extern.h" #include <stdio.h> #include <ctype.h> #include <syslog.h> #include <kadm5/admin.h> #include "adm_proto.h" #include "net-server.h" #include <limits.h> #ifdef KRBCONF_VAGUE_ERRORS const int vague_errors = 1; #else const int vague_errors = 0; #endif static krb5_error_code kdc_rd_ap_req(kdc_realm_t *kdc_active_realm, krb5_ap_req *apreq, krb5_auth_context auth_context, krb5_db_entry **server, krb5_keyblock **tgskey); static krb5_error_code find_server_key(krb5_context, krb5_db_entry *, krb5_enctype, krb5_kvno, krb5_keyblock **, krb5_kvno *); /* * concatenate first two authdata arrays, returning an allocated replacement. 
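 * Either input may be NULL. Illustrative call (the variable names tkt_ad
 * and req_ad are hypothetical):
 *
 *     krb5_authdata **merged = NULL;
 *     ret = concat_authorization_data(context, tkt_ad, req_ad, &merged);
 *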
* The replacement should be freed with krb5_free_authdata(). */ krb5_error_code concat_authorization_data(krb5_context context, krb5_authdata **first, krb5_authdata **second, krb5_authdata ***output) { register int i, j; register krb5_authdata **ptr, **retdata; /* count up the entries */ i = 0; if (first) for (ptr = first; *ptr; ptr++) i++; if (second) for (ptr = second; *ptr; ptr++) i++; retdata = (krb5_authdata **)malloc((i+1)*sizeof(*retdata)); if (!retdata) return ENOMEM; retdata[i] = 0; /* null-terminated array */ for (i = 0, j = 0, ptr = first; j < 2 ; ptr = second, j++) while (ptr && *ptr) { /* now walk & copy */ retdata[i] = (krb5_authdata *)malloc(sizeof(*retdata[i])); if (!retdata[i]) { krb5_free_authdata(context, retdata); return ENOMEM; } *retdata[i] = **ptr; if (!(retdata[i]->contents = (krb5_octet *)malloc(retdata[i]->length))) { free(retdata[i]); retdata[i] = 0; krb5_free_authdata(context, retdata); return ENOMEM; } memcpy(retdata[i]->contents, (*ptr)->contents, retdata[i]->length); ptr++; i++; } *output = retdata; return 0; } krb5_boolean is_local_principal(kdc_realm_t *kdc_active_realm, krb5_const_principal princ1) { return krb5_realm_compare(kdc_context, princ1, tgs_server); } /* * Returns TRUE if the kerberos principal is the name of a Kerberos ticket * service. */ krb5_boolean krb5_is_tgs_principal(krb5_const_principal principal) { if (krb5_princ_size(kdc_context, principal) != 2) return FALSE; if (data_eq_string(*krb5_princ_component(kdc_context, principal, 0), KRB5_TGS_NAME)) return TRUE; else return FALSE; } /* Returns TRUE if principal is the name of a cross-realm TGS. */ krb5_boolean is_cross_tgs_principal(krb5_const_principal principal) { if (!krb5_is_tgs_principal(principal)) return FALSE; if (!data_eq(*krb5_princ_component(kdc_context, principal, 1), *krb5_princ_realm(kdc_context, principal))) return TRUE; else return FALSE; } /* * given authentication data (provides seed for checksum), verify checksum * for source data. */ static krb5_error_code comp_cksum(krb5_context kcontext, krb5_data *source, krb5_ticket *ticket, krb5_checksum *his_cksum) { krb5_error_code retval; krb5_boolean valid; if (!krb5_c_valid_cksumtype(his_cksum->checksum_type)) return KRB5KDC_ERR_SUMTYPE_NOSUPP; /* must be collision proof */ if (!krb5_c_is_coll_proof_cksum(his_cksum->checksum_type)) return KRB5KRB_AP_ERR_INAPP_CKSUM; /* verify checksum */ if ((retval = krb5_c_verify_checksum(kcontext, ticket->enc_part2->session, KRB5_KEYUSAGE_TGS_REQ_AUTH_CKSUM, source, his_cksum, &valid))) return(retval); if (!valid) return(KRB5KRB_AP_ERR_BAD_INTEGRITY); return(0); } /* If a header ticket is decrypted, *ticket_out is filled in even on error. 
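   (So a caller must free a non-NULL *ticket_out even when this function
   returns an error.)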
*/ krb5_error_code kdc_process_tgs_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, const krb5_fulladdr *from, krb5_data *pkt, krb5_ticket **ticket_out, krb5_db_entry **krbtgt_ptr, krb5_keyblock **tgskey, krb5_keyblock **subkey, krb5_pa_data **pa_tgs_req) { krb5_pa_data * tmppa; krb5_ap_req * apreq; krb5_error_code retval; krb5_authdata **authdata = NULL; krb5_data scratch1; krb5_data * scratch = NULL; krb5_boolean foreign_server = FALSE; krb5_auth_context auth_context = NULL; krb5_authenticator * authenticator = NULL; krb5_checksum * his_cksum = NULL; krb5_db_entry * krbtgt = NULL; krb5_ticket * ticket; *ticket_out = NULL; *krbtgt_ptr = NULL; *tgskey = NULL; tmppa = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_AP_REQ); if (!tmppa) return KRB5KDC_ERR_PADATA_TYPE_NOSUPP; scratch1.length = tmppa->length; scratch1.data = (char *)tmppa->contents; if ((retval = decode_krb5_ap_req(&scratch1, &apreq))) return retval; ticket = apreq->ticket; if (isflagset(apreq->ap_options, AP_OPTS_USE_SESSION_KEY) || isflagset(apreq->ap_options, AP_OPTS_MUTUAL_REQUIRED)) { krb5_klog_syslog(LOG_INFO, _("TGS_REQ: SESSION KEY or MUTUAL")); retval = KRB5KDC_ERR_POLICY; goto cleanup; } /* If the "server" principal in the ticket is not something in the local realm, then we must refuse to service the request if the client claims to be from the local realm. If we don't do this, then some other realm's nasty KDC can claim to be authenticating a client from our realm, and we'll give out tickets concurring with it! we set a flag here for checking below. */ foreign_server = !is_local_principal(kdc_active_realm, apreq->ticket->server); if ((retval = krb5_auth_con_init(kdc_context, &auth_context))) goto cleanup; /* Don't use a replay cache. */ if ((retval = krb5_auth_con_setflags(kdc_context, auth_context, 0))) goto cleanup; if ((retval = krb5_auth_con_setaddrs(kdc_context, auth_context, NULL, from->address)) ) goto cleanup_auth_context; retval = kdc_rd_ap_req(kdc_active_realm, apreq, auth_context, &krbtgt, tgskey); if (retval) goto cleanup_auth_context; /* "invalid flag" tickets can must be used to validate */ if (isflagset(ticket->enc_part2->flags, TKT_FLG_INVALID) && !isflagset(request->kdc_options, KDC_OPT_VALIDATE)) { retval = KRB5KRB_AP_ERR_TKT_INVALID; goto cleanup_auth_context; } if ((retval = krb5_auth_con_getrecvsubkey(kdc_context, auth_context, subkey))) goto cleanup_auth_context; if ((retval = krb5_auth_con_getauthenticator(kdc_context, auth_context, &authenticator))) goto cleanup_auth_context; retval = krb5_find_authdata(kdc_context, ticket->enc_part2->authorization_data, authenticator->authorization_data, KRB5_AUTHDATA_FX_ARMOR, &authdata); if (retval != 0) goto cleanup_authenticator; if (authdata&& authdata[0]) { k5_setmsg(kdc_context, KRB5KDC_ERR_POLICY, "ticket valid only as FAST armor"); retval = KRB5KDC_ERR_POLICY; krb5_free_authdata(kdc_context, authdata); goto cleanup_authenticator; } krb5_free_authdata(kdc_context, authdata); /* Check for a checksum */ if (!(his_cksum = authenticator->checksum)) { retval = KRB5KRB_AP_ERR_INAPP_CKSUM; goto cleanup_authenticator; } /* make sure the client is of proper lineage (see above) */ if (foreign_server && !krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_FOR_USER)) { if (is_local_principal(kdc_active_realm, ticket->enc_part2->client)) { /* someone in a foreign realm claiming to be local */ krb5_klog_syslog(LOG_INFO, _("PROCESS_TGS: failed lineage check")); retval = KRB5KDC_ERR_POLICY; goto cleanup_authenticator; } } /* * Check 
application checksum vs. tgs request * * We try checksumming the req-body two different ways: first we * try reaching into the raw asn.1 stream (if available), and * checksum that directly; if that fails, then we try encoding * using our local asn.1 library. */ if (pkt && (fetch_asn1_field((unsigned char *) pkt->data, 1, 4, &scratch1) >= 0)) { if (comp_cksum(kdc_context, &scratch1, ticket, his_cksum)) { if (!(retval = encode_krb5_kdc_req_body(request, &scratch))) retval = comp_cksum(kdc_context, scratch, ticket, his_cksum); krb5_free_data(kdc_context, scratch); if (retval) goto cleanup_authenticator; } } *pa_tgs_req = tmppa; *krbtgt_ptr = krbtgt; krbtgt = NULL; cleanup_authenticator: krb5_free_authenticator(kdc_context, authenticator); cleanup_auth_context: krb5_auth_con_free(kdc_context, auth_context); cleanup: if (retval != 0) { krb5_free_keyblock(kdc_context, *tgskey); *tgskey = NULL; } if (apreq->ticket->enc_part2 != NULL) { /* Steal the decrypted ticket pointer, even on error. */ *ticket_out = apreq->ticket; apreq->ticket = NULL; } krb5_free_ap_req(kdc_context, apreq); krb5_db_free_principal(kdc_context, krbtgt); return retval; } /* * This is a KDC wrapper around krb5_rd_req_decoded_anyflag(). * * We can't depend on KDB-as-keytab for handling the AP-REQ here for * optimization reasons: we want to minimize the number of KDB lookups. We'll * need the KDB entry for the TGS principal, and the TGS key used to decrypt * the TGT, elsewhere in the TGS code. * * This function also implements key rollover support for kvno 0 cross-realm * TGTs issued by AD. */ static krb5_error_code kdc_rd_ap_req(kdc_realm_t *kdc_active_realm, krb5_ap_req *apreq, krb5_auth_context auth_context, krb5_db_entry **server, krb5_keyblock **tgskey) { krb5_error_code retval; krb5_enctype search_enctype = apreq->ticket->enc_part.enctype; krb5_boolean match_enctype = 1; krb5_kvno kvno; size_t tries = 3; /* * When we issue tickets we use the first key in the principals' highest * kvno keyset. For non-cross-realm krbtgt principals we want to only * allow the use of the first key of the principal's keyset that matches * the given kvno. */ if (krb5_is_tgs_principal(apreq->ticket->server) && !is_cross_tgs_principal(apreq->ticket->server)) { search_enctype = -1; match_enctype = 0; } retval = kdc_get_server_key(kdc_context, apreq->ticket, KRB5_KDB_FLAG_ALIAS_OK, match_enctype, server, NULL, NULL); if (retval) return retval; *tgskey = NULL; kvno = apreq->ticket->enc_part.kvno; do { krb5_free_keyblock(kdc_context, *tgskey); retval = find_server_key(kdc_context, *server, search_enctype, kvno, tgskey, &kvno); if (retval) continue; /* Make the TGS key available to krb5_rd_req_decoded_anyflag() */ retval = krb5_auth_con_setuseruserkey(kdc_context, auth_context, *tgskey); if (retval) return retval; retval = krb5_rd_req_decoded_anyflag(kdc_context, &auth_context, apreq, apreq->ticket->server, kdc_active_realm->realm_keytab, NULL, NULL); /* If the ticket was decrypted, don't try any more keys. */ if (apreq->ticket->enc_part2 != NULL) break; } while (retval && apreq->ticket->enc_part.kvno == 0 && kvno-- > 1 && --tries > 0); return retval; } /* * The KDC should take the keytab associated with the realm and pass * that to the krb5_rd_req_decoded_anyflag(), but we still need to use * the service (TGS, here) key elsewhere. This approach is faster than * the KDB keytab approach too. * * This is also used by do_tgs_req() for u2u auth. 
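 *
 * When match_enctype is set, only keys whose enctype matches the ticket's
 * enc_part are considered; a nonzero ticket kvno likewise restricts the
 * key search (see find_server_key()).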
*/ krb5_error_code kdc_get_server_key(krb5_context context, krb5_ticket *ticket, unsigned int flags, krb5_boolean match_enctype, krb5_db_entry **server_ptr, krb5_keyblock **key, krb5_kvno *kvno) { krb5_error_code retval; krb5_db_entry * server = NULL; krb5_enctype search_enctype = -1; krb5_kvno search_kvno = -1; if (match_enctype) search_enctype = ticket->enc_part.enctype; if (ticket->enc_part.kvno) search_kvno = ticket->enc_part.kvno; *server_ptr = NULL; retval = krb5_db_get_principal(context, ticket->server, flags, &server); if (retval == KRB5_KDB_NOENTRY) { char *sname; if (!krb5_unparse_name(context, ticket->server, &sname)) { limit_string(sname); krb5_klog_syslog(LOG_ERR, _("TGS_REQ: UNKNOWN SERVER: server='%s'"), sname); free(sname); } return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; } else if (retval) return retval; if (server->attributes & KRB5_KDB_DISALLOW_SVR || server->attributes & KRB5_KDB_DISALLOW_ALL_TIX) { retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto errout; } if (key) { retval = find_server_key(context, server, search_enctype, search_kvno, key, kvno); if (retval) goto errout; } *server_ptr = server; server = NULL; return 0; errout: krb5_db_free_principal(context, server); return retval; } /* * A utility function to get the right key from a KDB entry. Used in handling * of kvno 0 TGTs, for example. */ static krb5_error_code find_server_key(krb5_context context, krb5_db_entry *server, krb5_enctype enctype, krb5_kvno kvno, krb5_keyblock **key_out, krb5_kvno *kvno_out) { krb5_error_code retval; krb5_key_data * server_key; krb5_keyblock * key; *key_out = NULL; retval = krb5_dbe_find_enctype(context, server, enctype, -1, kvno ? (krb5_int32)kvno : -1, &server_key); if (retval) return retval; if (!server_key) return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; if ((key = (krb5_keyblock *)malloc(sizeof *key)) == NULL) return ENOMEM; retval = krb5_dbe_decrypt_key_data(context, NULL, server_key, key, NULL); if (retval) goto errout; if (enctype != -1) { krb5_boolean similar; retval = krb5_c_enctype_compare(context, enctype, key->enctype, &similar); if (retval) goto errout; if (!similar) { retval = KRB5_KDB_NO_PERMITTED_KEY; goto errout; } key->enctype = enctype; } *key_out = key; key = NULL; if (kvno_out) *kvno_out = server_key->key_data_kvno; errout: krb5_free_keyblock(context, key); return retval; } /* * If candidate is the local TGT for realm, set *alias_out to candidate and * *storage_out to NULL. Otherwise, load the local TGT into *storage_out and * set *alias_out to *storage_out. * * In the future we might generalize this to a small per-request principal * cache. For now, it saves a load operation in the common case where the AS * server or TGS header ticket server is the local TGT. 
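 *
 * The caller owns *storage_out and should free it (with
 * krb5_db_free_principal()) when non-NULL; *alias_out is only a borrowed
 * pointer.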
*/ krb5_error_code get_local_tgt(krb5_context context, const krb5_data *realm, krb5_db_entry *candidate, krb5_db_entry **alias_out, krb5_db_entry **storage_out) { krb5_error_code ret; krb5_principal princ; krb5_db_entry *tgt; *alias_out = NULL; *storage_out = NULL; ret = krb5_build_principal_ext(context, &princ, realm->length, realm->data, KRB5_TGS_NAME_SIZE, KRB5_TGS_NAME, realm->length, realm->data, 0); if (ret) return ret; if (!krb5_principal_compare(context, candidate->princ, princ)) { ret = krb5_db_get_principal(context, princ, 0, &tgt); if (!ret) *storage_out = *alias_out = tgt; } else { *alias_out = candidate; } krb5_free_principal(context, princ); return ret; } /* This probably wants to be updated if you support last_req stuff */ static krb5_last_req_entry nolrentry = { KV5M_LAST_REQ_ENTRY, KRB5_LRQ_NONE, 0 }; static krb5_last_req_entry *nolrarray[] = { &nolrentry, 0 }; krb5_error_code fetch_last_req_info(krb5_db_entry *dbentry, krb5_last_req_entry ***lrentry) { *lrentry = nolrarray; return 0; } /* XXX! This is a temporary place-holder */ krb5_error_code check_hot_list(krb5_ticket *ticket) { return 0; } /* Convert an API error code to a protocol error code. */ int errcode_to_protocol(krb5_error_code code) { int protcode; protcode = code - ERROR_TABLE_BASE_krb5; return (protcode >= 0 && protcode <= 128) ? protcode : KRB_ERR_GENERIC; } /* Return -1 if the AS or TGS request is disallowed due to KDC policy on * anonymous tickets. */ int check_anon(kdc_realm_t *kdc_active_realm, krb5_principal client, krb5_principal server) { /* If restrict_anon is set, reject requests from anonymous to principals * other than the local TGT. */ if (kdc_active_realm->realm_restrict_anon && krb5_principal_compare_any_realm(kdc_context, client, krb5_anonymous_principal()) && !krb5_principal_compare(kdc_context, server, tgs_server)) return -1; return 0; } /* * Routines that validate a AS request; checks a lot of things. :-) * * Returns a Kerberos protocol error number, which is _not_ the same * as a com_err error number! */ #define AS_INVALID_OPTIONS (KDC_OPT_FORWARDED | KDC_OPT_PROXY | \ KDC_OPT_VALIDATE | KDC_OPT_RENEW | \ KDC_OPT_ENC_TKT_IN_SKEY | KDC_OPT_CNAME_IN_ADDL_TKT) int validate_as_request(kdc_realm_t *kdc_active_realm, register krb5_kdc_req *request, krb5_db_entry client, krb5_db_entry server, krb5_timestamp kdc_time, const char **status, krb5_pa_data ***e_data) { int errcode; krb5_error_code ret; /* * If an option is set that is only allowed in TGS requests, complain. */ if (request->kdc_options & AS_INVALID_OPTIONS) { *status = "INVALID AS OPTIONS"; return KDC_ERR_BADOPTION; } /* The client must not be expired */ if (client.expiration && ts_after(kdc_time, client.expiration)) { *status = "CLIENT EXPIRED"; if (vague_errors) return(KRB_ERR_GENERIC); else return(KDC_ERR_NAME_EXP); } /* The client's password must not be expired, unless the server is a KRB5_KDC_PWCHANGE_SERVICE. */ if (client.pw_expiration && ts_after(kdc_time, client.pw_expiration) && !isflagset(server.attributes, KRB5_KDB_PWCHANGE_SERVICE)) { *status = "CLIENT KEY EXPIRED"; if (vague_errors) return(KRB_ERR_GENERIC); else return(KDC_ERR_KEY_EXP); } /* The server must not be expired */ if (server.expiration && ts_after(kdc_time, server.expiration)) { *status = "SERVICE EXPIRED"; return(KDC_ERR_SERVICE_EXP); } /* * If the client requires password changing, then only allow the * pwchange service. 
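     * (that is, a service whose database entry carries the
     * KRB5_KDB_PWCHANGE_SERVICE attribute; typically the kadmin/changepw
     * principal)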
*/ if (isflagset(client.attributes, KRB5_KDB_REQUIRES_PWCHANGE) && !isflagset(server.attributes, KRB5_KDB_PWCHANGE_SERVICE)) { *status = "REQUIRED PWCHANGE"; return(KDC_ERR_KEY_EXP); } /* Client and server must allow postdating tickets */ if ((isflagset(request->kdc_options, KDC_OPT_ALLOW_POSTDATE) || isflagset(request->kdc_options, KDC_OPT_POSTDATED)) && (isflagset(client.attributes, KRB5_KDB_DISALLOW_POSTDATED) || isflagset(server.attributes, KRB5_KDB_DISALLOW_POSTDATED))) { *status = "POSTDATE NOT ALLOWED"; return(KDC_ERR_CANNOT_POSTDATE); } /* * A Windows KDC will return KDC_ERR_PREAUTH_REQUIRED instead of * KDC_ERR_POLICY in the following case: * * - KDC_OPT_FORWARDABLE is set in KDCOptions but local * policy has KRB5_KDB_DISALLOW_FORWARDABLE set for the * client, and; * - KRB5_KDB_REQUIRES_PRE_AUTH is set for the client but * preauthentication data is absent in the request. * * Hence, this check most be done after the check for preauth * data, and is now performed by validate_forwardable() (the * contents of which were previously below). */ /* Client and server must allow proxiable tickets */ if (isflagset(request->kdc_options, KDC_OPT_PROXIABLE) && (isflagset(client.attributes, KRB5_KDB_DISALLOW_PROXIABLE) || isflagset(server.attributes, KRB5_KDB_DISALLOW_PROXIABLE))) { *status = "PROXIABLE NOT ALLOWED"; return(KDC_ERR_POLICY); } /* Check to see if client is locked out */ if (isflagset(client.attributes, KRB5_KDB_DISALLOW_ALL_TIX)) { *status = "CLIENT LOCKED OUT"; return(KDC_ERR_CLIENT_REVOKED); } /* Check to see if server is locked out */ if (isflagset(server.attributes, KRB5_KDB_DISALLOW_ALL_TIX)) { *status = "SERVICE LOCKED OUT"; return(KDC_ERR_S_PRINCIPAL_UNKNOWN); } /* Check to see if server is allowed to be a service */ if (isflagset(server.attributes, KRB5_KDB_DISALLOW_SVR)) { *status = "SERVICE NOT ALLOWED"; return(KDC_ERR_MUST_USE_USER2USER); } if (check_anon(kdc_active_realm, client.princ, request->server) != 0) { *status = "ANONYMOUS NOT ALLOWED"; return(KDC_ERR_POLICY); } /* Perform KDB module policy checks. */ ret = krb5_db_check_policy_as(kdc_context, request, &client, &server, kdc_time, status, e_data); if (ret && ret != KRB5_PLUGIN_OP_NOTSUPP) return errcode_to_protocol(ret); /* Check against local policy. */ errcode = against_local_policy_as(request, client, server, kdc_time, status, e_data); if (errcode) return errcode; return 0; } int validate_forwardable(krb5_kdc_req *request, krb5_db_entry client, krb5_db_entry server, krb5_timestamp kdc_time, const char **status) { *status = NULL; if (isflagset(request->kdc_options, KDC_OPT_FORWARDABLE) && (isflagset(client.attributes, KRB5_KDB_DISALLOW_FORWARDABLE) || isflagset(server.attributes, KRB5_KDB_DISALLOW_FORWARDABLE))) { *status = "FORWARDABLE NOT ALLOWED"; return(KDC_ERR_POLICY); } else return 0; } /* Return KRB5KDC_ERR_POLICY if indicators does not contain the required auth * indicators for server, ENOMEM on allocation error, 0 otherwise. */ krb5_error_code check_indicators(krb5_context context, krb5_db_entry *server, krb5_data *const *indicators) { krb5_error_code ret; char *str = NULL, *copy = NULL, *save, *ind; ret = krb5_dbe_get_string(context, server, KRB5_KDB_SK_REQUIRE_AUTH, &str); if (ret || str == NULL) goto cleanup; copy = strdup(str); if (copy == NULL) { ret = ENOMEM; goto cleanup; } /* Look for any of the space-separated strings in indicators. 
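       For example, a require_auth value of "otp pkinit" accepts a ticket
       bearing either the "otp" or the "pkinit" authentication indicator.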
*/ ind = strtok_r(copy, " ", &save); while (ind != NULL) { if (authind_contains(indicators, ind)) goto cleanup; ind = strtok_r(NULL, " ", &save); } ret = KRB5KDC_ERR_POLICY; k5_setmsg(context, ret, _("Required auth indicators not present in ticket: %s"), str); cleanup: krb5_dbe_free_string(context, str); free(copy); return ret; } #define ASN1_ID_CLASS (0xc0) #define ASN1_ID_TYPE (0x20) #define ASN1_ID_TAG (0x1f) #define ASN1_CLASS_UNIV (0) #define ASN1_CLASS_APP (1) #define ASN1_CLASS_CTX (2) #define ASN1_CLASS_PRIV (3) #define asn1_id_constructed(x) (x & ASN1_ID_TYPE) #define asn1_id_primitive(x) (!asn1_id_constructed(x)) #define asn1_id_class(x) ((x & ASN1_ID_CLASS) >> 6) #define asn1_id_tag(x) (x & ASN1_ID_TAG) /* * asn1length - return encoded length of value. * * passed a pointer into the asn.1 stream, which is updated * to point right after the length bits. * * returns -1 on failure. */ static int asn1length(unsigned char **astream) { int length; /* resulting length */ int sublen; /* sublengths */ int blen; /* bytes of length */ unsigned char *p; /* substring searching */ if (**astream & 0x80) { blen = **astream & 0x7f; if (blen > 3) { return(-1); } for (++*astream, length = 0; blen; ++*astream, blen--) { length = (length << 8) | **astream; } if (length == 0) { /* indefinite length, figure out by hand */ p = *astream; p++; while (1) { /* compute value length. */ if ((sublen = asn1length(&p)) < 0) { return(-1); } p += sublen; /* check for termination */ if ((!*p++) && (!*p)) { p++; break; } } length = p - *astream; } } else { length = **astream; ++*astream; } return(length); } /* * fetch_asn1_field - return raw asn.1 stream of subfield. * * this routine is passed a context-dependent tag number and "level" and returns * the size and length of the corresponding level subfield. * * levels and are numbered starting from 1. * * returns 0 on success, -1 otherwise. */ int fetch_asn1_field(unsigned char *astream, unsigned int level, unsigned int field, krb5_data *data) { unsigned char *estream; /* end of stream */ int classes; /* # classes seen so far this level */ unsigned int levels = 0; /* levels seen so far */ int lastlevel = 1000; /* last level seen */ int length; /* various lengths */ int tag; /* tag number */ unsigned char savelen; /* saved length of our field */ classes = -1; /* we assume that the first identifier/length will tell us how long the entire stream is. */ astream++; estream = astream; if ((length = asn1length(&astream)) < 0) { return(-1); } estream += length; /* search down the stream, checking identifiers. we process identifiers until we hit the "level" we want, and then process that level for our subfield, always making sure we don't go off the end of the stream. */ while (astream < estream) { if (!asn1_id_constructed(*astream)) { return(-1); } if (asn1_id_class(*astream) == ASN1_CLASS_CTX) { if ((tag = (int)asn1_id_tag(*astream)) <= lastlevel) { levels++; classes = -1; } lastlevel = tag; if (levels == level) { /* in our context-dependent class, is this the one we're looking for ? */ if (tag == (int)field) { /* return length and data */ astream++; savelen = *astream; if ((length = asn1length(&astream)) < 0) { return(-1); } data->length = length; /* if the field length is indefinite, we will have to subtract two (terminating octets) from the length returned since we don't want to pass any info from the "wrapper" back. 
asn1length will always return the *total* length of the field, not just what's contained in it */ if ((savelen & 0xff) == 0x80) { data->length -=2 ; } data->data = (char *)astream; return(0); } else if (tag <= classes) { /* we've seen this class before, something must be wrong */ return(-1); } else { classes = tag; } } } /* if we're not on our level yet, process this value. otherwise skip over it */ astream++; if ((length = asn1length(&astream)) < 0) { return(-1); } if (levels == level) { astream += length; } } return(-1); } /* Return true if we believe server can support enctype as a session key. */ static krb5_boolean dbentry_supports_enctype(kdc_realm_t *kdc_active_realm, krb5_db_entry *server, krb5_enctype enctype) { krb5_error_code retval; krb5_key_data *datap; char *etypes_str = NULL; krb5_enctype default_enctypes[1] = { 0 }; krb5_enctype *etypes = NULL; krb5_boolean in_list; /* Look up the supported session key enctypes list in the KDB. */ retval = krb5_dbe_get_string(kdc_context, server, KRB5_KDB_SK_SESSION_ENCTYPES, &etypes_str); if (retval == 0 && etypes_str != NULL && *etypes_str != '\0') { /* Pass a fake profile key for tracing of unrecognized tokens. */ retval = krb5int_parse_enctype_list(kdc_context, "KDB-session_etypes", etypes_str, default_enctypes, &etypes); if (retval == 0 && etypes != NULL && etypes[0]) { in_list = k5_etypes_contains(etypes, enctype); free(etypes_str); free(etypes); return in_list; } /* Fall through on error or empty list */ } free(etypes_str); free(etypes); /* If configured to, assume every server without a session_enctypes * attribute supports DES_CBC_CRC. */ if (kdc_active_realm->realm_assume_des_crc_sess && enctype == ENCTYPE_DES_CBC_CRC) return TRUE; /* Due to an ancient interop problem, assume nothing supports des-cbc-md5 * unless there's a session_enctypes explicitly saying that it does. */ if (enctype == ENCTYPE_DES_CBC_MD5) return FALSE; /* Assume the server supports any enctype it has a long-term key for. */ return !krb5_dbe_find_enctype(kdc_context, server, enctype, -1, 0, &datap); } /* * This function returns the keytype which should be selected for the * session key. It is based on the ordered list which the user * requested, and what the KDC and the application server can support. */ krb5_enctype select_session_keytype(kdc_realm_t *kdc_active_realm, krb5_db_entry *server, int nktypes, krb5_enctype *ktype) { int i; for (i = 0; i < nktypes; i++) { if (!krb5_c_valid_enctype(ktype[i])) continue; if (!krb5_is_permitted_enctype(kdc_context, ktype[i])) continue; if (dbentry_supports_enctype(kdc_active_realm, server, ktype[i])) return ktype[i]; } return 0; } /* * Limit strings to a "reasonable" length to prevent crowding out of * other useful information in the log entry */ #define NAME_LENGTH_LIMIT 128 void limit_string(char *name) { int i; if (!name) return; if (strlen(name) < NAME_LENGTH_LIMIT) return; i = NAME_LENGTH_LIMIT-4; name[i++] = '.'; name[i++] = '.'; name[i++] = '.'; name[i] = '\0'; return; } /* * L10_2 = log10(2**x), rounded up; log10(2) ~= 0.301. */ #define L10_2(x) ((int)(((x * 301) + 999) / 1000)) /* * Max length of sprintf("%ld") for an int of type T; includes leading * minus sign and terminating NUL. 
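 * For a 32-bit type, for instance, L10_2(32) = (32*301 + 999)/1000 = 10,
 * so D_LEN yields 12 (ten digits plus sign and NUL).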
*/ #define D_LEN(t) (L10_2(sizeof(t) * CHAR_BIT) + 2) void ktypes2str(char *s, size_t len, int nktypes, krb5_enctype *ktype) { int i; char stmp[D_LEN(krb5_enctype) + 1]; char *p; if (nktypes < 0 || len < (sizeof(" etypes {...}") + D_LEN(int))) { *s = '\0'; return; } snprintf(s, len, "%d etypes {", nktypes); for (i = 0; i < nktypes; i++) { snprintf(stmp, sizeof(stmp), "%s%ld", i ? " " : "", (long)ktype[i]); if (strlen(s) + strlen(stmp) + sizeof("}") > len) break; strlcat(s, stmp, len); } if (i < nktypes) { /* * We broke out of the loop. Try to truncate the list. */ p = s + strlen(s); while (p - s + sizeof("...}") > len) { while (p > s && *p != ' ' && *p != '{') *p-- = '\0'; if (p > s && *p == ' ') { *p-- = '\0'; continue; } } strlcat(s, "...", len); } strlcat(s, "}", len); return; } void rep_etypes2str(char *s, size_t len, krb5_kdc_rep *rep) { char stmp[sizeof("ses=") + D_LEN(krb5_enctype)]; if (len < (3 * D_LEN(krb5_enctype) + sizeof("etypes {rep= tkt= ses=}"))) { *s = '\0'; return; } snprintf(s, len, "etypes {rep=%ld", (long)rep->enc_part.enctype); if (rep->ticket != NULL) { snprintf(stmp, sizeof(stmp), " tkt=%ld", (long)rep->ticket->enc_part.enctype); strlcat(s, stmp, len); } if (rep->ticket != NULL && rep->ticket->enc_part2 != NULL && rep->ticket->enc_part2->session != NULL) { snprintf(stmp, sizeof(stmp), " ses=%ld", (long)rep->ticket->enc_part2->session->enctype); strlcat(s, stmp, len); } strlcat(s, "}", len); return; } static krb5_error_code verify_for_user_checksum(krb5_context context, krb5_keyblock *key, krb5_pa_for_user *req) { krb5_error_code code; int i; krb5_int32 name_type; char *p; krb5_data data; krb5_boolean valid = FALSE; if (!krb5_c_is_keyed_cksum(req->cksum.checksum_type)) { return KRB5KRB_AP_ERR_INAPP_CKSUM; } /* * Checksum is over name type and string components of * client principal name and auth_package. 
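     * The four-byte name type is marshalled least-significant byte first,
     * followed by each principal component, the realm, and the
     * auth_package string, with no length prefixes or separators.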
*/ data.length = 4; for (i = 0; i < krb5_princ_size(context, req->user); i++) { data.length += krb5_princ_component(context, req->user, i)->length; } data.length += krb5_princ_realm(context, req->user)->length; data.length += req->auth_package.length; p = data.data = malloc(data.length); if (data.data == NULL) { return ENOMEM; } name_type = krb5_princ_type(context, req->user); p[0] = (name_type >> 0 ) & 0xFF; p[1] = (name_type >> 8 ) & 0xFF; p[2] = (name_type >> 16) & 0xFF; p[3] = (name_type >> 24) & 0xFF; p += 4; for (i = 0; i < krb5_princ_size(context, req->user); i++) { if (krb5_princ_component(context, req->user, i)->length > 0) { memcpy(p, krb5_princ_component(context, req->user, i)->data, krb5_princ_component(context, req->user, i)->length); } p += krb5_princ_component(context, req->user, i)->length; } if (krb5_princ_realm(context, req->user)->length > 0) { memcpy(p, krb5_princ_realm(context, req->user)->data, krb5_princ_realm(context, req->user)->length); } p += krb5_princ_realm(context, req->user)->length; if (req->auth_package.length > 0) memcpy(p, req->auth_package.data, req->auth_package.length); p += req->auth_package.length; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_APP_DATA_CKSUM, &data, &req->cksum, &valid); if (code == 0 && valid == FALSE) code = KRB5KRB_AP_ERR_MODIFIED; free(data.data); return code; } /* * Legacy protocol transition (Windows 2003 and above) */ static krb5_error_code kdc_process_for_user(kdc_realm_t *kdc_active_realm, krb5_pa_data *pa_data, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user **s4u_x509_user, const char **status) { krb5_error_code code; krb5_pa_for_user *for_user; krb5_data req_data; req_data.length = pa_data->length; req_data.data = (char *)pa_data->contents; code = decode_krb5_pa_for_user(&req_data, &for_user); if (code) { *status = "DECODE_PA_FOR_USER"; return code; } code = verify_for_user_checksum(kdc_context, tgs_session, for_user); if (code) { *status = "INVALID_S4U2SELF_CHECKSUM"; krb5_free_pa_for_user(kdc_context, for_user); return code; } *s4u_x509_user = calloc(1, sizeof(krb5_pa_s4u_x509_user)); if (*s4u_x509_user == NULL) { krb5_free_pa_for_user(kdc_context, for_user); return ENOMEM; } (*s4u_x509_user)->user_id.user = for_user->user; for_user->user = NULL; krb5_free_pa_for_user(kdc_context, for_user); return 0; } static krb5_error_code verify_s4u_x509_user_checksum(krb5_context context, krb5_keyblock *key, krb5_data *req_data, krb5_int32 kdc_req_nonce, krb5_pa_s4u_x509_user *req) { krb5_error_code code; krb5_data scratch; krb5_boolean valid = FALSE; if (enctype_requires_etype_info_2(key->enctype) && !krb5_c_is_keyed_cksum(req->cksum.checksum_type)) return KRB5KRB_AP_ERR_INAPP_CKSUM; if (req->user_id.nonce != kdc_req_nonce) return KRB5KRB_AP_ERR_MODIFIED; /* * Verify checksum over the encoded userid. If that fails, * re-encode, and verify that. This is similar to the * behaviour in kdc_process_tgs_req(). */ if (fetch_asn1_field((unsigned char *)req_data->data, 1, 0, &scratch) < 0) return ASN1_PARSE_ERROR; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, &scratch, &req->cksum, &valid); if (code != 0) return code; if (valid == FALSE) { krb5_data *data; code = encode_krb5_s4u_userid(&req->user_id, &data); if (code != 0) return code; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, data, &req->cksum, &valid); krb5_free_data(context, data); if (code != 0) return code; } return valid ? 
0 : KRB5KRB_AP_ERR_MODIFIED; } /* * New protocol transition request (Windows 2008 and above) */ static krb5_error_code kdc_process_s4u_x509_user(krb5_context context, krb5_kdc_req *request, krb5_pa_data *pa_data, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user **s4u_x509_user, const char **status) { krb5_error_code code; krb5_data req_data; req_data.length = pa_data->length; req_data.data = (char *)pa_data->contents; code = decode_krb5_pa_s4u_x509_user(&req_data, s4u_x509_user); if (code) { *status = "DECODE_PA_S4U_X509_USER"; return code; } code = verify_s4u_x509_user_checksum(context, tgs_subkey ? tgs_subkey : tgs_session, &req_data, request->nonce, *s4u_x509_user); if (code) { *status = "INVALID_S4U2SELF_CHECKSUM"; krb5_free_pa_s4u_x509_user(context, *s4u_x509_user); *s4u_x509_user = NULL; return code; } if (krb5_princ_size(context, (*s4u_x509_user)->user_id.user) == 0 || (*s4u_x509_user)->user_id.subject_cert.length != 0) { *status = "INVALID_S4U2SELF_REQUEST"; krb5_free_pa_s4u_x509_user(context, *s4u_x509_user); *s4u_x509_user = NULL; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; } return 0; } krb5_error_code kdc_make_s4u2self_rep(krb5_context context, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user *req_s4u_user, krb5_kdc_rep *reply, krb5_enc_kdc_rep_part *reply_encpart) { krb5_error_code code; krb5_data *data = NULL; krb5_pa_s4u_x509_user rep_s4u_user; krb5_pa_data padata; krb5_enctype enctype; krb5_keyusage usage; memset(&rep_s4u_user, 0, sizeof(rep_s4u_user)); rep_s4u_user.user_id.nonce = req_s4u_user->user_id.nonce; rep_s4u_user.user_id.user = req_s4u_user->user_id.user; rep_s4u_user.user_id.options = req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE; code = encode_krb5_s4u_userid(&rep_s4u_user.user_id, &data); if (code != 0) goto cleanup; if (req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY; else usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST; code = krb5_c_make_checksum(context, req_s4u_user->cksum.checksum_type, tgs_subkey != NULL ? tgs_subkey : tgs_session, usage, data, &rep_s4u_user.cksum); if (code != 0) goto cleanup; krb5_free_data(context, data); data = NULL; code = encode_krb5_pa_s4u_x509_user(&rep_s4u_user, &data); if (code != 0) goto cleanup; padata.magic = KV5M_PA_DATA; padata.pa_type = KRB5_PADATA_S4U_X509_USER; padata.length = data->length; padata.contents = (krb5_octet *)data->data; code = add_pa_data_element(context, &padata, &reply->padata, FALSE); if (code != 0) goto cleanup; free(data); data = NULL; if (tgs_subkey != NULL) enctype = tgs_subkey->enctype; else enctype = tgs_session->enctype; /* * Owing to a bug in Windows, unkeyed checksums were used for older * enctypes, including rc4-hmac. A forthcoming workaround for this * includes the checksum bytes in the encrypted padata. 
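     * The workaround below concatenates the request checksum and the
     * reply checksum into a single KRB5_PADATA_S4U_X509_USER element
     * placed in the reply's encrypted part.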
*/ if ((req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) && enctype_requires_etype_info_2(enctype) == FALSE) { padata.length = req_s4u_user->cksum.length + rep_s4u_user.cksum.length; padata.contents = malloc(padata.length); if (padata.contents == NULL) { code = ENOMEM; goto cleanup; } memcpy(padata.contents, req_s4u_user->cksum.contents, req_s4u_user->cksum.length); memcpy(&padata.contents[req_s4u_user->cksum.length], rep_s4u_user.cksum.contents, rep_s4u_user.cksum.length); code = add_pa_data_element(context,&padata, &reply_encpart->enc_padata, FALSE); if (code != 0) { free(padata.contents); goto cleanup; } } cleanup: if (rep_s4u_user.cksum.contents != NULL) krb5_free_checksum_contents(context, &rep_s4u_user.cksum); krb5_free_data(context, data); return code; } /* * Protocol transition (S4U2Self) */ krb5_error_code kdc_process_s4u2self_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, krb5_const_principal client_princ, const krb5_db_entry *server, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_timestamp kdc_time, krb5_pa_s4u_x509_user **s4u_x509_user, krb5_db_entry **princ_ptr, const char **status) { krb5_error_code code; krb5_pa_data *pa_data; int flags; krb5_db_entry *princ; *princ_ptr = NULL; pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_S4U_X509_USER); if (pa_data != NULL) { code = kdc_process_s4u_x509_user(kdc_context, request, pa_data, tgs_subkey, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else { pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_FOR_USER); if (pa_data != NULL) { code = kdc_process_for_user(kdc_active_realm, pa_data, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else return 0; } /* * We need to compare the client name in the TGT with the requested * server name. Supporting server name aliases without assuming a * global name service makes this difficult to do. * * The comparison below handles the following cases (note that the * term "principal name" below excludes the realm). * * (1) The requested service is a host-based service with two name * components, in which case we assume the principal name to * contain sufficient qualifying information. The realm is * ignored for the purpose of comparison. * * (2) The requested service name is an enterprise principal name: * the service principal name is compared with the unparsed * form of the client name (including its realm). * * (3) The requested service is some other name type: an exact * match is required. * * An alternative would be to look up the server once again with * FLAG_CANONICALIZE | FLAG_CLIENT_REFERRALS_ONLY set, do an exact * match between the returned name and client_princ. However, this * assumes that the client set FLAG_CANONICALIZE when requesting * the TGT and that we have a global name service. */ flags = 0; switch (krb5_princ_type(kdc_context, request->server)) { case KRB5_NT_SRV_HST: /* (1) */ if (krb5_princ_size(kdc_context, request->server) == 2) flags |= KRB5_PRINCIPAL_COMPARE_IGNORE_REALM; break; case KRB5_NT_ENTERPRISE_PRINCIPAL: /* (2) */ flags |= KRB5_PRINCIPAL_COMPARE_ENTERPRISE; break; default: /* (3) */ break; } if (!krb5_principal_compare_flags(kdc_context, request->server, client_princ, flags)) { *status = "INVALID_S4U2SELF_REQUEST"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; /* match Windows error code */ } /* * Protocol transition is mutually exclusive with renew/forward/etc * as well as user-to-user and constrained delegation. 
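     * Such requests are rejected here with KRB5KDC_ERR_BADOPTION.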
This check * is also made in validate_as_request(). * * We can assert from this check that the header ticket was a TGT, as * that is validated previously in validate_tgs_request(). */ if (request->kdc_options & AS_INVALID_OPTIONS) { *status = "INVALID AS OPTIONS"; return KRB5KDC_ERR_BADOPTION; } /* * Do not attempt to lookup principals in foreign realms. */ if (is_local_principal(kdc_active_realm, (*s4u_x509_user)->user_id.user)) { krb5_db_entry no_server; krb5_pa_data **e_data = NULL; code = krb5_db_get_principal(kdc_context, (*s4u_x509_user)->user_id.user, KRB5_KDB_FLAG_INCLUDE_PAC, &princ); if (code == KRB5_KDB_NOENTRY) { *status = "UNKNOWN_S4U2SELF_PRINCIPAL"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; } else if (code) { *status = "LOOKING_UP_S4U2SELF_PRINCIPAL"; return code; /* caller can free for_user */ } memset(&no_server, 0, sizeof(no_server)); code = validate_as_request(kdc_active_realm, request, *princ, no_server, kdc_time, status, &e_data); if (code) { krb5_db_free_principal(kdc_context, princ); krb5_free_pa_data(kdc_context, e_data); return code; } *princ_ptr = princ; } return 0; } static krb5_error_code check_allowed_to_delegate_to(krb5_context context, krb5_const_principal client, const krb5_db_entry *server, krb5_const_principal proxy) { /* Can't get a TGT (otherwise it would be unconstrained delegation) */ if (krb5_is_tgs_principal(proxy)) return KRB5KDC_ERR_POLICY; /* Must be in same realm */ if (!krb5_realm_compare(context, server->princ, proxy)) return KRB5KDC_ERR_POLICY; return krb5_db_check_allowed_to_delegate(context, client, server, proxy); } krb5_error_code kdc_process_s4u2proxy_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, const krb5_enc_tkt_part *t2enc, const krb5_db_entry *server, krb5_const_principal server_princ, krb5_const_principal proxy_princ, const char **status) { krb5_error_code errcode; /* * Constrained delegation is mutually exclusive with renew/forward/etc. * We can assert from this check that the header ticket was a TGT, as * that is validated previously in validate_tgs_request(). */ if (request->kdc_options & (NON_TGT_OPTION | KDC_OPT_ENC_TKT_IN_SKEY)) { *status = "INVALID_S4U2PROXY_OPTIONS"; return KRB5KDC_ERR_BADOPTION; } /* Ensure that evidence ticket server matches TGT client */ if (!krb5_principal_compare(kdc_context, server->princ, /* after canon */ server_princ)) { *status = "EVIDENCE_TICKET_MISMATCH"; return KRB5KDC_ERR_SERVER_NOMATCH; } if (!isflagset(t2enc->flags, TKT_FLG_FORWARDABLE)) { *status = "EVIDENCE_TKT_NOT_FORWARDABLE"; return KRB5_TKT_NOT_FORWARDABLE; } /* Backend policy check */ errcode = check_allowed_to_delegate_to(kdc_context, t2enc->client, server, proxy_princ); if (errcode) { *status = "NOT_ALLOWED_TO_DELEGATE"; return errcode; } return 0; } krb5_error_code kdc_check_transited_list(kdc_realm_t *kdc_active_realm, const krb5_data *trans, const krb5_data *realm1, const krb5_data *realm2) { krb5_error_code code; /* Check against the KDB module. Treat this answer as authoritative if the * method is supported and doesn't explicitly pass control. */ code = krb5_db_check_transited_realms(kdc_context, trans, realm1, realm2); if (code != KRB5_PLUGIN_OP_NOTSUPP && code != KRB5_PLUGIN_NO_HANDLE) return code; /* Check using krb5.conf [capaths] or hierarchical relationships. 
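     * krb5_check_transited_list() walks the realms recorded in the
     * ticket's transited encoding and accepts the path only if every
     * intermediate realm is permitted by a [capaths] path or by the
     * default hierarchical relationship between the two realms.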
*/ return krb5_check_transited_list(kdc_context, trans, realm1, realm2); } krb5_error_code validate_transit_path(krb5_context context, krb5_const_principal client, krb5_db_entry *server, krb5_db_entry *header_srv) { /* Incoming */ if (isflagset(server->attributes, KRB5_KDB_XREALM_NON_TRANSITIVE)) { return KRB5KDC_ERR_PATH_NOT_ACCEPTED; } /* Outgoing */ if (isflagset(header_srv->attributes, KRB5_KDB_XREALM_NON_TRANSITIVE) && (!krb5_principal_compare(context, server->princ, header_srv->princ) || !krb5_realm_compare(context, client, header_srv->princ))) { return KRB5KDC_ERR_PATH_NOT_ACCEPTED; } return 0; } krb5_boolean enctype_requires_etype_info_2(krb5_enctype enctype) { switch(enctype) { case ENCTYPE_DES_CBC_CRC: case ENCTYPE_DES_CBC_MD4: case ENCTYPE_DES_CBC_MD5: case ENCTYPE_DES3_CBC_SHA1: case ENCTYPE_DES3_CBC_RAW: case ENCTYPE_ARCFOUR_HMAC: case ENCTYPE_ARCFOUR_HMAC_EXP : return 0; default: return krb5_c_valid_enctype(enctype); } } /* XXX where are the generic helper routines for this? */ krb5_error_code add_pa_data_element(krb5_context context, krb5_pa_data *padata, krb5_pa_data ***inout_padata, krb5_boolean copy) { int i; krb5_pa_data **p; if (*inout_padata != NULL) { for (i = 0; (*inout_padata)[i] != NULL; i++) ; } else i = 0; p = realloc(*inout_padata, (i + 2) * sizeof(krb5_pa_data *)); if (p == NULL) return ENOMEM; *inout_padata = p; p[i] = (krb5_pa_data *)malloc(sizeof(krb5_pa_data)); if (p[i] == NULL) return ENOMEM; *(p[i]) = *padata; p[i + 1] = NULL; if (copy) { p[i]->contents = (krb5_octet *)malloc(padata->length); if (p[i]->contents == NULL) { free(p[i]); p[i] = NULL; return ENOMEM; } memcpy(p[i]->contents, padata->contents, padata->length); } return 0; } void kdc_get_ticket_endtime(kdc_realm_t *kdc_active_realm, krb5_timestamp starttime, krb5_timestamp endtime, krb5_timestamp till, krb5_db_entry *client, krb5_db_entry *server, krb5_timestamp *out_endtime) { krb5_timestamp until, life; if (till == 0) till = kdc_infinity; until = ts_min(till, endtime); life = ts_delta(until, starttime); if (client != NULL && client->max_life != 0) life = min(life, client->max_life); if (server->max_life != 0) life = min(life, server->max_life); if (kdc_active_realm->realm_maxlife != 0) life = min(life, kdc_active_realm->realm_maxlife); *out_endtime = ts_incr(starttime, life); } /* * Set tkt->renew_till to the requested renewable lifetime as modified by * policy. Set the TKT_FLG_RENEWABLE flag if we set a nonzero renew_till. * client and tgt may be NULL. */ void kdc_get_ticket_renewtime(kdc_realm_t *realm, krb5_kdc_req *request, krb5_enc_tkt_part *tgt, krb5_db_entry *client, krb5_db_entry *server, krb5_enc_tkt_part *tkt) { krb5_timestamp rtime, max_rlife; tkt->times.renew_till = 0; /* Don't issue renewable tickets if the client or server don't allow it, * or if this is a TGS request and the TGT isn't renewable. */ if (server->attributes & KRB5_KDB_DISALLOW_RENEWABLE) return; if (client != NULL && (client->attributes & KRB5_KDB_DISALLOW_RENEWABLE)) return; if (tgt != NULL && !(tgt->flags & TKT_FLG_RENEWABLE)) return; /* Determine the requested renewable time. */ if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE)) rtime = request->rtime ? request->rtime : kdc_infinity; else if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE_OK) && ts_after(request->till, tkt->times.endtime)) rtime = request->till; else return; /* Truncate it to the allowable renewable time. 
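     * The cap is the smallest of the TGT's own renew_till and the
     * server, realm, and client maximum renewable lifetimes, measured
     * from the ticket start time.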
*/ if (tgt != NULL) rtime = ts_min(rtime, tgt->times.renew_till); max_rlife = min(server->max_renewable_life, realm->realm_maxrlife); if (client != NULL) max_rlife = min(max_rlife, client->max_renewable_life); rtime = ts_min(rtime, ts_incr(tkt->times.starttime, max_rlife)); /* Make the ticket renewable if the truncated requested time is larger than * the ticket end time. */ if (ts_after(rtime, tkt->times.endtime)) { setflag(tkt->flags, TKT_FLG_RENEWABLE); tkt->times.renew_till = rtime; } } /** * Handle protected negotiation of FAST using enc_padata * - If ENCPADATA_REQ_ENC_PA_REP is present, then: * - Return ENCPADATA_REQ_ENC_PA_REP with checksum of AS-REQ from client * - Include PADATA_FX_FAST in the enc_padata to indicate FAST * @pre @c out_enc_padata has space for at least two more padata * @param index in/out index into @c out_enc_padata for next item */ krb5_error_code kdc_handle_protected_negotiation(krb5_context context, krb5_data *req_pkt, krb5_kdc_req *request, const krb5_keyblock *reply_key, krb5_pa_data ***out_enc_padata) { krb5_error_code retval = 0; krb5_checksum checksum; krb5_data *out = NULL; krb5_pa_data pa, *pa_in; pa_in = krb5int_find_pa_data(context, request->padata, KRB5_ENCPADATA_REQ_ENC_PA_REP); if (pa_in == NULL) return 0; pa.magic = KV5M_PA_DATA; pa.pa_type = KRB5_ENCPADATA_REQ_ENC_PA_REP; memset(&checksum, 0, sizeof(checksum)); retval = krb5_c_make_checksum(context,0, reply_key, KRB5_KEYUSAGE_AS_REQ, req_pkt, &checksum); if (retval != 0) goto cleanup; retval = encode_krb5_checksum(&checksum, &out); if (retval != 0) goto cleanup; pa.contents = (krb5_octet *) out->data; pa.length = out->length; retval = add_pa_data_element(context, &pa, out_enc_padata, FALSE); if (retval) goto cleanup; out->data = NULL; pa.magic = KV5M_PA_DATA; pa.pa_type = KRB5_PADATA_FX_FAST; pa.length = 0; pa.contents = NULL; retval = add_pa_data_element(context, &pa, out_enc_padata, FALSE); cleanup: if (checksum.contents) krb5_free_checksum_contents(context, &checksum); if (out != NULL) krb5_free_data(context, out); return retval; } /* * Although the KDC doesn't call this function directly, * process_tcp_connection_read() in net-server.c does call it. */ krb5_error_code make_toolong_error (void *handle, krb5_data **out) { krb5_error errpkt; krb5_error_code retval; krb5_data *scratch; struct server_handle *h = handle; retval = krb5_us_timeofday(h->kdc_err_context, &errpkt.stime, &errpkt.susec); if (retval) return retval; errpkt.error = KRB_ERR_FIELD_TOOLONG; errpkt.server = h->kdc_realmlist[0]->realm_tgsprinc; errpkt.client = NULL; errpkt.cusec = 0; errpkt.ctime = 0; errpkt.text.length = 0; errpkt.text.data = 0; errpkt.e_data.length = 0; errpkt.e_data.data = 0; scratch = malloc(sizeof(*scratch)); if (scratch == NULL) return ENOMEM; retval = krb5_mk_error(h->kdc_err_context, &errpkt, scratch); if (retval) { free(scratch); return retval; } *out = scratch; return 0; } void reset_for_hangup(void *ctx) { int k; struct server_handle *h = ctx; for (k = 0; k < h->kdc_numrealms; k++) krb5_db_refresh_config(h->kdc_realmlist[k]->realm_context); }
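
/*
 * Illustrative sketch, not part of the KDC build: a minimal exercise of
 * asn1length() above on a definite-length encoding.  The function name
 * asn1length_demo and the KDC_UTIL_DEMO guard are hypothetical and are
 * not referenced anywhere else in the tree.
 */
#ifdef KDC_UTIL_DEMO
#include <assert.h>

static void
asn1length_demo(void)
{
    /* 0x82 0x01 0x00 is the two-byte definite-length encoding of 256. */
    unsigned char buf[3] = { 0x82, 0x01, 0x00 };
    unsigned char *p = buf;

    assert(asn1length(&p) == 256);
    /* asn1length() advanced p past all three length octets. */
    assert(p == buf + 3);
}
#endif /* KDC_UTIL_DEMO */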
./CrossVul/dataset_final_sorted/CWE-617/c/good_2571_2
crossvul-cpp_data_bad_370_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP N N GGGG % % P P NN N G % % PPPP N N N G GG % % P N NN G G % % P N N GGG % % % % % % Read/Write Portable Network Graphics Image Format % % % % Software Design % % Cristy % % Glenn Randers-Pehrson % % November 1997 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/quantum-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/static.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/transform.h" #include "magick/utility.h" #if defined(MAGICKCORE_PNG_DELEGATE) /* Suppress libpng pedantic warnings that were added in * libpng-1.2.41 and libpng-1.4.0. If you are working on * migration to libpng-1.5, remove these defines and then * fix any code that generates warnings. */ /* #define PNG_DEPRECATED Use of this function is deprecated */ /* #define PNG_USE_RESULT The result of this function must be checked */ /* #define PNG_NORETURN This function does not return */ /* #define PNG_ALLOCATED The result of the function is new memory */ /* #define PNG_DEPSTRUCT Access to this struct member is deprecated */ /* PNG_PTR_NORETURN does not work on some platforms, in libpng-1.5.x */ #define PNG_PTR_NORETURN #include "png.h" #include "zlib.h" /* ImageMagick differences */ #define first_scene scene #if PNG_LIBPNG_VER > 10011 /* Optional declarations. Define or undefine them as you like. */ /* #define PNG_DEBUG -- turning this on breaks VisualC compiling */ /* Features under construction. Define these to work on them. */ #undef MNG_OBJECT_BUFFERS #undef MNG_BASI_SUPPORTED #define MNG_COALESCE_LAYERS /* In 5.4.4, this interfered with MMAP'ed files. 
*/ #define MNG_INSERT_LAYERS /* Troublesome, but seem to work as of 5.4.4 */ #if defined(MAGICKCORE_JPEG_DELEGATE) # define JNG_SUPPORTED /* Not finished as of 5.5.2. See "To do" comments. */ #endif #if !defined(RGBColorMatchExact) #define IsPNGColorEqual(color,target) \ (((color).red == (target).red) && \ ((color).green == (target).green) && \ ((color).blue == (target).blue)) #endif /* Table of recognized sRGB ICC profiles */ struct sRGB_info_struct { png_uint_32 len; png_uint_32 crc; png_byte intent; }; const struct sRGB_info_struct sRGB_info[] = { /* ICC v2 perceptual sRGB_IEC61966-2-1_black_scaled.icc */ { 3048, 0x3b8772b9UL, 0}, /* ICC v2 relative sRGB_IEC61966-2-1_no_black_scaling.icc */ { 3052, 0x427ebb21UL, 1}, /* ICC v4 perceptual sRGB_v4_ICC_preference_displayclass.icc */ {60988, 0x306fd8aeUL, 0}, /* ICC v4 perceptual sRGB_v4_ICC_preference.icc perceptual */ {60960, 0xbbef7812UL, 0}, /* HP? sRGB v2 media-relative sRGB_IEC61966-2-1_noBPC.icc */ { 3024, 0x5d5129ceUL, 1}, /* HP-Microsoft sRGB v2 perceptual */ { 3144, 0x182ea552UL, 0}, /* HP-Microsoft sRGB v2 media-relative */ { 3144, 0xf29e526dUL, 1}, /* Facebook's "2012/01/25 03:41:57", 524, "TINYsRGB.icc" */ { 524, 0xd4938c39UL, 0}, /* "2012/11/28 22:35:21", 3212, "Argyll_sRGB.icm") */ { 3212, 0x034af5a1UL, 0}, /* Not recognized */ { 0, 0x00000000UL, 0}, }; /* Macros for left-bit-replication to ensure that pixels * and PixelPackets all have the same image->depth, and for use * in PNG8 quantization. */ /* LBR01: Replicate top bit */ #define LBR01PacketRed(pixelpacket) \ (pixelpacket).red=(ScaleQuantumToChar((pixelpacket).red) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketGreen(pixelpacket) \ (pixelpacket).green=(ScaleQuantumToChar((pixelpacket).green) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketBlue(pixelpacket) \ (pixelpacket).blue=(ScaleQuantumToChar((pixelpacket).blue) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketOpacity(pixelpacket) \ (pixelpacket).opacity=(ScaleQuantumToChar((pixelpacket).opacity) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketRGB(pixelpacket) \ { \ LBR01PacketRed((pixelpacket)); \ LBR01PacketGreen((pixelpacket)); \ LBR01PacketBlue((pixelpacket)); \ } #define LBR01PacketRGBO(pixelpacket) \ { \ LBR01PacketRGB((pixelpacket)); \ LBR01PacketOpacity((pixelpacket)); \ } #define LBR01PixelRed(pixel) \ (SetPixelRed((pixel), \ ScaleQuantumToChar(GetPixelRed((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelGreen(pixel) \ (SetPixelGreen((pixel), \ ScaleQuantumToChar(GetPixelGreen((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelBlue(pixel) \ (SetPixelBlue((pixel), \ ScaleQuantumToChar(GetPixelBlue((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelOpacity(pixel) \ (SetPixelOpacity((pixel), \ ScaleQuantumToChar(GetPixelOpacity((pixel))) < 0x10 ? 
\ 0 : QuantumRange)); #define LBR01PixelRGB(pixel) \ { \ LBR01PixelRed((pixel)); \ LBR01PixelGreen((pixel)); \ LBR01PixelBlue((pixel)); \ } #define LBR01PixelRGBO(pixel) \ { \ LBR01PixelRGB((pixel)); \ LBR01PixelOpacity((pixel)); \ } /* LBR02: Replicate top 2 bits */ #define LBR02PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xc0; \ (pixelpacket).red=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xc0; \ (pixelpacket).green=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xc0; \ (pixelpacket).blue=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketOpacity(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).opacity) & 0xc0; \ (pixelpacket).opacity=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketRGB(pixelpacket) \ { \ LBR02PacketRed((pixelpacket)); \ LBR02PacketGreen((pixelpacket)); \ LBR02PacketBlue((pixelpacket)); \ } #define LBR02PacketRGBO(pixelpacket) \ { \ LBR02PacketRGB((pixelpacket)); \ LBR02PacketOpacity((pixelpacket)); \ } #define LBR02PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xc0; \ SetPixelRed((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xc0; \ SetPixelGreen((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelBlue(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelBlue((pixel))) & 0xc0; \ SetPixelBlue((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02Opacity(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelOpacity((pixel))) & 0xc0; \ SetPixelOpacity((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelRGB(pixel) \ { \ LBR02PixelRed((pixel)); \ LBR02PixelGreen((pixel)); \ LBR02PixelBlue((pixel)); \ } #define LBR02PixelRGBO(pixel) \ { \ LBR02PixelRGB((pixel)); \ LBR02Opacity((pixel)); \ } /* LBR03: Replicate top 3 bits (only used with opaque pixels during PNG8 quantization) */ #define LBR03PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xe0; \ (pixelpacket).red=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xe0; \ (pixelpacket).green=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xe0; \ (pixelpacket).blue=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketRGB(pixelpacket) \ { \ LBR03PacketRed((pixelpacket)); \ LBR03PacketGreen((pixelpacket)); \ LBR03PacketBlue((pixelpacket)); \ } #define LBR03PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xe0; \ 
SetPixelRed((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xe0; \ SetPixelGreen((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelBlue(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelBlue((pixel))) \ & 0xe0; \ SetPixelBlue((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelRGB(pixel) \ { \ LBR03PixelRed((pixel)); \ LBR03PixelGreen((pixel)); \ LBR03PixelBlue((pixel)); \ } /* LBR04: Replicate top 4 bits */ #define LBR04PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xf0; \ (pixelpacket).red=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xf0; \ (pixelpacket).green=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xf0; \ (pixelpacket).blue=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketOpacity(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).opacity) & 0xf0; \ (pixelpacket).opacity=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketRGB(pixelpacket) \ { \ LBR04PacketRed((pixelpacket)); \ LBR04PacketGreen((pixelpacket)); \ LBR04PacketBlue((pixelpacket)); \ } #define LBR04PacketRGBO(pixelpacket) \ { \ LBR04PacketRGB((pixelpacket)); \ LBR04PacketOpacity((pixelpacket)); \ } #define LBR04PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xf0; \ SetPixelRed((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xf0; \ SetPixelGreen((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelBlue(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelBlue((pixel))) & 0xf0; \ SetPixelBlue((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelOpacity(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelOpacity((pixel))) & 0xf0; \ SetPixelOpacity((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelRGB(pixel) \ { \ LBR04PixelRed((pixel)); \ LBR04PixelGreen((pixel)); \ LBR04PixelBlue((pixel)); \ } #define LBR04PixelRGBO(pixel) \ { \ LBR04PixelRGB((pixel)); \ LBR04PixelOpacity((pixel)); \ } /* Establish thread safety. setjmp/longjmp is claimed to be safe on these platforms: setjmp/longjmp is alleged to be unsafe on these platforms: */ #ifdef PNG_SETJMP_SUPPORTED # ifndef IMPNG_SETJMP_IS_THREAD_SAFE # define IMPNG_SETJMP_NOT_THREAD_SAFE # endif # ifdef IMPNG_SETJMP_NOT_THREAD_SAFE static SemaphoreInfo *ping_semaphore = (SemaphoreInfo *) NULL; # endif #endif /* This temporary until I set up malloc'ed object attributes array. Recompile with MNG_MAX_OBJECTS=65536L to avoid this limit but waste more memory. */ #define MNG_MAX_OBJECTS 256 /* If this not defined, spec is interpreted strictly. If it is defined, an attempt will be made to recover from some errors, including o global PLTE too short */ #undef MNG_LOOSE /* Don't try to define PNG_MNG_FEATURES_SUPPORTED here. Make sure it's defined in libpng/pngconf.h, version 1.0.9 or later. 
It won't work with earlier versions of libpng. From libpng-1.0.3a to libpng-1.0.8, PNG_READ|WRITE_EMPTY_PLTE were used but those have been deprecated in libpng in favor of PNG_MNG_FEATURES_SUPPORTED, so we set them here. PNG_MNG_FEATURES_SUPPORTED is disabled by default in libpng-1.0.9 and will be enabled by default in libpng-1.2.0. */ #ifdef PNG_MNG_FEATURES_SUPPORTED # ifndef PNG_READ_EMPTY_PLTE_SUPPORTED # define PNG_READ_EMPTY_PLTE_SUPPORTED # endif # ifndef PNG_WRITE_EMPTY_PLTE_SUPPORTED # define PNG_WRITE_EMPTY_PLTE_SUPPORTED # endif #endif /* Maximum valid size_t in PNG/MNG chunks is (2^31)-1 This macro is only defined in libpng-1.0.3 and later. Previously it was PNG_MAX_UINT but that was deprecated in libpng-1.2.6 */ #ifndef PNG_UINT_31_MAX #define PNG_UINT_31_MAX (png_uint_32) 0x7fffffffL #endif /* Constant strings for known chunk types. If you need to add a chunk, add a string holding the name here. To make the code more portable, we use ASCII numbers like this, not characters. */ static const png_byte mng_MHDR[5]={ 77, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_BACK[5]={ 66, 65, 67, 75, (png_byte) '\0'}; static const png_byte mng_BASI[5]={ 66, 65, 83, 73, (png_byte) '\0'}; static const png_byte mng_CLIP[5]={ 67, 76, 73, 80, (png_byte) '\0'}; static const png_byte mng_CLON[5]={ 67, 76, 79, 78, (png_byte) '\0'}; static const png_byte mng_DEFI[5]={ 68, 69, 70, 73, (png_byte) '\0'}; static const png_byte mng_DHDR[5]={ 68, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_DISC[5]={ 68, 73, 83, 67, (png_byte) '\0'}; static const png_byte mng_ENDL[5]={ 69, 78, 68, 76, (png_byte) '\0'}; static const png_byte mng_FRAM[5]={ 70, 82, 65, 77, (png_byte) '\0'}; static const png_byte mng_IEND[5]={ 73, 69, 78, 68, (png_byte) '\0'}; static const png_byte mng_IHDR[5]={ 73, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_JHDR[5]={ 74, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_LOOP[5]={ 76, 79, 79, 80, (png_byte) '\0'}; static const png_byte mng_MAGN[5]={ 77, 65, 71, 78, (png_byte) '\0'}; static const png_byte mng_MEND[5]={ 77, 69, 78, 68, (png_byte) '\0'}; static const png_byte mng_MOVE[5]={ 77, 79, 86, 69, (png_byte) '\0'}; static const png_byte mng_PAST[5]={ 80, 65, 83, 84, (png_byte) '\0'}; static const png_byte mng_PLTE[5]={ 80, 76, 84, 69, (png_byte) '\0'}; static const png_byte mng_SAVE[5]={ 83, 65, 86, 69, (png_byte) '\0'}; static const png_byte mng_SEEK[5]={ 83, 69, 69, 75, (png_byte) '\0'}; static const png_byte mng_SHOW[5]={ 83, 72, 79, 87, (png_byte) '\0'}; static const png_byte mng_TERM[5]={ 84, 69, 82, 77, (png_byte) '\0'}; static const png_byte mng_bKGD[5]={ 98, 75, 71, 68, (png_byte) '\0'}; static const png_byte mng_caNv[5]={ 99, 97, 78, 118, (png_byte) '\0'}; static const png_byte mng_cHRM[5]={ 99, 72, 82, 77, (png_byte) '\0'}; static const png_byte mng_eXIf[5]={101, 88, 73, 102, (png_byte) '\0'}; static const png_byte mng_gAMA[5]={103, 65, 77, 65, (png_byte) '\0'}; static const png_byte mng_iCCP[5]={105, 67, 67, 80, (png_byte) '\0'}; static const png_byte mng_nEED[5]={110, 69, 69, 68, (png_byte) '\0'}; static const png_byte mng_pHYg[5]={112, 72, 89, 103, (png_byte) '\0'}; static const png_byte mng_vpAg[5]={118, 112, 65, 103, (png_byte) '\0'}; static const png_byte mng_pHYs[5]={112, 72, 89, 115, (png_byte) '\0'}; static const png_byte mng_sBIT[5]={115, 66, 73, 84, (png_byte) '\0'}; static const png_byte mng_sRGB[5]={115, 82, 71, 66, (png_byte) '\0'}; static const png_byte mng_tRNS[5]={116, 82, 78, 83, (png_byte) '\0'}; #if 
defined(JNG_SUPPORTED) static const png_byte mng_IDAT[5]={ 73, 68, 65, 84, (png_byte) '\0'}; static const png_byte mng_JDAT[5]={ 74, 68, 65, 84, (png_byte) '\0'}; static const png_byte mng_JDAA[5]={ 74, 68, 65, 65, (png_byte) '\0'}; static const png_byte mng_JdAA[5]={ 74, 100, 65, 65, (png_byte) '\0'}; static const png_byte mng_JSEP[5]={ 74, 83, 69, 80, (png_byte) '\0'}; static const png_byte mng_oFFs[5]={111, 70, 70, 115, (png_byte) '\0'}; #endif #if 0 /* Other known chunks that are not yet supported by ImageMagick: */ static const png_byte mng_hIST[5]={104, 73, 83, 84, (png_byte) '\0'}; static const png_byte mng_iTXt[5]={105, 84, 88, 116, (png_byte) '\0'}; static const png_byte mng_sPLT[5]={115, 80, 76, 84, (png_byte) '\0'}; static const png_byte mng_sTER[5]={115, 84, 69, 82, (png_byte) '\0'}; static const png_byte mng_tEXt[5]={116, 69, 88, 116, (png_byte) '\0'}; static const png_byte mng_tIME[5]={116, 73, 77, 69, (png_byte) '\0'}; static const png_byte mng_zTXt[5]={122, 84, 88, 116, (png_byte) '\0'}; #endif typedef struct _MngBox { long left, right, top, bottom; } MngBox; typedef struct _MngPair { volatile long a, b; } MngPair; #ifdef MNG_OBJECT_BUFFERS typedef struct _MngBuffer { size_t height, width; Image *image; png_color plte[256]; int reference_count; unsigned char alpha_sample_depth, compression_method, color_type, concrete, filter_method, frozen, image_type, interlace_method, pixel_sample_depth, plte_length, sample_depth, viewable; } MngBuffer; #endif typedef struct _MngInfo { #ifdef MNG_OBJECT_BUFFERS MngBuffer *ob[MNG_MAX_OBJECTS]; #endif Image * image; RectangleInfo page; int adjoin, #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED bytes_in_read_buffer, found_empty_plte, #endif equal_backgrounds, equal_chrms, equal_gammas, #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) equal_palettes, #endif equal_physs, equal_srgbs, framing_mode, have_global_bkgd, have_global_chrm, have_global_gama, have_global_phys, have_global_sbit, have_global_srgb, have_saved_bkgd_index, have_write_global_chrm, have_write_global_gama, have_write_global_plte, have_write_global_srgb, need_fram, object_id, old_framing_mode, saved_bkgd_index; int new_number_colors; ssize_t image_found, loop_count[256], loop_iteration[256], scenes_found, x_off[MNG_MAX_OBJECTS], y_off[MNG_MAX_OBJECTS]; MngBox clip, frame, image_box, object_clip[MNG_MAX_OBJECTS]; unsigned char /* These flags could be combined into one byte */ exists[MNG_MAX_OBJECTS], frozen[MNG_MAX_OBJECTS], loop_active[256], invisible[MNG_MAX_OBJECTS], viewable[MNG_MAX_OBJECTS]; MagickOffsetType loop_jump[256]; png_colorp global_plte; png_color_8 global_sbit; png_byte #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED read_buffer[8], #endif global_trns[256]; float global_gamma; ChromaticityInfo global_chrm; RenderingIntent global_srgb_intent; unsigned long delay, global_plte_length, global_trns_length, global_x_pixels_per_unit, global_y_pixels_per_unit, mng_width, mng_height, ticks_per_second; MagickBooleanType need_blob; unsigned int IsPalette, global_phys_unit_type, basi_warning, clon_warning, dhdr_warning, jhdr_warning, magn_warning, past_warning, phyg_warning, phys_warning, sbit_warning, show_warning, mng_type, write_mng, write_png_colortype, write_png_depth, write_png_compression_level, write_png_compression_strategy, write_png_compression_filter, write_png8, write_png24, write_png32, write_png48, write_png64; #ifdef MNG_BASI_SUPPORTED unsigned long basi_width, basi_height; unsigned int basi_depth, basi_color_type, basi_compression_method, 
basi_filter_type, basi_interlace_method, basi_red, basi_green, basi_blue, basi_alpha, basi_viewable; #endif png_uint_16 magn_first, magn_last, magn_mb, magn_ml, magn_mr, magn_mt, magn_mx, magn_my, magn_methx, magn_methy; PixelPacket mng_global_bkgd; /* Added at version 6.6.6-7 */ MagickBooleanType ping_exclude_bKGD, ping_exclude_cHRM, ping_exclude_date, ping_exclude_eXIf, ping_exclude_EXIF, ping_exclude_gAMA, ping_exclude_iCCP, /* ping_exclude_iTXt, */ ping_exclude_oFFs, ping_exclude_pHYs, ping_exclude_sRGB, ping_exclude_tEXt, ping_exclude_tRNS, ping_exclude_caNv, ping_exclude_zCCP, /* hex-encoded iCCP */ ping_exclude_zTXt, ping_preserve_colormap, /* Added at version 6.8.5-7 */ ping_preserve_iCCP, /* Added at version 6.8.9-9 */ ping_exclude_tIME; } MngInfo; #endif /* VER */ /* Forward declarations. */ static MagickBooleanType WritePNGImage(const ImageInfo *,Image *); static MagickBooleanType WriteMNGImage(const ImageInfo *,Image *); #if defined(JNG_SUPPORTED) static MagickBooleanType WriteJNGImage(const ImageInfo *,Image *); #endif #if PNG_LIBPNG_VER > 10011 #if (MAGICKCORE_QUANTUM_DEPTH >= 16) static MagickBooleanType LosslessReduceDepthOK(Image *image) { /* Reduce bit depth if it can be reduced losslessly from 16+ to 8. * * This is true if the high byte and the next highest byte of * each sample of the image, the colormap, and the background color * are equal to each other. We check this by seeing if the samples * are unchanged when we scale them down to 8 and back up to Quantum. * * We don't use the method GetImageDepth() because it doesn't check * background and doesn't handle PseudoClass specially. */ #define QuantumToCharToQuantumEqQuantum(quantum) \ ((ScaleCharToQuantum((unsigned char) ScaleQuantumToChar(quantum))) == quantum) MagickBooleanType ok_to_reduce=MagickFalse; if (image->depth >= 16) { const PixelPacket *p; ok_to_reduce= QuantumToCharToQuantumEqQuantum(image->background_color.red) && QuantumToCharToQuantumEqQuantum(image->background_color.green) && QuantumToCharToQuantumEqQuantum(image->background_color.blue) ? MagickTrue : MagickFalse; if (ok_to_reduce != MagickFalse && image->storage_class == PseudoClass) { int indx; for (indx=0; indx < (ssize_t) image->colors; indx++) { ok_to_reduce=( QuantumToCharToQuantumEqQuantum( image->colormap[indx].red) && QuantumToCharToQuantumEqQuantum( image->colormap[indx].green) && QuantumToCharToQuantumEqQuantum( image->colormap[indx].blue)) ? MagickTrue : MagickFalse; if (ok_to_reduce == MagickFalse) break; } } if ((ok_to_reduce != MagickFalse) && (image->storage_class != PseudoClass)) { ssize_t y; register ssize_t x; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) { ok_to_reduce = MagickFalse; break; } for (x=(ssize_t) image->columns-1; x >= 0; x--) { ok_to_reduce= QuantumToCharToQuantumEqQuantum(GetPixelRed(p)) && QuantumToCharToQuantumEqQuantum(GetPixelGreen(p)) && QuantumToCharToQuantumEqQuantum(GetPixelBlue(p)) ? 
MagickTrue : MagickFalse; if (ok_to_reduce == MagickFalse) break; p++; } if (x >= 0) break; } } if (ok_to_reduce != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " OK to reduce PNG bit depth to 8 without loss of info"); } else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Not OK to reduce PNG bit depth to 8 without loss of info"); } } return ok_to_reduce; } #endif /* MAGICKCORE_QUANTUM_DEPTH >= 16 */ static const char* PngColorTypeToString(const unsigned int color_type) { const char *result = "Unknown"; switch (color_type) { case PNG_COLOR_TYPE_GRAY: result = "Gray"; break; case PNG_COLOR_TYPE_GRAY_ALPHA: result = "Gray+Alpha"; break; case PNG_COLOR_TYPE_PALETTE: result = "Palette"; break; case PNG_COLOR_TYPE_RGB: result = "RGB"; break; case PNG_COLOR_TYPE_RGB_ALPHA: result = "RGB+Alpha"; break; } return result; } static int Magick_RenderingIntent_to_PNG_RenderingIntent(const RenderingIntent intent) { switch (intent) { case PerceptualIntent: return 0; case RelativeIntent: return 1; case SaturationIntent: return 2; case AbsoluteIntent: return 3; default: return -1; } } static RenderingIntent Magick_RenderingIntent_from_PNG_RenderingIntent(const int ping_intent) { switch (ping_intent) { case 0: return PerceptualIntent; case 1: return RelativeIntent; case 2: return SaturationIntent; case 3: return AbsoluteIntent; default: return UndefinedIntent; } } static const char * Magick_RenderingIntentString_from_PNG_RenderingIntent(const int ping_intent) { switch (ping_intent) { case 0: return "Perceptual Intent"; case 1: return "Relative Intent"; case 2: return "Saturation Intent"; case 3: return "Absolute Intent"; default: return "Undefined Intent"; } } static const char * Magick_ColorType_from_PNG_ColorType(const int ping_colortype) { switch (ping_colortype) { case 0: return "Grayscale"; case 2: return "Truecolor"; case 3: return "Indexed"; case 4: return "GrayAlpha"; case 6: return "RGBA"; default: return "UndefinedColorType"; } } #endif /* PNG_LIBPNG_VER > 10011 */ #endif /* MAGICKCORE_PNG_DELEGATE */ /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMNG() returns MagickTrue if the image format type, identified by the % magick string, is MNG. % % The format of the IsMNG method is: % % MagickBooleanType IsMNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % % */ static MagickBooleanType IsMNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\212MNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s J N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsJNG() returns MagickTrue if the image format type, identified by the % magick string, is JNG. % % The format of the IsJNG method is: % % MagickBooleanType IsJNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
% % */ static MagickBooleanType IsJNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\213JNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPNG() returns MagickTrue if the image format type, identified by the % magick string, is PNG. % % The format of the IsPNG method is: % % MagickBooleanType IsPNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\211PNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } #if defined(MAGICKCORE_PNG_DELEGATE) #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #if (PNG_LIBPNG_VER > 10011) static size_t WriteBlobMSBULong(Image *image,const size_t value) { unsigned char buffer[4]; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; return((size_t) WriteBlob(image,4,buffer)); } static void PNGLong(png_bytep p,png_uint_32 value) { *p++=(png_byte) ((value >> 24) & 0xff); *p++=(png_byte) ((value >> 16) & 0xff); *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGsLong(png_bytep p,png_int_32 value) { *p++=(png_byte) ((value >> 24) & 0xff); *p++=(png_byte) ((value >> 16) & 0xff); *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGShort(png_bytep p,png_uint_16 value) { *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGType(png_bytep p,const png_byte *type) { (void) memcpy(p,type,4*sizeof(png_byte)); } static void LogPNGChunk(MagickBooleanType logging, const png_byte *type, size_t length) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing %c%c%c%c chunk, length: %.20g", type[0],type[1],type[2],type[3],(double) length); } #endif /* PNG_LIBPNG_VER > 10011 */ #if defined(__cplusplus) || defined(c_plusplus) } #endif #if PNG_LIBPNG_VER > 10011 /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPNGImage() reads a Portable Network Graphics (PNG) or % Multiple-image Network Graphics (MNG) image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image or set of images. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the ReadPNGImage method is: % % Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % % To do, more or less in chronological order (as of version 5.5.2, % November 26, 2002 -- glennrp -- see also "To do" under WriteMNGImage): % % Get 16-bit cheap transparency working. 
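%    ("Cheap transparency" means single-color transparency carried in a
%    tRNS chunk rather than a full alpha channel.)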
% % (At this point, PNG decoding is supposed to be in full MNG-LC compliance) % % Preserve all unknown and not-yet-handled known chunks found in input % PNG file and copy them into output PNG files according to the PNG % copying rules. % % (At this point, PNG encoding should be in full MNG compliance) % % Provide options for choice of background to use when the MNG BACK % chunk is not present or is not mandatory (i.e., leave transparent, % user specified, MNG BACK, PNG bKGD) % % Implement LOOP/ENDL [done, but could do discretionary loops more % efficiently by linking in the duplicate frames.]. % % Decode and act on the MHDR simplicity profile (offer option to reject % files or attempt to process them anyway when the profile isn't LC or VLC). % % Upgrade to full MNG without Delta-PNG. % % o BACK [done a while ago except for background image ID] % o MOVE [done 15 May 1999] % o CLIP [done 15 May 1999] % o DISC [done 19 May 1999] % o SAVE [partially done 19 May 1999 (marks objects frozen)] % o SEEK [partially done 19 May 1999 (discard function only)] % o SHOW % o PAST % o BASI % o MNG-level tEXt/iTXt/zTXt % o pHYg % o pHYs % o sBIT % o bKGD % o iTXt (wait for libpng implementation). % % Use the scene signature to discover when an identical scene is % being reused, and just point to the original image->exception instead % of storing another set of pixels. This not specific to MNG % but could be applied generally. % % Upgrade to full MNG with Delta-PNG. % % JNG tEXt/iTXt/zTXt % % We will not attempt to read files containing the CgBI chunk. % They are really Xcode files meant for display on the iPhone. % These are not valid PNG files and it is impossible to recover % the original PNG from files that have been converted to Xcode-PNG, % since irretrievable loss of color data has occurred due to the % use of premultiplied alpha. */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif /* This the function that does the actual reading of data. It is the same as the one supplied in libpng, except that it receives the datastream from the ReadBlob() function instead of standard input. */ static void png_get_data(png_structp png_ptr,png_bytep data,png_size_t length) { Image *image; image=(Image *) png_get_io_ptr(png_ptr); if (length != 0) { png_size_t check; check=(png_size_t) ReadBlob(image,(size_t) length,data); if (check != length) { char msg[MaxTextExtent]; (void) FormatLocaleString(msg,MaxTextExtent, "Expected %.20g bytes; found %.20g bytes",(double) length, (double) check); png_warning(png_ptr,msg); png_error(png_ptr,"Read Exception"); } } } #if !defined(PNG_READ_EMPTY_PLTE_SUPPORTED) && \ !defined(PNG_MNG_FEATURES_SUPPORTED) /* We use mng_get_data() instead of png_get_data() if we have a libpng * older than libpng-1.0.3a, which was the first to allow the empty * PLTE, or a newer libpng in which PNG_MNG_FEATURES_SUPPORTED was * ifdef'ed out. Earlier versions would crash if the bKGD chunk was * encountered after an empty PLTE, so we have to look ahead for bKGD * chunks and remove them from the datastream that is passed to libpng, * and store their contents for later use. 
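 * The look-ahead buffers up to four bytes in read_buffer[] between
 * calls, triggered whenever a four-byte big-endian chunk length of 0
 * (a possible empty PLTE or IEND) or 1 (a possible bKGD) is seen.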
*/ static void mng_get_data(png_structp png_ptr,png_bytep data,png_size_t length) { MngInfo *mng_info; Image *image; png_size_t check; register ssize_t i; i=0; mng_info=(MngInfo *) png_get_io_ptr(png_ptr); image=(Image *) mng_info->image; while (mng_info->bytes_in_read_buffer && length) { data[i]=mng_info->read_buffer[i]; mng_info->bytes_in_read_buffer--; length--; i++; } if (length != 0) { check=(png_size_t) ReadBlob(image,(size_t) length,(char *) data); if (check != length) png_error(png_ptr,"Read Exception"); if (length == 4) { if ((data[0] == 0) && (data[1] == 0) && (data[2] == 0) && (data[3] == 0)) { check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->read_buffer[4]=0; mng_info->bytes_in_read_buffer=4; if (memcmp(mng_info->read_buffer,mng_PLTE,4) == 0) mng_info->found_empty_plte=MagickTrue; if (memcmp(mng_info->read_buffer,mng_IEND,4) == 0) { mng_info->found_empty_plte=MagickFalse; mng_info->have_saved_bkgd_index=MagickFalse; } } if ((data[0] == 0) && (data[1] == 0) && (data[2] == 0) && (data[3] == 1)) { check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->read_buffer[4]=0; mng_info->bytes_in_read_buffer=4; if (memcmp(mng_info->read_buffer,mng_bKGD,4) == 0) if (mng_info->found_empty_plte) { /* Skip the bKGD data byte and CRC. */ check=(png_size_t) ReadBlob(image,5,(char *) mng_info->read_buffer); check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->saved_bkgd_index=mng_info->read_buffer[0]; mng_info->have_saved_bkgd_index=MagickTrue; mng_info->bytes_in_read_buffer=0; } } } } } #endif static void png_put_data(png_structp png_ptr,png_bytep data,png_size_t length) { Image *image; image=(Image *) png_get_io_ptr(png_ptr); if (length != 0) { png_size_t check; check=(png_size_t) WriteBlob(image,(size_t) length,data); if (check != length) png_error(png_ptr,"WriteBlob Failed"); } } static void png_flush_data(png_structp png_ptr) { (void) png_ptr; } #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED static int PalettesAreEqual(Image *a,Image *b) { ssize_t i; if ((a == (Image *) NULL) || (b == (Image *) NULL)) return((int) MagickFalse); if (a->storage_class != PseudoClass || b->storage_class != PseudoClass) return((int) MagickFalse); if (a->colors != b->colors) return((int) MagickFalse); for (i=0; i < (ssize_t) a->colors; i++) { if ((a->colormap[i].red != b->colormap[i].red) || (a->colormap[i].green != b->colormap[i].green) || (a->colormap[i].blue != b->colormap[i].blue)) return((int) MagickFalse); } return((int) MagickTrue); } #endif static void MngInfoDiscardObject(MngInfo *mng_info,int i) { if (i && (i < MNG_MAX_OBJECTS) && (mng_info != (MngInfo *) NULL) && mng_info->exists[i] && !mng_info->frozen[i]) { #ifdef MNG_OBJECT_BUFFERS if (mng_info->ob[i] != (MngBuffer *) NULL) { if (mng_info->ob[i]->reference_count > 0) mng_info->ob[i]->reference_count--; if (mng_info->ob[i]->reference_count == 0) { if (mng_info->ob[i]->image != (Image *) NULL) mng_info->ob[i]->image=DestroyImage(mng_info->ob[i]->image); mng_info->ob[i]=DestroyString(mng_info->ob[i]); } } mng_info->ob[i]=(MngBuffer *) NULL; #endif mng_info->exists[i]=MagickFalse; mng_info->invisible[i]=MagickFalse; mng_info->viewable[i]=MagickFalse; mng_info->frozen[i]=MagickFalse; mng_info->x_off[i]=0; mng_info->y_off[i]=0; mng_info->object_clip[i].left=0; mng_info->object_clip[i].right=(ssize_t) PNG_UINT_31_MAX; mng_info->object_clip[i].top=0; mng_info->object_clip[i].bottom=(ssize_t) PNG_UINT_31_MAX; } } static MngInfo 
*MngInfoFreeStruct(MngInfo *mng_info) { register ssize_t i; if (mng_info == (MngInfo *) NULL) return((MngInfo *) NULL); for (i=1; i < MNG_MAX_OBJECTS; i++) MngInfoDiscardObject(mng_info,i); mng_info->global_plte=(png_colorp) RelinquishMagickMemory(mng_info->global_plte); return((MngInfo *) RelinquishMagickMemory(mng_info)); } static long mng_get_long(unsigned char *p) { return ((long) (((png_uint_32) p[0] << 24) | ((png_uint_32) p[1] << 16) | ((png_uint_32) p[2] << 8) | (png_uint_32) p[3])); } static MngBox mng_minimum_box(MngBox box1,MngBox box2) { MngBox box; box=box1; if (box.left < box2.left) box.left=box2.left; if (box.top < box2.top) box.top=box2.top; if (box.right > box2.right) box.right=box2.right; if (box.bottom > box2.bottom) box.bottom=box2.bottom; return box; } static MngBox mng_read_box(MngBox previous_box,char delta_type,unsigned char *p) { MngBox box; /* Read clipping boundaries from DEFI, CLIP, FRAM, or PAST chunk. */ box.left=mng_get_long(p); box.right=mng_get_long(&p[4]); box.top=mng_get_long(&p[8]); box.bottom=mng_get_long(&p[12]); if (delta_type != 0) { box.left+=previous_box.left; box.right+=previous_box.right; box.top+=previous_box.top; box.bottom+=previous_box.bottom; } return(box); } static MngPair mng_read_pair(MngPair previous_pair,int delta_type, unsigned char *p) { MngPair pair; /* Read two ssize_t's from CLON, MOVE or PAST chunk */ pair.a=mng_get_long(p); pair.b=mng_get_long(&p[4]); if (delta_type != 0) { pair.a+=previous_pair.a; pair.b+=previous_pair.b; } return(pair); } typedef struct _PNGErrorInfo { Image *image; ExceptionInfo *exception; } PNGErrorInfo; static void MagickPNGErrorHandler(png_struct *ping,png_const_charp message) { Image *image; image=(Image *) png_get_error_ptr(ping); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " libpng-%s error: %s", PNG_LIBPNG_VER_STRING,message); (void) ThrowMagickException(&image->exception,GetMagickModule(),CoderError, message,"`%s'",image->filename); #if (PNG_LIBPNG_VER < 10500) /* A warning about deprecated use of jmpbuf here is unavoidable if you * are building with libpng-1.4.x and can be ignored. */ longjmp(ping->jmpbuf,1); #else png_longjmp(ping,1); #endif } static void MagickPNGWarningHandler(png_struct *ping,png_const_charp message) { Image *image; if (LocaleCompare(message, "Missing PLTE before tRNS") == 0) png_error(ping, message); image=(Image *) png_get_error_ptr(ping); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " libpng-%s warning: %s", PNG_LIBPNG_VER_STRING,message); (void) ThrowMagickException(&image->exception,GetMagickModule(),CoderWarning, message,"`%s'",image->filename); } #ifdef PNG_USER_MEM_SUPPORTED #if PNG_LIBPNG_VER >= 10400 static png_voidp Magick_png_malloc(png_structp png_ptr,png_alloc_size_t size) #else static png_voidp Magick_png_malloc(png_structp png_ptr,png_size_t size) #endif { (void) png_ptr; return((png_voidp) AcquireMagickMemory((size_t) size)); } /* Free a pointer. It is removed from the list at the same time. 
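 * (libpng calls this through the png_free_ptr hook installed via
 * png_create_read_struct_2(), so libpng's internal allocations are
 * routed through the MagickCore allocator, matching the allocations
 * done in Magick_png_malloc().)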
 */
static png_free_ptr Magick_png_free(png_structp png_ptr,png_voidp ptr)
{
  (void) png_ptr;
  ptr=RelinquishMagickMemory(ptr);
  return((png_free_ptr) NULL);
}
#endif

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static int
Magick_png_read_raw_profile(png_struct *ping,Image *image,
   const ImageInfo *image_info, png_textp text,int ii)
{
  register ssize_t
    i;

  register unsigned char
    *dp;

  register png_charp
    sp;

  size_t
    extent,
    length,
    nibbles;

  StringInfo
    *profile;

  const unsigned char
    unhex[103]={0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,1, 2,3,4,5,6,7,8,9,0,0,
                0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,10,11,12,
                13,14,15};

  sp=text[ii].text+1;
  extent=text[ii].text_length;

  /* look for newline */
  while ((*sp != '\n') && extent--)
    sp++;

  /* look for length */
  while (((*sp == '\0' || *sp == ' ' || *sp == '\n')) && extent--)
    sp++;

  if (extent == 0)
    {
      png_warning(ping,"missing profile length");
      return(MagickFalse);
    }

  length=StringToLong(sp);

  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " length: %lu",(unsigned long) length);

  while ((*sp != ' ' && *sp != '\n') && extent--)
    sp++;

  if (extent == 0)
    {
      png_warning(ping,"invalid profile length");
      return(MagickFalse);
    }

  /* allocate space */
  if (length == 0)
    {
      png_warning(ping,"invalid profile length");
      return(MagickFalse);
    }

  profile=BlobToStringInfo((const void *) NULL,length);

  if (profile == (StringInfo *) NULL)
    {
      png_warning(ping, "unable to copy profile");
      return(MagickFalse);
    }

  /* copy profile, skipping white space and column 1 "=" signs */
  dp=GetStringInfoDatum(profile);
  nibbles=length*2;

  for (i=0; i < (ssize_t) nibbles; i++)
    {
      while (*sp < '0' || (*sp > '9' && *sp < 'a') || *sp > 'f')
        {
          if (*sp == '\0')
            {
              png_warning(ping, "ran out of profile data");
              /* Release the partially decoded profile before the early
                 return */
              profile=DestroyStringInfo(profile);
              return(MagickFalse);
            }

          sp++;
        }

      if (i%2 == 0)
        *dp=(unsigned char) (16*unhex[(int) *sp++]);

      else
        (*dp++)+=unhex[(int) *sp++];
    }

  /* We have already read "Raw profile type " */
  (void) SetImageProfile(image,&text[ii].key[17],profile);
  profile=DestroyStringInfo(profile);

  if (image_info->verbose)
    (void) printf(" Found a generic profile, type %s\n",&text[ii].key[17]);

  return MagickTrue;
}

#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
static int read_user_chunk_callback(png_struct *ping, png_unknown_chunkp
   chunk)
{
  Image
    *image;

  /* The unknown chunk structure contains the chunk data:
     png_byte name[5];
     png_byte *data;
     png_size_t size;

     Note that libpng has already taken care of the CRC handling.
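
     A user chunk callback returns a negative value to signal an error,
     0 if it did not recognize the chunk (libpng then applies its own
     "keep" rules), or a positive value if it consumed the chunk; the
     handlers below follow that convention.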
   */

  LogMagickEvent(CoderEvent,GetMagickModule(),
    " read_user_chunk: found %c%c%c%c chunk",
    chunk->name[0],chunk->name[1],chunk->name[2],chunk->name[3]);

  if (chunk->name[0] == 101 &&
      (chunk->name[1] == 88 || chunk->name[1] == 120) &&
      chunk->name[2] == 73 &&
      chunk->name[3] == 102)
    {
      /* process eXIf or exIf chunk */

      PNGErrorInfo
        *error_info;

      StringInfo
        *profile;

      unsigned char
        *p;

      png_byte
        *s;

      int
        i;

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " recognized eXIf chunk");

      image=(Image *) png_get_user_chunk_ptr(ping);

      error_info=(PNGErrorInfo *) png_get_error_ptr(ping);

      profile=BlobToStringInfo((const void *) NULL,chunk->size+6);

      if (profile == (StringInfo *) NULL)
        {
          (void) ThrowMagickException(error_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(-1);
        }

      p=GetStringInfoDatum(profile);

      /* Initialize profile with "Exif\0\0" */
      *p++ ='E';
      *p++ ='x';
      *p++ ='i';
      *p++ ='f';
      *p++ ='\0';
      *p++ ='\0';

      s=chunk->data;
      i=0;

      if (chunk->size > 6)
        {
          /* Skip first 6 bytes if "Exif\0\0" is already present by
             accident */
          if (s[0] == 'E' && s[1] == 'x' && s[2] == 'i' && s[3] == 'f' &&
              s[4] == '\0' && s[5] == '\0')
            {
              s+=6;
              i=6;
              SetStringInfoLength(profile,chunk->size);
              p=GetStringInfoDatum(profile);
              /* Keep the "Exif\0\0" header that was just written */
              p+=6;
            }
        }

      /* copy chunk->data to profile */
      for (; i<chunk->size; i++)
        *p++ = *s++;

      (void) SetImageProfile(image,"exif",profile);
      profile=DestroyStringInfo(profile);

      return(1);
    }

  /* vpAg (deprecated, replaced by caNv) */

  if (chunk->name[0] == 118 &&
      chunk->name[1] == 112 &&
      chunk->name[2] ==  65 &&
      chunk->name[3] == 103)
    {
      /* recognized vpAg */

      if (chunk->size != 9)
        return(-1); /* Error return */

      if (chunk->data[8] != 0)
        return(0);  /* ImageMagick requires pixel units */

      image=(Image *) png_get_user_chunk_ptr(ping);

      image->page.width=(size_t) mng_get_long(chunk->data);
      image->page.height=(size_t) mng_get_long(&chunk->data[4]);

      return(1);
    }

  /* caNv */

  if (chunk->name[0] ==  99 &&
      chunk->name[1] ==  97 &&
      chunk->name[2] ==  78 &&
      chunk->name[3] == 118)
    {
      /* recognized caNv */

      if (chunk->size != 16)
        return(-1); /* Error return */

      image=(Image *) png_get_user_chunk_ptr(ping);

      image->page.width=(size_t) mng_get_long(chunk->data);
      image->page.height=(size_t) mng_get_long(&chunk->data[4]);
      image->page.x=(size_t) mng_get_long(&chunk->data[8]);
      image->page.y=(size_t) mng_get_long(&chunk->data[12]);

      /* Return one of the following: */
      /* return(-n);  chunk had an error */
      /* return(0);  did not recognize */
      /* return(n);  success */

      return(1);
    }

  return(0); /* Did not recognize */
}
#endif

#if defined(PNG_tIME_SUPPORTED)
static void read_tIME_chunk(Image *image,png_struct *ping,png_info *info)
{
  png_timep
    time;

  if (png_get_tIME(ping,info,&time))
    {
      char
        timestamp[21];

      FormatLocaleString(timestamp,21,"%04d-%02d-%02dT%02d:%02d:%02dZ",
        time->year,time->month,time->day,time->hour,time->minute,
        time->second);
      SetImageProperty(image,"png:tIME",timestamp);
    }
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d O n e P N G I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadOnePNGImage() reads a Portable Network Graphics (PNG) image file
%  (minus the 8-byte signature) and returns it.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
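%
%  The 8-byte PNG signature must already have been consumed by the caller;
%  this is reflected in the png_set_sig_bytes(ping,8) call made while
%  setting up the read below.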
% % The format of the ReadOnePNGImage method is: % % Image *ReadOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o mng_info: Specifies a pointer to a MngInfo structure. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { /* Read one PNG image */ /* To do: Read the tEXt/Creation Time chunk into the date:create property */ Image *image; char im_vers[32], libpng_runv[32], libpng_vers[32], zlib_runv[32], zlib_vers[32]; int intent, /* "PNG Rendering intent", which is ICC intent + 1 */ num_raw_profiles, num_text, num_text_total, num_passes, number_colors, pass, ping_bit_depth, ping_color_type, ping_file_depth, ping_interlace_method, ping_compression_method, ping_filter_method, ping_num_trans, unit_type; double file_gamma; LongPixelPacket transparent_color; MagickBooleanType logging, ping_found_cHRM, ping_found_gAMA, ping_found_iCCP, ping_found_sRGB, ping_found_sRGB_cHRM, ping_preserve_iCCP, status; MemoryInfo *volatile pixel_info; png_bytep ping_trans_alpha; png_color_16p ping_background, ping_trans_color; png_info *end_info, *ping_info; png_struct *ping; png_textp text; png_uint_32 ping_height, ping_width, x_resolution, y_resolution; ssize_t ping_rowbytes, y; register unsigned char *p; register IndexPacket *indexes; register ssize_t i, x; register PixelPacket *q; size_t length, row_offset; Quantum *volatile quantum_scanline; QuantumInfo *volatile quantum_info; ssize_t j; unsigned char *ping_pixels; #ifdef PNG_UNKNOWN_CHUNKS_SUPPORTED png_byte unused_chunks[]= { 104, 73, 83, 84, (png_byte) '\0', /* hIST */ 105, 84, 88, 116, (png_byte) '\0', /* iTXt */ 112, 67, 65, 76, (png_byte) '\0', /* pCAL */ 115, 67, 65, 76, (png_byte) '\0', /* sCAL */ 115, 80, 76, 84, (png_byte) '\0', /* sPLT */ #if !defined(PNG_tIME_SUPPORTED) 116, 73, 77, 69, (png_byte) '\0', /* tIME */ #endif #ifdef PNG_APNG_SUPPORTED /* libpng was built with APNG patch; */ /* ignore the APNG chunks */ 97, 99, 84, 76, (png_byte) '\0', /* acTL */ 102, 99, 84, 76, (png_byte) '\0', /* fcTL */ 102, 100, 65, 84, (png_byte) '\0', /* fdAT */ #endif }; #endif /* Define these outside of the following "if logging()" block so they will * show in debuggers. 
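 * im_vers, libpng_vers, and zlib_vers record the versions this coder
 * was compiled against, while libpng_runv and zlib_runv record the
 * versions actually loaded at run time; any mismatch is logged below.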
*/ *im_vers='\0'; (void) ConcatenateMagickString(im_vers, MagickLibVersionText,32); (void) ConcatenateMagickString(im_vers, MagickLibAddendum,32); *libpng_vers='\0'; (void) ConcatenateMagickString(libpng_vers, PNG_LIBPNG_VER_STRING,32); *libpng_runv='\0'; (void) ConcatenateMagickString(libpng_runv, png_get_libpng_ver(NULL),32); *zlib_vers='\0'; (void) ConcatenateMagickString(zlib_vers, ZLIB_VERSION,32); *zlib_runv='\0'; (void) ConcatenateMagickString(zlib_runv, zlib_version,32); logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOnePNGImage()\n" " IM version = %s\n" " Libpng version = %s", im_vers, libpng_vers); if (logging != MagickFalse) { if (LocaleCompare(libpng_vers,libpng_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " running with %s", libpng_runv); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Zlib version = %s", zlib_vers); if (LocaleCompare(zlib_vers,zlib_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " running with %s", zlib_runv); } } #if (PNG_LIBPNG_VER < 10200) if (image_info->verbose) printf("Your PNG library (libpng-%s) is rather old.\n", PNG_LIBPNG_VER_STRING); #endif #if (PNG_LIBPNG_VER >= 10400) # ifndef PNG_TRANSFORM_GRAY_TO_RGB /* Added at libpng-1.4.0beta67 */ if (image_info->verbose) { printf("Your PNG library (libpng-%s) is an old beta version.\n", PNG_LIBPNG_VER_STRING); printf("Please update it.\n"); } # endif #endif image=mng_info->image; if (logging != MagickFalse) { (void)LogMagickEvent(CoderEvent,GetMagickModule(), " Before reading:\n" " image->matte=%d\n" " image->rendering_intent=%d\n" " image->colorspace=%d\n" " image->gamma=%f", (int) image->matte, (int) image->rendering_intent, (int) image->colorspace, image->gamma); } intent=Magick_RenderingIntent_to_PNG_RenderingIntent(image->rendering_intent); /* Set to an out-of-range color unless tRNS chunk is present */ transparent_color.red=65537; transparent_color.green=65537; transparent_color.blue=65537; transparent_color.opacity=65537; number_colors=0; num_text = 0; num_text_total = 0; num_raw_profiles = 0; ping_found_cHRM = MagickFalse; ping_found_gAMA = MagickFalse; ping_found_iCCP = MagickFalse; ping_found_sRGB = MagickFalse; ping_found_sRGB_cHRM = MagickFalse; ping_preserve_iCCP = MagickFalse; /* Allocate the PNG structures */ #ifdef PNG_USER_MEM_SUPPORTED ping=png_create_read_struct_2(PNG_LIBPNG_VER_STRING, image, MagickPNGErrorHandler,MagickPNGWarningHandler, NULL, (png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free); #else ping=png_create_read_struct(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler); #endif if (ping == (png_struct *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); ping_info=png_create_info_struct(ping); if (ping_info == (png_info *) NULL) { png_destroy_read_struct(&ping,(png_info **) NULL,(png_info **) NULL); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } end_info=png_create_info_struct(ping); if (end_info == (png_info *) NULL) { png_destroy_read_struct(&ping,&ping_info,(png_info **) NULL); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixel_info=(MemoryInfo *) NULL; quantum_scanline = (Quantum *) NULL; quantum_info = (QuantumInfo *) NULL; if (setjmp(png_jmpbuf(ping))) { /* PNG image is corrupt. 
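 * Control reaches this handler via a longjmp() out of libpng (see
 * MagickPNGErrorHandler), so every resource that may have been
 * allocated inside the protected block below is checked and released
 * before returning.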
*/ png_destroy_read_struct(&ping,&ping_info,&end_info); if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); quantum_scanline=(Quantum *) RelinquishMagickMemory(quantum_scanline); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage() with error."); if (image != (Image *) NULL) { InheritException(exception,&image->exception); image=DestroyImageList(image); } return(image); } /* { For navigation to end of SETJMP-protected block. Within this * block, use png_error() instead of Throwing an Exception, to ensure * that libpng is able to clean up, and that the semaphore is unlocked. */ #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE LockSemaphoreInfo(ping_semaphore); #endif #ifdef PNG_BENIGN_ERRORS_SUPPORTED /* Allow benign errors */ png_set_benign_errors(ping, 1); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED /* Reject images with too many rows or columns */ png_set_user_limits(ping, (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(WidthResource)), (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(HeightResource))); #endif /* PNG_SET_USER_LIMITS_SUPPORTED */ /* Prepare PNG for reading. */ mng_info->image_found++; png_set_sig_bytes(ping,8); if (LocaleCompare(image_info->magick,"MNG") == 0) { #if defined(PNG_MNG_FEATURES_SUPPORTED) (void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES); png_set_read_fn(ping,image,png_get_data); #else #if defined(PNG_READ_EMPTY_PLTE_SUPPORTED) png_permit_empty_plte(ping,MagickTrue); png_set_read_fn(ping,image,png_get_data); #else mng_info->image=image; mng_info->bytes_in_read_buffer=0; mng_info->found_empty_plte=MagickFalse; mng_info->have_saved_bkgd_index=MagickFalse; png_set_read_fn(ping,mng_info,mng_get_data); #endif #endif } else png_set_read_fn(ping,image,png_get_data); { const char *value; value=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",value) == MagickFalse) { value=GetImageOption(image_info,"png:preserve-iCCP"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-iCCP"); if (value != NULL) ping_preserve_iCCP=MagickTrue; #if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED) /* Don't let libpng check for ICC/sRGB profile because we're going * to do that anyway. This feature was added at libpng-1.6.12. * If logging, go ahead and check and issue a warning as appropriate. 
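 * (When logging is enabled the check is deliberately left on, so that
 * libpng's diagnostics about defective sRGB profiles appear in the
 * log.)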
*/ if (logging == MagickFalse) png_set_option(ping, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON); #endif } #if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) else { /* Ignore the iCCP chunk */ png_set_keep_unknown_chunks(ping, 1, (png_bytep)mng_iCCP, 1); } #endif } #if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) /* Ignore unused chunks and all unknown chunks except for caNv and vpAg */ # if PNG_LIBPNG_VER < 10700 /* Avoid libpng16 warning */ png_set_keep_unknown_chunks(ping, 2, (png_bytep)NULL, 0); # else png_set_keep_unknown_chunks(ping, 1, (png_bytep)NULL, 0); # endif png_set_keep_unknown_chunks(ping, 2, (png_bytep)mng_caNv, 1); png_set_keep_unknown_chunks(ping, 2, (png_bytep)mng_vpAg, 1); png_set_keep_unknown_chunks(ping, 1, unused_chunks, (int)sizeof(unused_chunks)/5); /* Callback for other unknown chunks */ png_set_read_user_chunk_fn(ping, image, read_user_chunk_callback); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED #if (PNG_LIBPNG_VER >= 10400) /* Limit the size of the chunk storage cache used for sPLT, text, * and unknown chunks. */ png_set_chunk_cache_max(ping, 32767); #endif #endif #ifdef PNG_READ_CHECK_FOR_INVALID_INDEX_SUPPORTED /* Disable new libpng-1.5.10 feature */ png_set_check_for_invalid_index (ping, 0); #endif #if (PNG_LIBPNG_VER < 10400) # if defined(PNG_USE_PNGGCCRD) && defined(PNG_ASSEMBLER_CODE_SUPPORTED) && \ (PNG_LIBPNG_VER >= 10200) && (PNG_LIBPNG_VER < 10220) && defined(__i386__) /* Disable thread-unsafe features of pnggccrd */ if (png_access_version_number() >= 10200) { png_uint_32 mmx_disable_mask=0; png_uint_32 asm_flags; mmx_disable_mask |= ( PNG_ASM_FLAG_MMX_READ_COMBINE_ROW \ | PNG_ASM_FLAG_MMX_READ_FILTER_SUB \ | PNG_ASM_FLAG_MMX_READ_FILTER_AVG \ | PNG_ASM_FLAG_MMX_READ_FILTER_PAETH ); asm_flags=png_get_asm_flags(ping); png_set_asm_flags(ping, asm_flags & ~mmx_disable_mask); } # endif #endif png_read_info(ping,ping_info); /* Read and check IHDR chunk data */ png_get_IHDR(ping,ping_info,&ping_width,&ping_height, &ping_bit_depth,&ping_color_type, &ping_interlace_method,&ping_compression_method, &ping_filter_method); ping_file_depth = ping_bit_depth; /* Swap bytes if requested */ if (ping_file_depth == 16) { const char *value; value=GetImageOption(image_info,"png:swap-bytes"); if (value == NULL) value=GetImageArtifact(image,"png:swap-bytes"); if (value != NULL) png_set_swap(ping); } /* Save bit-depth and color-type in case we later want to write a PNG00 */ { char msg[MaxTextExtent]; (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_color_type); (void) SetImageProperty(image,"png:IHDR.color-type-orig",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_bit_depth); (void) SetImageProperty(image,"png:IHDR.bit-depth-orig",msg); } (void) png_get_tRNS(ping, ping_info, &ping_trans_alpha, &ping_num_trans, &ping_trans_color); (void) png_get_bKGD(ping, ping_info, &ping_background); if (ping_bit_depth < 8) { png_set_packing(ping); ping_bit_depth = 8; } image->depth=ping_bit_depth; image->depth=GetImageQuantumDepth(image,MagickFalse); image->interlace=ping_interlace_method != 0 ? 
PNGInterlace : NoInterlace; if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)) { image->rendering_intent=UndefinedIntent; intent=Magick_RenderingIntent_to_PNG_RenderingIntent(UndefinedIntent); (void) memset(&image->chromaticity,0, sizeof(image->chromaticity)); } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG width: %.20g, height: %.20g\n" " PNG color_type: %d, bit_depth: %d\n" " PNG compression_method: %d\n" " PNG interlace_method: %d, filter_method: %d", (double) ping_width, (double) ping_height, ping_color_type, ping_bit_depth, ping_compression_method, ping_interlace_method,ping_filter_method); } if (png_get_valid(ping,ping_info, PNG_INFO_iCCP)) { ping_found_iCCP=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG iCCP chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_gAMA)) { ping_found_gAMA=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG gAMA chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { ping_found_cHRM=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG cHRM chunk."); } if (ping_found_iCCP != MagickTrue && png_get_valid(ping,ping_info, PNG_INFO_sRGB)) { ping_found_sRGB=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG sRGB chunk."); } #ifdef PNG_READ_iCCP_SUPPORTED if (ping_found_iCCP !=MagickTrue && ping_found_sRGB != MagickTrue && png_get_valid(ping,ping_info, PNG_INFO_iCCP)) { ping_found_iCCP=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG iCCP chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_iCCP)) { int compression; #if (PNG_LIBPNG_VER < 10500) png_charp info; #else png_bytep info; #endif png_charp name; png_uint_32 profile_length; (void) png_get_iCCP(ping,ping_info,&name,(int *) &compression,&info, &profile_length); if (profile_length != 0) { StringInfo *profile; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG iCCP chunk."); profile=BlobToStringInfo(info,profile_length); if (profile == (StringInfo *) NULL) { png_warning(ping, "ICC profile is NULL"); profile=DestroyStringInfo(profile); } else { if (ping_preserve_iCCP == MagickFalse) { int icheck, got_crc=0; png_uint_32 length, profile_crc=0; unsigned char *data; length=(png_uint_32) GetStringInfoLength(profile); for (icheck=0; sRGB_info[icheck].len > 0; icheck++) { if (length == sRGB_info[icheck].len) { if (got_crc == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile (potentially sRGB)", (unsigned long) length); data=GetStringInfoDatum(profile); profile_crc=crc32(0,data,length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " with crc=%8x",(unsigned int) profile_crc); got_crc++; } if (profile_crc == sRGB_info[icheck].crc) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " It is sRGB with rendering intent = %s", Magick_RenderingIntentString_from_PNG_RenderingIntent( sRGB_info[icheck].intent)); if (image->rendering_intent==UndefinedIntent) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent( sRGB_info[icheck].intent); } break; } } } if (sRGB_info[icheck].len == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile not recognized as sRGB", (unsigned long) length); (void) 
SetImageProfile(image,"icc",profile); } } else /* Preserve-iCCP */ { (void) SetImageProfile(image,"icc",profile); } profile=DestroyStringInfo(profile); } } } #endif #if defined(PNG_READ_sRGB_SUPPORTED) { if (ping_found_iCCP==MagickFalse && png_get_valid(ping,ping_info, PNG_INFO_sRGB)) { if (png_get_sRGB(ping,ping_info,&intent)) { if (image->rendering_intent == UndefinedIntent) image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent (intent); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG sRGB chunk: rendering_intent: %d",intent); } } else if (mng_info->have_global_srgb) { if (image->rendering_intent == UndefinedIntent) image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent (mng_info->global_srgb_intent); } } #endif { if (!png_get_gAMA(ping,ping_info,&file_gamma)) if (mng_info->have_global_gama) png_set_gAMA(ping,ping_info,mng_info->global_gamma); if (png_get_gAMA(ping,ping_info,&file_gamma)) { image->gamma=(float) file_gamma; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG gAMA chunk: gamma: %f",file_gamma); } } if (!png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { if (mng_info->have_global_chrm != MagickFalse) { (void) png_set_cHRM(ping,ping_info, mng_info->global_chrm.white_point.x, mng_info->global_chrm.white_point.y, mng_info->global_chrm.red_primary.x, mng_info->global_chrm.red_primary.y, mng_info->global_chrm.green_primary.x, mng_info->global_chrm.green_primary.y, mng_info->global_chrm.blue_primary.x, mng_info->global_chrm.blue_primary.y); } } if (png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { (void) png_get_cHRM(ping,ping_info, &image->chromaticity.white_point.x, &image->chromaticity.white_point.y, &image->chromaticity.red_primary.x, &image->chromaticity.red_primary.y, &image->chromaticity.green_primary.x, &image->chromaticity.green_primary.y, &image->chromaticity.blue_primary.x, &image->chromaticity.blue_primary.y); ping_found_cHRM=MagickTrue; if (image->chromaticity.red_primary.x>0.6399f && image->chromaticity.red_primary.x<0.6401f && image->chromaticity.red_primary.y>0.3299f && image->chromaticity.red_primary.y<0.3301f && image->chromaticity.green_primary.x>0.2999f && image->chromaticity.green_primary.x<0.3001f && image->chromaticity.green_primary.y>0.5999f && image->chromaticity.green_primary.y<0.6001f && image->chromaticity.blue_primary.x>0.1499f && image->chromaticity.blue_primary.x<0.1501f && image->chromaticity.blue_primary.y>0.0599f && image->chromaticity.blue_primary.y<0.0601f && image->chromaticity.white_point.x>0.3126f && image->chromaticity.white_point.x<0.3128f && image->chromaticity.white_point.y>0.3289f && image->chromaticity.white_point.y<0.3291f) ping_found_sRGB_cHRM=MagickTrue; } if (image->rendering_intent != UndefinedIntent) { if (ping_found_sRGB != MagickTrue && (ping_found_gAMA != MagickTrue || (image->gamma > .45 && image->gamma < .46)) && (ping_found_cHRM != MagickTrue || ping_found_sRGB_cHRM != MagickFalse) && ping_found_iCCP != MagickTrue) { png_set_sRGB(ping,ping_info, Magick_RenderingIntent_to_PNG_RenderingIntent (image->rendering_intent)); file_gamma=1.000f/2.200f; ping_found_sRGB=MagickTrue; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting sRGB as if in input"); } } #if defined(PNG_oFFs_SUPPORTED) if (png_get_valid(ping,ping_info,PNG_INFO_oFFs)) { image->page.x=(ssize_t) png_get_x_offset_pixels(ping, ping_info); image->page.y=(ssize_t) png_get_y_offset_pixels(ping, ping_info); if (logging != MagickFalse) if 
(image->page.x || image->page.y) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG oFFs chunk: x: %.20g, y: %.20g.",(double) image->page.x,(double) image->page.y); } #endif #if defined(PNG_pHYs_SUPPORTED) if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { if (mng_info->have_global_phys) { png_set_pHYs(ping,ping_info, mng_info->global_x_pixels_per_unit, mng_info->global_y_pixels_per_unit, mng_info->global_phys_unit_type); } } x_resolution=0; y_resolution=0; unit_type=0; if (png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { /* Set image resolution. */ (void) png_get_pHYs(ping,ping_info,&x_resolution,&y_resolution, &unit_type); image->x_resolution=(double) x_resolution; image->y_resolution=(double) y_resolution; if (unit_type == PNG_RESOLUTION_METER) { image->units=PixelsPerCentimeterResolution; image->x_resolution=(double) x_resolution/100.0; image->y_resolution=(double) y_resolution/100.0; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.", (double) x_resolution,(double) y_resolution,unit_type); } #endif if (png_get_valid(ping,ping_info,PNG_INFO_PLTE)) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); if ((number_colors == 0) && ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)) { if (mng_info->global_plte_length) { png_set_PLTE(ping,ping_info,mng_info->global_plte, (int) mng_info->global_plte_length); if (!png_get_valid(ping,ping_info,PNG_INFO_tRNS)) if (mng_info->global_trns_length) { if (mng_info->global_trns_length > mng_info->global_plte_length) { png_warning(ping, "global tRNS has more entries than global PLTE"); } else { png_set_tRNS(ping,ping_info,mng_info->global_trns, (int) mng_info->global_trns_length,NULL); } } #ifdef PNG_READ_bKGD_SUPPORTED if ( #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED mng_info->have_saved_bkgd_index || #endif png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { png_color_16 background; #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED if (mng_info->have_saved_bkgd_index) background.index=mng_info->saved_bkgd_index; #endif if (png_get_valid(ping, ping_info, PNG_INFO_bKGD)) background.index=ping_background->index; background.red=(png_uint_16) mng_info->global_plte[background.index].red; background.green=(png_uint_16) mng_info->global_plte[background.index].green; background.blue=(png_uint_16) mng_info->global_plte[background.index].blue; background.gray=(png_uint_16) mng_info->global_plte[background.index].green; png_set_bKGD(ping,ping_info,&background); } #endif } else png_error(ping,"No global PLTE in file"); } } #ifdef PNG_READ_bKGD_SUPPORTED if (mng_info->have_global_bkgd && (!png_get_valid(ping,ping_info,PNG_INFO_bKGD))) image->background_color=mng_info->mng_global_bkgd; if (png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { unsigned int bkgd_scale; /* Set image background color. * Scale background components to 16-bit, then scale * to quantum depth */ bkgd_scale = 1; if (ping_file_depth == 1) bkgd_scale = 255; else if (ping_file_depth == 2) bkgd_scale = 85; else if (ping_file_depth == 4) bkgd_scale = 17; if (ping_file_depth <= 8) bkgd_scale *= 257; ping_background->red *= bkgd_scale; ping_background->green *= bkgd_scale; ping_background->blue *= bkgd_scale; if (logging != MagickFalse) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG bKGD chunk, raw ping_background=(%d,%d,%d).\n" " bkgd_scale=%d. 
ping_background=(%d,%d,%d).", ping_background->red,ping_background->green, ping_background->blue, bkgd_scale,ping_background->red, ping_background->green,ping_background->blue); } image->background_color.red= ScaleShortToQuantum(ping_background->red); image->background_color.green= ScaleShortToQuantum(ping_background->green); image->background_color.blue= ScaleShortToQuantum(ping_background->blue); image->background_color.opacity=OpaqueOpacity; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->background_color=(%.20g,%.20g,%.20g).", (double) image->background_color.red, (double) image->background_color.green, (double) image->background_color.blue); } #endif /* PNG_READ_bKGD_SUPPORTED */ if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) { /* Image has a tRNS chunk. */ int max_sample; size_t one = 1; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG tRNS chunk."); max_sample = (int) ((one << ping_file_depth) - 1); if ((ping_color_type == PNG_COLOR_TYPE_GRAY && (int)ping_trans_color->gray > max_sample) || (ping_color_type == PNG_COLOR_TYPE_RGB && ((int)ping_trans_color->red > max_sample || (int)ping_trans_color->green > max_sample || (int)ping_trans_color->blue > max_sample))) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Ignoring PNG tRNS chunk with out-of-range sample."); png_free_data(ping, ping_info, PNG_FREE_TRNS, 0); png_set_invalid(ping,ping_info,PNG_INFO_tRNS); image->matte=MagickFalse; } else { int scale_to_short; scale_to_short = 65535L/((1UL << ping_file_depth)-1); /* Scale transparent_color to short */ transparent_color.red= scale_to_short*ping_trans_color->red; transparent_color.green= scale_to_short*ping_trans_color->green; transparent_color.blue= scale_to_short*ping_trans_color->blue; transparent_color.opacity= scale_to_short*ping_trans_color->gray; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Raw tRNS graylevel = %d, scaled graylevel = %d.", ping_trans_color->gray,transparent_color.opacity); } transparent_color.red=transparent_color.opacity; transparent_color.green=transparent_color.opacity; transparent_color.blue=transparent_color.opacity; } } } #if defined(PNG_READ_sBIT_SUPPORTED) if (mng_info->have_global_sbit) { if (!png_get_valid(ping,ping_info,PNG_INFO_sBIT)) png_set_sBIT(ping,ping_info,&mng_info->global_sbit); } #endif num_passes=png_set_interlace_handling(ping); png_read_update_info(ping,ping_info); ping_rowbytes=png_get_rowbytes(ping,ping_info); /* Initialize image structure. */ mng_info->image_box.left=0; mng_info->image_box.right=(ssize_t) ping_width; mng_info->image_box.top=0; mng_info->image_box.bottom=(ssize_t) ping_height; if (mng_info->mng_type == 0) { mng_info->mng_width=ping_width; mng_info->mng_height=ping_height; mng_info->frame=mng_info->image_box; mng_info->clip=mng_info->image_box; } else { image->page.y=mng_info->y_off[mng_info->object_id]; } image->compression=ZipCompression; image->columns=ping_width; image->rows=ping_height; if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)) { double image_gamma = image->gamma; (void)LogMagickEvent(CoderEvent,GetMagickModule(), " image->gamma=%f",(float) image_gamma); if (image_gamma > 0.75) { /* Set image->rendering_intent to Undefined, * image->colorspace to GRAY, and reset image->chromaticity. 
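 * A file gamma above 0.75 is taken to mean the samples are already
 * linear (a gAMA of 1.0 encodes linear samples, while sRGB-like files
 * carry a gAMA of about 0.45455); that threshold selects between the
 * linear and non-linear colorspaces here and in the RGB branch below.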
           */
          image->intensity = Rec709LuminancePixelIntensityMethod;
          SetImageColorspace(image,LinearGRAYColorspace);
        }
      else
        {
          RenderingIntent
            save_rendering_intent = image->rendering_intent;

          ChromaticityInfo
            save_chromaticity = image->chromaticity;

          SetImageColorspace(image,GRAYColorspace);
          image->rendering_intent = save_rendering_intent;
          image->chromaticity = save_chromaticity;
        }

      image->gamma = image_gamma;
    }
  else
    {
      double
        image_gamma = image->gamma;

      (void)LogMagickEvent(CoderEvent,GetMagickModule(),
         " image->gamma=%f",(float) image_gamma);

      if (image_gamma > 0.75)
        {
          /* Gamma near 1.0 indicates linear samples; use the linear
           * RGB colorspace and the Rec709 luminance intensity method.
           */
          image->intensity = Rec709LuminancePixelIntensityMethod;
          SetImageColorspace(image,RGBColorspace);
        }
      else
        {
          RenderingIntent
            save_rendering_intent = image->rendering_intent;

          ChromaticityInfo
            save_chromaticity = image->chromaticity;

          SetImageColorspace(image,sRGBColorspace);
          image->rendering_intent = save_rendering_intent;
          image->chromaticity = save_chromaticity;
        }

      image->gamma = image_gamma;
    }

  (void)LogMagickEvent(CoderEvent,GetMagickModule(),
    " image->colorspace=%d",(int) image->colorspace);

  if (((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) ||
      ((int) ping_bit_depth < 16 &&
      (int) ping_color_type == PNG_COLOR_TYPE_GRAY))
    {
      size_t
        one;

      image->storage_class=PseudoClass;

      one=1;
      image->colors=one << ping_file_depth;

#if (MAGICKCORE_QUANTUM_DEPTH == 8)
      if (image->colors > 256)
        image->colors=256;
#else
      if (image->colors > 65536L)
        image->colors=65536L;
#endif

      if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)
        {
          png_colorp
            palette;

          (void) png_get_PLTE(ping,ping_info,&palette,&number_colors);
          image->colors=(size_t) number_colors;

          if (logging != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " Reading PNG PLTE chunk: number_colors: %d.",number_colors);
        }
    }

  if (image->storage_class == PseudoClass)
    {
      /* Initialize image colormap.
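 * For a PLTE image the first number_colors entries are overwritten
 * from the palette and any remaining entries are zeroed; for
 * low-bit-depth gray images the evenly spaced gray ramp built by
 * AcquireImageColormap() is kept as-is.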
*/ if (AcquireImageColormap(image,image->colors) == MagickFalse) png_error(ping,"Memory allocation failed"); if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); for (i=0; i < (ssize_t) number_colors; i++) { image->colormap[i].red=ScaleCharToQuantum(palette[i].red); image->colormap[i].green=ScaleCharToQuantum(palette[i].green); image->colormap[i].blue=ScaleCharToQuantum(palette[i].blue); } for ( ; i < (ssize_t) image->colors; i++) { image->colormap[i].red=0; image->colormap[i].green=0; image->colormap[i].blue=0; } } } /* Set some properties for reporting by "identify" */ { char msg[MaxTextExtent]; /* encode ping_width, ping_height, ping_file_depth, ping_color_type, ping_interlace_method in value */ (void) FormatLocaleString(msg,MaxTextExtent, "%d, %d",(int) ping_width, (int) ping_height); (void) SetImageProperty(image,"png:IHDR.width,height",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_file_depth); (void) SetImageProperty(image,"png:IHDR.bit_depth",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d (%s)", (int) ping_color_type, Magick_ColorType_from_PNG_ColorType((int)ping_color_type)); (void) SetImageProperty(image,"png:IHDR.color_type",msg); if (ping_interlace_method == 0) { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Not interlaced)", (int) ping_interlace_method); } else if (ping_interlace_method == 1) { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Adam7 method)", (int) ping_interlace_method); } else { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Unknown method)", (int) ping_interlace_method); } (void) SetImageProperty(image,"png:IHDR.interlace_method",msg); if (number_colors != 0) { (void) FormatLocaleString(msg,MaxTextExtent,"%d", (int) number_colors); (void) SetImageProperty(image,"png:PLTE.number_colors",msg); } } #if defined(PNG_tIME_SUPPORTED) read_tIME_chunk(image,ping,ping_info); #endif /* Read image scanlines. */ if (image->delay != 0) mng_info->scenes_found++; if ((mng_info->mng_type == 0 && (image->ping != MagickFalse)) || ( (image_info->number_scenes != 0) && (mng_info->scenes_found > (ssize_t) (image_info->first_scene+image_info->number_scenes)))) { /* This happens later in non-ping decodes */ if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) image->storage_class=DirectClass; image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? 
MagickTrue : MagickFalse; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping PNG image data for scene %.20g",(double) mng_info->scenes_found-1); png_destroy_read_struct(&ping,&ping_info,&end_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage()."); return(image); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { png_destroy_read_struct(&ping,&ping_info,&end_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif InheritException(exception,&image->exception); return(DestroyImageList(image)); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG IDAT chunk(s)"); if (num_passes > 1) pixel_info=AcquireVirtualMemory(image->rows,ping_rowbytes* sizeof(*ping_pixels)); else pixel_info=AcquireVirtualMemory(ping_rowbytes,sizeof(*ping_pixels)); if (pixel_info == (MemoryInfo *) NULL) png_error(ping,"Memory allocation failed"); ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Converting PNG pixels to pixel packets"); /* Convert PNG pixels to pixel packets. */ { MagickBooleanType found_transparent_pixel; found_transparent_pixel=MagickFalse; if (image->storage_class == DirectClass) { quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) png_error(ping,"Failed to allocate quantum_info"); (void) SetQuantumEndian(image,quantum_info,MSBEndian); for (pass=0; pass < num_passes; pass++) { /* Convert image to DirectClass pixel packets. */ image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? MagickTrue : MagickFalse; for (y=0; y < (ssize_t) image->rows; y++) { if (num_passes > 1) row_offset=ping_rowbytes*y; else row_offset=0; png_read_row(ping,ping_pixels+row_offset,NULL); if (pass < num_passes-1) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; else { if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayAlphaQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RGBAQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, IndexQuantum,ping_pixels+row_offset,exception); else /* ping_color_type == PNG_COLOR_TYPE_RGB */ (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RGBQuantum,ping_pixels+row_offset,exception); } if (found_transparent_pixel == MagickFalse) { /* Is there a transparent pixel in the row? 
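 * The scan stops at the first pixel whose alpha (or, for a color-keyed
 * tRNS, whose RGB triple) marks it as transparent; one hit is enough
 * to set image->matte, so the per-row search is skipped thereafter.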
*/ if (y== 0 && logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Looking for cheap transparent pixel"); for (x=(ssize_t) image->columns-1; x >= 0; x--) { if ((ping_color_type == PNG_COLOR_TYPE_RGBA || ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) && (GetPixelOpacity(q) != OpaqueOpacity)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ...got one."); found_transparent_pixel = MagickTrue; break; } if ((ping_color_type == PNG_COLOR_TYPE_RGB || ping_color_type == PNG_COLOR_TYPE_GRAY) && (ScaleQuantumToShort(GetPixelRed(q)) == transparent_color.red && ScaleQuantumToShort(GetPixelGreen(q)) == transparent_color.green && ScaleQuantumToShort(GetPixelBlue(q)) == transparent_color.blue)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ...got one."); found_transparent_pixel = MagickTrue; break; } q++; } } if (num_passes == 1) { status=SetImageProgress(image,LoadImageTag, (MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (y < (long) image->rows) break; if (num_passes != 1) { status=SetImageProgress(image,LoadImageTag,pass,num_passes); if (status == MagickFalse) break; } } quantum_info=DestroyQuantumInfo(quantum_info); } else /* image->storage_class != DirectClass */ for (pass=0; pass < num_passes; pass++) { register Quantum *r; /* Convert grayscale image to PseudoClass pixel packets. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Converting grayscale pixels to pixel packets"); image->matte=ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA ? MagickTrue : MagickFalse; quantum_scanline=(Quantum *) AcquireQuantumMemory(image->columns, (image->matte ? 2 : 1)*sizeof(*quantum_scanline)); if (quantum_scanline == (Quantum *) NULL) png_error(ping,"Memory allocation failed"); for (y=0; y < (ssize_t) image->rows; y++) { Quantum alpha; if (num_passes > 1) row_offset=ping_rowbytes*y; else row_offset=0; png_read_row(ping,ping_pixels+row_offset,NULL); if (pass < num_passes-1) continue; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); p=ping_pixels+row_offset; r=quantum_scanline; switch (ping_bit_depth) { case 8: { if (ping_color_type == 4) for (x=(ssize_t) image->columns-1; x >= 0; x--) { *r++=*p++; /* In image.h, OpaqueOpacity is 0 * TransparentOpacity is QuantumRange * In a PNG datastream, Opaque is QuantumRange * and Transparent is 0. 
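 * For example, a PNG alpha byte of 255 arrives here as
 * alpha=QuantumRange, which SetPixelAlpha() stores as opacity=0
 * (OpaqueOpacity), while a PNG alpha byte of 0 is stored as
 * opacity=QuantumRange (TransparentOpacity).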
*/ alpha=ScaleCharToQuantum((unsigned char)*p++); SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; q++; } else for (x=(ssize_t) image->columns-1; x >= 0; x--) *r++=*p++; break; } case 16: { for (x=(ssize_t) image->columns-1; x >= 0; x--) { #if (MAGICKCORE_QUANTUM_DEPTH >= 16) unsigned long quantum; if (image->colors > 256) quantum=(((unsigned int) *p++) << 8); else quantum=0; quantum|=(*p++); *r=ScaleShortToQuantum(quantum); r++; if (ping_color_type == 4) { if (image->colors > 256) quantum=(((unsigned int) *p++) << 8); else quantum=0; quantum|=(*p++); alpha=ScaleShortToQuantum(quantum); SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; q++; } #else /* MAGICKCORE_QUANTUM_DEPTH == 8 */ *r++=(*p++); p++; /* strip low byte */ if (ping_color_type == 4) { alpha=*p++; SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; p++; q++; } #endif } break; } default: break; } /* Transfer image scanline. */ r=quantum_scanline; for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,*r++); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (num_passes == 1) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } quantum_scanline=(Quantum *) RelinquishMagickMemory(quantum_scanline); if (y < (long) image->rows) break; if (num_passes != 1) { status=SetImageProgress(image,LoadImageTag,pass,num_passes); if (status == MagickFalse) break; } } image->matte=found_transparent_pixel; if (logging != MagickFalse) { if (found_transparent_pixel != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found transparent pixel"); else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No transparent pixel was found"); ping_color_type&=0x03; } } } if (image->storage_class == PseudoClass) { MagickBooleanType matte; matte=image->matte; image->matte=MagickFalse; (void) SyncImage(image); image->matte=matte; } png_read_end(ping,end_info); if (image_info->number_scenes != 0 && mng_info->scenes_found-1 < (ssize_t) image_info->first_scene && image->delay != 0) { png_destroy_read_struct(&ping,&ping_info,&end_info); pixel_info=RelinquishVirtualMemory(pixel_info); image->colors=2; (void) SetImageBackgroundColor(image); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage() early."); return(image); } if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) { ClassType storage_class; /* Image has a transparent background. */ storage_class=image->storage_class; image->matte=MagickTrue; /* Balfour fix from imagemagick discourse server, 5 Feb 2010 */ if (storage_class == PseudoClass) { if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { for (x=0; x < ping_num_trans; x++) { image->colormap[x].opacity = ScaleCharToQuantum((unsigned char)(255-ping_trans_alpha[x])); } } else if (ping_color_type == PNG_COLOR_TYPE_GRAY) { for (x=0; x < (int) image->colors; x++) { if (ScaleQuantumToShort(image->colormap[x].red) == transparent_color.opacity) { image->colormap[x].opacity = (Quantum) TransparentOpacity; } } } (void) SyncImage(image); } #if 1 /* Should have already been done above, but glennrp problem P10 * needs this. 
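 * This DirectClass pass re-applies the tRNS color key to the raw
 * pixels, comparing each channel against transparent_color at 16-bit
 * precision.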
*/ else { for (y=0; y < (ssize_t) image->rows; y++) { image->storage_class=storage_class; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); /* Caution: on a Q8 build, this does not distinguish between * 16-bit colors that differ only in the low byte */ for (x=(ssize_t) image->columns-1; x >= 0; x--) { if (ScaleQuantumToShort(GetPixelRed(q)) == transparent_color.red && ScaleQuantumToShort(GetPixelGreen(q)) == transparent_color.green && ScaleQuantumToShort(GetPixelBlue(q)) == transparent_color.blue) { SetPixelOpacity(q,TransparentOpacity); } else { SetPixelOpacity(q,OpaqueOpacity); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #endif image->storage_class=DirectClass; } for (j = 0; j < 2; j++) { if (j == 0) status = png_get_text(ping,ping_info,&text,&num_text) != 0 ? MagickTrue : MagickFalse; else status = png_get_text(ping,end_info,&text,&num_text) != 0 ? MagickTrue : MagickFalse; if (status != MagickFalse) for (i=0; i < (ssize_t) num_text; i++) { /* Check for a profile */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG text chunk"); if (strlen(text[i].key) > 16 && memcmp(text[i].key, "Raw profile type ",17) == 0) { const char *value; value=GetImageOption(image_info,"profile:skip"); if (IsOptionMember(text[i].key+17,value) == MagickFalse) { (void) Magick_png_read_raw_profile(ping,image,image_info,text, (int) i); num_raw_profiles++; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Read raw profile %s",text[i].key+17); } else { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping raw profile %s",text[i].key+17); } } else { char *value; length=text[i].text_length; value=(char *) AcquireQuantumMemory(length+MaxTextExtent, sizeof(*value)); if (value == (char *) NULL) png_error(ping,"Memory allocation failed"); *value='\0'; (void) ConcatenateMagickString(value,text[i].text,length+2); /* Don't save "density" or "units" property if we have a pHYs * chunk */ if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs) || (LocaleCompare(text[i].key,"density") != 0 && LocaleCompare(text[i].key,"units") != 0)) (void) SetImageProperty(image,text[i].key,value); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " length: %lu\n" " Keyword: %s", (unsigned long) length, text[i].key); } value=DestroyString(value); } } num_text_total += num_text; } #ifdef MNG_OBJECT_BUFFERS /* Store the object if necessary. */ if (object_id && !mng_info->frozen[object_id]) { if (mng_info->ob[object_id] == (MngBuffer *) NULL) { /* create a new object buffer. 
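 * (MNG object buffers keep a clone of the decoded image under its
 * object id so that later chunks referencing that id can reuse it
 * without re-decoding.)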
*/ mng_info->ob[object_id]=(MngBuffer *) AcquireMagickMemory(sizeof(MngBuffer)); if (mng_info->ob[object_id] != (MngBuffer *) NULL) { mng_info->ob[object_id]->image=(Image *) NULL; mng_info->ob[object_id]->reference_count=1; } } if ((mng_info->ob[object_id] == (MngBuffer *) NULL) || mng_info->ob[object_id]->frozen) { if (mng_info->ob[object_id] == (MngBuffer *) NULL) png_error(ping,"Memory allocation failed"); if (mng_info->ob[object_id]->frozen) png_error(ping,"Cannot overwrite frozen MNG object buffer"); } else { if (mng_info->ob[object_id]->image != (Image *) NULL) mng_info->ob[object_id]->image=DestroyImage (mng_info->ob[object_id]->image); mng_info->ob[object_id]->image=CloneImage(image,0,0,MagickTrue, &image->exception); if (mng_info->ob[object_id]->image != (Image *) NULL) mng_info->ob[object_id]->image->file=(FILE *) NULL; else png_error(ping, "Cloning image for object buffer failed"); if (ping_width > 250000L || ping_height > 250000L) png_error(ping,"PNG Image dimensions are too large."); mng_info->ob[object_id]->width=ping_width; mng_info->ob[object_id]->height=ping_height; mng_info->ob[object_id]->color_type=ping_color_type; mng_info->ob[object_id]->sample_depth=ping_bit_depth; mng_info->ob[object_id]->interlace_method=ping_interlace_method; mng_info->ob[object_id]->compression_method= ping_compression_method; mng_info->ob[object_id]->filter_method=ping_filter_method; if (png_get_valid(ping,ping_info,PNG_INFO_PLTE)) { png_colorp plte; /* Copy the PLTE to the object buffer. */ png_get_PLTE(ping,ping_info,&plte,&number_colors); mng_info->ob[object_id]->plte_length=number_colors; for (i=0; i < number_colors; i++) { mng_info->ob[object_id]->plte[i]=plte[i]; } } else mng_info->ob[object_id]->plte_length=0; } } #endif /* Set image->matte to MagickTrue if the input colortype supports * alpha or if a valid tRNS chunk is present, no matter whether there * is actual transparency present. */ image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? MagickTrue : MagickFalse; #if 0 /* I'm not sure what's wrong here but it does not work. 
*/ if (image->matte != MagickFalse) { if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) (void) SetImageType(image,GrayscaleMatteType); else if (ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) SetImageType(image,PaletteMatteType); else (void) SetImageType(image,TrueColorMatteType); } else { if (ping_color_type == PNG_COLOR_TYPE_GRAY) (void) SetImageType(image,GrayscaleType); else if (ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) SetImageType(image,PaletteType); else (void) SetImageType(image,TrueColorType); } #endif /* Set more properties for identify to retrieve */ { char msg[MaxTextExtent]; if (num_text_total != 0) { /* libpng doesn't tell us whether they were tEXt, zTXt, or iTXt */ (void) FormatLocaleString(msg,MaxTextExtent, "%d tEXt/zTXt/iTXt chunks were found", num_text_total); (void) SetImageProperty(image,"png:text",msg); } if (num_raw_profiles != 0) { (void) FormatLocaleString(msg,MaxTextExtent, "%d were found", num_raw_profiles); (void) SetImageProperty(image,"png:text-encoded profiles",msg); } /* cHRM chunk: */ if (ping_found_cHRM != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found (see Chromaticity, above)"); (void) SetImageProperty(image,"png:cHRM",msg); } /* bKGD chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found (see Background color, above)"); (void) SetImageProperty(image,"png:bKGD",msg); } (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found"); /* iCCP chunk: */ if (ping_found_iCCP != MagickFalse) (void) SetImageProperty(image,"png:iCCP",msg); if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) (void) SetImageProperty(image,"png:tRNS",msg); #if defined(PNG_sRGB_SUPPORTED) /* sRGB chunk: */ if (ping_found_sRGB != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent, "intent=%d (%s)", (int) intent, Magick_RenderingIntentString_from_PNG_RenderingIntent(intent)); (void) SetImageProperty(image,"png:sRGB",msg); } #endif /* gAMA chunk: */ if (ping_found_gAMA != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent, "gamma=%.8g (See Gamma, above)", file_gamma); (void) SetImageProperty(image,"png:gAMA",msg); } #if defined(PNG_pHYs_SUPPORTED) /* pHYs chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { (void) FormatLocaleString(msg,MaxTextExtent, "x_res=%.10g, y_res=%.10g, units=%d", (double) x_resolution,(double) y_resolution, unit_type); (void) SetImageProperty(image,"png:pHYs",msg); } #endif #if defined(PNG_oFFs_SUPPORTED) /* oFFs chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_oFFs)) { (void) FormatLocaleString(msg,MaxTextExtent,"x_off=%.20g, y_off=%.20g", (double) image->page.x,(double) image->page.y); (void) SetImageProperty(image,"png:oFFs",msg); } #endif #if defined(PNG_tIME_SUPPORTED) read_tIME_chunk(image,ping,end_info); #endif /* caNv chunk: */ if ((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows) || (image->page.x != 0 || image->page.y != 0)) { (void) FormatLocaleString(msg,MaxTextExtent, "width=%.20g, height=%.20g, x_offset=%.20g, y_offset=%.20g", (double) image->page.width,(double) image->page.height, (double) image->page.x,(double) image->page.y); (void) SetImageProperty(image,"png:caNv",msg); } } /* Relinquish resources. 
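 * png_destroy_read_struct() releases everything libpng allocated for
 * this decode, including text and unknown-chunk storage; the pixel
 * staging buffer is returned separately through
 * RelinquishVirtualMemory().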
 */
  png_destroy_read_struct(&ping,&ping_info,&end_info);

  pixel_info=RelinquishVirtualMemory(pixel_info);

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " exit ReadOnePNGImage()");

#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
  UnlockSemaphoreInfo(ping_semaphore);
#endif

  /* } for navigation to beginning of SETJMP-protected block, revert to
   * Throwing an Exception when an error occurs.
   */

  return(image);

  /* end of reading one PNG image */
}

static Image *ReadPNGImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    logging,
    status;

  MngInfo
    *mng_info;

  char
    magic_number[MaxTextExtent];

  ssize_t
    count;

  /* Open image file. */

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);

  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);

  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadPNGImage()");
  image=AcquireImage(image_info);
  mng_info=(MngInfo *) NULL;
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);

  if (status == MagickFalse)
    ThrowReaderException(FileOpenError,"UnableToOpenFile");

  /* Verify PNG signature. */

  count=ReadBlob(image,8,(unsigned char *) magic_number);

  if (count < 8 || memcmp(magic_number,"\211PNG\r\n\032\n",8) != 0)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  /* Verify that the file size is large enough to contain a PNG
     datastream. */

  if (GetBlobSize(image) < 61)
    ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");

  /* Allocate a MngInfo structure. */

  mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo));

  if (mng_info == (MngInfo *) NULL)
    ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");

  /* Initialize members of the MngInfo structure.
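 * A zeroed MngInfo is a valid context for a standalone PNG:
 * mng_type 0 selects the non-MNG code paths in ReadOnePNGImage().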
*/
  (void) memset(mng_info,0,sizeof(MngInfo));
  mng_info->image=image;
  image=ReadOnePNGImage(mng_info,image_info,exception);
  mng_info=MngInfoFreeStruct(mng_info);

  if (image == (Image *) NULL)
    {
      if (logging != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "exit ReadPNGImage() with error");

      return((Image *) NULL);
    }

  (void) CloseBlob(image);

  if ((image->columns == 0) || (image->rows == 0))
    {
      if (logging != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "exit ReadPNGImage() with error.");

      ThrowReaderException(CorruptImageError,"CorruptImage");
    }

  if ((IssRGBColorspace(image->colorspace) != MagickFalse) &&
      ((image->gamma < .45) || (image->gamma > .46)) &&
           !(image->chromaticity.red_primary.x>0.6399f &&
           image->chromaticity.red_primary.x<0.6401f &&
           image->chromaticity.red_primary.y>0.3299f &&
           image->chromaticity.red_primary.y<0.3301f &&
           image->chromaticity.green_primary.x>0.2999f &&
           image->chromaticity.green_primary.x<0.3001f &&
           image->chromaticity.green_primary.y>0.5999f &&
           image->chromaticity.green_primary.y<0.6001f &&
           image->chromaticity.blue_primary.x>0.1499f &&
           image->chromaticity.blue_primary.x<0.1501f &&
           image->chromaticity.blue_primary.y>0.0599f &&
           image->chromaticity.blue_primary.y<0.0601f &&
           image->chromaticity.white_point.x>0.3126f &&
           image->chromaticity.white_point.x<0.3128f &&
           image->chromaticity.white_point.y>0.3289f &&
           image->chromaticity.white_point.y<0.3291f))
    {
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
         "SetImageColorspace to RGBColorspace");
      SetImageColorspace(image,RGBColorspace);
    }

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  page.w: %.20g, page.h: %.20g, page.x: %.20g, page.y: %.20g.",
      (double) image->page.width,(double) image->page.height,
      (double) image->page.x,(double) image->page.y);

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadPNGImage()");

  return(image);
}

#if defined(JNG_SUPPORTED)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d O n e J N G I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadOneJNGImage() reads a JPEG Network Graphics (JNG) image file
%  (minus the 8-byte signature) and returns it.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  JNG support written by Glenn Randers-Pehrson, glennrp@image...
%
%  The format of the ReadOneJNGImage method is:
%
%      Image *ReadOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info,
%         ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o mng_info: Specifies a pointer to a MngInfo structure.
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
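%
%  A JNG datastream carries a JPEG-encoded color image and, optionally, a
%  PNG- or JPEG-encoded alpha channel.  The reader below copies the two
%  streams into temporary blobs, decodes each with the appropriate coder,
%  and then merges the alpha samples into the main image.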
% */ static void DestroyJNG(unsigned char *chunk,Image **color_image, ImageInfo **color_image_info,Image **alpha_image,ImageInfo **alpha_image_info) { (void) RelinquishMagickMemory(chunk); if (color_image_info && *color_image_info) { DestroyImageInfo(*color_image_info); *color_image_info = (ImageInfo *)NULL; } if (alpha_image_info && *alpha_image_info) { DestroyImageInfo(*alpha_image_info); *alpha_image_info = (ImageInfo *)NULL; } if (color_image && *color_image) { DestroyImage(*color_image); *color_image = (Image *)NULL; } if (alpha_image && *alpha_image) { DestroyImage(*alpha_image); *alpha_image = (Image *)NULL; } } static Image *ReadOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { Image *alpha_image, *color_image, *image, *jng_image; ImageInfo *alpha_image_info, *color_image_info; MagickBooleanType logging; int unique_filenames; ssize_t y; MagickBooleanType status; png_uint_32 jng_height, jng_width; png_byte jng_color_type, jng_image_sample_depth, jng_image_compression_method, jng_image_interlace_method, jng_alpha_sample_depth, jng_alpha_compression_method, jng_alpha_filter_method, jng_alpha_interlace_method; register const PixelPacket *s; register ssize_t i, x; register PixelPacket *q; register unsigned char *p; unsigned int read_JSEP, reading_idat; size_t length; jng_alpha_compression_method=0; jng_alpha_sample_depth=8; jng_color_type=0; jng_height=0; jng_width=0; alpha_image=(Image *) NULL; color_image=(Image *) NULL; alpha_image_info=(ImageInfo *) NULL; color_image_info=(ImageInfo *) NULL; unique_filenames=0; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOneJNGImage()"); image=mng_info->image; if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " AcquireNextImage()"); AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; /* Signature bytes have already been read. */ read_JSEP=MagickFalse; reading_idat=MagickFalse; for (;;) { char type[MaxTextExtent]; unsigned char *chunk; unsigned int count; /* Read a new JNG chunk. 
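
         A JNG chunk has the PNG chunk layout: a 4-byte MSB length, a
         4-byte type, "length" bytes of data, and a 4-byte CRC.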
*/
      status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
        2*GetBlobSize(image));

      if (status == MagickFalse)
        break;

      type[0]='\0';
      (void) ConcatenateMagickString(type,"errr",MaxTextExtent);
      length=(size_t) ReadBlobMSBLong(image);
      count=(unsigned int) ReadBlob(image,4,(unsigned char *) type);

      if (logging != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  Reading JNG chunk type %c%c%c%c, length: %.20g",
          type[0],type[1],type[2],type[3],(double) length);

      if (length > PNG_UINT_31_MAX || count == 0)
        {
          DestroyJNG(NULL,&color_image,&color_image_info,
            &alpha_image,&alpha_image_info);
          ThrowReaderException(CorruptImageError,"CorruptImage");
        }

      p=NULL;
      chunk=(unsigned char *) NULL;

      if (length != 0)
        {
          if (length > GetBlobSize(image))
            {
              DestroyJNG(NULL,&color_image,&color_image_info,
                &alpha_image,&alpha_image_info);
              ThrowReaderException(CorruptImageError,
                "InsufficientImageDataInFile");
            }

          chunk=(unsigned char *) AcquireQuantumMemory(length,sizeof(*chunk));

          if (chunk == (unsigned char *) NULL)
            {
              DestroyJNG(NULL,&color_image,&color_image_info,
                &alpha_image,&alpha_image_info);
              ThrowReaderException(ResourceLimitError,
                "MemoryAllocationFailed");
            }

          for (i=0; i < (ssize_t) length; i++)
          {
            int
              c;

            c=ReadBlobByte(image);

            if (c == EOF)
              break;

            chunk[i]=(unsigned char) c;
          }

          for ( ; i < (ssize_t) length; i++)
            chunk[i]='\0';

          p=chunk;
        }

      (void) ReadBlobMSBLong(image);  /* read crc word */

      if (memcmp(type,mng_JHDR,4) == 0)
        {
          if (length == 16)
            {
              jng_width=(png_uint_32) mng_get_long(p);
              jng_height=(png_uint_32) mng_get_long(&p[4]);

              if ((jng_width == 0) || (jng_height == 0))
                {
                  DestroyJNG(chunk,&color_image,&color_image_info,
                    &alpha_image,&alpha_image_info);
                  ThrowReaderException(CorruptImageError,
                    "NegativeOrZeroImageSize");
                }

              jng_color_type=p[8];
              jng_image_sample_depth=p[9];
              jng_image_compression_method=p[10];
              jng_image_interlace_method=p[11];

              image->interlace=jng_image_interlace_method != 0 ?
                PNGInterlace : NoInterlace;

              jng_alpha_sample_depth=p[12];
              jng_alpha_compression_method=p[13];
              jng_alpha_filter_method=p[14];
              jng_alpha_interlace_method=p[15];

              if (logging != MagickFalse)
                {
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    "    jng_width:      %16lu,    jng_height:     %16lu\n"
                    "    jng_color_type: %16d,     jng_image_sample_depth: %3d\n"
                    "    jng_image_compression_method:%3d",
                    (unsigned long) jng_width, (unsigned long) jng_height,
                    jng_color_type, jng_image_sample_depth,
                    jng_image_compression_method);

                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    "    jng_image_interlace_method:  %3d"
                    "    jng_alpha_sample_depth:      %3d",
                    jng_image_interlace_method,
                    jng_alpha_sample_depth);

                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    "    jng_alpha_compression_method:%3d\n"
                    "    jng_alpha_filter_method:     %3d\n"
                    "    jng_alpha_interlace_method:  %3d",
                    jng_alpha_compression_method,
                    jng_alpha_filter_method,
                    jng_alpha_interlace_method);
                }
            }

          chunk=(unsigned char *) RelinquishMagickMemory(chunk);

          if (jng_width > 65535 || jng_height > 65535 ||
              (MagickSizeType) jng_width >
                GetMagickResourceLimit(WidthResource) ||
              (MagickSizeType) jng_height >
                GetMagickResourceLimit(HeightResource))
            {
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "    JNG width or height too large: (%lu x %lu)",
                (unsigned long) jng_width,(unsigned long) jng_height);
              DestroyJNG(NULL,&color_image,&color_image_info,
                &alpha_image,&alpha_image_info);  /* chunk is already NULL */
              ThrowReaderException(CorruptImageError,"ImproperImageHeader");
            }

          continue;
        }

      if ((reading_idat == MagickFalse) && (read_JSEP == MagickFalse) &&
          ((memcmp(type,mng_JDAT,4) == 0) || (memcmp(type,mng_JdAA,4) == 0) ||
           (memcmp(type,mng_IDAT,4) == 0) || (memcmp(type,mng_JDAA,4) == 0)))
        {
          /*
             o create color_image
             o open color_blob, attached to color_image
             o if (color type has alpha)
                 open alpha_blob, attached to alpha_image
          */
          if (logging != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              "    Creating color_blob.");

          color_image_info=(ImageInfo *) AcquireMagickMemory(sizeof(ImageInfo));

          if (color_image_info == (ImageInfo *) NULL)
            {
              DestroyJNG(chunk,&color_image,&color_image_info,
                &alpha_image,&alpha_image_info);
              ThrowReaderException(ResourceLimitError,
                "MemoryAllocationFailed");
            }

          GetImageInfo(color_image_info);
          color_image=AcquireImage(color_image_info);

          if (color_image == (Image *) NULL)
            {
              DestroyJNG(chunk,&color_image,&color_image_info,
                &alpha_image,&alpha_image_info);
              ThrowReaderException(ResourceLimitError,
                "MemoryAllocationFailed");
            }

          (void) AcquireUniqueFilename(color_image->filename);
          unique_filenames++;
          status=OpenBlob(color_image_info,color_image,WriteBinaryBlobMode,
            exception);

          if (status == MagickFalse)
            {
              DestroyJNG(chunk,&color_image,&color_image_info,
                &alpha_image,&alpha_image_info);
              return(DestroyImageList(image));
            }

          if ((image_info->ping == MagickFalse) && (jng_color_type >= 12))
            {
              alpha_image_info=(ImageInfo *)
                AcquireMagickMemory(sizeof(ImageInfo));

              if (alpha_image_info == (ImageInfo *) NULL)
                {
                  DestroyJNG(chunk,&color_image,&color_image_info,
                    &alpha_image,&alpha_image_info);
                  ThrowReaderException(ResourceLimitError,
                    "MemoryAllocationFailed");
                }

              GetImageInfo(alpha_image_info);
              alpha_image=AcquireImage(alpha_image_info);

              if (alpha_image == (Image *) NULL)
                {
                  DestroyJNG(chunk,&color_image,&color_image_info,
                    &alpha_image,&alpha_image_info);
                  ThrowReaderException(ResourceLimitError,
                    "MemoryAllocationFailed");
                }

              if (logging != MagickFalse)
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  "    Creating alpha_blob.");

              (void) AcquireUniqueFilename(alpha_image->filename);
              unique_filenames++;
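
              /*
                Buffer the alpha channel in its own temporary blob.  When
                the alpha is PNG-encoded (jng_alpha_compression_method is
                0), a minimal grayscale PNG is synthesized around the
                incoming IDAT chunks: the signature and IHDR are written
                now, and an IEND is appended once the JNG IEND arrives.
              */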
              status=OpenBlob(alpha_image_info,alpha_image,
                WriteBinaryBlobMode,exception);

              if (status == MagickFalse)
                {
                  DestroyJNG(chunk,&color_image,&color_image_info,
                    &alpha_image,&alpha_image_info);
                  return(DestroyImageList(image));
                }

              if (jng_alpha_compression_method == 0)
                {
                  unsigned char
                    data[18];

                  if (logging != MagickFalse)
                    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                      "    Writing IHDR chunk to alpha_blob.");

                  (void) WriteBlob(alpha_image,8,(const unsigned char *)
                    "\211PNG\r\n\032\n");

                  (void) WriteBlobMSBULong(alpha_image,13L);
                  PNGType(data,mng_IHDR);
                  LogPNGChunk(logging,mng_IHDR,13L);
                  PNGLong(data+4,jng_width);
                  PNGLong(data+8,jng_height);
                  data[12]=jng_alpha_sample_depth;
                  data[13]=0; /* color_type gray */
                  data[14]=0; /* compression method 0 */
                  data[15]=0; /* filter_method 0 */
                  data[16]=0; /* interlace_method 0 */
                  (void) WriteBlob(alpha_image,17,data);
                  (void) WriteBlobMSBULong(alpha_image,crc32(0,data,17));
                }
            }

          reading_idat=MagickTrue;
        }

      if (memcmp(type,mng_JDAT,4) == 0)
        {
          /* Copy chunk to color_image->blob */

          if (logging != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              "    Copying JDAT chunk data to color_blob.");

          if (length != 0)
            {
              (void) WriteBlob(color_image,length,chunk);
              chunk=(unsigned char *) RelinquishMagickMemory(chunk);
            }

          continue;
        }

      if (memcmp(type,mng_IDAT,4) == 0)
        {
          png_byte
            data[5];

          /* Copy IDAT header and chunk data to alpha_image->blob */

          if (alpha_image != NULL && image_info->ping == MagickFalse)
            {
              if (logging != MagickFalse)
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  "    Copying IDAT chunk data to alpha_blob.");

              (void) WriteBlobMSBULong(alpha_image,(size_t) length);
              PNGType(data,mng_IDAT);
              LogPNGChunk(logging,mng_IDAT,length);
              (void) WriteBlob(alpha_image,4,data);
              (void) WriteBlob(alpha_image,length,chunk);
              (void) WriteBlobMSBULong(alpha_image,
                crc32(crc32(0,data,4),chunk,(uInt) length));
            }

          chunk=(unsigned char *) RelinquishMagickMemory(chunk);

          continue;
        }

      if ((memcmp(type,mng_JDAA,4) == 0) || (memcmp(type,mng_JdAA,4) == 0))
        {
          /* Copy chunk data to alpha_image->blob */

          if (alpha_image != NULL && image_info->ping == MagickFalse)
            {
              if (logging != MagickFalse)
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  "    Copying JDAA chunk data to alpha_blob.");

              (void) WriteBlob(alpha_image,length,chunk);
            }

          chunk=(unsigned char *) RelinquishMagickMemory(chunk);

          continue;
        }

      if (memcmp(type,mng_JSEP,4) == 0)
        {
          read_JSEP=MagickTrue;

          chunk=(unsigned char *) RelinquishMagickMemory(chunk);

          continue;
        }

      if (memcmp(type,mng_bKGD,4) == 0)
        {
          if (length == 2)
            {
              image->background_color.red=ScaleCharToQuantum(p[1]);
              image->background_color.green=image->background_color.red;
              image->background_color.blue=image->background_color.red;
            }

          if (length == 6)
            {
              image->background_color.red=ScaleCharToQuantum(p[1]);
              image->background_color.green=ScaleCharToQuantum(p[3]);
              image->background_color.blue=ScaleCharToQuantum(p[5]);
            }

          chunk=(unsigned char *) RelinquishMagickMemory(chunk);
          continue;
        }

      if (memcmp(type,mng_gAMA,4) == 0)
        {
          if (length == 4)
            image->gamma=((float) mng_get_long(p))*0.00001;

          chunk=(unsigned char *) RelinquishMagickMemory(chunk);
          continue;
        }

      if (memcmp(type,mng_cHRM,4) == 0)
        {
          if (length == 32)
            {
              image->chromaticity.white_point.x=0.00001*mng_get_long(p);
              image->chromaticity.white_point.y=0.00001*mng_get_long(&p[4]);
              image->chromaticity.red_primary.x=0.00001*mng_get_long(&p[8]);
              image->chromaticity.red_primary.y=0.00001*mng_get_long(&p[12]);
              image->chromaticity.green_primary.x=0.00001*mng_get_long(&p[16]);
image->chromaticity.green_primary.y=0.00001*mng_get_long(&p[20]); image->chromaticity.blue_primary.x=0.00001*mng_get_long(&p[24]); image->chromaticity.blue_primary.y=0.00001*mng_get_long(&p[28]); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_sRGB,4) == 0) { if (length == 1) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent(p[0]); image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_oFFs,4) == 0) { if (length > 8) { image->page.x=(ssize_t) mng_get_long(p); image->page.y=(ssize_t) mng_get_long(&p[4]); if ((int) p[8] != 0) { image->page.x/=10000; image->page.y/=10000; } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_pHYs,4) == 0) { if (length > 8) { image->x_resolution=(double) mng_get_long(p); image->y_resolution=(double) mng_get_long(&p[4]); if ((int) p[8] == PNG_RESOLUTION_METER) { image->units=PixelsPerCentimeterResolution; image->x_resolution=image->x_resolution/100.0f; image->y_resolution=image->y_resolution/100.0f; } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #if 0 if (memcmp(type,mng_iCCP,4) == 0) { /* To do: */ chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); if (memcmp(type,mng_IEND,4)) continue; break; } /* IEND found */ /* Finish up reading image data: o read main image from color_blob. o close color_blob. o if (color_type has alpha) if alpha_encoding is PNG read secondary image from alpha_blob via ReadPNG if alpha_encoding is JPEG read secondary image from alpha_blob via ReadJPEG o close alpha_blob. o copy intensity of secondary image into opacity samples of main image. o destroy the secondary image. 
*/ if (color_image_info == (ImageInfo *) NULL) { assert(color_image == (Image *) NULL); assert(alpha_image == (Image *) NULL); return(DestroyImageList(image)); } if (color_image == (Image *) NULL) { assert(alpha_image == (Image *) NULL); return(DestroyImageList(image)); } (void) SeekBlob(color_image,0,SEEK_SET); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading jng_image from color_blob."); assert(color_image_info != (ImageInfo *) NULL); (void) FormatLocaleString(color_image_info->filename,MaxTextExtent,"%s", color_image->filename); color_image_info->ping=MagickFalse; /* To do: avoid this */ jng_image=ReadImage(color_image_info,exception); (void) RelinquishUniqueFileResource(color_image->filename); unique_filenames--; color_image=DestroyImage(color_image); color_image_info=DestroyImageInfo(color_image_info); if (jng_image == (Image *) NULL) { DestroyJNG(NULL,NULL,NULL,&alpha_image,&alpha_image_info); return(DestroyImageList(image)); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying jng_image pixels to main image."); image->columns=jng_width; image->rows=jng_height; length=image->columns*sizeof(PixelPacket); status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { jng_image=DestroyImageList(jng_image); DestroyJNG(NULL,&color_image,&color_image_info,&alpha_image, &alpha_image_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((image->columns != jng_image->columns) || (image->rows != jng_image->rows)) { jng_image=DestroyImageList(jng_image); DestroyJNG(NULL,&color_image,&color_image_info,&alpha_image, &alpha_image_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } for (y=0; y < (ssize_t) image->rows; y++) { s=GetVirtualPixels(jng_image,0,y,image->columns,1,&image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if ((s == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; (void) memcpy(q,s,length); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } jng_image=DestroyImage(jng_image); if ((image_info->ping == MagickFalse) && (jng_color_type >= 12)) { if (jng_alpha_compression_method == 0) { png_byte data[5]; (void) WriteBlobMSBULong(alpha_image,0x00000000L); PNGType(data,mng_IEND); LogPNGChunk(logging,mng_IEND,0L); (void) WriteBlob(alpha_image,4,data); (void) WriteBlobMSBULong(alpha_image,crc32(0,data,4)); } (void) SeekBlob(alpha_image,0,SEEK_SET); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading opacity from alpha_blob."); (void) FormatLocaleString(alpha_image_info->filename,MaxTextExtent, "%s",alpha_image->filename); jng_image=ReadImage(alpha_image_info,exception); if (jng_image != (Image *) NULL) for (y=0; y < (ssize_t) image->rows; y++) { s=GetVirtualPixels(jng_image,0,y,image->columns,1,&image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if ((s == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; if (image->matte != MagickFalse) for (x=(ssize_t) image->columns; x != 0; x--,q++,s++) SetPixelOpacity(q,QuantumRange-GetPixelRed(s)); else for (x=(ssize_t) image->columns; x != 0; x--,q++,s++) { SetPixelAlpha(q,GetPixelRed(s)); if (GetPixelOpacity(q) != OpaqueOpacity) image->matte=MagickTrue; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } (void) RelinquishUniqueFileResource(alpha_image->filename); unique_filenames--; 
      alpha_image=DestroyImage(alpha_image);
      alpha_image_info=DestroyImageInfo(alpha_image_info);

      if (jng_image != (Image *) NULL)
        jng_image=DestroyImage(jng_image);
    }

  /*
    Read the JNG image.
  */

  if (mng_info->mng_type == 0)
    {
      mng_info->mng_width=jng_width;
      mng_info->mng_height=jng_height;
    }

  if (image->page.width == 0 && image->page.height == 0)
    {
      image->page.width=jng_width;
      image->page.height=jng_height;
    }

  if (image->page.x == 0 && image->page.y == 0)
    {
      image->page.x=mng_info->x_off[mng_info->object_id];
      image->page.y=mng_info->y_off[mng_info->object_id];
    }

  else
    {
      image->page.y=mng_info->y_off[mng_info->object_id];
    }

  mng_info->image_found++;
  status=SetImageProgress(image,LoadImagesTag,2*TellBlob(image),
    2*GetBlobSize(image));

  if (status == MagickFalse)
    return(DestroyImageList(image));

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  exit ReadOneJNGImage(); unique_filenames=%d",unique_filenames);

  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d J N G I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadJNGImage() reads a JPEG Network Graphics (JNG) image file
%  (including the 8-byte signature) and returns it.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  JNG support written by Glenn Randers-Pehrson, glennrp@image...
%
%  The format of the ReadJNGImage method is:
%
%      Image *ReadJNGImage(const ImageInfo *image_info, ExceptionInfo
%         *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static Image *ReadJNGImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    logging,
    status;

  MngInfo
    *mng_info;

  char
    magic_number[MaxTextExtent];

  ssize_t
    count;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);

  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);

  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadJNGImage()");
  image=AcquireImage(image_info);
  mng_info=(MngInfo *) NULL;
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);

  if (status == MagickFalse)
    return(DestroyImageList(image));

  if (LocaleCompare(image_info->magick,"JNG") != 0)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  /*
    Verify JNG signature.
  */
  count=ReadBlob(image,8,(unsigned char *) magic_number);

  if (count < 8 || memcmp(magic_number,"\213JNG\r\n\032\n",8) != 0)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  /*
    Verify that the file size is large enough to contain a JNG datastream.
  */
  if (GetBlobSize(image) < 147)
    ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");

  /*
    Allocate a MngInfo structure.
  */
  mng_info=(MngInfo *) AcquireMagickMemory(sizeof(*mng_info));

  if (mng_info == (MngInfo *) NULL)
    ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");

  /*
    Initialize members of the MngInfo structure.
*/ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOneJNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if (image->columns == 0 || image->rows == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); ThrowReaderException(CorruptImageError,"CorruptImage"); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadJNGImage()"); return(image); } #endif static Image *ReadOneMNGImage(MngInfo* mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { char page_geometry[MaxTextExtent]; Image *image; MagickBooleanType logging; volatile int first_mng_object, object_id, term_chunk_found, skip_to_iend; volatile ssize_t image_count=0; MagickBooleanType status; MagickOffsetType offset; MngBox default_fb, fb, previous_fb; #if defined(MNG_INSERT_LAYERS) PixelPacket mng_background_color; #endif register unsigned char *p; register ssize_t i; size_t count; ssize_t loop_level; volatile short skipping_loop; #if defined(MNG_INSERT_LAYERS) unsigned int mandatory_back=0; #endif volatile unsigned int #ifdef MNG_OBJECT_BUFFERS mng_background_object=0, #endif mng_type=0; /* 0: PNG or JNG; 1: MNG; 2: MNG-LC; 3: MNG-VLC */ size_t default_frame_timeout, frame_timeout, #if defined(MNG_INSERT_LAYERS) image_height, image_width, #endif length; /* These delays are all measured in image ticks_per_second, * not in MNG ticks_per_second */ volatile size_t default_frame_delay, final_delay, final_image_delay, frame_delay, #if defined(MNG_INSERT_LAYERS) insert_layers, #endif mng_iterations=1, simplicity=0, subframe_height=0, subframe_width=0; previous_fb.top=0; previous_fb.bottom=0; previous_fb.left=0; previous_fb.right=0; default_fb.top=0; default_fb.bottom=0; default_fb.left=0; default_fb.right=0; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOneMNGImage()"); image=mng_info->image; if (LocaleCompare(image_info->magick,"MNG") == 0) { char magic_number[MaxTextExtent]; /* Verify MNG signature. */ count=(size_t) ReadBlob(image,8,(unsigned char *) magic_number); if (memcmp(magic_number,"\212MNG\r\n\032\n",8) != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize some nonzero members of the MngInfo structure. */ for (i=0; i < MNG_MAX_OBJECTS; i++) { mng_info->object_clip[i].right=(ssize_t) PNG_UINT_31_MAX; mng_info->object_clip[i].bottom=(ssize_t) PNG_UINT_31_MAX; } mng_info->exists[0]=MagickTrue; } skipping_loop=(-1); first_mng_object=MagickTrue; mng_type=0; #if defined(MNG_INSERT_LAYERS) insert_layers=MagickFalse; /* should be False when converting or mogrifying */ #endif default_frame_delay=0; default_frame_timeout=0; frame_delay=0; final_delay=1; mng_info->ticks_per_second=1UL*image->ticks_per_second; object_id=0; skip_to_iend=MagickFalse; term_chunk_found=MagickFalse; mng_info->framing_mode=1; #if defined(MNG_INSERT_LAYERS) mandatory_back=MagickFalse; #endif #if defined(MNG_INSERT_LAYERS) mng_background_color=image->background_color; #endif default_fb=mng_info->frame; previous_fb=mng_info->frame; do { char type[MaxTextExtent]; if (LocaleCompare(image_info->magick,"MNG") == 0) { unsigned char *chunk; /* Read a new chunk. 
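
          The chunk layout matches PNG: a 4-byte MSB length, a 4-byte
          type, the data, and a 4-byte CRC.  Chunk types this reader does
          not handle are freed and skipped at the bottom of the loop body.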
*/ type[0]='\0'; (void) ConcatenateMagickString(type,"errr",MaxTextExtent); length=(size_t) ReadBlobMSBLong(image); count=(size_t) ReadBlob(image,4,(unsigned char *) type); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading MNG chunk type %c%c%c%c, length: %.20g", type[0],type[1],type[2],type[3],(double) length); if (length > PNG_UINT_31_MAX) { status=MagickFalse; break; } if (count == 0) ThrowReaderException(CorruptImageError,"CorruptImage"); p=NULL; chunk=(unsigned char *) NULL; if (length != 0) { if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); chunk=(unsigned char *) AcquireQuantumMemory(length,sizeof(*chunk)); if (chunk == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) length; i++) { int c; c=ReadBlobByte(image); if (c == EOF) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } chunk[i]=(unsigned char) c; } p=chunk; } (void) ReadBlobMSBLong(image); /* read crc word */ #if !defined(JNG_SUPPORTED) if (memcmp(type,mng_JHDR,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->jhdr_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"JNGCompressNotSupported","`%s'",image->filename); mng_info->jhdr_warning++; } #endif if (memcmp(type,mng_DHDR,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->dhdr_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"DeltaPNGNotSupported","`%s'",image->filename); mng_info->dhdr_warning++; } if (memcmp(type,mng_MEND,4) == 0) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); break; } if (skip_to_iend) { if (memcmp(type,mng_IEND,4) == 0) skip_to_iend=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skip to IEND."); continue; } if (memcmp(type,mng_MHDR,4) == 0) { if (length != 28) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError,"CorruptImage"); } mng_info->mng_width=(unsigned long)mng_get_long(p); mng_info->mng_height=(unsigned long)mng_get_long(&p[4]); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " MNG width: %.20g",(double) mng_info->mng_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " MNG height: %.20g",(double) mng_info->mng_height); } p+=8; mng_info->ticks_per_second=(size_t) mng_get_long(p); if (mng_info->ticks_per_second == 0) default_frame_delay=0; else default_frame_delay=1UL*image->ticks_per_second/ mng_info->ticks_per_second; frame_delay=default_frame_delay; simplicity=0; /* Skip nominal layer count, frame count, and play time */ p+=16; simplicity=(size_t) mng_get_long(p); mng_type=1; /* Full MNG */ if ((simplicity != 0) && ((simplicity | 11) == 11)) mng_type=2; /* LC */ if ((simplicity != 0) && ((simplicity | 9) == 9)) mng_type=3; /* VLC */ #if defined(MNG_INSERT_LAYERS) if (mng_type != 3) insert_layers=MagickTrue; #endif if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. 
*/
                AcquireNextImage(image_info,image);

                if (GetNextImageInList(image) == (Image *) NULL)
                  return(DestroyImageList(image));

                image=SyncNextImageInList(image);
                mng_info->image=image;
              }

            if ((mng_info->mng_width > 65535L) ||
                (mng_info->mng_height > 65535L))
              {
                chunk=(unsigned char *) RelinquishMagickMemory(chunk);
                ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit");
              }

            (void) FormatLocaleString(page_geometry,MaxTextExtent,
              "%.20gx%.20g+0+0",(double) mng_info->mng_width,(double)
              mng_info->mng_height);

            mng_info->frame.left=0;
            mng_info->frame.right=(ssize_t) mng_info->mng_width;
            mng_info->frame.top=0;
            mng_info->frame.bottom=(ssize_t) mng_info->mng_height;
            mng_info->clip=default_fb=previous_fb=mng_info->frame;

            for (i=0; i < MNG_MAX_OBJECTS; i++)
              mng_info->object_clip[i]=mng_info->frame;

            chunk=(unsigned char *) RelinquishMagickMemory(chunk);
            continue;
          }

        if (memcmp(type,mng_TERM,4) == 0)
          {
            int
              repeat=0;

            if (length != 0)
              repeat=p[0];

            if (repeat == 3 && length > 9)
              {
                final_delay=(png_uint_32) mng_get_long(&p[2]);
                mng_iterations=(png_uint_32) mng_get_long(&p[6]);

                if (mng_iterations == PNG_UINT_31_MAX)
                  mng_iterations=0;

                image->iterations=mng_iterations;
                term_chunk_found=MagickTrue;
              }

            if (logging != MagickFalse)
              {
                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  "    repeat=%d, final_delay=%.20g, iterations=%.20g",
                  repeat,(double) final_delay, (double) image->iterations);
              }

            chunk=(unsigned char *) RelinquishMagickMemory(chunk);
            continue;
          }

        if (memcmp(type,mng_DEFI,4) == 0)
          {
            if (mng_type == 3)
              {
                (void) ThrowMagickException(&image->exception,
                  GetMagickModule(),
                  CoderError,"DEFI chunk found in MNG-VLC datastream","`%s'",
                  image->filename);

                chunk=(unsigned char *) RelinquishMagickMemory(chunk);
                continue;
              }

            if (length < 2)
              {
                chunk=(unsigned char *) RelinquishMagickMemory(chunk);
                ThrowReaderException(CorruptImageError,"CorruptImage");
              }

            object_id=((unsigned int) p[0] << 8) | (unsigned int) p[1];

            if (mng_type == 2 && object_id != 0)
              (void) ThrowMagickException(&image->exception,
                GetMagickModule(),
                CoderError,"Nonzero object_id in MNG-LC datastream",
                "`%s'", image->filename);

            if (object_id >= MNG_MAX_OBJECTS)
              {
                /*
                  Instead of using a warning we should allocate a larger
                  MngInfo structure and continue.  Clamp to the last valid
                  index so the per-object arrays are not overrun.
                */
                (void) ThrowMagickException(&image->exception,
                  GetMagickModule(), CoderError,
                  "object id too large","`%s'",image->filename);
                object_id=MNG_MAX_OBJECTS-1;
              }

            if (mng_info->exists[object_id])
              if (mng_info->frozen[object_id])
                {
                  chunk=(unsigned char *) RelinquishMagickMemory(chunk);
                  (void) ThrowMagickException(&image->exception,
                    GetMagickModule(),CoderError,
                    "DEFI cannot redefine a frozen MNG object","`%s'",
                    image->filename);
                  continue;
                }

            mng_info->exists[object_id]=MagickTrue;

            if (length > 2)
              mng_info->invisible[object_id]=p[2];

            /*
              Extract object offset info.
            */
            if (length > 11)
              {
                mng_info->x_off[object_id]=(ssize_t) mng_get_long(&p[4]);
                mng_info->y_off[object_id]=(ssize_t) mng_get_long(&p[8]);

                if (logging != MagickFalse)
                  {
                    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                      "  x_off[%d]: %.20g, y_off[%d]: %.20g",
                      object_id,(double) mng_info->x_off[object_id],
                      object_id,(double) mng_info->y_off[object_id]);
                  }
              }

            /*
              Extract object clipping info.
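
              Bytes 12..27 of the DEFI chunk hold the clip boundaries
              (left, right, top, bottom) as four 4-byte MSB values.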
*/ if (length > 27) mng_info->object_clip[object_id]= mng_read_box(mng_info->frame,0, &p[12]); chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_bKGD,4) == 0) { mng_info->have_global_bkgd=MagickFalse; if (length > 5) { mng_info->mng_global_bkgd.red= ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1])); mng_info->mng_global_bkgd.green= ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3])); mng_info->mng_global_bkgd.blue= ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5])); mng_info->have_global_bkgd=MagickTrue; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_BACK,4) == 0) { #if defined(MNG_INSERT_LAYERS) if (length > 6) mandatory_back=p[6]; else mandatory_back=0; if (mandatory_back && length > 5) { mng_background_color.red= ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1])); mng_background_color.green= ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3])); mng_background_color.blue= ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5])); mng_background_color.opacity=OpaqueOpacity; } #ifdef MNG_OBJECT_BUFFERS if (length > 8) mng_background_object=(p[7] << 8) | p[8]; #endif #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_PLTE,4) == 0) { /* Read global PLTE. */ if (length && (length < 769)) { if (mng_info->global_plte == (png_colorp) NULL) mng_info->global_plte=(png_colorp) AcquireQuantumMemory(256, sizeof(*mng_info->global_plte)); if (mng_info->global_plte == (png_colorp) NULL) { mng_info->global_plte_length=0; chunk=(unsigned char *) RelinquishMagickMemory(chunk); mng_info=MngInfoFreeStruct(mng_info); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (length/3); i++) { mng_info->global_plte[i].red=p[3*i]; mng_info->global_plte[i].green=p[3*i+1]; mng_info->global_plte[i].blue=p[3*i+2]; } mng_info->global_plte_length=(unsigned int) (length/3); } #ifdef MNG_LOOSE for ( ; i < 256; i++) { mng_info->global_plte[i].red=i; mng_info->global_plte[i].green=i; mng_info->global_plte[i].blue=i; } if (length != 0) mng_info->global_plte_length=256; #endif else mng_info->global_plte_length=0; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_tRNS,4) == 0) { /* read global tRNS */ if (length > 0 && length < 257) for (i=0; i < (ssize_t) length; i++) mng_info->global_trns[i]=p[i]; #ifdef MNG_LOOSE for ( ; i < 256; i++) mng_info->global_trns[i]=255; #endif mng_info->global_trns_length=(unsigned int) length; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_gAMA,4) == 0) { if (length == 4) { ssize_t igamma; igamma=mng_get_long(p); mng_info->global_gamma=((float) igamma)*0.00001; mng_info->have_global_gama=MagickTrue; } else mng_info->have_global_gama=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_cHRM,4) == 0) { /* Read global cHRM */ if (length == 32) { mng_info->global_chrm.white_point.x=0.00001*mng_get_long(p); mng_info->global_chrm.white_point.y=0.00001*mng_get_long(&p[4]); mng_info->global_chrm.red_primary.x=0.00001*mng_get_long(&p[8]); mng_info->global_chrm.red_primary.y=0.00001* mng_get_long(&p[12]); mng_info->global_chrm.green_primary.x=0.00001* mng_get_long(&p[16]); mng_info->global_chrm.green_primary.y=0.00001* mng_get_long(&p[20]); mng_info->global_chrm.blue_primary.x=0.00001* mng_get_long(&p[24]); mng_info->global_chrm.blue_primary.y=0.00001* mng_get_long(&p[28]); 
mng_info->have_global_chrm=MagickTrue; } else mng_info->have_global_chrm=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_sRGB,4) == 0) { /* Read global sRGB. */ if (length != 0) { mng_info->global_srgb_intent= Magick_RenderingIntent_from_PNG_RenderingIntent(p[0]); mng_info->have_global_srgb=MagickTrue; } else mng_info->have_global_srgb=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_iCCP,4) == 0) { /* To do: */ /* Read global iCCP. */ chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_FRAM,4) == 0) { if (mng_type == 3) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"FRAM chunk found in MNG-VLC datastream","`%s'", image->filename); if ((mng_info->framing_mode == 2) || (mng_info->framing_mode == 4)) image->delay=frame_delay; frame_delay=default_frame_delay; frame_timeout=default_frame_timeout; fb=default_fb; if (length > 0) if (p[0]) mng_info->framing_mode=p[0]; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_mode=%d",mng_info->framing_mode); if (length > 6) { /* Note the delay and frame clipping boundaries. */ p++; /* framing mode */ while (((p-chunk) < (long) length) && *p) p++; /* frame name */ p++; /* frame name terminator */ if ((p-chunk) < (ssize_t) (length-4)) { int change_delay, change_timeout, change_clipping; change_delay=(*p++); change_timeout=(*p++); change_clipping=(*p++); p++; /* change_sync */ if (change_delay && ((p-chunk) < (ssize_t) (length-4))) { frame_delay=1UL*image->ticks_per_second* mng_get_long(p); if (mng_info->ticks_per_second != 0) frame_delay/=mng_info->ticks_per_second; else frame_delay=PNG_UINT_31_MAX; if (change_delay == 2) default_frame_delay=frame_delay; p+=4; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_delay=%.20g",(double) frame_delay); } if (change_timeout && ((p-chunk) < (ssize_t) (length-4))) { frame_timeout=1UL*image->ticks_per_second* mng_get_long(p); if (mng_info->ticks_per_second != 0) frame_timeout/=mng_info->ticks_per_second; else frame_timeout=PNG_UINT_31_MAX; if (change_timeout == 2) default_frame_timeout=frame_timeout; p+=4; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_timeout=%.20g",(double) frame_timeout); } if (change_clipping && ((p-chunk) < (ssize_t) (length-16))) { fb=mng_read_box(previous_fb,(char) p[0],&p[1]); p+=16; previous_fb=fb; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Frame_clip: L=%.20g R=%.20g T=%.20g B=%.20g", (double) fb.left,(double) fb.right,(double) fb.top, (double) fb.bottom); if (change_clipping == 2) default_fb=fb; } } } mng_info->clip=fb; mng_info->clip=mng_minimum_box(fb,mng_info->frame); subframe_width=(size_t) (mng_info->clip.right -mng_info->clip.left); subframe_height=(size_t) (mng_info->clip.bottom -mng_info->clip.top); /* Insert a background layer behind the frame if framing_mode is 4. */ #if defined(MNG_INSERT_LAYERS) if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " subframe_width=%.20g, subframe_height=%.20g",(double) subframe_width,(double) subframe_height); if (insert_layers && (mng_info->framing_mode == 4) && (subframe_width) && (subframe_height)) { /* Allocate next image structure. 
*/ if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; image->columns=subframe_width; image->rows=subframe_height; image->page.width=subframe_width; image->page.height=subframe_height; image->page.x=mng_info->clip.left; image->page.y=mng_info->clip.top; image->background_color=mng_background_color; image->matte=MagickFalse; image->delay=0; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Insert backgd layer, L=%.20g, R=%.20g T=%.20g, B=%.20g", (double) mng_info->clip.left,(double) mng_info->clip.right, (double) mng_info->clip.top,(double) mng_info->clip.bottom); } #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_CLIP,4) == 0) { unsigned int first_object, last_object; /* Read CLIP. */ if (length > 3) { first_object=(p[0] << 8) | p[1]; last_object=(p[2] << 8) | p[3]; p+=4; for (i=(int) first_object; i <= (int) last_object; i++) { if ((i < 0) || (i >= MNG_MAX_OBJECTS)) continue; if (mng_info->exists[i] && !mng_info->frozen[i]) { MngBox box; box=mng_info->object_clip[i]; if ((p-chunk) < (ssize_t) (length-17)) mng_info->object_clip[i]= mng_read_box(box,(char) p[0],&p[1]); } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_SAVE,4) == 0) { for (i=1; i < MNG_MAX_OBJECTS; i++) if (mng_info->exists[i]) { mng_info->frozen[i]=MagickTrue; #ifdef MNG_OBJECT_BUFFERS if (mng_info->ob[i] != (MngBuffer *) NULL) mng_info->ob[i]->frozen=MagickTrue; #endif } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if ((memcmp(type,mng_DISC,4) == 0) || (memcmp(type,mng_SEEK,4) == 0)) { /* Read DISC or SEEK. */ if ((length == 0) || !memcmp(type,mng_SEEK,4)) { for (i=1; i < MNG_MAX_OBJECTS; i++) MngInfoDiscardObject(mng_info,i); } else { register ssize_t j; for (j=1; j < (ssize_t) length; j+=2) { i=p[j-1] << 8 | p[j]; MngInfoDiscardObject(mng_info,i); } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_MOVE,4) == 0) { size_t first_object, last_object; /* read MOVE */ if (length > 3) { first_object=(p[0] << 8) | p[1]; last_object=(p[2] << 8) | p[3]; p+=4; for (i=(ssize_t) first_object; i <= (ssize_t) last_object; i++) { if ((i < 0) || (i >= MNG_MAX_OBJECTS)) continue; if (mng_info->exists[i] && !mng_info->frozen[i] && (p-chunk) < (ssize_t) (length-8)) { MngPair new_pair; MngPair old_pair; old_pair.a=mng_info->x_off[i]; old_pair.b=mng_info->y_off[i]; new_pair=mng_read_pair(old_pair,(int) p[0],&p[1]); mng_info->x_off[i]=new_pair.a; mng_info->y_off[i]=new_pair.b; } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_LOOP,4) == 0) { ssize_t loop_iters=1; if (length > 4) { loop_level=chunk[0]; mng_info->loop_active[loop_level]=1; /* mark loop active */ /* Record starting point. 
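
                  TellBlob() is now positioned just past this LOOP chunk's
                  CRC; each ENDL with iterations remaining seeks back to
                  this offset.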
*/ loop_iters=mng_get_long(&chunk[1]); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " LOOP level %.20g has %.20g iterations ", (double) loop_level, (double) loop_iters); if (loop_iters <= 0) skipping_loop=loop_level; else { if (loop_iters > GetMagickResourceLimit(ListLengthResource)) loop_iters=GetMagickResourceLimit(ListLengthResource); if (loop_iters >= 2147483647L) loop_iters=2147483647L; mng_info->loop_jump[loop_level]=TellBlob(image); mng_info->loop_count[loop_level]=loop_iters; } mng_info->loop_iteration[loop_level]=0; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_ENDL,4) == 0) { if (length > 0) { loop_level=chunk[0]; if (skipping_loop > 0) { if (skipping_loop == loop_level) { /* Found end of zero-iteration loop. */ skipping_loop=(-1); mng_info->loop_active[loop_level]=0; } } else { if (mng_info->loop_active[loop_level] == 1) { mng_info->loop_count[loop_level]--; mng_info->loop_iteration[loop_level]++; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ENDL: LOOP level %.20g has %.20g remaining iters ", (double) loop_level,(double) mng_info->loop_count[loop_level]); if (mng_info->loop_count[loop_level] != 0) { offset=SeekBlob(image, mng_info->loop_jump[loop_level], SEEK_SET); if (offset < 0) { chunk=(unsigned char *) RelinquishMagickMemory( chunk); ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } } else { short last_level; /* Finished loop. */ mng_info->loop_active[loop_level]=0; last_level=(-1); for (i=0; i < loop_level; i++) if (mng_info->loop_active[i] == 1) last_level=(short) i; loop_level=last_level; } } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_CLON,4) == 0) { if (mng_info->clon_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CLON is not implemented yet","`%s'", image->filename); mng_info->clon_warning++; } if (memcmp(type,mng_MAGN,4) == 0) { png_uint_16 magn_first, magn_last, magn_mb, magn_ml, magn_mr, magn_mt, magn_mx, magn_my, magn_methx, magn_methy; if (length > 1) magn_first=(p[0] << 8) | p[1]; else magn_first=0; if (length > 3) magn_last=(p[2] << 8) | p[3]; else magn_last=magn_first; #ifndef MNG_OBJECT_BUFFERS if (magn_first || magn_last) if (mng_info->magn_warning == 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "MAGN is not implemented yet for nonzero objects", "`%s'",image->filename); mng_info->magn_warning++; } #endif if (length > 4) magn_methx=p[4]; else magn_methx=0; if (length > 6) magn_mx=(p[5] << 8) | p[6]; else magn_mx=1; if (magn_mx == 0) magn_mx=1; if (length > 8) magn_my=(p[7] << 8) | p[8]; else magn_my=magn_mx; if (magn_my == 0) magn_my=1; if (length > 10) magn_ml=(p[9] << 8) | p[10]; else magn_ml=magn_mx; if (magn_ml == 0) magn_ml=1; if (length > 12) magn_mr=(p[11] << 8) | p[12]; else magn_mr=magn_mx; if (magn_mr == 0) magn_mr=1; if (length > 14) magn_mt=(p[13] << 8) | p[14]; else magn_mt=magn_my; if (magn_mt == 0) magn_mt=1; if (length > 16) magn_mb=(p[15] << 8) | p[16]; else magn_mb=magn_my; if (magn_mb == 0) magn_mb=1; if (length > 17) magn_methy=p[17]; else magn_methy=magn_methx; if (magn_methx > 5 || magn_methy > 5) if (mng_info->magn_warning == 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "Unknown MAGN method in MNG datastream","`%s'", image->filename); mng_info->magn_warning++; } #ifdef MNG_OBJECT_BUFFERS /* Magnify existing objects in the range magn_first to 
             magn_last */
#endif

          if (magn_first == 0 || magn_last == 0)
            {
              /*
                 Save the magnification factors for object 0
              */
              mng_info->magn_mb=magn_mb;
              mng_info->magn_ml=magn_ml;
              mng_info->magn_mr=magn_mr;
              mng_info->magn_mt=magn_mt;
              mng_info->magn_mx=magn_mx;
              mng_info->magn_my=magn_my;
              mng_info->magn_methx=magn_methx;
              mng_info->magn_methy=magn_methy;
            }
        }

      if (memcmp(type,mng_PAST,4) == 0)
        {
          if (mng_info->past_warning == 0)
            (void) ThrowMagickException(&image->exception,GetMagickModule(),
              CoderError,"PAST is not implemented yet","`%s'",
              image->filename);

          mng_info->past_warning++;
        }

      if (memcmp(type,mng_SHOW,4) == 0)
        {
          if (mng_info->show_warning == 0)
            (void) ThrowMagickException(&image->exception,GetMagickModule(),
              CoderError,"SHOW is not implemented yet","`%s'",
              image->filename);

          mng_info->show_warning++;
        }

      if (memcmp(type,mng_sBIT,4) == 0)
        {
          if (length < 4)
            mng_info->have_global_sbit=MagickFalse;

          else
            {
              mng_info->global_sbit.gray=p[0];
              mng_info->global_sbit.red=p[0];
              mng_info->global_sbit.green=p[1];
              mng_info->global_sbit.blue=p[2];
              mng_info->global_sbit.alpha=p[3];
              mng_info->have_global_sbit=MagickTrue;
            }
        }

      if (memcmp(type,mng_pHYs,4) == 0)
        {
          if (length > 8)
            {
              mng_info->global_x_pixels_per_unit=
                (size_t) mng_get_long(p);
              mng_info->global_y_pixels_per_unit=
                (size_t) mng_get_long(&p[4]);
              mng_info->global_phys_unit_type=p[8];
              mng_info->have_global_phys=MagickTrue;
            }

          else
            mng_info->have_global_phys=MagickFalse;
        }

      if (memcmp(type,mng_pHYg,4) == 0)
        {
          if (mng_info->phyg_warning == 0)
            (void) ThrowMagickException(&image->exception,GetMagickModule(),
              CoderError,"pHYg is not implemented.","`%s'",image->filename);

          mng_info->phyg_warning++;
        }

      if (memcmp(type,mng_BASI,4) == 0)
        {
          skip_to_iend=MagickTrue;

          if (mng_info->basi_warning == 0)
            (void) ThrowMagickException(&image->exception,GetMagickModule(),
              CoderError,"BASI is not implemented yet","`%s'",
              image->filename);

          mng_info->basi_warning++;

#ifdef MNG_BASI_SUPPORTED
          if (length > 11)
            {
              basi_width=(unsigned long) mng_get_long(p);
              basi_height=(unsigned long) mng_get_long(&p[4]);
              basi_color_type=p[8];
              basi_compression_method=p[9];
              basi_filter_type=p[10];
              basi_interlace_method=p[11];
            }

          if (length > 13)
            basi_red=((png_uint_32) p[12] << 8) | (png_uint_32) p[13];

          else
            basi_red=0;

          if (length > 15)
            basi_green=((png_uint_32) p[14] << 8) | (png_uint_32) p[15];

          else
            basi_green=0;

          if (length > 17)
            basi_blue=((png_uint_32) p[16] << 8) | (png_uint_32) p[17];

          else
            basi_blue=0;

          if (length > 19)
            basi_alpha=((png_uint_32) p[18] << 8) | (png_uint_32) p[19];

          else
            {
              if (basi_sample_depth == 16)
                basi_alpha=65535L;
              else
                basi_alpha=255;
            }

          if (length > 20)
            basi_viewable=p[20];

          else
            basi_viewable=0;

#endif
          chunk=(unsigned char *) RelinquishMagickMemory(chunk);
          continue;
        }

      if (memcmp(type,mng_IHDR,4)
#if defined(JNG_SUPPORTED)
          && memcmp(type,mng_JHDR,4)
#endif
          )
        {
          /* Not an IHDR or JHDR chunk */
          chunk=(unsigned char *) RelinquishMagickMemory(chunk);

          continue;
        }

      /* Process IHDR */
      if (logging != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  Processing %c%c%c%c chunk",type[0],type[1],type[2],type[3]);

      mng_info->exists[object_id]=MagickTrue;
      mng_info->viewable[object_id]=MagickTrue;

      if (mng_info->invisible[object_id])
        {
          if (logging != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              "  Skipping invisible object");

          skip_to_iend=MagickTrue;
          chunk=(unsigned char *) RelinquishMagickMemory(chunk);
          continue;
        }

#if defined(MNG_INSERT_LAYERS)
      if (length < 8)
        {
          chunk=(unsigned char *) RelinquishMagickMemory(chunk);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
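
      /* Both IHDR and JHDR begin with the image width and height, stored
         as 4-byte MSB values at offsets 0 and 4. */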
image_width=(size_t) mng_get_long(p); image_height=(size_t) mng_get_long(&p[4]); #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); /* Insert a transparent background layer behind the entire animation if it is not full screen. */ #if defined(MNG_INSERT_LAYERS) if (insert_layers && mng_type && first_mng_object) { if ((mng_info->clip.left > 0) || (mng_info->clip.top > 0) || (image_width < mng_info->mng_width) || (mng_info->clip.right < (ssize_t) mng_info->mng_width) || (image_height < mng_info->mng_height) || (mng_info->clip.bottom < (ssize_t) mng_info->mng_height)) { if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; /* Make a background rectangle. */ image->delay=0; image->columns=mng_info->mng_width; image->rows=mng_info->mng_height; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=0; image->page.y=0; image->background_color=mng_background_color; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Inserted transparent background layer, W=%.20g, H=%.20g", (double) mng_info->mng_width,(double) mng_info->mng_height); } } /* Insert a background layer behind the upcoming image if framing_mode is 3, and we haven't already inserted one. */ if (insert_layers && (mng_info->framing_mode == 3) && (subframe_width) && (subframe_height) && (simplicity == 0 || (simplicity & 0x08))) { if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; image->delay=0; image->columns=subframe_width; image->rows=subframe_height; image->page.width=subframe_width; image->page.height=subframe_height; image->page.x=mng_info->clip.left; image->page.y=mng_info->clip.top; image->background_color=mng_background_color; image->matte=MagickFalse; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Insert background layer, L=%.20g, R=%.20g T=%.20g, B=%.20g", (double) mng_info->clip.left,(double) mng_info->clip.right, (double) mng_info->clip.top,(double) mng_info->clip.bottom); } #endif /* MNG_INSERT_LAYERS */ first_mng_object=MagickFalse; if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; if (term_chunk_found) { image->start_loop=MagickTrue; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; if (mng_info->framing_mode == 1 || mng_info->framing_mode == 3) { image->delay=frame_delay; frame_delay=default_frame_delay; } else image->delay=0; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=mng_info->x_off[object_id]; image->page.y=mng_info->y_off[object_id]; image->iterations=mng_iterations; /* Seek back to the beginning of the IHDR or JHDR chunk's length field. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Seeking back to beginning of %c%c%c%c chunk",type[0],type[1], type[2],type[3]); offset=SeekBlob(image,-((ssize_t) length+12),SEEK_CUR); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } mng_info->image=image; mng_info->mng_type=mng_type; mng_info->object_id=object_id; if (memcmp(type,mng_IHDR,4) == 0) image=ReadOnePNGImage(mng_info,image_info,exception); #if defined(JNG_SUPPORTED) else image=ReadOneJNGImage(mng_info,image_info,exception); #endif if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); return((Image *) NULL); } if (image->columns == 0 || image->rows == 0) { (void) CloseBlob(image); return(DestroyImageList(image)); } mng_info->image=image; if (mng_type) { MngBox crop_box; if (mng_info->magn_methx || mng_info->magn_methy) { png_uint_32 magnified_height, magnified_width; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Processing MNG MAGN chunk"); if (mng_info->magn_methx == 1) { magnified_width=mng_info->magn_ml; if (image->columns > 1) magnified_width += mng_info->magn_mr; if (image->columns > 2) magnified_width += (png_uint_32) ((image->columns-2)*(mng_info->magn_mx)); } else { magnified_width=(png_uint_32) image->columns; if (image->columns > 1) magnified_width += mng_info->magn_ml-1; if (image->columns > 2) magnified_width += mng_info->magn_mr-1; if (image->columns > 3) magnified_width += (png_uint_32) ((image->columns-3)*(mng_info->magn_mx-1)); } if (mng_info->magn_methy == 1) { magnified_height=mng_info->magn_mt; if (image->rows > 1) magnified_height += mng_info->magn_mb; if (image->rows > 2) magnified_height += (png_uint_32) ((image->rows-2)*(mng_info->magn_my)); } else { magnified_height=(png_uint_32) image->rows; if (image->rows > 1) magnified_height += mng_info->magn_mt-1; if (image->rows > 2) magnified_height += mng_info->magn_mb-1; if (image->rows > 3) magnified_height += (png_uint_32) ((image->rows-3)*(mng_info->magn_my-1)); } if (magnified_height > image->rows || magnified_width > image->columns) { Image *large_image; int yy; ssize_t m, y; register ssize_t x; register PixelPacket *n, *q; PixelPacket *next, *prev; png_uint_16 magn_methx, magn_methy; /* Allocate next image structure. 
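
                    The magnified image becomes the next frame in the
                    list; once its pixels have been interpolated, the
                    original image is deleted from the list and replaced
                    by it.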
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocate magnified image"); AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); large_image=SyncNextImageInList(image); large_image->columns=magnified_width; large_image->rows=magnified_height; magn_methx=mng_info->magn_methx; magn_methy=mng_info->magn_methy; #if (MAGICKCORE_QUANTUM_DEPTH > 16) #define QM unsigned short if (magn_methx != 1 || magn_methy != 1) { /* Scale pixels to unsigned shorts to prevent overflow of intermediate values of interpolations */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { SetPixelRed(q,ScaleQuantumToShort( GetPixelRed(q))); SetPixelGreen(q,ScaleQuantumToShort( GetPixelGreen(q))); SetPixelBlue(q,ScaleQuantumToShort( GetPixelBlue(q))); SetPixelOpacity(q,ScaleQuantumToShort( GetPixelOpacity(q))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #else #define QM Quantum #endif if (image->matte != MagickFalse) (void) SetImageBackgroundColor(large_image); else { large_image->background_color.opacity=OpaqueOpacity; (void) SetImageBackgroundColor(large_image); if (magn_methx == 4) magn_methx=2; if (magn_methx == 5) magn_methx=3; if (magn_methy == 4) magn_methy=2; if (magn_methy == 5) magn_methy=3; } /* magnify the rows into the right side of the large image */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Magnify the rows to %.20g",(double) large_image->rows); m=(ssize_t) mng_info->magn_mt; yy=0; length=(size_t) image->columns; next=(PixelPacket *) AcquireQuantumMemory(length,sizeof(*next)); prev=(PixelPacket *) AcquireQuantumMemory(length,sizeof(*prev)); if ((prev == (PixelPacket *) NULL) || (next == (PixelPacket *) NULL)) { if (prev != (PixelPacket *) NULL) prev=(PixelPacket *) RelinquishMagickMemory(prev); if (next != (PixelPacket *) NULL) next=(PixelPacket *) RelinquishMagickMemory(next); image=DestroyImageList(image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } n=GetAuthenticPixels(image,0,0,image->columns,1,exception); (void) memcpy(next,n,length); for (y=0; y < (ssize_t) image->rows; y++) { if (y == 0) m=(ssize_t) mng_info->magn_mt; else if (magn_methy > 1 && y == (ssize_t) image->rows-2) m=(ssize_t) mng_info->magn_mb; else if (magn_methy <= 1 && y == (ssize_t) image->rows-1) m=(ssize_t) mng_info->magn_mb; else if (magn_methy > 1 && y == (ssize_t) image->rows-1) m=1; else m=(ssize_t) mng_info->magn_my; n=prev; prev=next; next=n; if (y < (ssize_t) image->rows-1) { n=GetAuthenticPixels(image,0,y+1,image->columns,1, exception); (void) memcpy(next,n,length); } for (i=0; i < m; i++, yy++) { register PixelPacket *pixels; assert(yy < (ssize_t) large_image->rows); pixels=prev; n=next; q=GetAuthenticPixels(large_image,0,yy,large_image->columns, 1,exception); if (q == (PixelPacket *) NULL) break; q+=(large_image->columns-image->columns); for (x=(ssize_t) image->columns-1; x >= 0; x--) { /* To do: get color as function of indexes[x] */ /* if (image->storage_class == PseudoClass) { } */ if (magn_methy <= 1) { /* replicate previous */ SetPixelRGBO(q,(pixels)); } else if (magn_methy == 2 || magn_methy == 4) { if (i == 0) { SetPixelRGBO(q,(pixels)); } else { /* Interpolate */ SetPixelRed(q, ((QM) (((ssize_t) (2*i*(GetPixelRed(n) -GetPixelRed(pixels)+m))/ ((ssize_t) (m*2)) 
+GetPixelRed(pixels))))); SetPixelGreen(q, ((QM) (((ssize_t) (2*i*(GetPixelGreen(n) -GetPixelGreen(pixels)+m))/ ((ssize_t) (m*2)) +GetPixelGreen(pixels))))); SetPixelBlue(q, ((QM) (((ssize_t) (2*i*(GetPixelBlue(n) -GetPixelBlue(pixels)+m))/ ((ssize_t) (m*2)) +GetPixelBlue(pixels))))); if (image->matte != MagickFalse) SetPixelOpacity(q, ((QM) (((ssize_t) (2*i*(GetPixelOpacity(n) -GetPixelOpacity(pixels)+m)) /((ssize_t) (m*2))+ GetPixelOpacity(pixels))))); } if (magn_methy == 4) { /* Replicate nearest */ if (i <= ((m+1) << 1)) SetPixelOpacity(q, (*pixels).opacity+0); else SetPixelOpacity(q, (*n).opacity+0); } } else /* if (magn_methy == 3 || magn_methy == 5) */ { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelRGBO(q,(pixels)); } else { SetPixelRGBO(q,(n)); } if (magn_methy == 5) { SetPixelOpacity(q, (QM) (((ssize_t) (2*i* (GetPixelOpacity(n) -GetPixelOpacity(pixels)) +m))/((ssize_t) (m*2)) +GetPixelOpacity(pixels))); } } n++; q++; pixels++; } /* x */ if (SyncAuthenticPixels(large_image,exception) == 0) break; } /* i */ } /* y */ prev=(PixelPacket *) RelinquishMagickMemory(prev); next=(PixelPacket *) RelinquishMagickMemory(next); length=image->columns; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Delete original image"); DeleteImageFromList(&image); image=large_image; mng_info->image=image; /* magnify the columns */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Magnify the columns to %.20g",(double) image->columns); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *pixels; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; pixels=q+(image->columns-length); n=pixels+1; for (x=(ssize_t) (image->columns-length); x < (ssize_t) image->columns; x++) { /* To do: Rewrite using Get/Set***PixelComponent() */ if (x == (ssize_t) (image->columns-length)) m=(ssize_t) mng_info->magn_ml; else if (magn_methx > 1 && x == (ssize_t) image->columns-2) m=(ssize_t) mng_info->magn_mr; else if (magn_methx <= 1 && x == (ssize_t) image->columns-1) m=(ssize_t) mng_info->magn_mr; else if (magn_methx > 1 && x == (ssize_t) image->columns-1) m=1; else m=(ssize_t) mng_info->magn_mx; for (i=0; i < m; i++) { if (magn_methx <= 1) { /* replicate previous */ SetPixelRGBO(q,(pixels)); } else if (magn_methx == 2 || magn_methx == 4) { if (i == 0) { SetPixelRGBO(q,(pixels)); } /* To do: Rewrite using Get/Set***PixelComponent() */ else { /* Interpolate */ SetPixelRed(q, (QM) ((2*i*( GetPixelRed(n) -GetPixelRed(pixels))+m) /((ssize_t) (m*2))+ GetPixelRed(pixels))); SetPixelGreen(q, (QM) ((2*i*( GetPixelGreen(n) -GetPixelGreen(pixels))+m) /((ssize_t) (m*2))+ GetPixelGreen(pixels))); SetPixelBlue(q, (QM) ((2*i*( GetPixelBlue(n) -GetPixelBlue(pixels))+m) /((ssize_t) (m*2))+ GetPixelBlue(pixels))); if (image->matte != MagickFalse) SetPixelOpacity(q, (QM) ((2*i*( GetPixelOpacity(n) -GetPixelOpacity(pixels))+m) /((ssize_t) (m*2))+ GetPixelOpacity(pixels))); } if (magn_methx == 4) { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelOpacity(q, GetPixelOpacity(pixels)+0); } else { SetPixelOpacity(q, GetPixelOpacity(n)+0); } } } else /* if (magn_methx == 3 || magn_methx == 5) */ { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelRGBO(q,(pixels)); } else { SetPixelRGBO(q,(n)); } if (magn_methx == 5) { /* Interpolate */ SetPixelOpacity(q, (QM) ((2*i*( GetPixelOpacity(n) -GetPixelOpacity(pixels))+m)/ ((ssize_t) (m*2)) +GetPixelOpacity(pixels))); } } q++; } n++; } if 
(SyncAuthenticPixels(image,exception) == MagickFalse) break; } #if (MAGICKCORE_QUANTUM_DEPTH > 16) if (magn_methx != 1 || magn_methy != 1) { /* Rescale pixels to Quantum */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { SetPixelRed(q,ScaleShortToQuantum( GetPixelRed(q))); SetPixelGreen(q,ScaleShortToQuantum( GetPixelGreen(q))); SetPixelBlue(q,ScaleShortToQuantum( GetPixelBlue(q))); SetPixelOpacity(q,ScaleShortToQuantum( GetPixelOpacity(q))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished MAGN processing"); } } /* Crop_box is with respect to the upper left corner of the MNG. */ crop_box.left=mng_info->image_box.left+mng_info->x_off[object_id]; crop_box.right=mng_info->image_box.right+mng_info->x_off[object_id]; crop_box.top=mng_info->image_box.top+mng_info->y_off[object_id]; crop_box.bottom=mng_info->image_box.bottom+mng_info->y_off[object_id]; crop_box=mng_minimum_box(crop_box,mng_info->clip); crop_box=mng_minimum_box(crop_box,mng_info->frame); crop_box=mng_minimum_box(crop_box,mng_info->object_clip[object_id]); if ((crop_box.left != (mng_info->image_box.left +mng_info->x_off[object_id])) || (crop_box.right != (mng_info->image_box.right +mng_info->x_off[object_id])) || (crop_box.top != (mng_info->image_box.top +mng_info->y_off[object_id])) || (crop_box.bottom != (mng_info->image_box.bottom +mng_info->y_off[object_id]))) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Crop the PNG image"); if ((crop_box.left < crop_box.right) && (crop_box.top < crop_box.bottom)) { Image *im; RectangleInfo crop_info; /* Crop_info is with respect to the upper left corner of the image. */ crop_info.x=(crop_box.left-mng_info->x_off[object_id]); crop_info.y=(crop_box.top-mng_info->y_off[object_id]); crop_info.width=(size_t) (crop_box.right-crop_box.left); crop_info.height=(size_t) (crop_box.bottom-crop_box.top); image->page.width=image->columns; image->page.height=image->rows; image->page.x=0; image->page.y=0; im=CropImage(image,&crop_info,exception); if (im != (Image *) NULL) { image->columns=im->columns; image->rows=im->rows; im=DestroyImage(im); image->page.width=image->columns; image->page.height=image->rows; image->page.x=crop_box.left; image->page.y=crop_box.top; } } else { /* No pixels in crop area. The MNG spec still requires a layer, though, so make a single transparent pixel in the top left corner. */ image->columns=1; image->rows=1; image->colors=2; (void) SetImageBackgroundColor(image); image->page.width=1; image->page.height=1; image->page.x=0; image->page.y=0; } } #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED image=mng_info->image; #endif } #if (MAGICKCORE_QUANTUM_DEPTH > 16) /* PNG does not handle depths greater than 16 so reduce it even * if lossy, and promote any depths > 8 to 16. 
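 *
 * A sketch of the normalization below (mirroring the code, not adding
 * behaviour):
 *
 *     if (image->depth > 16) image->depth=16;   clamp; PNG tops out at 16
 *     if (image->depth >  8) image->depth=16;   promote 9..15 to 16
 *     if (LosslessReduceDepthOK(image) != MagickFalse) image->depth=8;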
*/ if (image->depth > 16) image->depth=16; #endif #if (MAGICKCORE_QUANTUM_DEPTH > 8) if (image->depth > 8) { /* To do: fill low byte properly */ image->depth=16; } if (LosslessReduceDepthOK(image) != MagickFalse) image->depth = 8; #endif GetImageException(image,exception); if (image_info->number_scenes != 0) { if (mng_info->scenes_found > (ssize_t) (image_info->first_scene+image_info->number_scenes)) break; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished reading image datastream."); } while (LocaleCompare(image_info->magick,"MNG") == 0); (void) CloseBlob(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished reading all image datastreams."); #if defined(MNG_INSERT_LAYERS) if (insert_layers && !mng_info->image_found && (mng_info->mng_width) && (mng_info->mng_height)) { /* Insert a background layer if nothing else was found. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No images found. Inserting a background layer."); if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocation failed, returning NULL."); return(DestroyImageList(image)); } image=SyncNextImageInList(image); } image->columns=mng_info->mng_width; image->rows=mng_info->mng_height; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=0; image->page.y=0; image->background_color=mng_background_color; image->matte=MagickFalse; if (image_info->ping == MagickFalse) (void) SetImageBackgroundColor(image); mng_info->image_found++; } #endif image->iterations=mng_iterations; if (mng_iterations == 1) image->start_loop=MagickTrue; while (GetPreviousImageInList(image) != (Image *) NULL) { image_count++; if (image_count > 10*mng_info->image_found) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," No beginning"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"Linked list is corrupted, beginning of list not found", "`%s'",image_info->filename); return(DestroyImageList(image)); } image=GetPreviousImageInList(image); if (GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," Corrupt list"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"Linked list is corrupted; next_image is NULL","`%s'", image_info->filename); } } if (mng_info->ticks_per_second && mng_info->image_found > 1 && GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " First image null"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"image->next for first image is NULL but shouldn't be.", "`%s'",image_info->filename); } if (mng_info->image_found == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No visible images found."); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"No visible images in file","`%s'",image_info->filename); return(DestroyImageList(image)); } if (mng_info->ticks_per_second) final_delay=1UL*MagickMax(image->ticks_per_second,1L)* final_delay/mng_info->ticks_per_second; else image->start_loop=MagickTrue; /* Find final nonzero image delay */ 
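/*
  A condensed sketch of the scan below (hedged; "p" is illustrative):
  remember the last nonzero per-frame delay in the list, then make sure
  final_delay is at least that large before assigning it to the tail.

      for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
        if (p->delay != 0)
          final_image_delay=p->delay;
*/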
final_image_delay=0;

  while (GetNextImageInList(image) != (Image *) NULL)
    {
      if (image->delay)
        final_image_delay=image->delay;

      image=GetNextImageInList(image);
    }

  if (final_delay < final_image_delay)
    final_delay=final_image_delay;

  image->delay=final_delay;

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image->delay=%.20g, final_delay=%.20g",(double) image->delay,
      (double) final_delay);

  if (logging != MagickFalse)
    {
      int
        scene;

      scene=0;
      image=GetFirstImageInList(image);

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " Before coalesce:");

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " scene 0 delay=%.20g",(double) image->delay);

      while (GetNextImageInList(image) != (Image *) NULL)
        {
          image=GetNextImageInList(image);

          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " scene %.20g delay=%.20g",(double) scene++,
            (double) image->delay);
        }
    }

  image=GetFirstImageInList(image);

#ifdef MNG_COALESCE_LAYERS
  if (insert_layers)
    {
      Image
        *next_image,
        *next;

      size_t
        scene;

      if (logging != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule()," Coalesce Images");

      scene=image->scene;
      next_image=CoalesceImages(image,&image->exception);

      if (next_image == (Image *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");

      image=DestroyImageList(image);
      image=next_image;

      for (next=image; next != (Image *) NULL; next=next_image)
        {
          next->page.width=mng_info->mng_width;
          next->page.height=mng_info->mng_height;
          next->page.x=0;
          next->page.y=0;
          next->scene=scene++;
          next_image=GetNextImageInList(next);

          if (next_image == (Image *) NULL)
            break;

          if (next->delay == 0)
            {
              scene--;
              next_image->previous=GetPreviousImageInList(next);

              if (GetPreviousImageInList(next) == (Image *) NULL)
                image=next_image;

              else
                next->previous->next=next_image;

              next=DestroyImage(next);
            }
        }
    }
#endif

  while (GetNextImageInList(image) != (Image *) NULL)
    image=GetNextImageInList(image);

  image->dispose=BackgroundDispose;

  if (logging != MagickFalse)
    {
      int
        scene;

      scene=0;
      image=GetFirstImageInList(image);

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " After coalesce:");

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " scene 0 delay=%.20g dispose=%.20g",(double) image->delay,
        (double) image->dispose);

      while (GetNextImageInList(image) != (Image *) NULL)
        {
          image=GetNextImageInList(image);

          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " scene %.20g delay=%.20g dispose=%.20g",(double) scene++,
            (double) image->delay,(double) image->dispose);
        }
    }

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " exit ReadOneMNGImage();");

  return(image);
}

static Image *ReadMNGImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    logging,
    status;

  MngInfo
    *mng_info;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
    image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadMNGImage()");
  image=AcquireImage(image_info);
  mng_info=(MngInfo *) NULL;
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);

  if (status == MagickFalse)
    return((Image *) NULL);

  /*
    Allocate a MngInfo structure.
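    The allocate/check/zero pattern used below is the usual one for
    private coder state (sketch mirroring the code that follows; "info"
    is an illustrative name):

        info=(MngInfo *) AcquireMagickMemory(sizeof(*info));
        if (info == (MngInfo *) NULL)
          ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
        (void) memset(info,0,sizeof(*info));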
*/ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOneMNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadMNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadMNGImage()"); return(GetFirstImageInList(image)); } #else /* PNG_LIBPNG_VER > 10011 */ static Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { printf("Your PNG library is too old: You have libpng-%s\n", PNG_LIBPNG_VER_STRING); (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "PNG library is too old","`%s'",image_info->filename); return(Image *) NULL; } static Image *ReadMNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { return(ReadPNGImage(image_info,exception)); } #endif /* PNG_LIBPNG_VER > 10011 */ #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPNGImage() adds properties for the PNG image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPNGImage method is: % % size_t RegisterPNGImage(void) % */ ModuleExport size_t RegisterPNGImage(void) { char version[MaxTextExtent]; MagickInfo *entry; static const char *PNGNote= { "See http://www.libpng.org/ for details about the PNG format." }, *JNGNote= { "See http://www.libpng.org/pub/mng/ for details about the JNG\n" "format." }, *MNGNote= { "See http://www.libpng.org/pub/mng/ for details about the MNG\n" "format." 
}; *version='\0'; #if defined(PNG_LIBPNG_VER_STRING) (void) ConcatenateMagickString(version,"libpng ",MaxTextExtent); (void) ConcatenateMagickString(version,PNG_LIBPNG_VER_STRING,MaxTextExtent); if (LocaleCompare(PNG_LIBPNG_VER_STRING,png_get_header_ver(NULL)) != 0) { (void) ConcatenateMagickString(version,",",MaxTextExtent); (void) ConcatenateMagickString(version,png_get_libpng_ver(NULL), MaxTextExtent); } #endif entry=SetMagickInfo("MNG"); entry->seekable_stream=MagickTrue; #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadMNGImage; entry->encoder=(EncodeImageHandler *) WriteMNGImage; #endif entry->magick=(IsImageFormatHandler *) IsMNG; entry->description=ConstantString("Multiple-image Network Graphics"); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("video/x-mng"); entry->module=ConstantString("PNG"); entry->note=ConstantString(MNGNote); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("Portable Network Graphics"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); if (*version != '\0') entry->version=ConstantString(version); entry->note=ConstantString(PNGNote); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG8"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString( "8-bit indexed with optional binary transparency"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG24"); *version='\0'; #if defined(ZLIB_VERSION) (void) ConcatenateMagickString(version,"zlib ",MaxTextExtent); (void) ConcatenateMagickString(version,ZLIB_VERSION,MaxTextExtent); if (LocaleCompare(ZLIB_VERSION,zlib_version) != 0) { (void) ConcatenateMagickString(version,",",MaxTextExtent); (void) ConcatenateMagickString(version,zlib_version,MaxTextExtent); } #endif if (*version != '\0') entry->version=ConstantString(version); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or binary transparent 24-bit RGB"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG32"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or transparent 32-bit RGBA"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG48"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler 
*) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or binary transparent 48-bit RGB"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG64"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or transparent 64-bit RGBA"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG00"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString( "PNG inheriting bit-depth, color-type from original if possible"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JNG"); #if defined(JNG_SUPPORTED) #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJNGImage; entry->encoder=(EncodeImageHandler *) WriteJNGImage; #endif #endif entry->magick=(IsImageFormatHandler *) IsJNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("JPEG Network Graphics"); entry->mime_type=ConstantString("image/x-jng"); entry->module=ConstantString("PNG"); entry->note=ConstantString(JNGNote); (void) RegisterMagickInfo(entry); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE ping_semaphore=AllocateSemaphoreInfo(); #endif return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPNGImage() removes format registrations made by the % PNG module from the list of supported formats. % % The format of the UnregisterPNGImage method is: % % UnregisterPNGImage(void) % */ ModuleExport void UnregisterPNGImage(void) { (void) UnregisterMagickInfo("MNG"); (void) UnregisterMagickInfo("PNG"); (void) UnregisterMagickInfo("PNG8"); (void) UnregisterMagickInfo("PNG24"); (void) UnregisterMagickInfo("PNG32"); (void) UnregisterMagickInfo("PNG48"); (void) UnregisterMagickInfo("PNG64"); (void) UnregisterMagickInfo("PNG00"); (void) UnregisterMagickInfo("JNG"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE if (ping_semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&ping_semaphore); #endif } #if defined(MAGICKCORE_PNG_DELEGATE) #if PNG_LIBPNG_VER > 10011 /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e M N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteMNGImage() writes an image in the Portable Network Graphics % Group's "Multiple-image Network Graphics" encoded image format. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... 
% % The format of the WriteMNGImage method is: % % MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % % To do (as of version 5.5.2, November 26, 2002 -- glennrp -- see also % "To do" under ReadPNGImage): % % Preserve all unknown and not-yet-handled known chunks found in input % PNG file and copy them into output PNG files according to the PNG % copying rules. % % Write the iCCP chunk at MNG level when (icc profile length > 0) % % Improve selection of color type (use indexed-colour or indexed-colour % with tRNS when 256 or fewer unique RGBA values are present). % % Figure out what to do with "dispose=<restore-to-previous>" (dispose == 3) % This will be complicated if we limit ourselves to generating MNG-LC % files. For now we ignore disposal method 3 and simply overlay the next % image on it. % % Check for identical PLTE's or PLTE/tRNS combinations and use a % global MNG PLTE or PLTE/tRNS combination when appropriate. % [mostly done 15 June 1999 but still need to take care of tRNS] % % Check for identical sRGB and replace with a global sRGB (and remove % gAMA/cHRM if sRGB is found; check for identical gAMA/cHRM and % replace with global gAMA/cHRM (or with sRGB if appropriate; replace % local gAMA/cHRM with local sRGB if appropriate). % % Check for identical sBIT chunks and write global ones. % % Provide option to skip writing the signature tEXt chunks. % % Use signatures to detect identical objects and reuse the first % instance of such objects instead of writing duplicate objects. % % Use a smaller-than-32k value of compression window size when % appropriate. % % Encode JNG datastreams. Mostly done as of 5.5.2; need to write % ancillary text chunks and save profiles. % % Provide an option to force LC files (to ensure exact framing rate) % instead of VLC. % % Provide an option to force VLC files instead of LC, even when offsets % are present. This will involve expanding the embedded images with a % transparent region at the top and/or left. 
*/ static void Magick_png_write_raw_profile(const ImageInfo *image_info,png_struct *ping, png_info *ping_info, unsigned char *profile_type, unsigned char *profile_description, unsigned char *profile_data, png_uint_32 length) { png_textp text; register ssize_t i; unsigned char *sp; png_charp dp; png_uint_32 allocated_length, description_length; unsigned char hex[16]={'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; if (LocaleNCompare((char *) profile_type+1, "ng-chunk-",9) == 0) return; if (image_info->verbose) { (void) printf("writing raw profile: type=%s, length=%.20g\n", (char *) profile_type, (double) length); } #if PNG_LIBPNG_VER >= 10400 text=(png_textp) png_malloc(ping,(png_alloc_size_t) sizeof(png_text)); #else text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text)); #endif description_length=(png_uint_32) strlen((const char *) profile_description); allocated_length=(png_uint_32) (length*2 + (length >> 5) + 20 + description_length); #if PNG_LIBPNG_VER >= 10400 text[0].text=(png_charp) png_malloc(ping, (png_alloc_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_alloc_size_t) 80); #else text[0].text=(png_charp) png_malloc(ping, (png_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_size_t) 80); #endif text[0].key[0]='\0'; (void) ConcatenateMagickString(text[0].key, "Raw profile type ",MaxTextExtent); (void) ConcatenateMagickString(text[0].key,(const char *) profile_type,62); sp=profile_data; dp=text[0].text; *dp++='\n'; (void) CopyMagickString(dp,(const char *) profile_description, allocated_length); dp+=description_length; *dp++='\n'; (void) FormatLocaleString(dp,allocated_length- (png_size_t) (dp-text[0].text),"%8lu ",(unsigned long) length); dp+=8; for (i=0; i < (ssize_t) length; i++) { if (i%36 == 0) *dp++='\n'; *(dp++)=(char) hex[((*sp >> 4) & 0x0f)]; *(dp++)=(char) hex[((*sp++ ) & 0x0f)]; } *dp++='\n'; *dp='\0'; text[0].text_length=(png_size_t) (dp-text[0].text); text[0].compression=image_info->compression == NoCompression || (image_info->compression == UndefinedCompression && text[0].text_length < 128) ? 
-1 : 0; if (text[0].text_length <= allocated_length) png_set_text(ping,ping_info,text,1); png_free(ping,text[0].text); png_free(ping,text[0].key); png_free(ping,text); } #if defined(PNG_tIME_SUPPORTED) static void write_tIME_chunk(Image *image,png_struct *ping,png_info *info, const char *date) { const char *timestamp; int ret; int day, hour, minute, month, second, year; int addhours=0, addminutes=0; png_time ptime; if (date == (const char *) NULL) timestamp=GetImageProperty(image,"date:modify"); else timestamp=date; if (timestamp == (const char *) NULL) return; LogMagickEvent(CoderEvent,GetMagickModule(), " Writing tIME chunk: timestamp property is %30s\n",timestamp); ret=sscanf(timestamp,"%d-%d-%dT%d:%d:%d",&year,&month,&day,&hour, &minute, &second); addhours=0; addminutes=0; ret=sscanf(timestamp,"%d-%d-%dT%d:%d:%d%d:%d",&year,&month,&day,&hour, &minute, &second, &addhours, &addminutes); LogMagickEvent(CoderEvent,GetMagickModule(), " Date format specified for png:tIME=%s" ,timestamp); LogMagickEvent(CoderEvent,GetMagickModule(), " ret=%d,y=%d, m=%d, d=%d, h=%d, m=%d, s=%d, ah=%d, as=%d", ret,year,month,day,hour,minute,second,addhours,addminutes); if (ret < 6) { LogMagickEvent(CoderEvent,GetMagickModule(), " Invalid date, ret=%d",ret); (void) ThrowMagickException(&image->exception,GetMagickModule(),CoderError, "Invalid date format specified for png:tIME","`%s' (ret=%d)", image->filename,ret); return; } if (addhours < 0) { addhours+=24; addminutes=-addminutes; day--; } hour+=addhours; minute+=addminutes; if (day == 0) { month--; day=31; if(month == 2) day=28; else { if(month == 4 || month == 6 || month == 9 || month == 11) day=30; else day=31; } } if (month == 0) { month++; year--; } if (minute > 59) { hour++; minute-=60; } if (hour > 23) { day ++; hour -=24; } if (hour < 0) { day --; hour +=24; } /* To do: fix this for leap years */ if (day > 31 || (month == 2 && day > 28) || ((month == 4 || month == 6 || month == 9 || month == 11) && day > 30)) { month++; day = 1; } if (month > 12) { year++; month=1; } ptime.year = year; ptime.month = month; ptime.day = day; ptime.hour = hour; ptime.minute = minute; ptime.second = second; LogMagickEvent(CoderEvent,GetMagickModule(), " png_set_tIME: y=%d, m=%d, d=%d, h=%d, m=%d, s=%d, ah=%d, am=%d", ptime.year, ptime.month, ptime.day, ptime.hour, ptime.minute, ptime.second, addhours, addminutes); png_set_tIME(ping,info,&ptime); } #endif /* Write one PNG image */ static MagickBooleanType WriteOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info,Image *image) { char s[2]; char im_vers[32], libpng_runv[32], libpng_vers[32], zlib_runv[32], zlib_vers[32]; const char *name, *property, *value; const StringInfo *profile; int num_passes, pass, ping_wrote_caNv; png_byte ping_trans_alpha[256]; png_color palette[257]; png_color_16 ping_background, ping_trans_color; png_info *ping_info; png_struct *ping; png_uint_32 ping_height, ping_width; ssize_t y; MagickBooleanType image_matte, logging, matte, ping_have_blob, ping_have_cheap_transparency, ping_have_color, ping_have_non_bw, ping_have_PLTE, ping_have_bKGD, ping_have_eXIf, ping_have_iCCP, ping_have_pHYs, ping_have_sRGB, ping_have_tRNS, ping_exclude_bKGD, ping_exclude_cHRM, ping_exclude_date, /* ping_exclude_EXIF, */ ping_exclude_eXIf, ping_exclude_gAMA, ping_exclude_iCCP, /* ping_exclude_iTXt, */ ping_exclude_oFFs, ping_exclude_pHYs, ping_exclude_sRGB, ping_exclude_tEXt, ping_exclude_tIME, /* ping_exclude_tRNS, */ ping_exclude_caNv, ping_exclude_zCCP, /* hex-encoded iCCP */ ping_exclude_zTXt, ping_preserve_colormap, ping_preserve_iCCP, 
ping_need_colortype_warning, status, tried_332, tried_333, tried_444; MemoryInfo *volatile pixel_info; QuantumInfo *quantum_info; register ssize_t i, x; unsigned char *ping_pixels; volatile int image_colors, ping_bit_depth, ping_color_type, ping_interlace_method, ping_compression_method, ping_filter_method, ping_num_trans; volatile size_t image_depth, old_bit_depth; size_t quality, rowbytes, save_image_depth; int j, number_colors, number_opaque, number_semitransparent, number_transparent, ping_pHYs_unit_type; png_uint_32 ping_pHYs_x_resolution, ping_pHYs_y_resolution; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter WriteOnePNGImage()"); /* Define these outside of the following "if logging()" block so they will * show in debuggers. */ *im_vers='\0'; (void) ConcatenateMagickString(im_vers, MagickLibVersionText,MaxTextExtent); (void) ConcatenateMagickString(im_vers, MagickLibAddendum,MaxTextExtent); *libpng_vers='\0'; (void) ConcatenateMagickString(libpng_vers, PNG_LIBPNG_VER_STRING,32); *libpng_runv='\0'; (void) ConcatenateMagickString(libpng_runv, png_get_libpng_ver(NULL),32); *zlib_vers='\0'; (void) ConcatenateMagickString(zlib_vers, ZLIB_VERSION,32); *zlib_runv='\0'; (void) ConcatenateMagickString(zlib_runv, zlib_version,32); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " IM version = %s", im_vers); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Libpng version = %s", libpng_vers); if (LocaleCompare(libpng_vers,libpng_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " running with %s", libpng_runv); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Zlib version = %s", zlib_vers); if (LocaleCompare(zlib_vers,zlib_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " running with %s", zlib_runv); } } /* Initialize some stuff */ ping_bit_depth=0, ping_color_type=0, ping_interlace_method=0, ping_compression_method=0, ping_filter_method=0, ping_num_trans = 0; ping_background.red = 0; ping_background.green = 0; ping_background.blue = 0; ping_background.gray = 0; ping_background.index = 0; ping_trans_color.red=0; ping_trans_color.green=0; ping_trans_color.blue=0; ping_trans_color.gray=0; ping_pHYs_unit_type = 0; ping_pHYs_x_resolution = 0; ping_pHYs_y_resolution = 0; ping_have_blob=MagickFalse; ping_have_cheap_transparency=MagickFalse; ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; ping_have_PLTE=MagickFalse; ping_have_bKGD=MagickFalse; ping_have_eXIf=MagickTrue; ping_have_iCCP=MagickFalse; ping_have_pHYs=MagickFalse; ping_have_sRGB=MagickFalse; ping_have_tRNS=MagickFalse; ping_exclude_bKGD=mng_info->ping_exclude_bKGD; ping_exclude_caNv=mng_info->ping_exclude_caNv; ping_exclude_cHRM=mng_info->ping_exclude_cHRM; ping_exclude_date=mng_info->ping_exclude_date; /* ping_exclude_EXIF=mng_info->ping_exclude_EXIF; */ ping_exclude_eXIf=mng_info->ping_exclude_eXIf; ping_exclude_gAMA=mng_info->ping_exclude_gAMA; ping_exclude_iCCP=mng_info->ping_exclude_iCCP; /* ping_exclude_iTXt=mng_info->ping_exclude_iTXt; */ ping_exclude_oFFs=mng_info->ping_exclude_oFFs; ping_exclude_pHYs=mng_info->ping_exclude_pHYs; ping_exclude_sRGB=mng_info->ping_exclude_sRGB; ping_exclude_tEXt=mng_info->ping_exclude_tEXt; ping_exclude_tIME=mng_info->ping_exclude_tIME; /* ping_exclude_tRNS=mng_info->ping_exclude_tRNS; */ ping_exclude_zCCP=mng_info->ping_exclude_zCCP; /* hex-encoded iCCP in zTXt */ ping_exclude_zTXt=mng_info->ping_exclude_zTXt; ping_preserve_colormap = mng_info->ping_preserve_colormap; ping_preserve_iCCP = 
mng_info->ping_preserve_iCCP; ping_need_colortype_warning = MagickFalse; property=(const char *) NULL; /* Recognize the ICC sRGB profile and convert it to the sRGB chunk, * i.e., eliminate the ICC profile and set image->rendering_intent. * Note that this will not involve any changes to the actual pixels * but merely passes information to applications that read the resulting * PNG image. * * To do: recognize other variants of the sRGB profile, using the CRC to * verify all recognized variants including the 7 already known. * * Work around libpng16+ rejecting some "known invalid sRGB profiles". * * Use something other than image->rendering_intent to record the fact * that the sRGB profile was found. * * Record the ICC version (currently v2 or v4) of the incoming sRGB ICC * profile. Record the Blackpoint Compensation, if any. */ if (ping_exclude_sRGB == MagickFalse && ping_preserve_iCCP == MagickFalse) { char *name; const StringInfo *profile; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0)) { int icheck, got_crc=0; png_uint_32 length, profile_crc=0; unsigned char *data; length=(png_uint_32) GetStringInfoLength(profile); for (icheck=0; sRGB_info[icheck].len > 0; icheck++) { if (length == sRGB_info[icheck].len) { if (got_crc == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile (potentially sRGB)", (unsigned long) length); data=GetStringInfoDatum(profile); profile_crc=crc32(0,data,length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " with crc=%8x",(unsigned int) profile_crc); got_crc++; } if (profile_crc == sRGB_info[icheck].crc) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " It is sRGB with rendering intent = %s", Magick_RenderingIntentString_from_PNG_RenderingIntent( sRGB_info[icheck].intent)); if (image->rendering_intent==UndefinedIntent) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent( sRGB_info[icheck].intent); } ping_exclude_iCCP = MagickTrue; ping_exclude_zCCP = MagickTrue; ping_have_sRGB = MagickTrue; break; } } } if (sRGB_info[icheck].len == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile not recognized as sRGB", (unsigned long) length); } } name=GetNextImageProfile(image); } } number_opaque = 0; number_semitransparent = 0; number_transparent = 0; if (logging != MagickFalse) { if (image->storage_class == UndefinedClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=UndefinedClass"); if (image->storage_class == DirectClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=DirectClass"); if (image->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=PseudoClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_info->magick= %s",image_info->magick); (void) LogMagickEvent(CoderEvent,GetMagickModule(), image->taint ? 
" image->taint=MagickTrue": " image->taint=MagickFalse"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->gamma=%g", image->gamma); } if (image->storage_class == PseudoClass && (mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (mng_info->write_png_colortype != 1 && mng_info->write_png_colortype != 5))) { (void) SyncImage(image); image->storage_class = DirectClass; } if (ping_preserve_colormap == MagickFalse) { if (image->storage_class != PseudoClass && image->colormap != NULL) { /* Free the bogus colormap; it can cause trouble later */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Freeing bogus colormap"); (void) RelinquishMagickMemory(image->colormap); image->colormap=NULL; } } if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace); /* Sometimes we get PseudoClass images whose RGB values don't match the colors in the colormap. This code syncs the RGB values. */ if (image->depth <= 8 && image->taint && image->storage_class == PseudoClass) (void) SyncImage(image); #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (image->depth > 8) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reducing PNG bit depth to 8 since this is a Q8 build."); image->depth=8; } #endif /* Respect the -depth option */ if (image->depth < 4) { register PixelPacket *r; ExceptionInfo *exception; exception=(&image->exception); if (image->depth > 2) { /* Scale to 4-bit */ LBR04PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR04PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR04PacketRGBO(image->colormap[i]); } } } else if (image->depth > 1) { /* Scale to 2-bit */ LBR02PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR02PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR02PacketRGBO(image->colormap[i]); } } } else { /* Scale to 1-bit */ LBR01PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR01PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR01PacketRGBO(image->colormap[i]); } } } } /* To do: set to next higher multiple of 8 */ if (image->depth < 8) image->depth=8; #if (MAGICKCORE_QUANTUM_DEPTH > 16) /* PNG does not handle depths greater than 16 so reduce it even * if lossy */ if (image->depth > 8) image->depth=16; #endif #if (MAGICKCORE_QUANTUM_DEPTH > 8) if (image->depth > 8) { /* To do: fill low byte properly */ image->depth=16; } if (image->depth == 16 && mng_info->write_png_depth != 16) if (mng_info->write_png8 || 
LosslessReduceDepthOK(image) != MagickFalse) image->depth = 8; #endif image_colors = (int) image->colors; if (mng_info->write_png_colortype && (mng_info->write_png_colortype > 4 || (mng_info->write_png_depth >= 8 && mng_info->write_png_colortype < 4 && image->matte == MagickFalse))) { /* Avoid the expensive BUILD_PALETTE operation if we're sure that we * are not going to need the result. */ number_opaque = (int) image->colors; if (mng_info->write_png_colortype == 1 || mng_info->write_png_colortype == 5) ping_have_color=MagickFalse; else ping_have_color=MagickTrue; ping_have_non_bw=MagickFalse; if (image->matte != MagickFalse) { number_transparent = 2; number_semitransparent = 1; } else { number_transparent = 0; number_semitransparent = 0; } } if (mng_info->write_png_colortype < 7) { /* BUILD_PALETTE * * Normally we run this just once, but in the case of writing PNG8 * we reduce the transparency to binary and run again, then if there * are still too many colors we reduce to a simple 4-4-4-1, then 3-3-3-1 * RGBA palette and run again, and then to a simple 3-3-2-1 RGBA * palette. Then (To do) we take care of a final reduction that is only * needed if there are still 256 colors present and one of them has both * transparent and opaque instances. */ tried_332 = MagickFalse; tried_333 = MagickFalse; tried_444 = MagickFalse; for (j=0; j<6; j++) { /* * Sometimes we get DirectClass images that have 256 colors or fewer. * This code will build a colormap. * * Also, sometimes we get PseudoClass images with an out-of-date * colormap. This code will replace the colormap with a new one. * Sometimes we get PseudoClass images that have more than 256 colors. * This code will delete the colormap and change the image to * DirectClass. * * If image->matte is MagickFalse, we ignore the opacity channel * even though it sometimes contains left-over non-opaque values. * * Also we gather some information (number of opaque, transparent, * and semitransparent pixels, and whether the image has any non-gray * pixels or only black-and-white pixels) that we might need later. * * Even if the user wants to force GrayAlpha or RGBA (colortype 4 or 6) * we need to check for bogus non-opaque values, at least. 
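 *
 * A condensed sketch of the per-pixel classification below (hedged):
 *
 *     if (image->matte == MagickFalse ||
 *         GetPixelOpacity(q) == OpaqueOpacity)          tally as opaque
 *     else if (GetPixelOpacity(q) == TransparentOpacity)
 *                                                       tally as transparent
 *     else                                              tally as semitransparent
 *
 * Each tally keeps at most 259 distinct entries, which is enough to
 * prove that more than 256 colors are present without storing them all.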
*/ ExceptionInfo *exception; int n; PixelPacket opaque[260], semitransparent[260], transparent[260]; register IndexPacket *indexes; register const PixelPacket *s, *q; register PixelPacket *r; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Enter BUILD_PALETTE:"); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->columns=%.20g",(double) image->columns); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->rows=%.20g",(double) image->rows); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->matte=%.20g",(double) image->matte); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth=%.20g",(double) image->depth); if (image->storage_class == PseudoClass && image->colormap != NULL) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Original colormap:"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " i (red,green,blue,opacity)"); for (i=0; i < 256; i++) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } for (i=image->colors - 10; i < (ssize_t) image->colors; i++) { if (i > 255) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } } } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d",(int) image->colors); if (image->colors == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " (zero means unknown)"); if (ping_preserve_colormap == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Regenerate the colormap"); } exception=(&image->exception); image_colors=0; number_opaque = 0; number_semitransparent = 0; number_transparent = 0; for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (image->matte == MagickFalse || GetPixelOpacity(q) == OpaqueOpacity) { if (number_opaque < 259) { if (number_opaque == 0) { GetPixelRGB(q, opaque); opaque[0].opacity=OpaqueOpacity; number_opaque=1; } for (i=0; i< (ssize_t) number_opaque; i++) { if (IsColorEqual(q, opaque+i)) break; } if (i == (ssize_t) number_opaque && number_opaque < 259) { number_opaque++; GetPixelRGB(q, opaque+i); opaque[i].opacity=OpaqueOpacity; } } } else if (q->opacity == TransparentOpacity) { if (number_transparent < 259) { if (number_transparent == 0) { GetPixelRGBO(q, transparent); ping_trans_color.red= (unsigned short) GetPixelRed(q); ping_trans_color.green= (unsigned short) GetPixelGreen(q); ping_trans_color.blue= (unsigned short) GetPixelBlue(q); ping_trans_color.gray= (unsigned short) GetPixelRed(q); number_transparent = 1; } for (i=0; i< (ssize_t) number_transparent; i++) { if (IsColorEqual(q, transparent+i)) break; } if (i == (ssize_t) number_transparent && number_transparent < 259) { number_transparent++; GetPixelRGBO(q, transparent+i); } } } else { if (number_semitransparent < 259) { if (number_semitransparent == 0) { GetPixelRGBO(q, semitransparent); number_semitransparent = 1; } for (i=0; i< (ssize_t) number_semitransparent; i++) { if (IsColorEqual(q, semitransparent+i) && GetPixelOpacity(q) == semitransparent[i].opacity) break; } if (i == (ssize_t) number_semitransparent && number_semitransparent < 259) { 
number_semitransparent++; GetPixelRGBO(q, semitransparent+i); } } } q++; } } if (mng_info->write_png8 == MagickFalse && ping_exclude_bKGD == MagickFalse) { /* Add the background color to the palette, if it * isn't already there. */ if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Check colormap for background (%d,%d,%d)", (int) image->background_color.red, (int) image->background_color.green, (int) image->background_color.blue); } for (i=0; i<number_opaque; i++) { if (opaque[i].red == image->background_color.red && opaque[i].green == image->background_color.green && opaque[i].blue == image->background_color.blue) break; } if (number_opaque < 259 && i == number_opaque) { opaque[i] = image->background_color; ping_background.index = i; number_opaque++; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d",(int) i); } } else if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No room in the colormap to add background color"); } image_colors=number_opaque+number_transparent+number_semitransparent; if (logging != MagickFalse) { if (image_colors > 256) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has more than 256 colors"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has %d colors",image_colors); } if (ping_preserve_colormap != MagickFalse) break; if (mng_info->write_png_colortype != 7) /* We won't need this info */ { ping_have_color=MagickFalse; ping_have_non_bw=MagickFalse; if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "incompatible colorspace"); ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; } if(image_colors > 256) { for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; s=q; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelRed(s) != GetPixelGreen(s) || GetPixelRed(s) != GetPixelBlue(s)) { ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; break; } s++; } if (ping_have_color != MagickFalse) break; /* Worst case is black-and-white; we are looking at every * pixel twice. */ if (ping_have_non_bw == MagickFalse) { s=q; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelRed(s) != 0 && GetPixelRed(s) != QuantumRange) { ping_have_non_bw=MagickTrue; break; } s++; } } } } } if (image_colors < 257) { PixelPacket colormap[260]; /* * Initialize image colormap. 
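 *
 * The map is assembled transparent-first so a palette tRNS chunk only
 * has to cover a prefix of the PLTE entries; a sketch mirroring the
 * code below:
 *
 *     n=0;
 *     for (i=0; i < number_transparent; i++)     colormap[n++]=transparent[i];
 *     for (i=0; i < number_semitransparent; i++) colormap[n++]=semitransparent[i];
 *     for (i=0; i < number_opaque; i++)          colormap[n++]=opaque[i];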
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Sort the new colormap"); /* Sort palette, transparent first */; n = 0; for (i=0; i<number_transparent; i++) colormap[n++] = transparent[i]; for (i=0; i<number_semitransparent; i++) colormap[n++] = semitransparent[i]; for (i=0; i<number_opaque; i++) colormap[n++] = opaque[i]; ping_background.index += (number_transparent + number_semitransparent); /* image_colors < 257; search the colormap instead of the pixels * to get ping_have_color and ping_have_non_bw */ for (i=0; i<n; i++) { if (ping_have_color == MagickFalse) { if (colormap[i].red != colormap[i].green || colormap[i].red != colormap[i].blue) { ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; break; } } if (ping_have_non_bw == MagickFalse) { if (colormap[i].red != 0 && colormap[i].red != QuantumRange) ping_have_non_bw=MagickTrue; } } if ((mng_info->ping_exclude_tRNS == MagickFalse || (number_transparent == 0 && number_semitransparent == 0)) && (((mng_info->write_png_colortype-1) == PNG_COLOR_TYPE_PALETTE) || (mng_info->write_png_colortype == 0))) { if (logging != MagickFalse) { if (n != (ssize_t) image_colors) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_colors (%d) and n (%d) don't match", image_colors, n); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " AcquireImageColormap"); } image->colors = image_colors; if (AcquireImageColormap(image,image_colors) == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } for (i=0; i< (ssize_t) image_colors; i++) image->colormap[i] = colormap[i]; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d (%d)", (int) image->colors, image_colors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Update the pixel indexes"); } /* Sync the pixel indices with the new colormap */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i< (ssize_t) image_colors; i++) { if ((image->matte == MagickFalse || image->colormap[i].opacity == GetPixelOpacity(q)) && image->colormap[i].red == GetPixelRed(q) && image->colormap[i].green == GetPixelGreen(q) && image->colormap[i].blue == GetPixelBlue(q)) { SetPixelIndex(indexes+x,i); break; } } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d", (int) image->colors); if (image->colormap != NULL) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " i (red,green,blue,opacity)"); for (i=0; i < (ssize_t) image->colors; i++) { if (i < 300 || i >= (ssize_t) image->colors - 10) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } } } if (number_transparent < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_transparent = %d", number_transparent); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_transparent > 256"); if (number_opaque < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_opaque = %d", number_opaque); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_opaque > 256"); if 
(number_semitransparent < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_semitransparent = %d", number_semitransparent); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_semitransparent > 256"); if (ping_have_non_bw == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " All pixels and the background are black or white"); else if (ping_have_color == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " All pixels and the background are gray"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " At least one pixel or the background is non-gray"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Exit BUILD_PALETTE:"); } if (mng_info->write_png8 == MagickFalse) break; /* Make any reductions necessary for the PNG8 format */ if (image_colors <= 256 && image_colors != 0 && image->colormap != NULL && number_semitransparent == 0 && number_transparent <= 1) break; /* PNG8 can't have semitransparent colors so we threshold the * opacity to 0 or OpaqueOpacity, and PNG8 can only have one * transparent color so if more than one is transparent we merge * them into image->background_color. */ if (number_semitransparent != 0 || number_transparent > 1) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Thresholding the alpha channel to binary"); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) > TransparentOpacity/2) { SetPixelOpacity(r,TransparentOpacity); SetPixelRgb(r,&image->background_color); } else SetPixelOpacity(r,OpaqueOpacity); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image_colors != 0 && image_colors <= 256 && image->colormap != NULL) for (i=0; i<image_colors; i++) image->colormap[i].opacity = (image->colormap[i].opacity > TransparentOpacity/2 ? TransparentOpacity : OpaqueOpacity); } continue; } /* PNG8 can't have more than 256 colors so we quantize the pixels and * background color to the 4-4-4-1, 3-3-3-1 or 3-3-2-1 palette. If the * image is mostly gray, the 4-4-4-1 palette is likely to end up with 256 * colors or less. 
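 *
 * "4-4-4" keeps the top four bits of each channel and replicates them
 * into the low bits, so quantized values still span 0..255.  Sketch for
 * one 8-bit channel (the LBR macros are assumed to do the equivalent):
 *
 *     v &= 0xf0;  v |= (v >> 4);    e.g. 0x37 becomes 0x33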
*/ if (tried_444 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 4-4-4"); tried_444 = MagickTrue; LBR04PacketRGB(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 4-4-4"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR04PixelRGB(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 4-4-4"); for (i=0; i<image_colors; i++) { LBR04PacketRGB(image->colormap[i]); } } continue; } if (tried_333 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 3-3-3"); tried_333 = MagickTrue; LBR03PacketRGB(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 3-3-3-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR03PixelRGB(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 3-3-3-1"); for (i=0; i<image_colors; i++) { LBR03PacketRGB(image->colormap[i]); } } continue; } if (tried_332 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 3-3-2"); tried_332 = MagickTrue; /* Red and green were already done so we only quantize the blue * channel */ LBR02PacketBlue(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 3-3-2-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR02PixelBlue(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 3-3-2-1"); for (i=0; i<image_colors; i++) { LBR02PacketBlue(image->colormap[i]); } } continue; } if (image_colors == 0 || image_colors > 256) { /* Take care of special case with 256 opaque colors + 1 transparent * color. We don't need to quantize to 2-3-2-1; we only need to * eliminate one color, so we'll merge the two darkest red * colors (0x49, 0, 0) -> (0x24, 0, 0). 
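 * (0x24 and 0x49 are the two smallest nonzero levels of the replicated
 * 3-bit red scale: (b<<5)|(b<<2)|(b>>1) gives 0x24 for b=1 and 0x49 for
 * b=2, so the merge collapses two adjacent quantization levels.)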
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Merging two dark red background colors to 3-3-2-1"); if (ScaleQuantumToChar(image->background_color.red) == 0x49 && ScaleQuantumToChar(image->background_color.green) == 0x00 && ScaleQuantumToChar(image->background_color.blue) == 0x00) { image->background_color.red=ScaleCharToQuantum(0x24); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Merging two dark red pixel colors to 3-3-2-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (ScaleQuantumToChar(GetPixelRed(r)) == 0x49 && ScaleQuantumToChar(GetPixelGreen(r)) == 0x00 && ScaleQuantumToChar(GetPixelBlue(r)) == 0x00 && GetPixelOpacity(r) == OpaqueOpacity) { SetPixelRed(r,ScaleCharToQuantum(0x24)); } r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else { for (i=0; i<image_colors; i++) { if (ScaleQuantumToChar(image->colormap[i].red) == 0x49 && ScaleQuantumToChar(image->colormap[i].green) == 0x00 && ScaleQuantumToChar(image->colormap[i].blue) == 0x00) { image->colormap[i].red=ScaleCharToQuantum(0x24); } } } } } } /* END OF BUILD_PALETTE */ /* If we are excluding the tRNS chunk and there is transparency, * then we must write a Gray-Alpha (color-type 4) or RGBA (color-type 6) * PNG. */ if (mng_info->ping_exclude_tRNS != MagickFalse && (number_transparent != 0 || number_semitransparent != 0)) { unsigned int colortype=mng_info->write_png_colortype; if (ping_have_color == MagickFalse) mng_info->write_png_colortype = 5; else mng_info->write_png_colortype = 7; if (colortype != 0 && mng_info->write_png_colortype != colortype) ping_need_colortype_warning=MagickTrue; } /* See if cheap transparency is possible. It is only possible * when there is a single transparent color, no semitransparent * color, and no opaque color that has the same RGB components * as the transparent color. We only need this information if * we are writing a PNG with colortype 0 or 2, and we have not * excluded the tRNS chunk. */ if (number_transparent == 1 && mng_info->write_png_colortype < 4) { ping_have_cheap_transparency = MagickTrue; if (number_semitransparent != 0) ping_have_cheap_transparency = MagickFalse; else if (image_colors == 0 || image_colors > 256 || image->colormap == NULL) { ExceptionInfo *exception; register const PixelPacket *q; exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { q=GetVirtualPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity != TransparentOpacity && (unsigned short) GetPixelRed(q) == ping_trans_color.red && (unsigned short) GetPixelGreen(q) == ping_trans_color.green && (unsigned short) GetPixelBlue(q) == ping_trans_color.blue) { ping_have_cheap_transparency = MagickFalse; break; } q++; } if (ping_have_cheap_transparency == MagickFalse) break; } } else { /* Assuming that image->colormap[0] is the one transparent color * and that all others are opaque. 
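 *
 * Informally: cheap RGB+tRNS transparency must be rejected whenever an
 * opaque palette entry shares the exact RGB triple of entry [0],
 * because a tRNS color key on that triple would also knock out the
 * pixels that use the opaque entry.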
*/ if (image_colors > 1) for (i=1; i<image_colors; i++) if (image->colormap[i].red == image->colormap[0].red && image->colormap[i].green == image->colormap[0].green && image->colormap[i].blue == image->colormap[0].blue) { ping_have_cheap_transparency = MagickFalse; break; } } if (logging != MagickFalse) { if (ping_have_cheap_transparency == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Cheap transparency is not possible."); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Cheap transparency is possible."); } } else ping_have_cheap_transparency = MagickFalse; image_depth=image->depth; quantum_info = (QuantumInfo *) NULL; number_colors=0; image_colors=(int) image->colors; image_matte=image->matte; if (mng_info->write_png_colortype < 5) mng_info->IsPalette=image->storage_class == PseudoClass && image_colors <= 256 && image->colormap != NULL; else mng_info->IsPalette = MagickFalse; if ((mng_info->write_png_colortype == 4 || mng_info->write_png8) && (image->colors == 0 || image->colormap == NULL)) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "Cannot write PNG8 or color-type 3; colormap is NULL", "`%s'",image->filename); return(MagickFalse); } /* Allocate the PNG structures */ #ifdef PNG_USER_MEM_SUPPORTED ping=png_create_write_struct_2(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler,(void *) NULL, (png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free); #else ping=png_create_write_struct(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler); #endif if (ping == (png_struct *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); ping_info=png_create_info_struct(ping); if (ping_info == (png_info *) NULL) { png_destroy_write_struct(&ping,(png_info **) NULL); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } png_set_write_fn(ping,image,png_put_data,png_flush_data); pixel_info=(MemoryInfo *) NULL; if (setjmp(png_jmpbuf(ping))) { /* PNG write failed. */ #ifdef PNG_DEBUG if (image_info->verbose) (void) printf("PNG write has failed.\n"); #endif png_destroy_write_struct(&ping,&ping_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); return(MagickFalse); } /* { For navigation to end of SETJMP-protected block. Within this * block, use png_error() instead of Throwing an Exception, to ensure * that libpng is able to clean up, and that the semaphore is unlocked. */ #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE LockSemaphoreInfo(ping_semaphore); #endif #ifdef PNG_BENIGN_ERRORS_SUPPORTED /* Allow benign errors */ png_set_benign_errors(ping, 1); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED /* Reject images with too many rows or columns */ png_set_user_limits(ping, (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(WidthResource)), (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(HeightResource))); #endif /* PNG_SET_USER_LIMITS_SUPPORTED */ /* Prepare PNG for writing. 
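 *
 * (Informal note: the block below first enables MNG extensions where
 * applicable, then rounds the requested bit depth up to one PNG
 * actually allows; PNG supports only 1, 2, 4, 8, and 16, so 3 becomes
 * 4, 5 through 7 become 8, and anything above 8 becomes 16.)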
*/ #if defined(PNG_MNG_FEATURES_SUPPORTED) if (mng_info->write_mng) { (void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES); # ifdef PNG_WRITE_CHECK_FOR_INVALID_INDEX_SUPPORTED /* Disable new libpng-1.5.10 feature when writing a MNG because * zero-length PLTE is OK */ png_set_check_for_invalid_index (ping, 0); # endif } #else # ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED if (mng_info->write_mng) png_permit_empty_plte(ping,MagickTrue); # endif #endif x=0; ping_width=(png_uint_32) image->columns; ping_height=(png_uint_32) image->rows; if (mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32) image_depth=8; if (mng_info->write_png48 || mng_info->write_png64) image_depth=16; if (mng_info->write_png_depth != 0) image_depth=mng_info->write_png_depth; /* Adjust requested depth to next higher valid depth if necessary */ if (image_depth > 8) image_depth=16; if ((image_depth > 4) && (image_depth < 8)) image_depth=8; if (image_depth == 3) image_depth=4; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " width=%.20g",(double) ping_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " height=%.20g",(double) ping_height); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_matte=%.20g",(double) image->matte); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth=%.20g",(double) image->depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative ping_bit_depth=%.20g",(double) image_depth); } save_image_depth=image_depth; ping_bit_depth=(png_byte) save_image_depth; #if defined(PNG_pHYs_SUPPORTED) if (ping_exclude_pHYs == MagickFalse) { if ((image->x_resolution != 0) && (image->y_resolution != 0) && (!mng_info->write_mng || !mng_info->equal_physs)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up pHYs chunk"); if (image->units == PixelsPerInchResolution) { ping_pHYs_unit_type=PNG_RESOLUTION_METER; ping_pHYs_x_resolution= (png_uint_32) ((100.0*image->x_resolution+0.5)/2.54); ping_pHYs_y_resolution= (png_uint_32) ((100.0*image->y_resolution+0.5)/2.54); } else if (image->units == PixelsPerCentimeterResolution) { ping_pHYs_unit_type=PNG_RESOLUTION_METER; ping_pHYs_x_resolution=(png_uint_32) (100.0*image->x_resolution+0.5); ping_pHYs_y_resolution=(png_uint_32) (100.0*image->y_resolution+0.5); } else { ping_pHYs_unit_type=PNG_RESOLUTION_UNKNOWN; ping_pHYs_x_resolution=(png_uint_32) image->x_resolution; ping_pHYs_y_resolution=(png_uint_32) image->y_resolution; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Set up PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.", (double) ping_pHYs_x_resolution,(double) ping_pHYs_y_resolution, (int) ping_pHYs_unit_type); ping_have_pHYs = MagickTrue; } } #endif if (ping_exclude_bKGD == MagickFalse) { if ((!mng_info->adjoin || !mng_info->equal_backgrounds)) { unsigned int mask; mask=0xffff; if (ping_bit_depth == 8) mask=0x00ff; if (ping_bit_depth == 4) mask=0x000f; if (ping_bit_depth == 2) mask=0x0003; if (ping_bit_depth == 1) mask=0x0001; ping_background.red=(png_uint_16) (ScaleQuantumToShort(image->background_color.red) & mask); ping_background.green=(png_uint_16) (ScaleQuantumToShort(image->background_color.green) & mask); ping_background.blue=(png_uint_16) (ScaleQuantumToShort(image->background_color.blue) & mask); ping_background.gray=(png_uint_16) ping_background.green; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk (1)"); (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d", (int) ping_background.index); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_bit_depth=%d",ping_bit_depth); } ping_have_bKGD = MagickTrue; } /* Select the color type. */ matte=image_matte; old_bit_depth=0; if (mng_info->IsPalette && mng_info->write_png8) { /* To do: make this a function cause it's used twice, except for reducing the sample depth from 8. */ number_colors=image_colors; ping_have_tRNS=MagickFalse; /* Set image palette. */ ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up PLTE chunk with %d colors (%d)", number_colors, image_colors); for (i=0; i < (ssize_t) number_colors; i++) { palette[i].red=ScaleQuantumToChar(image->colormap[i].red); palette[i].green=ScaleQuantumToChar(image->colormap[i].green); palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), #if MAGICKCORE_QUANTUM_DEPTH == 8 " %3ld (%3d,%3d,%3d)", #else " %5ld (%5d,%5d,%5d)", #endif (long) i,palette[i].red,palette[i].green,palette[i].blue); } ping_have_PLTE=MagickTrue; image_depth=ping_bit_depth; ping_num_trans=0; if (matte != MagickFalse) { /* Identify which colormap entry is transparent. */ assert(number_colors <= 256); assert(image->colormap != NULL); for (i=0; i < (ssize_t) number_transparent; i++) ping_trans_alpha[i]=0; ping_num_trans=(unsigned short) (number_transparent + number_semitransparent); if (ping_num_trans == 0) ping_have_tRNS=MagickFalse; else ping_have_tRNS=MagickTrue; } if (ping_exclude_bKGD == MagickFalse) { /* * Identify which colormap entry is the background color. */ for (i=0; i < (ssize_t) MagickMax(1L*number_colors-1L,1L); i++) if (IsPNGColorEqual(ping_background,image->colormap[i])) break; ping_background.index=(png_byte) i; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d", (int) ping_background.index); } } } /* end of write_png8 */ else if (mng_info->write_png_colortype == 1) { image_matte=MagickFalse; ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY; } else if (mng_info->write_png24 || mng_info->write_png48 || mng_info->write_png_colortype == 3) { image_matte=MagickFalse; ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; } else if (mng_info->write_png32 || mng_info->write_png64 || mng_info->write_png_colortype == 7) { image_matte=MagickTrue; ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA; } else /* mng_info->write_pngNN not specified */ { image_depth=ping_bit_depth; if (mng_info->write_png_colortype != 0) { ping_color_type=(png_byte) mng_info->write_png_colortype-1; if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA || ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) image_matte=MagickTrue; else image_matte=MagickFalse; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG colortype %d was specified:",(int) ping_color_type); } else /* write_png_colortype not specified */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Selecting PNG colortype:"); ping_color_type=(png_byte) ((matte != MagickFalse)? 
PNG_COLOR_TYPE_RGB_ALPHA:PNG_COLOR_TYPE_RGB); if (image_info->type == TrueColorType) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; image_matte=MagickFalse; } if (image_info->type == TrueColorMatteType) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA; image_matte=MagickTrue; } if (image_info->type == PaletteType || image_info->type == PaletteMatteType) ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (mng_info->write_png_colortype == 0 && image_info->type == UndefinedType) { if (ping_have_color == MagickFalse) { if (image_matte == MagickFalse) { ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY; image_matte=MagickFalse; } else { ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY_ALPHA; image_matte=MagickTrue; } } else { if (image_matte == MagickFalse) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; image_matte=MagickFalse; } else { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGBA; image_matte=MagickTrue; } } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Selected PNG colortype=%d",ping_color_type); if (ping_bit_depth < 8) { if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA || ping_color_type == PNG_COLOR_TYPE_RGB || ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) ping_bit_depth=8; } old_bit_depth=ping_bit_depth; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (image->matte == MagickFalse && ping_have_non_bw == MagickFalse) ping_bit_depth=1; } if (ping_color_type == PNG_COLOR_TYPE_PALETTE) { size_t one = 1; ping_bit_depth=1; if (image->colors == 0) { /* DO SOMETHING */ png_error(ping,"image has 0 colors"); } while ((int) (one << ping_bit_depth) < (ssize_t) image_colors) ping_bit_depth <<= 1; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %.20g",(double) image_colors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative PNG bit depth: %d",ping_bit_depth); } if (ping_bit_depth < (int) mng_info->write_png_depth) ping_bit_depth = mng_info->write_png_depth; } image_depth=ping_bit_depth; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative PNG color type: %s (%.20g)", PngColorTypeToString(ping_color_type), (double) ping_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_info->type: %.20g",(double) image_info->type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_depth: %.20g",(double) image_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth: %.20g",(double) image->depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_bit_depth: %.20g",(double) ping_bit_depth); } if (matte != MagickFalse) { if (mng_info->IsPalette) { if (mng_info->write_png_colortype == 0) { ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA; if (ping_have_color != MagickFalse) ping_color_type=PNG_COLOR_TYPE_RGBA; } /* * Determine if there is any transparent color. */ if (number_transparent + number_semitransparent == 0) { /* No transparent pixels are present. Change 4 or 6 to 0 or 2. 
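 *
 * (The mask below works because PNG encodes alpha in the color-type
 * bit of value 4: type 4 is grayscale+alpha and type 6 is RGB+alpha,
 * so clearing that bit with "& 0x03" maps 4 to 0 and 6 to 2.)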
*/ image_matte=MagickFalse; if (mng_info->write_png_colortype == 0) ping_color_type&=0x03; } else { unsigned int mask; mask=0xffff; if (ping_bit_depth == 8) mask=0x00ff; if (ping_bit_depth == 4) mask=0x000f; if (ping_bit_depth == 2) mask=0x0003; if (ping_bit_depth == 1) mask=0x0001; ping_trans_color.red=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].red) & mask); ping_trans_color.green=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].green) & mask); ping_trans_color.blue=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].blue) & mask); ping_trans_color.gray=(png_uint_16) (ScaleQuantumToShort(ClampToQuantum(GetPixelLuma(image, image->colormap))) & mask); ping_trans_color.index=(png_byte) 0; ping_have_tRNS=MagickTrue; } if (ping_have_tRNS != MagickFalse) { /* * Determine if there is one and only one transparent color * and if so if it is fully transparent. */ if (ping_have_cheap_transparency == MagickFalse) ping_have_tRNS=MagickFalse; } if (ping_have_tRNS != MagickFalse) { if (mng_info->write_png_colortype == 0) ping_color_type &= 0x03; /* changes 4 or 6 to 0 or 2 */ if (image_depth == 8) { ping_trans_color.red&=0xff; ping_trans_color.green&=0xff; ping_trans_color.blue&=0xff; ping_trans_color.gray&=0xff; } } } else { if (image_depth == 8) { ping_trans_color.red&=0xff; ping_trans_color.green&=0xff; ping_trans_color.blue&=0xff; ping_trans_color.gray&=0xff; } } } matte=image_matte; if (ping_have_tRNS != MagickFalse) image_matte=MagickFalse; if ((mng_info->IsPalette) && mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE && ping_have_color == MagickFalse && (image_matte == MagickFalse || image_depth >= 8)) { size_t one=1; if (image_matte != MagickFalse) ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA; else if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_GRAY_ALPHA) { ping_color_type=PNG_COLOR_TYPE_GRAY; if (save_image_depth == 16 && image_depth == 8) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color (0)"); } ping_trans_color.gray*=0x0101; } } if (image_depth > MAGICKCORE_QUANTUM_DEPTH) image_depth=MAGICKCORE_QUANTUM_DEPTH; if ((image_colors == 0) || ((ssize_t) (image_colors-1) > (ssize_t) MaxColormapSize)) image_colors=(int) (one << image_depth); if (image_depth > 8) ping_bit_depth=16; else { ping_bit_depth=8; if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { if(!mng_info->write_png_depth) { ping_bit_depth=1; while ((int) (one << ping_bit_depth) < (ssize_t) image_colors) ping_bit_depth <<= 1; } } else if (ping_color_type == PNG_COLOR_TYPE_GRAY && image_colors < 17 && mng_info->IsPalette) { /* Check if grayscale is reducible */ int depth_4_ok=MagickTrue, depth_2_ok=MagickTrue, depth_1_ok=MagickTrue; for (i=0; i < (ssize_t) image_colors; i++) { unsigned char intensity; intensity=ScaleQuantumToChar(image->colormap[i].red); if ((intensity & 0x0f) != ((intensity & 0xf0) >> 4)) depth_4_ok=depth_2_ok=depth_1_ok=MagickFalse; else if ((intensity & 0x03) != ((intensity & 0x0c) >> 2)) depth_2_ok=depth_1_ok=MagickFalse; else if ((intensity & 0x01) != ((intensity & 0x02) >> 1)) depth_1_ok=MagickFalse; } if (depth_1_ok && mng_info->write_png_depth <= 1) ping_bit_depth=1; else if (depth_2_ok && mng_info->write_png_depth <= 2) ping_bit_depth=2; else if (depth_4_ok && mng_info->write_png_depth <= 4) ping_bit_depth=4; } } image_depth=ping_bit_depth; } else if (mng_info->IsPalette) { number_colors=image_colors; if (image_depth <= 8) { /* Set image palette. 
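 *
 * (Reminder: PLTE samples are always 8 bits per channel no matter what
 * index depth is chosen, hence the ScaleQuantumToChar() calls below;
 * only the per-pixel palette index may be 1, 2, 4, or 8 bits wide.)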
*/ ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (!(mng_info->have_write_global_plte && matte == MagickFalse)) { for (i=0; i < (ssize_t) number_colors; i++) { palette[i].red=ScaleQuantumToChar(image->colormap[i].red); palette[i].green=ScaleQuantumToChar(image->colormap[i].green); palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up PLTE chunk with %d colors", number_colors); ping_have_PLTE=MagickTrue; } /* color_type is PNG_COLOR_TYPE_PALETTE */ if (mng_info->write_png_depth == 0) { size_t one; ping_bit_depth=1; one=1; while ((one << ping_bit_depth) < (size_t) number_colors) ping_bit_depth <<= 1; } ping_num_trans=0; if (matte != MagickFalse) { /* * Set up trans_colors array. */ assert(number_colors <= 256); ping_num_trans=(unsigned short) (number_transparent + number_semitransparent); if (ping_num_trans == 0) ping_have_tRNS=MagickFalse; else { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color (1)"); } ping_have_tRNS=MagickTrue; for (i=0; i < ping_num_trans; i++) { ping_trans_alpha[i]= (png_byte) (255- ScaleQuantumToChar(image->colormap[i].opacity)); } } } } } else { if (image_depth < 8) image_depth=8; if ((save_image_depth == 16) && (image_depth == 8)) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color from (%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } ping_trans_color.red*=0x0101; ping_trans_color.green*=0x0101; ping_trans_color.blue*=0x0101; ping_trans_color.gray*=0x0101; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " to (%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } } } if (ping_bit_depth < (ssize_t) mng_info->write_png_depth) ping_bit_depth = (ssize_t) mng_info->write_png_depth; /* Adjust background and transparency samples in sub-8-bit grayscale files. */ if (ping_bit_depth < 8 && ping_color_type == PNG_COLOR_TYPE_GRAY) { png_uint_16 maxval; size_t one=1; maxval=(png_uint_16) ((one << ping_bit_depth)-1); if (ping_exclude_bKGD == MagickFalse) { ping_background.gray=(png_uint_16) ((maxval/65535.)*(ScaleQuantumToShort((Quantum) GetPixelLuma(image,&image->background_color)))+.5); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk (2)"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_background.index is %d", (int) ping_background.index); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_background.gray is %d", (int) ping_background.gray); } ping_have_bKGD = MagickTrue; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color.gray from %d", (int)ping_trans_color.gray); ping_trans_color.gray=(png_uint_16) ((maxval/255.)*( ping_trans_color.gray)+.5); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " to %d", (int)ping_trans_color.gray); } if (ping_exclude_bKGD == MagickFalse) { if (mng_info->IsPalette && (int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { /* Identify which colormap entry is the background color. 
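 *
 * (This is a plain linear scan; when no entry matches, i ends up equal
 * to number_colors and the code below drops the bKGD chunk rather than
 * emit an index that would point outside the PLTE.)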
*/ number_colors=image_colors; for (i=0; i < (ssize_t) MagickMax(1L*number_colors,1L); i++) if (IsPNGColorEqual(image->background_color,image->colormap[i])) break; ping_background.index=(png_byte) i; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk with index=%d",(int) i); } if (i < (ssize_t) number_colors) { ping_have_bKGD = MagickTrue; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background =(%d,%d,%d)", (int) ping_background.red, (int) ping_background.green, (int) ping_background.blue); } } else /* Can't happen */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No room in PLTE to add bKGD color"); ping_have_bKGD = MagickFalse; } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG color type: %s (%d)", PngColorTypeToString(ping_color_type), ping_color_type); /* Initialize compression level and filtering. */ if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up deflate compression"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression buffer size: 32768"); } png_set_compression_buffer_size(ping,32768L); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression mem level: 9"); png_set_compression_mem_level(ping, 9); /* Untangle the "-quality" setting: Undefined is 0; the default is used. Default is 75 10's digit: 0 or omitted: Use Z_HUFFMAN_ONLY strategy with the zlib default compression level 1-9: the zlib compression level 1's digit: 0-4: the PNG filter method 5: libpng adaptive filtering if compression level > 5 libpng filter type "none" if compression level <= 5 or if image is grayscale or palette 6: libpng adaptive filtering 7: "LOCO" filtering (intrapixel differing) if writing a MNG, otherwise "none". Did not work in IM-6.7.0-9 and earlier because of a missing "else". 8: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), adaptive filtering. Unused prior to IM-6.7.0-10, was same as 6 9: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), no PNG filters Unused prior to IM-6.7.0-10, was same as 6 Note that using the -quality option, not all combinations of PNG filter type, zlib compression level, and zlib compression strategy are possible. This is addressed by using "-define png:compression-strategy", etc., which takes precedence over -quality. */ quality=image_info->quality == UndefinedCompressionQuality ? 
75UL : image_info->quality; if (quality <= 9) { if (mng_info->write_png_compression_strategy == 0) mng_info->write_png_compression_strategy = Z_HUFFMAN_ONLY+1; } else if (mng_info->write_png_compression_level == 0) { int level; level=(int) MagickMin((ssize_t) quality/10,9); mng_info->write_png_compression_level = level+1; } if (mng_info->write_png_compression_strategy == 0) { if ((quality %10) == 8 || (quality %10) == 9) #ifdef Z_RLE /* Z_RLE was added to zlib-1.2.0 */ mng_info->write_png_compression_strategy=Z_RLE+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif } if (mng_info->write_png_compression_filter == 0) mng_info->write_png_compression_filter=((int) quality % 10) + 1; if (logging != MagickFalse) { if (mng_info->write_png_compression_level) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression level: %d", (int) mng_info->write_png_compression_level-1); if (mng_info->write_png_compression_strategy) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression strategy: %d", (int) mng_info->write_png_compression_strategy-1); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up filtering"); if (mng_info->write_png_compression_filter == 6) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: ADAPTIVE"); else if (mng_info->write_png_compression_filter == 0 || mng_info->write_png_compression_filter == 1) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: NONE"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: %d", (int) mng_info->write_png_compression_filter-1); } if (mng_info->write_png_compression_level != 0) png_set_compression_level(ping,mng_info->write_png_compression_level-1); if (mng_info->write_png_compression_filter == 6) { if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) || (quality < 50)) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); else png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS); } else if (mng_info->write_png_compression_filter == 7 || mng_info->write_png_compression_filter == 10) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS); else if (mng_info->write_png_compression_filter == 8) { #if defined(PNG_MNG_FEATURES_SUPPORTED) && defined(PNG_INTRAPIXEL_DIFFERENCING) if (mng_info->write_mng) { if (((int) ping_color_type == PNG_COLOR_TYPE_RGB) || ((int) ping_color_type == PNG_COLOR_TYPE_RGBA)) ping_filter_method=PNG_INTRAPIXEL_DIFFERENCING; } #endif png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); } else if (mng_info->write_png_compression_filter == 9) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); else if (mng_info->write_png_compression_filter != 0) png_set_filter(ping,PNG_FILTER_TYPE_BASE, mng_info->write_png_compression_filter-1); if (mng_info->write_png_compression_strategy != 0) png_set_compression_strategy(ping, mng_info->write_png_compression_strategy-1); ping_interlace_method=image_info->interlace != NoInterlace; if (mng_info->write_mng) png_set_sig_bytes(ping,8); /* Bail out if cannot meet defined png:bit-depth or png:color-type */ if (mng_info->write_png_colortype != 0) { if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY) if (ping_have_color != MagickFalse) { ping_color_type = PNG_COLOR_TYPE_RGB; if (ping_bit_depth < 8) ping_bit_depth=8; } if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY_ALPHA) if (ping_have_color != MagickFalse) ping_color_type = PNG_COLOR_TYPE_RGB_ALPHA; } if (ping_need_colortype_warning 
!= MagickFalse || ((mng_info->write_png_depth && (int) mng_info->write_png_depth != ping_bit_depth) || (mng_info->write_png_colortype && ((int) mng_info->write_png_colortype-1 != ping_color_type && mng_info->write_png_colortype != 7 && !(mng_info->write_png_colortype == 5 && ping_color_type == 0))))) { if (logging != MagickFalse) { if (ping_need_colortype_warning != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image has transparency but tRNS chunk was excluded"); } if (mng_info->write_png_depth) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:bit-depth=%u, Computed depth=%u", mng_info->write_png_depth, ping_bit_depth); } if (mng_info->write_png_colortype) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:color-type=%u, Computed color type=%u", mng_info->write_png_colortype-1, ping_color_type); } } png_warning(ping, "Cannot write image with defined png:bit-depth or png:color-type."); } if (image_matte != MagickFalse && image->matte == MagickFalse) { /* Add an opaque matte channel */ image->matte = MagickTrue; (void) SetImageOpacity(image,0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Added an opaque matte channel"); } if (number_transparent != 0 || number_semitransparent != 0) { if (ping_color_type < 4) { ping_have_tRNS=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting ping_have_tRNS=MagickTrue."); } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG header chunks"); png_set_IHDR(ping,ping_info,ping_width,ping_height, ping_bit_depth,ping_color_type, ping_interlace_method,ping_compression_method, ping_filter_method); if (ping_color_type == 3 && ping_have_PLTE != MagickFalse) { if (mng_info->have_write_global_plte && matte == MagickFalse) { png_set_PLTE(ping,ping_info,NULL,0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up empty PLTE chunk"); } else png_set_PLTE(ping,ping_info,palette,number_colors); if (logging != MagickFalse) { for (i=0; i< (ssize_t) number_colors; i++) { if (i < ping_num_trans) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PLTE[%d] = (%d,%d,%d), tRNS[%d] = (%d)", (int) i, (int) palette[i].red, (int) palette[i].green, (int) palette[i].blue, (int) i, (int) ping_trans_alpha[i]); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PLTE[%d] = (%d,%d,%d)", (int) i, (int) palette[i].red, (int) palette[i].green, (int) palette[i].blue); } } } /* Only write the iCCP chunk if we are not writing the sRGB chunk. 
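 *
 * (The PNG specification recommends against emitting both: iCCP and
 * sRGB each describe the color space, and a decoder handed both would
 * have two competing definitions to choose between.)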
*/ if (ping_exclude_sRGB != MagickFalse || (!png_get_valid(ping,ping_info,PNG_INFO_sRGB))) { if ((ping_exclude_tEXt == MagickFalse || ping_exclude_zTXt == MagickFalse) && (ping_exclude_iCCP == MagickFalse || ping_exclude_zCCP == MagickFalse)) { ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { #ifdef PNG_WRITE_iCCP_SUPPORTED if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0)) { if (ping_exclude_iCCP == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up iCCP chunk"); png_set_iCCP(ping,ping_info,(const png_charp) name,0, #if (PNG_LIBPNG_VER < 10500) (png_charp) GetStringInfoDatum(profile), #else (const png_byte *) GetStringInfoDatum(profile), #endif (png_uint_32) GetStringInfoLength(profile)); ping_have_iCCP = MagickTrue; } } else #endif { if (LocaleCompare(name,"exif") == 0) { /* Do not write hex-encoded ICC chunk; we will write it later as an eXIf chunk */ name=GetNextImageProfile(image); continue; } if (ping_exclude_zCCP == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up zTXT chunk with uuencoded ICC"); Magick_png_write_raw_profile(image_info,ping,ping_info, (unsigned char *) name,(unsigned char *) name, GetStringInfoDatum(profile), (png_uint_32) GetStringInfoLength(profile)); ping_have_iCCP = MagickTrue; } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up text chunk with %s profile",name); name=GetNextImageProfile(image); } } } #if defined(PNG_WRITE_sRGB_SUPPORTED) if ((mng_info->have_write_global_srgb == 0) && ping_have_iCCP != MagickTrue && (ping_have_sRGB != MagickFalse || png_get_valid(ping,ping_info,PNG_INFO_sRGB))) { if (ping_exclude_sRGB == MagickFalse) { /* Note image rendering intent. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up sRGB chunk"); (void) png_set_sRGB(ping,ping_info,( Magick_RenderingIntent_to_PNG_RenderingIntent( image->rendering_intent))); ping_have_sRGB = MagickTrue; } } if ((!mng_info->write_mng) || (!png_get_valid(ping,ping_info,PNG_INFO_sRGB))) #endif { if (ping_exclude_gAMA == MagickFalse && ping_have_iCCP == MagickFalse && ping_have_sRGB == MagickFalse && (ping_exclude_sRGB == MagickFalse || (image->gamma < .45 || image->gamma > .46))) { if ((mng_info->have_write_global_gama == 0) && (image->gamma != 0.0)) { /* Note image gamma. To do: check for cHRM+gAMA == sRGB, and write sRGB instead. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up gAMA chunk"); png_set_gAMA(ping,ping_info,image->gamma); } } if (ping_exclude_cHRM == MagickFalse && ping_have_sRGB == MagickFalse) { if ((mng_info->have_write_global_chrm == 0) && (image->chromaticity.red_primary.x != 0.0)) { /* Note image chromaticity. Note: if cHRM+gAMA == sRGB write sRGB instead. 
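 *
 * (For reference, the sRGB case would be white point 0.3127,0.3290,
 * red 0.64,0.33, green 0.30,0.60, blue 0.15,0.06, together with a file
 * gamma near 0.4545; as the gAMA "To do" above notes, that equivalence
 * check is not implemented yet.)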
*/
         PrimaryInfo
           bp, gp, rp, wp;

         wp=image->chromaticity.white_point;
         rp=image->chromaticity.red_primary;
         gp=image->chromaticity.green_primary;
         bp=image->chromaticity.blue_primary;

         if (logging != MagickFalse)
           (void) LogMagickEvent(CoderEvent,GetMagickModule(),
             " Setting up cHRM chunk");

         png_set_cHRM(ping,ping_info,wp.x,wp.y,rp.x,rp.y,gp.x,gp.y,
           bp.x,bp.y);
       }
     }
  }

  if (ping_exclude_bKGD == MagickFalse)
    {
      if (ping_have_bKGD != MagickFalse)
        {
          png_set_bKGD(ping,ping_info,&ping_background);

          if (logging != MagickFalse)
            {
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Setting up bKGD chunk");
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " background color = (%d,%d,%d)",
                (int) ping_background.red,
                (int) ping_background.green,
                (int) ping_background.blue);
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " index = %d, gray=%d",
                (int) ping_background.index,
                (int) ping_background.gray);
            }
        }
    }

  if (ping_exclude_pHYs == MagickFalse)
    {
      if (ping_have_pHYs != MagickFalse)
        {
          png_set_pHYs(ping,ping_info,
            ping_pHYs_x_resolution,
            ping_pHYs_y_resolution,
            ping_pHYs_unit_type);

          if (logging != MagickFalse)
            {
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Setting up pHYs chunk");
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " x_resolution=%lu",
                (unsigned long) ping_pHYs_x_resolution);
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " y_resolution=%lu",
                (unsigned long) ping_pHYs_y_resolution);
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " unit_type=%lu",
                (unsigned long) ping_pHYs_unit_type);
            }
        }
    }

#if defined(PNG_tIME_SUPPORTED)
  if (ping_exclude_tIME == MagickFalse)
    {
      const char
        *timestamp;

      if (image->taint == MagickFalse)
        {
          timestamp=GetImageOption(image_info,"png:tIME");

          if (timestamp == (const char *) NULL)
            timestamp=GetImageProperty(image,"png:tIME");
        }
      else
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " Reset tIME in tainted image");
          timestamp=GetImageProperty(image,"date:modify");
        }

      if (timestamp != (const char *) NULL)
        write_tIME_chunk(image,ping,ping_info,timestamp);
    }
#endif

  if (mng_info->need_blob != MagickFalse)
    {
      if (OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception) ==
          MagickFalse)
        png_error(ping,"WriteBlob Failed");

      ping_have_blob=MagickTrue;
      (void) ping_have_blob;
    }

  png_write_info_before_PLTE(ping, ping_info);

  if (ping_have_tRNS != MagickFalse && ping_color_type < 4)
    {
      if (logging != MagickFalse)
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " Calling png_set_tRNS with num_trans=%d",ping_num_trans);
        }

      if (ping_color_type == 3)
        (void) png_set_tRNS(ping, ping_info,
          ping_trans_alpha,
          ping_num_trans,
          NULL);
      else
        {
          (void) png_set_tRNS(ping, ping_info,
            NULL,
            0,
            &ping_trans_color);

          if (logging != MagickFalse)
            {
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " tRNS color =(%d,%d,%d)",
                (int) ping_trans_color.red,
                (int) ping_trans_color.green,
                (int) ping_trans_color.blue);
            }
        }
    }

  png_write_info(ping,ping_info);

  ping_wrote_caNv = MagickFalse;

  /* write caNv chunk */
  if (ping_exclude_caNv == MagickFalse)
    {
      if ((image->page.width != 0 && image->page.width != image->columns) ||
          (image->page.height != 0 && image->page.height != image->rows) ||
          image->page.x != 0 || image->page.y != 0)
        {
          unsigned char
            chunk[20];

          (void) WriteBlobMSBULong(image,16L);  /* data length=16 */
          PNGType(chunk,mng_caNv);
          LogPNGChunk(logging,mng_caNv,16L);
          PNGLong(chunk+4,(png_uint_32) image->page.width);
          PNGLong(chunk+8,(png_uint_32) image->page.height);
          PNGsLong(chunk+12,(png_int_32) image->page.x);
          PNGsLong(chunk+16,(png_int_32) image->page.y);
          (void)
WriteBlob(image,20,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,20)); ping_wrote_caNv = MagickTrue; } } #if defined(PNG_oFFs_SUPPORTED) if (ping_exclude_oFFs == MagickFalse && ping_wrote_caNv == MagickFalse) { if (image->page.x || image->page.y) { png_set_oFFs(ping,ping_info,(png_int_32) image->page.x, (png_int_32) image->page.y, 0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up oFFs chunk with x=%d, y=%d, units=0", (int) image->page.x, (int) image->page.y); } } #endif #if (PNG_LIBPNG_VER == 10206) /* avoid libpng-1.2.6 bug by setting PNG_HAVE_IDAT flag */ #define PNG_HAVE_IDAT 0x04 ping->mode |= PNG_HAVE_IDAT; #undef PNG_HAVE_IDAT #endif png_set_packing(ping); /* Allocate memory. */ rowbytes=image->columns; if (image_depth > 8) rowbytes*=2; switch (ping_color_type) { case PNG_COLOR_TYPE_RGB: rowbytes*=3; break; case PNG_COLOR_TYPE_GRAY_ALPHA: rowbytes*=2; break; case PNG_COLOR_TYPE_RGBA: rowbytes*=4; break; default: break; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG image data"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocating %.20g bytes of memory for pixels",(double) rowbytes); } pixel_info=AcquireVirtualMemory(rowbytes,sizeof(*ping_pixels)); if (pixel_info == (MemoryInfo *) NULL) png_error(ping,"Allocation of memory for pixels failed"); ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); /* Initialize image scanlines. */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) png_error(ping,"Memory allocation for quantum_info failed"); quantum_info->format=UndefinedQuantumFormat; SetQuantumDepth(image,quantum_info,image_depth); (void) SetQuantumEndian(image,quantum_info,MSBEndian); num_passes=png_set_interlace_handling(ping); if ((!mng_info->write_png8 && !mng_info->write_png24 && !mng_info->write_png48 && !mng_info->write_png64 && !mng_info->write_png32) && (mng_info->IsPalette || (image_info->type == BilevelType)) && image_matte == MagickFalse && ping_have_non_bw == MagickFalse) { /* Palette, Bilevel, or Opaque Monochrome */ register const PixelPacket *p; SetQuantumDepth(image,quantum_info,8); for (pass=0; pass < num_passes; pass++) { /* Convert PseudoClass image to a PNG monochrome image. */ for (y=0; y < (ssize_t) image->rows; y++) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (0)"); p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; if (mng_info->IsPalette) { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_PALETTE && mng_info->write_png_depth && mng_info->write_png_depth != old_bit_depth) { /* Undo pixel scaling */ for (i=0; i < (ssize_t) image->columns; i++) *(ping_pixels+i)=(unsigned char) (*(ping_pixels+i) >> (8-old_bit_depth)); } } else { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); } if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE) for (i=0; i < (ssize_t) image->columns; i++) *(ping_pixels+i)=(unsigned char) ((*(ping_pixels+i) > 127) ? 
255 : 0); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (1)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } else /* Not Palette, Bilevel, or Opaque Monochrome */ { if ((!mng_info->write_png8 && !mng_info->write_png24 && !mng_info->write_png48 && !mng_info->write_png64 && !mng_info->write_png32) && (image_matte != MagickFalse || (ping_bit_depth >= MAGICKCORE_QUANTUM_DEPTH)) && (mng_info->IsPalette) && ping_have_color == MagickFalse) { register const PixelPacket *p; for (pass=0; pass < num_passes; pass++) { for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (mng_info->IsPalette) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY PNG pixels (2)"); } else /* PNG_COLOR_TYPE_GRAY_ALPHA */ { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (2)"); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels,&image->exception); } if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (2)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } else { register const PixelPacket *p; for (pass=0; pass < num_passes; pass++) { if ((image_depth > 8) || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (!mng_info->write_png8 && !mng_info->IsPalette)) { for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (image->storage_class == DirectClass) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); } else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels, &image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (3)"); } else if (image_matte != MagickFalse) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RGBAQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RGBQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (3)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } else /* not 
((image_depth > 8) || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (!mng_info->write_png8 && !mng_info->IsPalette)) */ { if ((ping_color_type != PNG_COLOR_TYPE_GRAY) && (ping_color_type != PNG_COLOR_TYPE_GRAY_ALPHA)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pass %d, Image Is not GRAY or GRAY_ALPHA",pass); SetQuantumDepth(image,quantum_info,8); image_depth=8; } for (y=0; y < (ssize_t) image->rows; y++) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pass %d, Image Is RGB, 16-bit GRAY, or GRAY_ALPHA",pass); p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { SetQuantumDepth(image,quantum_info,image->depth); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); } else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (4)"); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels, &image->exception); } else { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,IndexQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y <= 2) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of non-gray pixels (4)"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_pixels[0]=%d,ping_pixels[1]=%d", (int)ping_pixels[0],(int)ping_pixels[1]); } } png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } } } if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Wrote PNG image data"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Width: %.20g",(double) ping_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Height: %.20g",(double) ping_height); if (mng_info->write_png_depth) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:bit-depth: %d",mng_info->write_png_depth); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG bit-depth written: %d",ping_bit_depth); if (mng_info->write_png_colortype) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:color-type: %d",mng_info->write_png_colortype-1); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG color-type written: %d",ping_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG Interlace method: %d",ping_interlace_method); } /* Generate text chunks after IDAT. 
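 *
 * (tEXt and zTXt are legal both before and after the image data; by
 * deferring them until after IDAT the encoder can begin emitting pixel
 * rows without first collecting all of the metadata.)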
*/
  if (ping_exclude_tEXt == MagickFalse || ping_exclude_zTXt == MagickFalse)
    {
      ResetImagePropertyIterator(image);
      property=GetNextImageProperty(image);

      while (property != (const char *) NULL)
        {
          png_textp
            text;

          value=GetImageProperty(image,property);

          /* Don't write any "png:" or "jpeg:" properties; those are just for
           * "identify" or for passing through to another JPEG
           */
          if ((LocaleNCompare(property,"png:",4) != 0 &&
               LocaleNCompare(property,"jpeg:",5) != 0) &&

              /* Suppress density and units if we wrote a pHYs chunk
               * (both comparisons must fail for the property to be
               * suppressed, so they are joined with &&)
               */
              (ping_exclude_pHYs != MagickFalse ||
              (LocaleCompare(property,"density") != 0 &&
              LocaleCompare(property,"units") != 0)) &&

              /* Suppress the IM-generated Date:create and Date:modify */
              (ping_exclude_date == MagickFalse ||
              LocaleNCompare(property, "Date:",5) != 0))
            {
              if (value != (const char *) NULL)
                {
#if PNG_LIBPNG_VER >= 10400
                  text=(png_textp) png_malloc(ping,
                    (png_alloc_size_t) sizeof(png_text));
#else
                  text=(png_textp) png_malloc(ping,
                    (png_size_t) sizeof(png_text));
#endif
                  text[0].key=(char *) property;
                  text[0].text=(char *) value;
                  text[0].text_length=strlen(value);

                  if (ping_exclude_tEXt != MagickFalse)
                    text[0].compression=PNG_TEXT_COMPRESSION_zTXt;

                  else if (ping_exclude_zTXt != MagickFalse)
                    text[0].compression=PNG_TEXT_COMPRESSION_NONE;

                  else
                    {
                      text[0].compression=
                        image_info->compression == NoCompression ||
                        (image_info->compression == UndefinedCompression &&
                        text[0].text_length < 128) ?
                        PNG_TEXT_COMPRESSION_NONE : PNG_TEXT_COMPRESSION_zTXt;
                    }

                  if (logging != MagickFalse)
                    {
                      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                        " Setting up text chunk");

                      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                        " keyword: '%s'",text[0].key);
                    }

                  png_set_text(ping,ping_info,text,1);
                  png_free(ping,text);
                }
            }

          property=GetNextImageProperty(image);
        }
    }

  /* write eXIf profile */
  if (ping_have_eXIf != MagickFalse && ping_exclude_eXIf == MagickFalse)
    {
      char
        *name;

      ResetImageProfileIterator(image);

      for (name=GetNextImageProfile(image); name != (const char *) NULL; )
        {
          if (LocaleCompare(name,"exif") == 0)
            {
              const StringInfo
                *profile;

              profile=GetImageProfile(image,name);

              if (profile != (StringInfo *) NULL)
                {
                  png_uint_32
                    length;

                  unsigned char
                    chunk[4],
                    *data;

                  StringInfo
                    *ping_profile;

                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " Have eXIf profile");

                  ping_profile=CloneStringInfo(profile);
                  data=GetStringInfoDatum(ping_profile);
                  length=(png_uint_32) GetStringInfoLength(ping_profile);

                  PNGType(chunk,mng_eXIf);

                  if (length < 7)
                    {
                      ping_profile=DestroyStringInfo(ping_profile);
                      break;  /* otherwise crashes */
                    }

                  if (*data == 'E' && *(data+1) == 'x' && *(data+2) == 'i' &&
                      *(data+3) == 'f' && *(data+4) == '\0' &&
                      *(data+5) == '\0')
                    {
                      /* skip the "Exif\0\0" JFIF Exif Header ID */
                      length -= 6;
                      data += 6;
                    }

                  LogPNGChunk(logging,chunk,length);
                  (void) WriteBlobMSBULong(image,length);
                  (void) WriteBlob(image,4,chunk);
                  (void) WriteBlob(image,length,data);
                  (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4), data,
                    (uInt) length));
                  ping_profile=DestroyStringInfo(ping_profile);
                  break;
                }
            }

          name=GetNextImageProfile(image);
        }
    }

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Writing PNG end info");

  png_write_end(ping,ping_info);

  if (mng_info->need_fram && (int) image->dispose == BackgroundDispose)
    {
      if (mng_info->page.x || mng_info->page.y ||
          (ping_width != mng_info->page.width) ||
          (ping_height != mng_info->page.height))
        {
          unsigned char
            chunk[32];

          /* Write FRAM 4 with clipping boundaries followed by FRAM 1.
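 *
 * (Informal layout of the 27-byte FRAM payload assembled below:
 * framing mode (1 byte), empty frame name terminator (1), four change
 * flags (4), a 4-byte delay of zero, a clipping-delta type (1), and
 * four 4-byte clipping boundaries: left, right, top, bottom.)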
*/ (void) WriteBlobMSBULong(image,27L); /* data length=27 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,27L); chunk[4]=4; chunk[5]=0; /* frame name separator (no name) */ chunk[6]=1; /* flag for changing delay, for next frame only */ chunk[7]=0; /* flag for changing frame timeout */ chunk[8]=1; /* flag for changing frame clipping for next frame */ chunk[9]=0; /* flag for changing frame sync_id */ PNGLong(chunk+10,(png_uint_32) (0L)); /* temporary 0 delay */ chunk[14]=0; /* clipping boundaries delta type */ PNGLong(chunk+15,(png_uint_32) (mng_info->page.x)); /* left cb */ PNGLong(chunk+19, (png_uint_32) (mng_info->page.x + ping_width)); PNGLong(chunk+23,(png_uint_32) (mng_info->page.y)); /* top cb */ PNGLong(chunk+27, (png_uint_32) (mng_info->page.y + ping_height)); (void) WriteBlob(image,31,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,31)); mng_info->old_framing_mode=4; mng_info->framing_mode=1; } else mng_info->framing_mode=3; } if (mng_info->write_mng && !mng_info->need_fram && ((int) image->dispose == 3)) png_error(ping, "Cannot convert GIF with disposal method 3 to MNG-LC"); /* Free PNG resources. */ png_destroy_write_struct(&ping,&ping_info); pixel_info=RelinquishVirtualMemory(pixel_info); /* Store bit depth actually written */ s[0]=(char) ping_bit_depth; s[1]='\0'; (void) SetImageProperty(image,"png:bit-depth-written",s); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteOnePNGImage()"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif /* } for navigation to beginning of SETJMP-protected block. Revert to * Throwing an Exception when an error occurs. */ return(MagickTrue); /* End write one PNG image */ } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePNGImage() writes a Portable Network Graphics (PNG) or % Multiple-image Network Graphics (MNG) image file. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WritePNGImage method is: % % MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % % Returns MagickTrue on success, MagickFalse on failure. % % Communicating with the PNG encoder: % % While the datastream written is always in PNG format and normally would % be given the "png" file extension, this method also writes the following % pseudo-formats which are subsets of png: % % o PNG8: An 8-bit indexed PNG datastream is written. If the image has % a depth greater than 8, the depth is reduced. If transparency % is present, the tRNS chunk must only have values 0 and 255 % (i.e., transparency is binary: fully opaque or fully % transparent). If other values are present they will be % 50%-thresholded to binary transparency. If more than 256 % colors are present, they will be quantized to the 4-4-4-1, % 3-3-3-1, or 3-3-2-1 palette. The underlying RGB color % of any resulting fully-transparent pixels is changed to % the image's background color. % % If you want better quantization or dithering of the colors % or alpha than that, you need to do it before calling the % PNG encoder. The pixels contain 8-bit indices even if % they could be represented with 1, 2, or 4 bits. 
Grayscale % images will be written as indexed PNG files even though the % PNG grayscale type might be slightly more efficient. Please % note that writing to the PNG8 format may result in loss % of color and alpha data. % % o PNG24: An 8-bit per sample RGB PNG datastream is written. The tRNS % chunk can be present to convey binary transparency by naming % one of the colors as transparent. The only loss incurred % is reduction of sample depth to 8. If the image has more % than one transparent color, has semitransparent pixels, or % has an opaque pixel with the same RGB components as the % transparent color, an image is not written. % % o PNG32: An 8-bit per sample RGBA PNG is written. Partial % transparency is permitted, i.e., the alpha sample for % each pixel can have any value from 0 to 255. The alpha % channel is present even if the image is fully opaque. % The only loss in data is the reduction of the sample depth % to 8. % % o PNG48: A 16-bit per sample RGB PNG datastream is written. The tRNS % chunk can be present to convey binary transparency by naming % one of the colors as transparent. If the image has more % than one transparent color, has semitransparent pixels, or % has an opaque pixel with the same RGB components as the % transparent color, an image is not written. % % o PNG64: A 16-bit per sample RGBA PNG is written. Partial % transparency is permitted, i.e., the alpha sample for % each pixel can have any value from 0 to 65535. The alpha % channel is present even if the image is fully opaque. % % o PNG00: A PNG that inherits its colortype and bit-depth from the input % image, if the input was a PNG, is written. If these values % cannot be found, or if the pixels have been changed in a way % that makes this impossible, then "PNG00" falls back to the % regular "PNG" format. % % o -define: For more precise control of the PNG output, you can use the % Image options "png:bit-depth" and "png:color-type". These % can be set from the commandline with "-define" and also % from the application programming interfaces. The options % are case-independent and are converted to lowercase before % being passed to this encoder. % % png:color-type can be 0, 2, 3, 4, or 6. % % When png:color-type is 0 (Grayscale), png:bit-depth can % be 1, 2, 4, 8, or 16. % % When png:color-type is 2 (RGB), png:bit-depth can % be 8 or 16. % % When png:color-type is 3 (Indexed), png:bit-depth can % be 1, 2, 4, or 8. This refers to the number of bits % used to store the index. The color samples always have % bit-depth 8 in indexed PNG files. % % When png:color-type is 4 (Gray-Matte) or 6 (RGB-Matte), % png:bit-depth can be 8 or 16. % % If the image cannot be written without loss with the % requested bit-depth and color-type, a PNG file will not % be written, a warning will be issued, and the encoder will % return MagickFalse. % % Since image encoders should not be responsible for the "heavy lifting", % the user should make sure that ImageMagick has already reduced the % image depth and number of colors and limit transparency to binary % transparency prior to attempting to write the image with depth, color, % or transparency limitations. % % To do: Enforce the previous paragraph. % % Note that another definition, "png:bit-depth-written" exists, but it % is not intended for external use. It is only used internally by the % PNG encoder to inform the JNG encoder of the depth of the alpha channel. % % As of version 6.6.6 the following optimizations are always done: % % o 32-bit depth is reduced to 16. 
% o 16-bit depth is reduced to 8 if all pixels contain samples whose % high byte and low byte are identical. % o Palette is sorted to remove unused entries and to put a % transparent color first, if BUILD_PNG_PALETTE is defined. % o Opaque matte channel is removed when writing an indexed PNG. % o Grayscale images are reduced to 1, 2, or 4 bit depth if % this can be done without loss and a larger bit depth N was not % requested via the "-define png:bit-depth=N" option. % o If matte channel is present but only one transparent color is % present, RGB+tRNS is written instead of RGBA % o Opaque matte channel is removed (or added, if color-type 4 or 6 % was requested when converting an opaque image). % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% */ static MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) { MagickBooleanType excluding, logging, status; MngInfo *mng_info; const char *value; int source; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WritePNGImage()"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; mng_info->equal_backgrounds=MagickTrue; /* See if user has requested a specific PNG subformat */ mng_info->write_png8=LocaleCompare(image_info->magick,"PNG8") == 0; mng_info->write_png24=LocaleCompare(image_info->magick,"PNG24") == 0; mng_info->write_png32=LocaleCompare(image_info->magick,"PNG32") == 0; mng_info->write_png48=LocaleCompare(image_info->magick,"PNG48") == 0; mng_info->write_png64=LocaleCompare(image_info->magick,"PNG64") == 0; value=GetImageOption(image_info,"png:format"); if (value != (char *) NULL || LocaleCompare(image_info->magick,"PNG00") == 0) { mng_info->write_png8 = MagickFalse; mng_info->write_png24 = MagickFalse; mng_info->write_png32 = MagickFalse; mng_info->write_png48 = MagickFalse; mng_info->write_png64 = MagickFalse; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Format=%s",value); if (LocaleCompare(value,"png8") == 0) mng_info->write_png8 = MagickTrue; else if (LocaleCompare(value,"png24") == 0) mng_info->write_png24 = MagickTrue; else if (LocaleCompare(value,"png32") == 0) mng_info->write_png32 = MagickTrue; else if (LocaleCompare(value,"png48") == 0) mng_info->write_png48 = MagickTrue; else if (LocaleCompare(value,"png64") == 0) mng_info->write_png64 = MagickTrue; else if ((LocaleCompare(value,"png00") == 0) || LocaleCompare(image_info->magick,"PNG00") == 0) { /* Retrieve png:IHDR.bit-depth-orig and png:IHDR.color-type-orig */ value=GetImageProperty(image,"png:IHDR.bit-depth-orig"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png00 inherited bit depth=%s",value); if (value != (char *) NULL) { if (LocaleCompare(value,"1") == 0) mng_info->write_png_depth = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_depth = 2; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_depth = 4; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_depth = 8; else if (LocaleCompare(value,"16") == 0) mng_info->write_png_depth = 
16; } value=GetImageProperty(image,"png:IHDR.color-type-orig"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png00 inherited color type=%s",value); if (value != (char *) NULL) { if (LocaleCompare(value,"0") == 0) mng_info->write_png_colortype = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_colortype = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_colortype = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_colortype = 5; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_colortype = 7; } } } if (mng_info->write_png8) { mng_info->write_png_colortype = /* 3 */ 4; mng_info->write_png_depth = 8; image->depth = 8; } if (mng_info->write_png24) { mng_info->write_png_colortype = /* 2 */ 3; mng_info->write_png_depth = 8; image->depth = 8; if (image->matte != MagickFalse) (void) SetImageType(image,TrueColorMatteType); else (void) SetImageType(image,TrueColorType); (void) SyncImage(image); } if (mng_info->write_png32) { mng_info->write_png_colortype = /* 6 */ 7; mng_info->write_png_depth = 8; image->depth = 8; image->matte = MagickTrue; (void) SetImageType(image,TrueColorMatteType); (void) SyncImage(image); } if (mng_info->write_png48) { mng_info->write_png_colortype = /* 2 */ 3; mng_info->write_png_depth = 16; image->depth = 16; if (image->matte != MagickFalse) (void) SetImageType(image,TrueColorMatteType); else (void) SetImageType(image,TrueColorType); (void) SyncImage(image); } if (mng_info->write_png64) { mng_info->write_png_colortype = /* 6 */ 7; mng_info->write_png_depth = 16; image->depth = 16; image->matte = MagickTrue; (void) SetImageType(image,TrueColorMatteType); (void) SyncImage(image); } value=GetImageOption(image_info,"png:bit-depth"); if (value != (char *) NULL) { if (LocaleCompare(value,"1") == 0) mng_info->write_png_depth = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_depth = 2; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_depth = 4; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_depth = 8; else if (LocaleCompare(value,"16") == 0) mng_info->write_png_depth = 16; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:bit-depth", "=%s",value); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:bit-depth=%d was defined.\n",mng_info->write_png_depth); } value=GetImageOption(image_info,"png:color-type"); if (value != (char *) NULL) { /* We must store colortype+1 because 0 is a valid colortype */ if (LocaleCompare(value,"0") == 0) mng_info->write_png_colortype = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_colortype = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_colortype = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_colortype = 5; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_colortype = 7; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:color-type", "=%s",value); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:color-type=%d was defined.\n",mng_info->write_png_colortype-1); } /* Check for chunks to be excluded: * * The default is to not exclude any known chunks except for any * listed in the "unused_chunks" array, above. * * Chunks can be listed for exclusion via a "png:exclude-chunk" * define (in the image properties or in the image artifacts) * or via a mng_info member. 
For convenience, in addition * to or instead of a comma-separated list of chunks, the * "exclude-chunk" string can be simply "all" or "none". * * The exclude-chunk define takes priority over the mng_info. * * A "png:include-chunk" define takes priority over both the * mng_info and the "png:exclude-chunk" define. Like the * "exclude-chunk" string, it can define "all" or "none" as * well as a comma-separated list. Chunks that are unknown to * ImageMagick are always excluded, regardless of their "copy-safe" * status according to the PNG specification, and even if they * appear in the "include-chunk" list. Such defines appearing among * the image options take priority over those found among the image * artifacts. * * Finally, all chunks listed in the "unused_chunks" array are * automatically excluded, regardless of the other instructions * or lack thereof. * * if you exclude sRGB but not gAMA (recommended), then sRGB chunk * will not be written and the gAMA chunk will only be written if it * is not between .45 and .46, or approximately (1.0/2.2). * * If you exclude tRNS and the image has transparency, the colortype * is forced to be 4 or 6 (GRAY_ALPHA or RGB_ALPHA). * * The -strip option causes StripImage() to set the png:include-chunk * artifact to "none,trns,gama". */ mng_info->ping_exclude_bKGD=MagickFalse; mng_info->ping_exclude_caNv=MagickFalse; mng_info->ping_exclude_cHRM=MagickFalse; mng_info->ping_exclude_date=MagickFalse; mng_info->ping_exclude_eXIf=MagickFalse; mng_info->ping_exclude_EXIF=MagickFalse; /* hex-encoded EXIF in zTXt */ mng_info->ping_exclude_gAMA=MagickFalse; mng_info->ping_exclude_iCCP=MagickFalse; /* mng_info->ping_exclude_iTXt=MagickFalse; */ mng_info->ping_exclude_oFFs=MagickFalse; mng_info->ping_exclude_pHYs=MagickFalse; mng_info->ping_exclude_sRGB=MagickFalse; mng_info->ping_exclude_tEXt=MagickFalse; mng_info->ping_exclude_tIME=MagickFalse; mng_info->ping_exclude_tRNS=MagickFalse; mng_info->ping_exclude_zCCP=MagickFalse; /* hex-encoded iCCP in zTXt */ mng_info->ping_exclude_zTXt=MagickFalse; mng_info->ping_preserve_colormap=MagickFalse; value=GetImageOption(image_info,"png:preserve-colormap"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-colormap"); if (value != NULL) mng_info->ping_preserve_colormap=MagickTrue; mng_info->ping_preserve_iCCP=MagickFalse; value=GetImageOption(image_info,"png:preserve-iCCP"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-iCCP"); if (value != NULL) mng_info->ping_preserve_iCCP=MagickTrue; /* These compression-level, compression-strategy, and compression-filter * defines take precedence over values from the -quality option. */ value=GetImageOption(image_info,"png:compression-level"); if (value == NULL) value=GetImageArtifact(image,"png:compression-level"); if (value != NULL) { /* To do: use a "LocaleInteger:()" function here. */ /* We have to add 1 to everything because 0 is a valid input, * and we want to use 0 (the default) to mean undefined. 
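   *
   * For example, a command such as
   *
   *   convert input.png -define png:compression-level=9 output.png
   *
   * is stored internally as write_png_compression_level == 10.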
*/ if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_level = 1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_level = 2; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_level = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_compression_level = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_compression_level = 5; else if (LocaleCompare(value,"5") == 0) mng_info->write_png_compression_level = 6; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_compression_level = 7; else if (LocaleCompare(value,"7") == 0) mng_info->write_png_compression_level = 8; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_compression_level = 9; else if (LocaleCompare(value,"9") == 0) mng_info->write_png_compression_level = 10; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-level", "=%s",value); } value=GetImageOption(image_info,"png:compression-strategy"); if (value == NULL) value=GetImageArtifact(image,"png:compression-strategy"); if (value != NULL) { if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_strategy = Z_FILTERED+1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_strategy = Z_HUFFMAN_ONLY+1; else if (LocaleCompare(value,"3") == 0) #ifdef Z_RLE /* Z_RLE was added to zlib-1.2.0 */ mng_info->write_png_compression_strategy = Z_RLE+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif else if (LocaleCompare(value,"4") == 0) #ifdef Z_FIXED /* Z_FIXED was added to zlib-1.2.2.2 */ mng_info->write_png_compression_strategy = Z_FIXED+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-strategy", "=%s",value); } value=GetImageOption(image_info,"png:compression-filter"); if (value == NULL) value=GetImageArtifact(image,"png:compression-filter"); if (value != NULL) { /* To do: combinations of filters allowed by libpng * masks 0x08 through 0xf8 * * Implement this as a comma-separated list of 0,1,2,3,4,5 * where 5 is a special case meaning PNG_ALL_FILTERS. 
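   *
   * For example, "-define png:compression-filter=5" requests
   * PNG_ALL_FILTERS and is stored internally as
   * write_png_compression_filter == 6.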
*/ if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_filter = 1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_filter = 2; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_filter = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_compression_filter = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_compression_filter = 5; else if (LocaleCompare(value,"5") == 0) mng_info->write_png_compression_filter = 6; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-filter", "=%s",value); } for (source=0; source<8; source++) { value = NULL; if (source == 0) value=GetImageOption(image_info,"png:exclude-chunks"); if (source == 1) value=GetImageArtifact(image,"png:exclude-chunks"); if (source == 2) value=GetImageOption(image_info,"png:exclude-chunk"); if (source == 3) value=GetImageArtifact(image,"png:exclude-chunk"); if (source == 4) value=GetImageOption(image_info,"png:include-chunks"); if (source == 5) value=GetImageArtifact(image,"png:include-chunks"); if (source == 6) value=GetImageOption(image_info,"png:include-chunk"); if (source == 7) value=GetImageArtifact(image,"png:include-chunk"); if (value == NULL) continue; if (source < 4) excluding = MagickTrue; else excluding = MagickFalse; if (logging != MagickFalse) { if (source == 0 || source == 2) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:exclude-chunk=%s found in image options.\n", value); else if (source == 1 || source == 3) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:exclude-chunk=%s found in image artifacts.\n", value); else if (source == 4 || source == 6) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:include-chunk=%s found in image options.\n", value); else /* if (source == 5 || source == 7) */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:include-chunk=%s found in image artifacts.\n", value); } if (IsOptionMember("all",value) != MagickFalse) { mng_info->ping_exclude_bKGD=excluding; mng_info->ping_exclude_caNv=excluding; mng_info->ping_exclude_cHRM=excluding; mng_info->ping_exclude_date=excluding; mng_info->ping_exclude_EXIF=excluding; mng_info->ping_exclude_eXIf=excluding; mng_info->ping_exclude_gAMA=excluding; mng_info->ping_exclude_iCCP=excluding; /* mng_info->ping_exclude_iTXt=excluding; */ mng_info->ping_exclude_oFFs=excluding; mng_info->ping_exclude_pHYs=excluding; mng_info->ping_exclude_sRGB=excluding; mng_info->ping_exclude_tIME=excluding; mng_info->ping_exclude_tEXt=excluding; mng_info->ping_exclude_tRNS=excluding; mng_info->ping_exclude_zCCP=excluding; mng_info->ping_exclude_zTXt=excluding; } if (IsOptionMember("none",value) != MagickFalse) { mng_info->ping_exclude_bKGD=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_caNv=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_cHRM=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_date=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_eXIf=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_EXIF=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_gAMA=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_iCCP=excluding != MagickFalse ? MagickFalse : MagickTrue; /* mng_info->ping_exclude_iTXt=!excluding; */ mng_info->ping_exclude_oFFs=excluding != MagickFalse ? 
MagickFalse : MagickTrue; mng_info->ping_exclude_pHYs=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_sRGB=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tEXt=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tIME=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tRNS=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_zCCP=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_zTXt=excluding != MagickFalse ? MagickFalse : MagickTrue; } if (IsOptionMember("bkgd",value) != MagickFalse) mng_info->ping_exclude_bKGD=excluding; if (IsOptionMember("caNv",value) != MagickFalse) mng_info->ping_exclude_caNv=excluding; if (IsOptionMember("chrm",value) != MagickFalse) mng_info->ping_exclude_cHRM=excluding; if (IsOptionMember("date",value) != MagickFalse) mng_info->ping_exclude_date=excluding; if (IsOptionMember("exif",value) != MagickFalse) { mng_info->ping_exclude_EXIF=excluding; mng_info->ping_exclude_eXIf=excluding; } if (IsOptionMember("gama",value) != MagickFalse) mng_info->ping_exclude_gAMA=excluding; if (IsOptionMember("iccp",value) != MagickFalse) mng_info->ping_exclude_iCCP=excluding; #if 0 if (IsOptionMember("itxt",value) != MagickFalse) mng_info->ping_exclude_iTXt=excluding; #endif if (IsOptionMember("offs",value) != MagickFalse) mng_info->ping_exclude_oFFs=excluding; if (IsOptionMember("phys",value) != MagickFalse) mng_info->ping_exclude_pHYs=excluding; if (IsOptionMember("srgb",value) != MagickFalse) mng_info->ping_exclude_sRGB=excluding; if (IsOptionMember("text",value) != MagickFalse) mng_info->ping_exclude_tEXt=excluding; if (IsOptionMember("time",value) != MagickFalse) mng_info->ping_exclude_tIME=excluding; if (IsOptionMember("trns",value) != MagickFalse) mng_info->ping_exclude_tRNS=excluding; if (IsOptionMember("zccp",value) != MagickFalse) mng_info->ping_exclude_zCCP=excluding; if (IsOptionMember("ztxt",value) != MagickFalse) mng_info->ping_exclude_zTXt=excluding; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Chunks to be excluded from the output png:"); if (mng_info->ping_exclude_bKGD != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " bKGD"); if (mng_info->ping_exclude_caNv != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " caNv"); if (mng_info->ping_exclude_cHRM != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " cHRM"); if (mng_info->ping_exclude_date != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " date"); if (mng_info->ping_exclude_EXIF != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " EXIF"); if (mng_info->ping_exclude_eXIf != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " eXIf"); if (mng_info->ping_exclude_gAMA != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " gAMA"); if (mng_info->ping_exclude_iCCP != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " iCCP"); #if 0 if (mng_info->ping_exclude_iTXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " iTXt"); #endif if (mng_info->ping_exclude_oFFs != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " oFFs"); if (mng_info->ping_exclude_pHYs != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pHYs"); if (mng_info->ping_exclude_sRGB != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " sRGB"); if 
(mng_info->ping_exclude_tEXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tEXt"); if (mng_info->ping_exclude_tIME != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tIME"); if (mng_info->ping_exclude_tRNS != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tRNS"); if (mng_info->ping_exclude_zCCP != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " zCCP"); if (mng_info->ping_exclude_zTXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " zTXt"); } mng_info->need_blob = MagickTrue; status=WriteOnePNGImage(mng_info,image_info,image); (void) CloseBlob(image); mng_info=MngInfoFreeStruct(mng_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit WritePNGImage()"); return(status); } #if defined(JNG_SUPPORTED) /* Write one JNG image */ static MagickBooleanType WriteOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info,Image *image) { Image *jpeg_image; ImageInfo *jpeg_image_info; int unique_filenames; MagickBooleanType logging, status; size_t length; unsigned char *blob, chunk[80], *p; unsigned int jng_alpha_compression_method, jng_alpha_sample_depth, jng_color_type, transparent; size_t jng_alpha_quality, jng_quality; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter WriteOneJNGImage()"); blob=(unsigned char *) NULL; jpeg_image=(Image *) NULL; jpeg_image_info=(ImageInfo *) NULL; length=0; unique_filenames=0; status=MagickTrue; transparent=image_info->type==GrayscaleMatteType || image_info->type==TrueColorMatteType || image->matte != MagickFalse; jng_alpha_sample_depth = 0; jng_quality=image_info->quality == 0UL ? 75UL : image_info->quality%1000; jng_alpha_compression_method=image->compression==JPEGCompression? 8 : 0; jng_alpha_quality=image_info->quality == 0UL ? 
75UL : image_info->quality; if (jng_alpha_quality >= 1000) jng_alpha_quality /= 1000; if (transparent != 0) { jng_color_type=14; /* Create JPEG blob, image, and image_info */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image_info for opacity."); jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info); if (jpeg_image_info == (ImageInfo *) NULL) { jpeg_image_info=DestroyImageInfo(jpeg_image_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image."); jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image == (Image *) NULL) { jpeg_image_info=DestroyImageInfo(jpeg_image_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); status=SeparateImageChannel(jpeg_image,OpacityChannel); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); status=NegateImage(jpeg_image,MagickFalse); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); jpeg_image->matte=MagickFalse; jpeg_image_info->type=GrayscaleType; jpeg_image->quality=jng_alpha_quality; jpeg_image_info->type=GrayscaleType; (void) SetImageType(jpeg_image,GrayscaleType); (void) AcquireUniqueFilename(jpeg_image->filename); unique_filenames++; (void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent, "%s",jpeg_image->filename); } else { jng_alpha_compression_method=0; jng_color_type=10; jng_alpha_sample_depth=0; } /* To do: check bit depth of PNG alpha channel */ /* Check if image is grayscale. */ if (image_info->type != TrueColorMatteType && image_info->type != TrueColorType && SetImageGray(image,&image->exception)) jng_color_type-=2; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Quality = %d",(int) jng_quality); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Color Type = %d",jng_color_type); if (transparent != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Compression = %d",jng_alpha_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Depth = %d",jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Quality = %d",(int) jng_alpha_quality); } } if (transparent != 0) { if (jng_alpha_compression_method==0) { const char *value; /* Encode opacity as a grayscale PNG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating PNG blob for alpha."); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); length=0; (void) CopyMagickString(jpeg_image_info->magick,"PNG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"PNG",MaxTextExtent); jpeg_image_info->interlace=NoInterlace; /* Exclude all ancillary chunks */ (void) SetImageArtifact(jpeg_image,"png:exclude-chunks","all"); blob=ImageToBlob(jpeg_image_info,jpeg_image,&length, &image->exception); /* Retrieve sample depth used */ value=GetImageProperty(jpeg_image,"png:bit-depth-written"); if (value != (char *) NULL) jng_alpha_sample_depth= (unsigned int) value[0]; } else { /* Encode opacity as a grayscale JPEG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating JPEG blob for alpha."); 
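          /* The opacity channel was separated into jpeg_image and negated
           * above; here it is compressed as a grayscale JPEG and is later
           * wrapped in a JDAA chunk. */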
status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); jpeg_image_info->interlace=NoInterlace; blob=ImageToBlob(jpeg_image_info,jpeg_image,&length, &image->exception); jng_alpha_sample_depth=8; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Successfully read jpeg_image into a blob, length=%.20g.", (double) length); } /* Destroy JPEG image and image_info */ jpeg_image=DestroyImage(jpeg_image); (void) RelinquishUniqueFileResource(jpeg_image_info->filename); unique_filenames--; jpeg_image_info=DestroyImageInfo(jpeg_image_info); } /* Write JHDR chunk */ (void) WriteBlobMSBULong(image,16L); /* chunk data length=16 */ PNGType(chunk,mng_JHDR); LogPNGChunk(logging,mng_JHDR,16L); PNGLong(chunk+4,(png_uint_32) image->columns); PNGLong(chunk+8,(png_uint_32) image->rows); chunk[12]=jng_color_type; chunk[13]=8; /* sample depth */ chunk[14]=8; /*jng_image_compression_method */ chunk[15]=(unsigned char) (image_info->interlace == NoInterlace ? 0 : 8); chunk[16]=jng_alpha_sample_depth; chunk[17]=jng_alpha_compression_method; chunk[18]=0; /*jng_alpha_filter_method */ chunk[19]=0; /*jng_alpha_interlace_method */ (void) WriteBlob(image,20,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,20)); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG width:%15lu",(unsigned long) image->columns); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG height:%14lu",(unsigned long) image->rows); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG color type:%10d",jng_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG sample depth:%8d",8); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG compression:%9d",8); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG interlace:%11d",0); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha depth:%9d",jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha compression:%3d",jng_alpha_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha filter:%8d",0); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha interlace:%5d",0); } /* Write leading ancillary chunks */ if (transparent != 0) { /* Write JNG bKGD chunk */ unsigned char blue, green, red; ssize_t num_bytes; if (jng_color_type == 8 || jng_color_type == 12) num_bytes=6L; else num_bytes=10L; (void) WriteBlobMSBULong(image,(size_t) (num_bytes-4L)); PNGType(chunk,mng_bKGD); LogPNGChunk(logging,mng_bKGD,(size_t) (num_bytes-4L)); red=ScaleQuantumToChar(image->background_color.red); green=ScaleQuantumToChar(image->background_color.green); blue=ScaleQuantumToChar(image->background_color.blue); *(chunk+4)=0; *(chunk+5)=red; *(chunk+6)=0; *(chunk+7)=green; *(chunk+8)=0; *(chunk+9)=blue; (void) WriteBlob(image,(size_t) num_bytes,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) num_bytes)); } if ((image->colorspace == sRGBColorspace || image->rendering_intent)) { /* Write JNG sRGB chunk */ (void) WriteBlobMSBULong(image,1L); PNGType(chunk,mng_sRGB); LogPNGChunk(logging,mng_sRGB,1L); if (image->rendering_intent != UndefinedIntent) chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (image->rendering_intent)); else chunk[4]=(unsigned char) 
Magick_RenderingIntent_to_PNG_RenderingIntent( (PerceptualIntent)); (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); } else { if (image->gamma != 0.0) { /* Write JNG gAMA chunk */ (void) WriteBlobMSBULong(image,4L); PNGType(chunk,mng_gAMA); LogPNGChunk(logging,mng_gAMA,4L); PNGLong(chunk+4,(png_uint_32) (100000*image->gamma+0.5)); (void) WriteBlob(image,8,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,8)); } if ((mng_info->equal_chrms == MagickFalse) && (image->chromaticity.red_primary.x != 0.0)) { PrimaryInfo primary; /* Write JNG cHRM chunk */ (void) WriteBlobMSBULong(image,32L); PNGType(chunk,mng_cHRM); LogPNGChunk(logging,mng_cHRM,32L); primary=image->chromaticity.white_point; PNGLong(chunk+4,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+8,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.red_primary; PNGLong(chunk+12,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+16,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.green_primary; PNGLong(chunk+20,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+24,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.blue_primary; PNGLong(chunk+28,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+32,(png_uint_32) (100000*primary.y+0.5)); (void) WriteBlob(image,36,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,36)); } } if (image->x_resolution && image->y_resolution && !mng_info->equal_physs) { /* Write JNG pHYs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_pHYs); LogPNGChunk(logging,mng_pHYs,9L); if (image->units == PixelsPerInchResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0/2.54+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0/2.54+0.5)); chunk[12]=1; } else { if (image->units == PixelsPerCentimeterResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0+0.5)); chunk[12]=1; } else { PNGLong(chunk+4,(png_uint_32) (image->x_resolution+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution+0.5)); chunk[12]=0; } } (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } if (mng_info->write_mng == 0 && (image->page.x || image->page.y)) { /* Write JNG oFFs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_oFFs); LogPNGChunk(logging,mng_oFFs,9L); PNGsLong(chunk+4,(ssize_t) (image->page.x)); PNGsLong(chunk+8,(ssize_t) (image->page.y)); chunk[12]=0; (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } if (transparent != 0) { if (jng_alpha_compression_method==0) { register ssize_t i; size_t len; /* Write IDAT chunk header */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write IDAT chunks from blob, length=%.20g.",(double) length); /* Copy IDAT chunks */ len=0; p=blob+8; for (i=8; i<(ssize_t) length; i+=len+12) { len=(((unsigned int) *(p ) & 0xff) << 24) + (((unsigned int) *(p + 1) & 0xff) << 16) + (((unsigned int) *(p + 2) & 0xff) << 8) + (((unsigned int) *(p + 3) & 0xff) ) ; p+=4; if (*(p)==73 && *(p+1)==68 && *(p+2)==65 && *(p+3)==84) /* IDAT */ { /* Found an IDAT chunk. 
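                 * Each PNG chunk is length(4)+type(4)+data+CRC(4); the
                 * copy below rewrites the length and recomputes the CRC
                 * over the type and data bytes.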
*/ (void) WriteBlobMSBULong(image,len); LogPNGChunk(logging,mng_IDAT,len); (void) WriteBlob(image,len+4,p); (void) WriteBlobMSBULong(image,crc32(0,p,(uInt) len+4)); } else { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping %c%c%c%c chunk, length=%.20g.", *(p),*(p+1),*(p+2),*(p+3),(double) len); } p+=(8+len); } } else if (length != 0) { /* Write JDAA chunk header */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write JDAA chunk, length=%.20g.",(double) length); (void) WriteBlobMSBULong(image,(size_t) length); PNGType(chunk,mng_JDAA); LogPNGChunk(logging,mng_JDAA,length); /* Write JDAT chunk(s) data */ (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,blob); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob, (uInt) length)); } blob=(unsigned char *) RelinquishMagickMemory(blob); } /* Encode image as a JPEG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image_info."); jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info); if (jpeg_image_info == (ImageInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image."); jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image == (Image *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); (void) AcquireUniqueFilename(jpeg_image->filename); unique_filenames++; (void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent,"%s", jpeg_image->filename); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Created jpeg_image, %.20g x %.20g.",(double) jpeg_image->columns, (double) jpeg_image->rows); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (jng_color_type == 8 || jng_color_type == 12) jpeg_image_info->type=GrayscaleType; jpeg_image_info->quality=jng_quality; jpeg_image->quality=jng_quality; (void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating blob."); blob=ImageToBlob(jpeg_image_info,jpeg_image,&length,&image->exception); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Successfully read jpeg_image into a blob, length=%.20g.", (double) length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write JDAT chunk, length=%.20g.",(double) length); } /* Write JDAT chunk(s) */ (void) WriteBlobMSBULong(image,(size_t) length); PNGType(chunk,mng_JDAT); LogPNGChunk(logging,mng_JDAT,length); (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,blob); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob,(uInt) length)); jpeg_image=DestroyImage(jpeg_image); (void) RelinquishUniqueFileResource(jpeg_image_info->filename); unique_filenames--; jpeg_image_info=DestroyImageInfo(jpeg_image_info); blob=(unsigned char *) RelinquishMagickMemory(blob); /* Write IEND chunk */ (void) WriteBlobMSBULong(image,0L); PNGType(chunk,mng_IEND); LogPNGChunk(logging,mng_IEND,0); (void) WriteBlob(image,4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,4)); if (logging != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteOneJNGImage(); unique_filenames=%d",unique_filenames); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e J N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteJNGImage() writes a JPEG Network Graphics (JNG) image file. % % JNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WriteJNGImage method is: % % MagickBooleanType WriteJNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% */ static MagickBooleanType WriteJNGImage(const ImageInfo *image_info,Image *image) { MagickBooleanType logging, status; MngInfo *mng_info; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WriteJNGImage()"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); if ((image->columns > 65535UL) || (image->rows > 65535UL)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; (void) WriteBlob(image,8,(const unsigned char *) "\213JNG\r\n\032\n"); status=WriteOneJNGImage(mng_info,image_info,image); mng_info=MngInfoFreeStruct(mng_info); (void) CloseBlob(image); (void) CatchImageException(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteJNGImage()"); return(status); } #endif static MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) { const char *option; Image *next_image; MagickBooleanType status; volatile MagickBooleanType logging; MngInfo *mng_info; int image_count, need_iterations, need_matte; volatile int #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) need_local_plte, #endif all_images_are_gray, need_defi, use_global_plte; register ssize_t i; unsigned char chunk[800]; volatile unsigned int write_jng, write_mng; volatile size_t scene; size_t final_delay=0, initial_delay, imageListLength; #if (PNG_LIBPNG_VER < 10200) if (image_info->verbose) printf("Your PNG library (libpng-%s) is rather old.\n", PNG_LIBPNG_VER_STRING); #endif /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WriteMNGImage()"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); /* Allocate a MngInfo structure. 
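   * The structure is zeroed below and is released through
   * MngInfoFreeStruct() on every exit path.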
*/ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; write_mng=LocaleCompare(image_info->magick,"MNG") == 0; /* * See if user has requested a specific PNG subformat to be used * for all of the PNGs in the MNG being written, e.g., * * convert *.png png8:animation.mng * * To do: check -define png:bit_depth and png:color_type as well, * or perhaps use mng:bit_depth and mng:color_type instead for * global settings. */ mng_info->write_png8=LocaleCompare(image_info->magick,"PNG8") == 0; mng_info->write_png24=LocaleCompare(image_info->magick,"PNG24") == 0; mng_info->write_png32=LocaleCompare(image_info->magick,"PNG32") == 0; write_jng=MagickFalse; if (image_info->compression == JPEGCompression) write_jng=MagickTrue; mng_info->adjoin=image_info->adjoin && (GetNextImageInList(image) != (Image *) NULL) && write_mng; if (logging != MagickFalse) { /* Log some info about the input */ Image *p; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Checking input image(s)\n" " Image_info depth: %.20g, Type: %d", (double) image_info->depth, image_info->type); scene=0; for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scene: %.20g\n, Image depth: %.20g", (double) scene++, (double) p->depth); if (p->matte) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Matte: True"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Matte: False"); if (p->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Storage class: PseudoClass"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Storage class: DirectClass"); if (p->colors) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %.20g",(double) p->colors); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: unspecified"); if (mng_info->adjoin == MagickFalse) break; } } use_global_plte=MagickFalse; all_images_are_gray=MagickFalse; #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED need_local_plte=MagickTrue; #endif need_defi=MagickFalse; need_matte=MagickFalse; mng_info->framing_mode=1; mng_info->old_framing_mode=1; if (write_mng) if (image_info->page != (char *) NULL) { /* Determine image bounding box. */ SetGeometry(image,&mng_info->page); (void) ParseMetaGeometry(image_info->page,&mng_info->page.x, &mng_info->page.y,&mng_info->page.width,&mng_info->page.height); } if (write_mng) { unsigned int need_geom; unsigned short red, green, blue; mng_info->page=image->page; need_geom=MagickTrue; if (mng_info->page.width || mng_info->page.height) need_geom=MagickFalse; /* Check all the scenes. 
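     * The loop below sizes the MNG frame, decides whether DEFI and FRAM
     * chunks will be needed, and tests whether gamma, cHRM, pHYs, sRGB,
     * the background color, and the palettes are shared by all frames.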
*/ initial_delay=image->delay; need_iterations=MagickFalse; mng_info->equal_chrms=image->chromaticity.red_primary.x != 0.0; mng_info->equal_physs=MagickTrue, mng_info->equal_gammas=MagickTrue; mng_info->equal_srgbs=MagickTrue; mng_info->equal_backgrounds=MagickTrue; image_count=0; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) all_images_are_gray=MagickTrue; mng_info->equal_palettes=MagickFalse; need_local_plte=MagickFalse; #endif for (next_image=image; next_image != (Image *) NULL; ) { if (need_geom) { if ((next_image->columns+next_image->page.x) > mng_info->page.width) mng_info->page.width=next_image->columns+next_image->page.x; if ((next_image->rows+next_image->page.y) > mng_info->page.height) mng_info->page.height=next_image->rows+next_image->page.y; } if (next_image->page.x || next_image->page.y) need_defi=MagickTrue; if (next_image->matte) need_matte=MagickTrue; if ((int) next_image->dispose >= BackgroundDispose) if (next_image->matte || next_image->page.x || next_image->page.y || ((next_image->columns < mng_info->page.width) && (next_image->rows < mng_info->page.height))) mng_info->need_fram=MagickTrue; if (next_image->iterations) need_iterations=MagickTrue; final_delay=next_image->delay; if (final_delay != initial_delay || final_delay > 1UL* next_image->ticks_per_second) mng_info->need_fram=1; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) /* check for global palette possibility. */ if (image->matte != MagickFalse) need_local_plte=MagickTrue; if (need_local_plte == 0) { if (SetImageGray(image,&image->exception) == MagickFalse) all_images_are_gray=MagickFalse; mng_info->equal_palettes=PalettesAreEqual(image,next_image); if (use_global_plte == 0) use_global_plte=mng_info->equal_palettes; need_local_plte=!mng_info->equal_palettes; } #endif if (GetNextImageInList(next_image) != (Image *) NULL) { if (next_image->background_color.red != next_image->next->background_color.red || next_image->background_color.green != next_image->next->background_color.green || next_image->background_color.blue != next_image->next->background_color.blue) mng_info->equal_backgrounds=MagickFalse; if (next_image->gamma != next_image->next->gamma) mng_info->equal_gammas=MagickFalse; if (next_image->rendering_intent != next_image->next->rendering_intent) mng_info->equal_srgbs=MagickFalse; if ((next_image->units != next_image->next->units) || (next_image->x_resolution != next_image->next->x_resolution) || (next_image->y_resolution != next_image->next->y_resolution)) mng_info->equal_physs=MagickFalse; if (mng_info->equal_chrms) { if (next_image->chromaticity.red_primary.x != next_image->next->chromaticity.red_primary.x || next_image->chromaticity.red_primary.y != next_image->next->chromaticity.red_primary.y || next_image->chromaticity.green_primary.x != next_image->next->chromaticity.green_primary.x || next_image->chromaticity.green_primary.y != next_image->next->chromaticity.green_primary.y || next_image->chromaticity.blue_primary.x != next_image->next->chromaticity.blue_primary.x || next_image->chromaticity.blue_primary.y != next_image->next->chromaticity.blue_primary.y || next_image->chromaticity.white_point.x != next_image->next->chromaticity.white_point.x || next_image->chromaticity.white_point.y != next_image->next->chromaticity.white_point.y) mng_info->equal_chrms=MagickFalse; } } image_count++; next_image=GetNextImageInList(next_image); } if (image_count < 2) { mng_info->equal_backgrounds=MagickFalse; 
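      /* A single image shares nothing across frames; clear the remaining
         sharing flags as well. */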
mng_info->equal_chrms=MagickFalse; mng_info->equal_gammas=MagickFalse; mng_info->equal_srgbs=MagickFalse; mng_info->equal_physs=MagickFalse; use_global_plte=MagickFalse; #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED need_local_plte=MagickTrue; #endif need_iterations=MagickFalse; } if (mng_info->need_fram == MagickFalse) { /* Only certain framing rates 100/n are exactly representable without the FRAM chunk but we'll allow some slop in VLC files */ if (final_delay == 0) { if (need_iterations != MagickFalse) { /* It's probably a GIF with loop; don't run it *too* fast. */ if (mng_info->adjoin) { final_delay=10; (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "input has zero delay between all frames; assuming", " 10 cs `%s'",""); } } else mng_info->ticks_per_second=0; } if (final_delay != 0) mng_info->ticks_per_second=(png_uint_32) (image->ticks_per_second/final_delay); if (final_delay > 50) mng_info->ticks_per_second=2; if (final_delay > 75) mng_info->ticks_per_second=1; if (final_delay > 125) mng_info->need_fram=MagickTrue; if (need_defi && final_delay > 2 && (final_delay != 4) && (final_delay != 5) && (final_delay != 10) && (final_delay != 20) && (final_delay != 25) && (final_delay != 50) && (final_delay != (size_t) image->ticks_per_second)) mng_info->need_fram=MagickTrue; /* make it exact; cannot be VLC */ } if (mng_info->need_fram != MagickFalse) mng_info->ticks_per_second=1UL*image->ticks_per_second; /* If pseudocolor, we should also check to see if all the palettes are identical and write a global PLTE if they are. ../glennrp Feb 99. */ /* Write the MNG version 1.0 signature and MHDR chunk. */ (void) WriteBlob(image,8,(const unsigned char *) "\212MNG\r\n\032\n"); (void) WriteBlobMSBULong(image,28L); /* chunk data length=28 */ PNGType(chunk,mng_MHDR); LogPNGChunk(logging,mng_MHDR,28L); PNGLong(chunk+4,(png_uint_32) mng_info->page.width); PNGLong(chunk+8,(png_uint_32) mng_info->page.height); PNGLong(chunk+12,mng_info->ticks_per_second); PNGLong(chunk+16,0L); /* layer count=unknown */ PNGLong(chunk+20,0L); /* frame count=unknown */ PNGLong(chunk+24,0L); /* play time=unknown */ if (write_jng) { if (need_matte) { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,27L); /* simplicity=LC+JNG */ else PNGLong(chunk+28,25L); /* simplicity=VLC+JNG */ } else { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,19L); /* simplicity=LC+JNG, no transparency */ else PNGLong(chunk+28,17L); /* simplicity=VLC+JNG, no transparency */ } } else { if (need_matte) { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,11L); /* simplicity=LC */ else PNGLong(chunk+28,9L); /* simplicity=VLC */ } else { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,3L); /* simplicity=LC, no transparency */ else PNGLong(chunk+28,1L); /* simplicity=VLC, no transparency */ } } (void) WriteBlob(image,32,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,32)); option=GetImageOption(image_info,"mng:need-cacheoff"); if (option != (const char *) NULL) { size_t length; /* Write "nEED CACHEOFF" to turn playback caching off for streaming MNG. 
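       * This can be requested with the "mng:need-cacheoff" define, e.g.
       * (illustrative): convert in.gif -define mng:need-cacheoff out.mng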
*/ PNGType(chunk,mng_nEED); length=CopyMagickString((char *) chunk+4,"CACHEOFF",20); (void) WriteBlobMSBULong(image,(size_t) length); LogPNGChunk(logging,mng_nEED,(size_t) length); length+=4; (void) WriteBlob(image,length,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) length)); } if ((GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) != (Image *) NULL) && (image->iterations != 1)) { /* Write MNG TERM chunk */ (void) WriteBlobMSBULong(image,10L); /* data length=10 */ PNGType(chunk,mng_TERM); LogPNGChunk(logging,mng_TERM,10L); chunk[4]=3; /* repeat animation */ chunk[5]=0; /* show last frame when done */ PNGLong(chunk+6,(png_uint_32) (mng_info->ticks_per_second* final_delay/MagickMax(image->ticks_per_second,1))); if (image->iterations == 0) PNGLong(chunk+10,PNG_UINT_31_MAX); else PNGLong(chunk+10,(png_uint_32) image->iterations); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " TERM delay: %.20g",(double) (mng_info->ticks_per_second* final_delay/MagickMax(image->ticks_per_second,1))); if (image->iterations == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " TERM iterations: %.20g",(double) PNG_UINT_31_MAX); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image iterations: %.20g",(double) image->iterations); } (void) WriteBlob(image,14,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,14)); } /* To do: check for cHRM+gAMA == sRGB, and write sRGB instead. */ if ((image->colorspace == sRGBColorspace || image->rendering_intent) && mng_info->equal_srgbs) { /* Write MNG sRGB chunk */ (void) WriteBlobMSBULong(image,1L); PNGType(chunk,mng_sRGB); LogPNGChunk(logging,mng_sRGB,1L); if (image->rendering_intent != UndefinedIntent) chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (image->rendering_intent)); else chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (PerceptualIntent)); (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); mng_info->have_write_global_srgb=MagickTrue; } else { if (image->gamma && mng_info->equal_gammas) { /* Write MNG gAMA chunk */ (void) WriteBlobMSBULong(image,4L); PNGType(chunk,mng_gAMA); LogPNGChunk(logging,mng_gAMA,4L); PNGLong(chunk+4,(png_uint_32) (100000*image->gamma+0.5)); (void) WriteBlob(image,8,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,8)); mng_info->have_write_global_gama=MagickTrue; } if (mng_info->equal_chrms) { PrimaryInfo primary; /* Write MNG cHRM chunk */ (void) WriteBlobMSBULong(image,32L); PNGType(chunk,mng_cHRM); LogPNGChunk(logging,mng_cHRM,32L); primary=image->chromaticity.white_point; PNGLong(chunk+4,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+8,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.red_primary; PNGLong(chunk+12,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+16,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.green_primary; PNGLong(chunk+20,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+24,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.blue_primary; PNGLong(chunk+28,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+32,(png_uint_32) (100000*primary.y+0.5)); (void) WriteBlob(image,36,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,36)); mng_info->have_write_global_chrm=MagickTrue; } } if (image->x_resolution && image->y_resolution && mng_info->equal_physs) { /* Write MNG pHYs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_pHYs); LogPNGChunk(logging,mng_pHYs,9L); if 
(image->units == PixelsPerInchResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0/2.54+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0/2.54+0.5)); chunk[12]=1; } else { if (image->units == PixelsPerCentimeterResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0+0.5)); chunk[12]=1; } else { PNGLong(chunk+4,(png_uint_32) (image->x_resolution+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution+0.5)); chunk[12]=0; } } (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } /* Write MNG BACK chunk and global bKGD chunk, if the image is transparent or does not cover the entire frame. */ if (write_mng && (image->matte || image->page.x > 0 || image->page.y > 0 || (image->page.width && (image->page.width+image->page.x < mng_info->page.width)) || (image->page.height && (image->page.height+image->page.y < mng_info->page.height)))) { (void) WriteBlobMSBULong(image,6L); PNGType(chunk,mng_BACK); LogPNGChunk(logging,mng_BACK,6L); red=ScaleQuantumToShort(image->background_color.red); green=ScaleQuantumToShort(image->background_color.green); blue=ScaleQuantumToShort(image->background_color.blue); PNGShort(chunk+4,red); PNGShort(chunk+6,green); PNGShort(chunk+8,blue); (void) WriteBlob(image,10,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,10)); if (mng_info->equal_backgrounds) { (void) WriteBlobMSBULong(image,6L); PNGType(chunk,mng_bKGD); LogPNGChunk(logging,mng_bKGD,6L); (void) WriteBlob(image,10,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,10)); } } #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED if ((need_local_plte == MagickFalse) && (image->storage_class == PseudoClass) && (all_images_are_gray == MagickFalse)) { size_t data_length; /* Write MNG PLTE chunk */ data_length=3*image->colors; (void) WriteBlobMSBULong(image,data_length); PNGType(chunk,mng_PLTE); LogPNGChunk(logging,mng_PLTE,data_length); for (i=0; i < (ssize_t) image->colors; i++) { chunk[4+i*3]=ScaleQuantumToChar(image->colormap[i].red) & 0xff; chunk[5+i*3]=ScaleQuantumToChar(image->colormap[i].green) & 0xff; chunk[6+i*3]=ScaleQuantumToChar(image->colormap[i].blue) & 0xff; } (void) WriteBlob(image,data_length+4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) (data_length+4))); mng_info->have_write_global_plte=MagickTrue; } #endif } scene=0; mng_info->delay=0; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) mng_info->equal_palettes=MagickFalse; #endif imageListLength=GetImageListLength(image); do { if (mng_info->adjoin) { #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) /* If we aren't using a global palette for the entire MNG, check to see if we can use one for two or more consecutive images. 
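         * PalettesAreEqual() below compares this image's colormap with
         * that of the next image in the list.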
*/ if (need_local_plte && use_global_plte && !all_images_are_gray) { if (mng_info->IsPalette) { /* When equal_palettes is true, this image has the same palette as the previous PseudoClass image */ mng_info->have_write_global_plte=mng_info->equal_palettes; mng_info->equal_palettes=PalettesAreEqual(image,image->next); if (mng_info->equal_palettes && !mng_info->have_write_global_plte) { /* Write MNG PLTE chunk */ size_t data_length; data_length=3*image->colors; (void) WriteBlobMSBULong(image,data_length); PNGType(chunk,mng_PLTE); LogPNGChunk(logging,mng_PLTE,data_length); for (i=0; i < (ssize_t) image->colors; i++) { chunk[4+i*3]=ScaleQuantumToChar(image->colormap[i].red); chunk[5+i*3]=ScaleQuantumToChar(image->colormap[i].green); chunk[6+i*3]=ScaleQuantumToChar(image->colormap[i].blue); } (void) WriteBlob(image,data_length+4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk, (uInt) (data_length+4))); mng_info->have_write_global_plte=MagickTrue; } } else mng_info->have_write_global_plte=MagickFalse; } #endif if (need_defi) { ssize_t previous_x, previous_y; if (scene != 0) { previous_x=mng_info->page.x; previous_y=mng_info->page.y; } else { previous_x=0; previous_y=0; } mng_info->page=image->page; if ((mng_info->page.x != previous_x) || (mng_info->page.y != previous_y)) { (void) WriteBlobMSBULong(image,12L); /* data length=12 */ PNGType(chunk,mng_DEFI); LogPNGChunk(logging,mng_DEFI,12L); chunk[4]=0; /* object 0 MSB */ chunk[5]=0; /* object 0 LSB */ chunk[6]=0; /* visible */ chunk[7]=0; /* abstract */ PNGLong(chunk+8,(png_uint_32) mng_info->page.x); PNGLong(chunk+12,(png_uint_32) mng_info->page.y); (void) WriteBlob(image,16,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,16)); } } } mng_info->write_mng=write_mng; if ((int) image->dispose >= 3) mng_info->framing_mode=3; if (mng_info->need_fram && mng_info->adjoin && ((image->delay != mng_info->delay) || (mng_info->framing_mode != mng_info->old_framing_mode))) { if (image->delay == mng_info->delay) { /* Write a MNG FRAM chunk with the new framing mode. */ (void) WriteBlobMSBULong(image,1L); /* data length=1 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,1L); chunk[4]=(unsigned char) mng_info->framing_mode; (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); } else { /* Write a MNG FRAM chunk with the delay. */ (void) WriteBlobMSBULong(image,10L); /* data length=10 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,10L); chunk[4]=(unsigned char) mng_info->framing_mode; chunk[5]=0; /* frame name separator (no name) */ chunk[6]=2; /* flag for changing default delay */ chunk[7]=0; /* flag for changing frame timeout */ chunk[8]=0; /* flag for changing frame clipping */ chunk[9]=0; /* flag for changing frame sync_id */ PNGLong(chunk+10,(png_uint_32) ((mng_info->ticks_per_second* image->delay)/MagickMax(image->ticks_per_second,1))); (void) WriteBlob(image,14,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,14)); mng_info->delay=(png_uint_32) image->delay; } mng_info->old_framing_mode=mng_info->framing_mode; } #if defined(JNG_SUPPORTED) if (image_info->compression == JPEGCompression) { ImageInfo *write_info; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing JNG object."); /* To do: specify the desired alpha compression method. 
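           * For now, image_info is cloned with compression reset to
           * UndefinedCompression so that WriteOneJNGImage() chooses the
           * alpha compression method itself.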
*/ write_info=CloneImageInfo(image_info); write_info->compression=UndefinedCompression; status=WriteOneJNGImage(mng_info,write_info,image); write_info=DestroyImageInfo(write_info); } else #endif { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG object."); mng_info->need_blob = MagickFalse; mng_info->ping_preserve_colormap = MagickFalse; /* We don't want any ancillary chunks written */ mng_info->ping_exclude_bKGD=MagickTrue; mng_info->ping_exclude_caNv=MagickTrue; mng_info->ping_exclude_cHRM=MagickTrue; mng_info->ping_exclude_date=MagickTrue; mng_info->ping_exclude_EXIF=MagickTrue; mng_info->ping_exclude_eXIf=MagickTrue; mng_info->ping_exclude_gAMA=MagickTrue; mng_info->ping_exclude_iCCP=MagickTrue; /* mng_info->ping_exclude_iTXt=MagickTrue; */ mng_info->ping_exclude_oFFs=MagickTrue; mng_info->ping_exclude_pHYs=MagickTrue; mng_info->ping_exclude_sRGB=MagickTrue; mng_info->ping_exclude_tEXt=MagickTrue; mng_info->ping_exclude_tRNS=MagickTrue; mng_info->ping_exclude_zCCP=MagickTrue; mng_info->ping_exclude_zTXt=MagickTrue; status=WriteOnePNGImage(mng_info,image_info,image); } if (status == MagickFalse) { mng_info=MngInfoFreeStruct(mng_info); (void) CloseBlob(image); return(MagickFalse); } (void) CatchImageException(image); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (mng_info->adjoin); if (write_mng) { while (GetPreviousImageInList(image) != (Image *) NULL) image=GetPreviousImageInList(image); /* Write the MEND chunk. */ (void) WriteBlobMSBULong(image,0x00000000L); PNGType(chunk,mng_MEND); LogPNGChunk(logging,mng_MEND,0L); (void) WriteBlob(image,4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,4)); } /* Relinquish resources. */ (void) CloseBlob(image); mng_info=MngInfoFreeStruct(mng_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit WriteMNGImage()"); return(MagickTrue); } #else /* PNG_LIBPNG_VER > 10011 */ static MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) { (void) image; printf("Your PNG library is too old: You have libpng-%s\n", PNG_LIBPNG_VER_STRING); ThrowBinaryException(CoderError,"PNG library is too old", image_info->filename); } static MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) { return(WritePNGImage(image_info,image)); } #endif /* PNG_LIBPNG_VER > 10011 */ #endif
./CrossVul/dataset_final_sorted/CWE-617/c/bad_370_0
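/*
 * A minimal, self-contained sketch (not ImageMagick code) of the chunk-writing
 * pattern the MNG writer above repeats for BACK, bKGD, PLTE, DEFI, FRAM and
 * MEND: a PNG/MNG chunk is a 4-byte big-endian data length, a 4-byte type,
 * the data bytes, and a CRC-32 computed over the type and data only (the
 * length field is excluded). In the coder, chunk[0..3] holds the type and the
 * data starts at chunk+4, which is why the CRC call covers data_length+4
 * bytes. write_be32() and emit_chunk() are illustrative names, not part of
 * the coder.
 */
#include <stdint.h>
#include <string.h>
#include <zlib.h>          /* crc32() */

static void write_be32(unsigned char *p, uint32_t v)
{
  p[0] = (unsigned char) ((v >> 24) & 0xff);
  p[1] = (unsigned char) ((v >> 16) & 0xff);
  p[2] = (unsigned char) ((v >> 8) & 0xff);
  p[3] = (unsigned char) (v & 0xff);
}

/* Serialize one chunk into 'out'; returns the number of bytes written. */
static size_t emit_chunk(unsigned char *out, const char type[4],
                         const unsigned char *data, uint32_t data_length)
{
  write_be32(out, data_length);                 /* length of data only */
  memcpy(out + 4, type, 4);                     /* chunk type */
  if (data_length)
    memcpy(out + 8, data, data_length);         /* payload */
  write_be32(out + 8 + data_length,             /* CRC over type+data */
             (uint32_t) crc32(0, out + 4, data_length + 4));
  return 12 + data_length;
}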
crossvul-cpp_data_good_1770_0
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "trace.h" #include <stdio.h> #include <unistd.h> #include <errno.h> #include <signal.h> #include <sys/stat.h> #include <fcntl.h> #include <time.h> #include <libgen.h> #include <assert.h> #include <sys/utsname.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/socket.h> #include <sys/select.h> #include <sys/time.h> #include <sys/ioctl.h> #include <arpa/inet.h> #include <netinet/if_ether.h> #include <pwd.h> #include <grp.h> static void usage(void); static struct protocol protos[] = { { LLDPD_MODE_LLDP, 1, "LLDP", 'l', lldp_send, lldp_decode, NULL, LLDP_MULTICAST_ADDR }, #ifdef ENABLE_CDP { LLDPD_MODE_CDPV1, 0, "CDPv1", 'c', cdpv1_send, cdp_decode, cdpv1_guess, CDP_MULTICAST_ADDR }, { LLDPD_MODE_CDPV2, 0, "CDPv2", 'c', cdpv2_send, cdp_decode, cdpv2_guess, CDP_MULTICAST_ADDR }, #endif #ifdef ENABLE_SONMP { LLDPD_MODE_SONMP, 0, "SONMP", 's', sonmp_send, sonmp_decode, NULL, SONMP_MULTICAST_ADDR }, #endif #ifdef ENABLE_EDP { LLDPD_MODE_EDP, 0, "EDP", 'e', edp_send, edp_decode, NULL, EDP_MULTICAST_ADDR }, #endif #ifdef ENABLE_FDP { LLDPD_MODE_FDP, 0, "FDP", 'f', fdp_send, cdp_decode, NULL, FDP_MULTICAST_ADDR }, #endif { 0, 0, "any", ' ', NULL, NULL, NULL, {0,0,0,0,0,0} } }; static char **saved_argv; #ifdef HAVE___PROGNAME extern const char *__progname; #else # define __progname "lldpd" #endif static void usage(void) { fprintf(stderr, "Usage: %s [OPTIONS ...]\n", __progname); fprintf(stderr, "Version: %s\n", PACKAGE_STRING); fprintf(stderr, "\n"); fprintf(stderr, "-d Do not daemonize.\n"); fprintf(stderr, "-r Receive-only mode\n"); fprintf(stderr, "-i Disable LLDP-MED inventory TLV transmission.\n"); fprintf(stderr, "-k Disable advertising of kernel release, version, machine.\n"); fprintf(stderr, "-S descr Override the default system description.\n"); fprintf(stderr, "-P name Override the default hardware platform.\n"); fprintf(stderr, "-m IP Specify the IPv4 management addresses of this system.\n"); fprintf(stderr, "-u file Specify the Unix-domain socket used for communication with lldpctl(8).\n"); fprintf(stderr, "-H mode Specify the behaviour when detecting multiple neighbors.\n"); fprintf(stderr, "-I iface Limit interfaces to use.\n"); #ifdef ENABLE_LLDPMED fprintf(stderr, "-M class Enable emission of LLDP-MED frame. 
'class' should be one of:\n"); fprintf(stderr, " 1 Generic Endpoint (Class I)\n"); fprintf(stderr, " 2 Media Endpoint (Class II)\n"); fprintf(stderr, " 3 Communication Device Endpoints (Class III)\n"); fprintf(stderr, " 4 Network Connectivity Device\n"); #endif #ifdef USE_SNMP fprintf(stderr, "-x Enable SNMP subagent.\n"); #endif fprintf(stderr, "\n"); #if defined ENABLE_CDP || defined ENABLE_EDP || defined ENABLE_FDP || defined ENABLE_SONMP fprintf(stderr, "Additional protocol support.\n"); #ifdef ENABLE_CDP fprintf(stderr, "-c Enable the support of CDP protocol. (Cisco)\n"); #endif #ifdef ENABLE_EDP fprintf(stderr, "-e Enable the support of EDP protocol. (Extreme)\n"); #endif #ifdef ENABLE_FDP fprintf(stderr, "-f Enable the support of FDP protocol. (Foundry)\n"); #endif #ifdef ENABLE_SONMP fprintf(stderr, "-s Enable the support of SONMP protocol. (Nortel)\n"); #endif fprintf(stderr, "\n"); #endif fprintf(stderr, "see manual page lldpd(8) for more information\n"); exit(1); } struct lldpd_hardware * lldpd_get_hardware(struct lldpd *cfg, char *name, int index, struct lldpd_ops *ops) { struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { if ((strcmp(hardware->h_ifname, name) == 0) && (hardware->h_ifindex == index) && ((!ops) || (ops == hardware->h_ops))) break; } return hardware; } struct lldpd_hardware * lldpd_alloc_hardware(struct lldpd *cfg, char *name, int index) { struct lldpd_hardware *hardware; log_debug("alloc", "allocate a new local port (%s)", name); if ((hardware = (struct lldpd_hardware *) calloc(1, sizeof(struct lldpd_hardware))) == NULL) return NULL; hardware->h_cfg = cfg; strlcpy(hardware->h_ifname, name, sizeof(hardware->h_ifname)); hardware->h_ifindex = index; hardware->h_lport.p_chassis = LOCAL_CHASSIS(cfg); hardware->h_lport.p_chassis->c_refcount++; TAILQ_INIT(&hardware->h_rports); #ifdef ENABLE_LLDPMED if (LOCAL_CHASSIS(cfg)->c_med_cap_available) { hardware->h_lport.p_med_cap_enabled = LLDP_MED_CAP_CAP; if (!cfg->g_config.c_noinventory) hardware->h_lport.p_med_cap_enabled |= LLDP_MED_CAP_IV; } #endif #ifdef ENABLE_DOT1 TAILQ_INIT(&hardware->h_lport.p_vlans); TAILQ_INIT(&hardware->h_lport.p_ppvids); TAILQ_INIT(&hardware->h_lport.p_pids); #endif #ifdef ENABLE_CUSTOM TAILQ_INIT(&hardware->h_lport.p_custom_list); #endif levent_hardware_init(hardware); return hardware; } struct lldpd_mgmt * lldpd_alloc_mgmt(int family, void *addrptr, size_t addrsize, u_int32_t iface) { struct lldpd_mgmt *mgmt; log_debug("alloc", "allocate a new management address (family: %d)", family); if (family <= LLDPD_AF_UNSPEC || family >= LLDPD_AF_LAST) { errno = EAFNOSUPPORT; return NULL; } if (addrsize > LLDPD_MGMT_MAXADDRSIZE) { errno = EOVERFLOW; return NULL; } mgmt = calloc(1, sizeof(struct lldpd_mgmt)); if (mgmt == NULL) { errno = ENOMEM; return NULL; } mgmt->m_family = family; memcpy(&mgmt->m_addr, addrptr, addrsize); mgmt->m_addrsize = addrsize; mgmt->m_iface = iface; return mgmt; } void lldpd_hardware_cleanup(struct lldpd *cfg, struct lldpd_hardware *hardware) { log_debug("alloc", "cleanup hardware port %s", hardware->h_ifname); free(hardware->h_lport_previous); free(hardware->h_lchassis_previous_id); free(hardware->h_lport_previous_id); lldpd_port_cleanup(&hardware->h_lport, 1); if (hardware->h_ops && hardware->h_ops->cleanup) hardware->h_ops->cleanup(cfg, hardware); levent_hardware_release(hardware); free(hardware); } static void lldpd_display_neighbors(struct lldpd *cfg) { if (!cfg->g_config.c_set_ifdescr) return; struct lldpd_hardware *hardware; 
TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { struct lldpd_port *port; char *description; const char *neighbor = NULL; unsigned neighbors = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (SMART_HIDDEN(port)) continue; neighbors++; neighbor = port->p_chassis->c_name; } if (neighbors == 0) priv_iface_description(hardware->h_ifname, ""); else if (neighbors == 1 && neighbor && *neighbor != '\0') { if (asprintf(&description, "%s", neighbor) != -1) { priv_iface_description(hardware->h_ifname, description); free(description); } } else { if (asprintf(&description, "%d neighbor%s", neighbors, (neighbors > 1)?"s":"") != -1) { priv_iface_description(hardware->h_ifname, description); free(description); } } } } static void lldpd_count_neighbors(struct lldpd *cfg) { #if HAVE_SETPROCTITLE struct lldpd_chassis *chassis; const char *neighbor; unsigned neighbors = 0; TAILQ_FOREACH(chassis, &cfg->g_chassis, c_entries) { neighbors++; neighbor = chassis->c_name; } neighbors--; if (neighbors == 0) setproctitle("no neighbor"); else if (neighbors == 1 && neighbor && *neighbor != '\0') setproctitle("connected to %s", neighbor); else setproctitle("%d neighbor%s", neighbors, (neighbors > 1)?"s":""); #endif lldpd_display_neighbors(cfg); } static void notify_clients_deletion(struct lldpd_hardware *hardware, struct lldpd_port *rport) { TRACE(LLDPD_NEIGHBOR_DELETE(hardware->h_ifname, rport->p_chassis->c_name, rport->p_descr)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_DELETED, rport); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_DELETED, rport); #endif } static void lldpd_reset_timer(struct lldpd *cfg) { /* Reset timer for ports that have been changed. */ struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { /* We keep a flat copy of the local port to see if there is any * change. To do this, we zero out fields that are not * significant, marshal the port, then restore. 
*/ struct lldpd_port *port = &hardware->h_lport; u_int8_t *output = NULL; ssize_t output_len; char save[LLDPD_PORT_START_MARKER]; memcpy(save, port, sizeof(save)); /* coverity[suspicious_sizeof] We intentionally partially memset port */ memset(port, 0, sizeof(save)); output_len = lldpd_port_serialize(port, (void**)&output); memcpy(port, save, sizeof(save)); if (output_len == -1) { log_warnx("localchassis", "unable to serialize local port %s to check for differences", hardware->h_ifname); continue; } /* Compare with the previous value */ if (hardware->h_lport_previous && output_len == hardware->h_lport_previous_len && !memcmp(output, hardware->h_lport_previous, output_len)) { log_debug("localchassis", "no change detected for port %s", hardware->h_ifname); } else { log_debug("localchassis", "change detected for port %s, resetting its timer", hardware->h_ifname); levent_schedule_pdu(hardware); } /* Update the value */ free(hardware->h_lport_previous); hardware->h_lport_previous = output; hardware->h_lport_previous_len = output_len; } } void lldpd_cleanup(struct lldpd *cfg) { struct lldpd_hardware *hardware, *hardware_next; struct lldpd_chassis *chassis, *chassis_next; log_debug("localchassis", "cleanup all ports"); for (hardware = TAILQ_FIRST(&cfg->g_hardware); hardware != NULL; hardware = hardware_next) { hardware_next = TAILQ_NEXT(hardware, h_entries); if (!hardware->h_flags) { TRACE(LLDPD_INTERFACES_DELETE(hardware->h_ifname)); TAILQ_REMOVE(&cfg->g_hardware, hardware, h_entries); lldpd_remote_cleanup(hardware, notify_clients_deletion, 1); lldpd_hardware_cleanup(cfg, hardware); } else lldpd_remote_cleanup(hardware, notify_clients_deletion, 0); } log_debug("localchassis", "cleanup all chassis"); for (chassis = TAILQ_FIRST(&cfg->g_chassis); chassis; chassis = chassis_next) { chassis_next = TAILQ_NEXT(chassis, c_entries); if (chassis->c_refcount == 0) { TAILQ_REMOVE(&cfg->g_chassis, chassis, c_entries); lldpd_chassis_cleanup(chassis, 1); } } lldpd_count_neighbors(cfg); levent_schedule_cleanup(cfg); } /* Update chassis `ochassis' with values from `chassis'. The latter one is not expected to be part of a list! It will also be wiped from memory. */ static void lldpd_move_chassis(struct lldpd_chassis *ochassis, struct lldpd_chassis *chassis) { struct lldpd_mgmt *mgmt, *mgmt_next; /* We want to keep refcount, index and list stuff from the current * chassis */ TAILQ_ENTRY(lldpd_chassis) entries; int refcount = ochassis->c_refcount; int index = ochassis->c_index; memcpy(&entries, &ochassis->c_entries, sizeof(entries)); lldpd_chassis_cleanup(ochassis, 0); /* Make the copy. */ /* WARNING: this is a kludgy hack, we need in-place copy and cannot use * marshaling.
*/ memcpy(ochassis, chassis, sizeof(struct lldpd_chassis)); TAILQ_INIT(&ochassis->c_mgmt); /* Copy of management addresses */ for (mgmt = TAILQ_FIRST(&chassis->c_mgmt); mgmt != NULL; mgmt = mgmt_next) { mgmt_next = TAILQ_NEXT(mgmt, m_entries); TAILQ_REMOVE(&chassis->c_mgmt, mgmt, m_entries); TAILQ_INSERT_TAIL(&ochassis->c_mgmt, mgmt, m_entries); } /* Restore saved values */ ochassis->c_refcount = refcount; ochassis->c_index = index; memcpy(&ochassis->c_entries, &entries, sizeof(entries)); /* Get rid of the new chassis */ free(chassis); } static int lldpd_guess_type(struct lldpd *cfg, char *frame, int s) { int i; if (s < ETHER_ADDR_LEN) return -1; for (i=0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; if (cfg->g_protocols[i].guess == NULL) { if (memcmp(frame, cfg->g_protocols[i].mac, ETHER_ADDR_LEN) == 0) { log_debug("decode", "guessed protocol is %s (from MAC address)", cfg->g_protocols[i].name); return cfg->g_protocols[i].mode; } } else { if (cfg->g_protocols[i].guess(frame, s)) { log_debug("decode", "guessed protocol is %s (from detector function)", cfg->g_protocols[i].name); return cfg->g_protocols[i].mode; } } } return -1; } static void lldpd_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware) { int i; struct lldpd_chassis *chassis, *ochassis = NULL; struct lldpd_port *port, *oport = NULL, *aport; int guess = LLDPD_MODE_LLDP; log_debug("decode", "decode a received frame on %s", hardware->h_ifname); if (s < sizeof(struct ether_header) + 4) /* Too short, just discard it */ return; /* Decapsulate VLAN frames */ struct ether_header eheader; memcpy(&eheader, frame, sizeof(struct ether_header)); if (eheader.ether_type == htons(ETHERTYPE_VLAN)) { /* VLAN decapsulation means to shift 4 bytes left the frame from * offset 2*ETHER_ADDR_LEN */ memmove(frame + 2*ETHER_ADDR_LEN, frame + 2*ETHER_ADDR_LEN + 4, s - 2*ETHER_ADDR_LEN); s -= 4; } TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if ((oport->p_lastframe != NULL) && (oport->p_lastframe->size == s) && (memcmp(oport->p_lastframe->frame, frame, s) == 0)) { /* Already received the same frame */ log_debug("decode", "duplicate frame, no need to decode"); oport->p_lastupdate = time(NULL); return; } } guess = lldpd_guess_type(cfg, frame, s); for (i=0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; if (cfg->g_protocols[i].mode == guess) { log_debug("decode", "using decode function for %s protocol", cfg->g_protocols[i].name); if (cfg->g_protocols[i].decode(cfg, frame, s, hardware, &chassis, &port) == -1) { log_debug("decode", "function for %s protocol did not decode this frame", cfg->g_protocols[i].name); return; } chassis->c_protocol = port->p_protocol = cfg->g_protocols[i].mode; break; } } if (cfg->g_protocols[i].mode == 0) { log_debug("decode", "unable to guess frame type on %s", hardware->h_ifname); return; } TRACE(LLDPD_FRAME_DECODED( hardware->h_ifname, cfg->g_protocols[i].name, chassis->c_name, port->p_descr)); /* Do we already have the same MSAP somewhere? 
*/ int count = 0; log_debug("decode", "search for the same MSAP"); TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if (port->p_protocol == oport->p_protocol) { count++; if ((port->p_id_subtype == oport->p_id_subtype) && (port->p_id_len == oport->p_id_len) && (memcmp(port->p_id, oport->p_id, port->p_id_len) == 0) && (chassis->c_id_subtype == oport->p_chassis->c_id_subtype) && (chassis->c_id_len == oport->p_chassis->c_id_len) && (memcmp(chassis->c_id, oport->p_chassis->c_id, chassis->c_id_len) == 0)) { ochassis = oport->p_chassis; log_debug("decode", "MSAP is already known"); break; } } } /* Do we have room for a new MSAP? */ if (!oport && cfg->g_config.c_max_neighbors) { if (count == (cfg->g_config.c_max_neighbors - 1)) { log_debug("decode", "max neighbors %d reached for port %s, " "dropping any new ones silently", cfg->g_config.c_max_neighbors, hardware->h_ifname); } else if (count > cfg->g_config.c_max_neighbors - 1) { log_debug("decode", "too many neighbors for port %s, drop this new one", hardware->h_ifname); lldpd_port_cleanup(port, 1); lldpd_chassis_cleanup(chassis, 1); free(port); return; } } /* No, but do we already know the system? */ if (!oport) { log_debug("decode", "MSAP is unknown, search for the chassis"); TAILQ_FOREACH(ochassis, &cfg->g_chassis, c_entries) { if ((chassis->c_protocol == ochassis->c_protocol) && (chassis->c_id_subtype == ochassis->c_id_subtype) && (chassis->c_id_len == ochassis->c_id_len) && (memcmp(chassis->c_id, ochassis->c_id, chassis->c_id_len) == 0)) break; } } if (oport) { /* The port is known, remove it before adding it back */ TAILQ_REMOVE(&hardware->h_rports, oport, p_entries); lldpd_port_cleanup(oport, 1); free(oport); } if (ochassis) { lldpd_move_chassis(ochassis, chassis); chassis = ochassis; } else { /* Chassis not known, add it */ log_debug("decode", "unknown chassis, add it to the list"); chassis->c_index = ++cfg->g_lastrid; chassis->c_refcount = 0; TAILQ_INSERT_TAIL(&cfg->g_chassis, chassis, c_entries); i = 0; TAILQ_FOREACH(ochassis, &cfg->g_chassis, c_entries) i++; log_debug("decode", "%d different systems are known", i); } /* Add port */ port->p_lastchange = port->p_lastupdate = time(NULL); if ((port->p_lastframe = (struct lldpd_frame *)malloc(s + sizeof(struct lldpd_frame))) != NULL) { port->p_lastframe->size = s; memcpy(port->p_lastframe->frame, frame, s); } TAILQ_INSERT_TAIL(&hardware->h_rports, port, p_entries); port->p_chassis = chassis; port->p_chassis->c_refcount++; /* Several cases are possible : 1. chassis is new, its refcount was 0. It is now attached to this port, its refcount is 1. 2. chassis already exists and was attached to another port, we increase its refcount accordingly. 3. chassis already exists and was attached to the same port, its refcount was decreased with lldpd_port_cleanup() and is now increased again. In all cases, if the port already existed, it has been freed with lldpd_port_cleanup() and therefore, the refcount of the chassis that was attached to it is decreased. 
*/ /* coverity[use_after_free] TAILQ_REMOVE does the right thing */ i = 0; TAILQ_FOREACH(aport, &hardware->h_rports, p_entries) i++; log_debug("decode", "%d neighbors for %s", i, hardware->h_ifname); if (!oport) hardware->h_insert_cnt++; /* Notify */ log_debug("decode", "send notifications for changes on %s", hardware->h_ifname); if (oport) { TRACE(LLDPD_NEIGHBOR_UPDATE(hardware->h_ifname, chassis->c_name, port->p_descr, i)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_UPDATED, port); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_UPDATED, port); #endif } else { TRACE(LLDPD_NEIGHBOR_NEW(hardware->h_ifname, chassis->c_name, port->p_descr, i)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_ADDED, port); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_ADDED, port); #endif } #ifdef ENABLE_LLDPMED if (!oport && port->p_chassis->c_med_type) { /* New neighbor, fast start */ if (hardware->h_cfg->g_config.c_enable_fast_start && !hardware->h_tx_fast) { log_debug("decode", "%s: entering fast start due to " "new neighbor", hardware->h_ifname); hardware->h_tx_fast = hardware->h_cfg->g_config.c_tx_fast_init; } levent_schedule_pdu(hardware); } #endif return; } /* Get the output of lsb_release -s -d. This is a slow function. It should be called once. It returns NULL if any problem happens. Otherwise, this is a statically allocated buffer. The result includes the trailing \n */ static char * lldpd_get_lsb_release() { static char release[1024]; char *const command[] = { "lsb_release", "-s", "-d", NULL }; int pid, status, devnull, count; int pipefd[2]; log_debug("localchassis", "grab LSB release"); if (pipe(pipefd)) { log_warn("localchassis", "unable to get a pair of pipes"); return NULL; } pid = vfork(); switch (pid) { case -1: log_warn("localchassis", "unable to fork"); return NULL; case 0: /* Child, exec lsb_release */ close(pipefd[0]); if ((devnull = open("/dev/null", O_RDWR, 0)) != -1) { dup2(devnull, STDIN_FILENO); dup2(devnull, STDERR_FILENO); dup2(pipefd[1], STDOUT_FILENO); if (devnull > 2) close(devnull); if (pipefd[1] > 2) close(pipefd[1]); execvp("lsb_release", command); } _exit(127); break; default: /* Father, read the output from the children */ close(pipefd[1]); count = 0; do { status = read(pipefd[0], release+count, sizeof(release)-count); if ((status == -1) && (errno == EINTR)) continue; if (status > 0) count += status; } while (count < sizeof(release) && (status > 0)); if (status < 0) { log_info("localchassis", "unable to read from lsb_release"); close(pipefd[0]); waitpid(pid, &status, 0); return NULL; } close(pipefd[0]); if (count >= sizeof(release)) { log_info("localchassis", "output of lsb_release is too large"); waitpid(pid, &status, 0); return NULL; } status = -1; if (waitpid(pid, &status, 0) != pid) return NULL; if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) { log_info("localchassis", "lsb_release information not available"); return NULL; } if (!count) { log_info("localchassis", "lsb_release returned an empty string"); return NULL; } release[count] = '\0'; return release; } /* Should not be here */ return NULL; } /* Same as lldpd_get_lsb_release but reads /etc/os-release for PRETTY_NAME=.
*/ static char * lldpd_get_os_release() { static char release[1024]; char line[1024]; char *key, *val; char *ptr1 = release; log_debug("localchassis", "grab OS release"); FILE *fp = fopen("/etc/os-release", "r"); if (!fp) { log_debug("localchassis", "could not open /etc/os-release"); fp = fopen("/usr/lib/os-release", "r"); } if (!fp) { log_info("localchassis", "could not open either /etc/os-release or /usr/lib/os-release"); return NULL; } while ((fgets(line, sizeof(line), fp) != NULL)) { key = strtok(line, "="); val = strtok(NULL, "="); if (strncmp(key, "PRETTY_NAME", sizeof(line)) == 0) { strlcpy(release, val, sizeof(line)); break; } } fclose(fp); /* Remove trailing newline and all " in the string. */ ptr1 = release + strlen(release) - 1; while (ptr1 != release && ((*ptr1 == '"') || (*ptr1 == '\n'))) { *ptr1 = '\0'; ptr1--; } if (release[0] == '"') return release+1; return release; } static void lldpd_hide_ports(struct lldpd *cfg, struct lldpd_hardware *hardware, int mask) { struct lldpd_port *port; int protocols[LLDPD_MODE_MAX+1]; char buffer[256]; int i, j, k, found; unsigned int min; log_debug("smartfilter", "apply smart filter for port %s", hardware->h_ifname); /* Compute the number of occurrences of each protocol */ for (i = 0; i <= LLDPD_MODE_MAX; i++) protocols[i] = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) protocols[port->p_protocol]++; /* Turn the protocols[] array into an array of enabled/disabled protocols. 1 means enabled, 0 means disabled. */ min = (unsigned int)-1; for (i = 0; i <= LLDPD_MODE_MAX; i++) if (protocols[i] && (protocols[i] < min)) min = protocols[i]; found = 0; for (i = 0; i <= LLDPD_MODE_MAX; i++) if ((protocols[i] == min) && !found) { /* If we need a tie breaker, we take the first protocol only */ if (cfg->g_config.c_smart & mask & (SMART_OUTGOING_ONE_PROTO | SMART_INCOMING_ONE_PROTO)) found = 1; protocols[i] = 1; } else protocols[i] = 0; /* We set the p_hidden flag to 1 if the protocol is disabled */ TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (mask == SMART_OUTGOING) port->p_hidden_out = protocols[port->p_protocol]?0:1; else port->p_hidden_in = protocols[port->p_protocol]?0:1; } /* If we want only one neighbor, we take the first one */ if (cfg->g_config.c_smart & mask & (SMART_OUTGOING_ONE_NEIGH | SMART_INCOMING_ONE_NEIGH)) { found = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (mask == SMART_OUTGOING) { if (found) port->p_hidden_out = 1; if (!port->p_hidden_out) found = 1; } if (mask == SMART_INCOMING) { if (found) port->p_hidden_in = 1; if (!port->p_hidden_in) found = 1; } } } /* Print a debug message summarizing the operation */ for (i = 0; i <= LLDPD_MODE_MAX; i++) protocols[i] = 0; k = j = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (!(((mask == SMART_OUTGOING) && port->p_hidden_out) || ((mask == SMART_INCOMING) && port->p_hidden_in))) { k++; protocols[port->p_protocol] = 1; } j++; } buffer[0] = '\0'; for (i=0; cfg->g_protocols[i].mode != 0; i++) { if (cfg->g_protocols[i].enabled && protocols[cfg->g_protocols[i].mode]) { if (strlen(buffer) + strlen(cfg->g_protocols[i].name) + 3 > sizeof(buffer)) { /* Unlikely, our buffer is too small */ memcpy(buffer + sizeof(buffer) - 4, "...", 4); break; } if (buffer[0]) strncat(buffer, ", ", 2); strncat(buffer, cfg->g_protocols[i].name, strlen(cfg->g_protocols[i].name)); } } log_debug("smartfilter", "%s: %s: %d visible neighbors (out of %d)", hardware->h_ifname, (mask == SMART_OUTGOING)?"out filter":"in filter", k, j); log_debug("smartfilter", "%s: protocols: 
%s", hardware->h_ifname, buffer[0]?buffer:"(none)"); } /* Hide unwanted ports depending on smart mode set by the user */ static void lldpd_hide_all(struct lldpd *cfg) { struct lldpd_hardware *hardware; if (!cfg->g_config.c_smart) return; log_debug("smartfilter", "apply smart filter results on all ports"); TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { if (cfg->g_config.c_smart & SMART_INCOMING_FILTER) lldpd_hide_ports(cfg, hardware, SMART_INCOMING); if (cfg->g_config.c_smart & SMART_OUTGOING_FILTER) lldpd_hide_ports(cfg, hardware, SMART_OUTGOING); } } void lldpd_recv(struct lldpd *cfg, struct lldpd_hardware *hardware, int fd) { char *buffer = NULL; int n; log_debug("receive", "receive a frame on %s", hardware->h_ifname); if ((buffer = (char *)malloc(hardware->h_mtu)) == NULL) { log_warn("receive", "failed to alloc reception buffer"); return; } if ((n = hardware->h_ops->recv(cfg, hardware, fd, buffer, hardware->h_mtu)) == -1) { log_debug("receive", "discard frame received on %s", hardware->h_ifname); free(buffer); return; } if (cfg->g_config.c_paused) { log_debug("receive", "paused, ignore the frame on %s", hardware->h_ifname); free(buffer); return; } hardware->h_rx_cnt++; log_debug("receive", "decode received frame on %s", hardware->h_ifname); TRACE(LLDPD_FRAME_RECEIVED(hardware->h_ifname, buffer, (size_t)n)); lldpd_decode(cfg, buffer, n, hardware); lldpd_hide_all(cfg); /* Immediatly hide */ lldpd_count_neighbors(cfg); free(buffer); } static void lldpd_send_shutdown(struct lldpd_hardware *hardware) { struct lldpd *cfg = hardware->h_cfg; if (cfg->g_config.c_receiveonly || cfg->g_config.c_paused) return; if ((hardware->h_flags & IFF_RUNNING) == 0) return; /* It's safe to call `lldp_send_shutdown()` because shutdown LLDPU will * only be emitted if LLDP was sent on that port. */ if (lldp_send_shutdown(hardware->h_cfg, hardware) != 0) log_warnx("send", "unable to send shutdown LLDPDU on %s", hardware->h_ifname); } void lldpd_send(struct lldpd_hardware *hardware) { struct lldpd *cfg = hardware->h_cfg; struct lldpd_port *port; int i, sent; if (cfg->g_config.c_receiveonly || cfg->g_config.c_paused) return; if ((hardware->h_flags & IFF_RUNNING) == 0) return; log_debug("send", "send PDU on %s", hardware->h_ifname); sent = 0; for (i=0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; /* We send only if we have at least one remote system * speaking this protocol or if the protocol is forced */ if (cfg->g_protocols[i].enabled > 1) { cfg->g_protocols[i].send(cfg, hardware); sent++; continue; } TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { /* If this remote port is disabled, we don't * consider it */ if (port->p_hidden_out) continue; if (port->p_protocol == cfg->g_protocols[i].mode) { TRACE(LLDPD_FRAME_SEND(hardware->h_ifname, cfg->g_protocols[i].name)); log_debug("send", "send PDU on %s with protocol %s", hardware->h_ifname, cfg->g_protocols[i].name); cfg->g_protocols[i].send(cfg, hardware); sent++; break; } } } if (!sent) { /* Nothing was sent for this port, let's speak the first * available protocol. 
*/ for (i = 0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; TRACE(LLDPD_FRAME_SEND(hardware->h_ifname, cfg->g_protocols[i].name)); log_debug("send", "fallback to protocol %s for %s", cfg->g_protocols[i].name, hardware->h_ifname); cfg->g_protocols[i].send(cfg, hardware); break; } if (cfg->g_protocols[i].mode == 0) log_warnx("send", "no protocol enabled, dunno what to send"); } } #ifdef ENABLE_LLDPMED static void lldpd_med(struct lldpd_chassis *chassis) { static short int once = 0; if (!once) { chassis->c_med_hw = dmi_hw(); chassis->c_med_fw = dmi_fw(); chassis->c_med_sn = dmi_sn(); chassis->c_med_manuf = dmi_manuf(); chassis->c_med_model = dmi_model(); chassis->c_med_asset = dmi_asset(); once = 1; } } #endif static int lldpd_routing_enabled(struct lldpd *cfg) { int routing; if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_ROUTER) == 0) return 0; if ((routing = interfaces_routing_enabled(cfg)) == -1) { log_debug("localchassis", "unable to check if routing is enabled"); return 0; } return routing; } static void lldpd_update_localchassis(struct lldpd *cfg) { struct utsname un; char *hp; log_debug("localchassis", "update information for local chassis"); assert(LOCAL_CHASSIS(cfg) != NULL); /* Set system name and description */ if (uname(&un) < 0) fatal("localchassis", "failed to get system information"); if (cfg->g_config.c_hostname) { log_debug("localchassis", "use overridden system name `%s`", cfg->g_config.c_hostname); hp = cfg->g_config.c_hostname; } else { if ((hp = priv_gethostname()) == NULL) fatal("localchassis", "failed to get system name"); } free(LOCAL_CHASSIS(cfg)->c_name); free(LOCAL_CHASSIS(cfg)->c_descr); if ((LOCAL_CHASSIS(cfg)->c_name = strdup(hp)) == NULL) fatal("localchassis", NULL); if (cfg->g_config.c_description) { log_debug("localchassis", "use overridden description `%s`", cfg->g_config.c_description); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s", cfg->g_config.c_description) == -1) fatal("localchassis", "failed to set full system description"); } else { if (cfg->g_config.c_advertise_version) { log_debug("localchassis", "advertise system version"); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s %s %s %s %s", cfg->g_lsb_release?cfg->g_lsb_release:"", un.sysname, un.release, un.version, un.machine) == -1) fatal("localchassis", "failed to set full system description"); } else { log_debug("localchassis", "do not advertise system version"); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s", cfg->g_lsb_release?cfg->g_lsb_release:un.sysname) == -1) fatal("localchassis", "failed to set minimal system description"); } } if (cfg->g_config.c_platform == NULL) cfg->g_config.c_platform = strdup(un.sysname); /* Check routing */ if (lldpd_routing_enabled(cfg)) { log_debug("localchassis", "routing is enabled, enable router capability"); LOCAL_CHASSIS(cfg)->c_cap_enabled |= LLDP_CAP_ROUTER; } else LOCAL_CHASSIS(cfg)->c_cap_enabled &= ~LLDP_CAP_ROUTER; #ifdef ENABLE_LLDPMED if (LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_TELEPHONE) LOCAL_CHASSIS(cfg)->c_cap_enabled |= LLDP_CAP_TELEPHONE; lldpd_med(LOCAL_CHASSIS(cfg)); free(LOCAL_CHASSIS(cfg)->c_med_sw); if (cfg->g_config.c_advertise_version) LOCAL_CHASSIS(cfg)->c_med_sw = strdup(un.release); else LOCAL_CHASSIS(cfg)->c_med_sw = strdup("Unknown"); #endif if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_STATION) && (LOCAL_CHASSIS(cfg)->c_cap_enabled == 0)) LOCAL_CHASSIS(cfg)->c_cap_enabled = LLDP_CAP_STATION; /* Set chassis ID if needed. 
This is only done if chassis ID has not been set previously (with the MAC address of an interface for example) */ if (LOCAL_CHASSIS(cfg)->c_id == NULL) { log_debug("localchassis", "no chassis ID is currently set, use chassis name"); if (!(LOCAL_CHASSIS(cfg)->c_id = strdup(LOCAL_CHASSIS(cfg)->c_name))) fatal("localchassis", NULL); LOCAL_CHASSIS(cfg)->c_id_len = strlen(LOCAL_CHASSIS(cfg)->c_name); LOCAL_CHASSIS(cfg)->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LOCAL; } } void lldpd_update_localports(struct lldpd *cfg) { struct lldpd_hardware *hardware; log_debug("localchassis", "update information for local ports"); /* h_flags is set to 0 for each port. If the port is updated, h_flags * will be set to a non-zero value. This will allow us to clean up any * non up-to-date port */ TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) hardware->h_flags = 0; TRACE(LLDPD_INTERFACES_UPDATE()); interfaces_update(cfg); lldpd_cleanup(cfg); lldpd_reset_timer(cfg); } void lldpd_loop(struct lldpd *cfg) { /* Main loop. 1. Update local ports information 2. Update local chassis information */ log_debug("loop", "start new loop"); LOCAL_CHASSIS(cfg)->c_cap_enabled = 0; /* Information for local ports is triggered even when it is possible to * update them on some other event because we want to refresh them if we * missed something. */ log_debug("loop", "update information for local ports"); lldpd_update_localports(cfg); log_debug("loop", "update information for local chassis"); lldpd_update_localchassis(cfg); lldpd_count_neighbors(cfg); } static void lldpd_exit(struct lldpd *cfg) { struct lldpd_hardware *hardware, *hardware_next; log_debug("main", "exit lldpd"); TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) lldpd_send_shutdown(hardware); close(cfg->g_ctl); priv_ctl_cleanup(cfg->g_ctlname); log_debug("main", "cleanup hardware information"); for (hardware = TAILQ_FIRST(&cfg->g_hardware); hardware != NULL; hardware = hardware_next) { hardware_next = TAILQ_NEXT(hardware, h_entries); log_debug("main", "cleanup interface %s", hardware->h_ifname); lldpd_remote_cleanup(hardware, NULL, 1); lldpd_hardware_cleanup(cfg, hardware); } } /** * Run lldpcli to configure lldpd. * * @return PID of running lldpcli or -1 if error. 
*/ static pid_t lldpd_configure(int debug, const char *path, const char *ctlname) { pid_t lldpcli = vfork(); int devnull; char sdebug[debug + 3]; memset(sdebug, 'd', debug + 3); sdebug[debug + 2] = '\0'; sdebug[0] = '-'; sdebug[1] = 's'; log_debug("main", "invoke %s %s", path, sdebug); switch (lldpcli) { case -1: log_warn("main", "unable to fork"); return -1; case 0: /* Child, exec lldpcli */ if ((devnull = open("/dev/null", O_RDWR, 0)) != -1) { dup2(devnull, STDIN_FILENO); dup2(devnull, STDOUT_FILENO); if (devnull > 2) close(devnull); execl(path, "lldpcli", sdebug, "-u", ctlname, "-c", SYSCONFDIR "/lldpd.conf", "-c", SYSCONFDIR "/lldpd.d", "resume", (char *)NULL); log_warn("main", "unable to execute %s", path); log_warnx("main", "configuration is incomplete, lldpd needs to be unpaused"); } _exit(127); break; default: /* Father, don't do anything stupid */ return lldpcli; } /* Should not be here */ return -1; } struct intint { int a; int b; }; static const struct intint filters[] = { { 0, 0 }, { 1, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 2, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO }, { 3, SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 4, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER }, { 5, SMART_INCOMING_FILTER }, { 6, SMART_OUTGOING_FILTER }, { 7, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 8, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH }, { 9, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 10, SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 11, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH }, { 12, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 13, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER }, { 14, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 15, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER }, { 16, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 17, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER }, { 18, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 19, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { -1, 0 } }; #ifndef HOST_OS_OSX /** * Tell if we have been started by upstart. */ static int lldpd_started_by_upstart() { #ifdef HOST_OS_LINUX const char *upstartjob = getenv("UPSTART_JOB"); if (!(upstartjob && !strcmp(upstartjob, "lldpd"))) return 0; log_debug("main", "running with upstart, don't fork but stop"); raise(SIGSTOP); unsetenv("UPSTART_JOB"); return 1; #else return 0; #endif } /** * Tell if we have been started by systemd. 
*/ static int lldpd_started_by_systemd() { #ifdef HOST_OS_LINUX int fd = -1; const char *notifysocket = getenv("NOTIFY_SOCKET"); if (!notifysocket || !strchr("@/", notifysocket[0]) || strlen(notifysocket) < 2) return 0; log_debug("main", "running with systemd, don't fork but signal ready"); if ((fd = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) { log_warn("main", "unable to open systemd notification socket %s", notifysocket); return 0; } struct sockaddr_un su = { .sun_family = AF_UNIX }; strlcpy(su.sun_path, notifysocket, sizeof(su.sun_path)); if (notifysocket[0] == '@') su.sun_path[0] = 0; struct iovec iov = { .iov_base = "READY=1", .iov_len = strlen("READY=1") }; struct msghdr hdr = { .msg_name = &su, .msg_namelen = offsetof(struct sockaddr_un, sun_path) + strlen(notifysocket), .msg_iov = &iov, .msg_iovlen = 1 }; unsetenv("NOTIFY_SOCKET"); if (sendmsg(fd, &hdr, MSG_NOSIGNAL) < 0) { log_warn("main", "unable to send notification to systemd"); close(fd); return 0; } close(fd); return 1; #else return 0; #endif } #endif int lldpd_main(int argc, char *argv[], char *envp[]) { struct lldpd *cfg; struct lldpd_chassis *lchassis; int ch, debug = 0; #ifdef USE_SNMP int snmp = 0; const char *agentx = NULL; /* AgentX socket */ #endif const char *ctlname = NULL; char *mgmtp = NULL; char *cidp = NULL; char *interfaces = NULL; /* We do not want more options here. Please add them in lldpcli instead * unless there is a very good reason. Most command-line options will * get deprecated at some point. */ char *popt, opts[] = "H:vhkrdD:xX:m:u:4:6:I:C:p:M:P:S:iL:@ "; int i, found, advertise_version = 1; #ifdef ENABLE_LLDPMED int lldpmed = 0, noinventory = 0; int enable_fast_start = 1; #endif char *descr_override = NULL; char *platform_override = NULL; char *lsb_release = NULL; const char *lldpcli = LLDPCLI_PATH; int smart = 15; int receiveonly = 0; int ctl; #ifdef ENABLE_PRIVSEP /* Non privileged user */ struct passwd *user; struct group *group; uid_t uid; gid_t gid; #endif saved_argv = argv; #if HAVE_SETPROCTITLE_INIT setproctitle_init(argc, argv, envp); #endif /* * Get and parse command line options */ if ((popt = strchr(opts, '@')) != NULL) { for (i=0; protos[i].mode != 0 && *popt != '\0'; i++) *(popt++) = protos[i].arg; *popt = '\0'; } while ((ch = getopt(argc, argv, opts)) != -1) { switch (ch) { case 'h': usage(); break; case 'v': fprintf(stdout, "%s\n", PACKAGE_VERSION); exit(0); break; case 'd': debug++; break; case 'D': log_accept(optarg); break; case 'r': receiveonly = 1; break; case 'm': if (mgmtp) { fprintf(stderr, "-m can only be used once\n"); usage(); } mgmtp = strdup(optarg); break; case 'u': if (ctlname) { fprintf(stderr, "-u can only be used once\n"); usage(); } ctlname = optarg; break; case 'I': if (interfaces) { fprintf(stderr, "-I can only be used once\n"); usage(); } interfaces = strdup(optarg); break; case 'C': if (cidp) { fprintf(stderr, "-C can only be used once\n"); usage(); } cidp = strdup(optarg); break; case 'L': if (strlen(optarg)) lldpcli = optarg; else lldpcli = NULL; break; case 'k': advertise_version = 0; break; #ifdef ENABLE_LLDPMED case 'M': lldpmed = atoi(optarg); if ((lldpmed < 1) || (lldpmed > 4)) { fprintf(stderr, "-M requires an argument between 1 and 4\n"); usage(); } break; case 'i': noinventory = 1; break; #else case 'M': case 'i': fprintf(stderr, "LLDP-MED support is not built-in\n"); usage(); break; #endif #ifdef USE_SNMP case 'x': snmp = 1; break; case 'X': if (agentx) { fprintf(stderr, "-X can only be used once\n"); usage(); } snmp = 1; agentx = optarg; break; #else case 
'x': case 'X': fprintf(stderr, "SNMP support is not built-in\n"); usage(); #endif break; case 'S': if (descr_override) { fprintf(stderr, "-S can only be used once\n"); usage(); } descr_override = strdup(optarg); break; case 'P': if (platform_override) { fprintf(stderr, "-P can only be used once\n"); usage(); } platform_override = strdup(optarg); break; case 'H': smart = atoi(optarg); break; default: found = 0; for (i=0; protos[i].mode != 0; i++) { if (ch == protos[i].arg) { found = 1; protos[i].enabled++; } } if (!found) usage(); } } if (ctlname == NULL) ctlname = LLDPD_CTL_SOCKET; /* Set correct smart mode */ for (i=0; (filters[i].a != -1) && (filters[i].a != smart); i++); if (filters[i].a == -1) { fprintf(stderr, "Incorrect mode for -H\n"); usage(); } smart = filters[i].b; log_init(debug, __progname); tzset(); /* Get timezone info before chroot */ log_debug("main", "lldpd " PACKAGE_VERSION " starting..."); /* Grab uid and gid to use for priv sep */ #ifdef ENABLE_PRIVSEP if ((user = getpwnam(PRIVSEP_USER)) == NULL) fatal("main", "no " PRIVSEP_USER " user for privilege separation"); uid = user->pw_uid; if ((group = getgrnam(PRIVSEP_GROUP)) == NULL) fatal("main", "no " PRIVSEP_GROUP " group for privilege separation"); gid = group->gr_gid; #endif /* Create and setup socket */ int retry = 1; log_debug("main", "creating control socket"); while ((ctl = ctl_create(ctlname)) == -1) { if (retry-- && errno == EADDRINUSE) { /* Check if a daemon is really listening */ int tfd; log_info("main", "unable to create control socket because it already exists"); log_info("main", "check if another instance is running"); if ((tfd = ctl_connect(ctlname)) != -1) { /* Another instance is running */ close(tfd); log_warnx("main", "another instance is running, please stop it"); fatalx("main", "giving up"); } else if (errno == ECONNREFUSED) { /* Nobody is listening */ log_info("main", "old control socket is present, clean it"); ctl_cleanup(ctlname); continue; } log_warn("main", "cannot determine if another daemon is already running"); fatalx("main", "giving up"); } log_warn("main", "unable to create control socket"); fatalx("main", "giving up"); } #ifdef ENABLE_PRIVSEP if (chown(ctlname, uid, gid) == -1) log_warn("main", "unable to chown control socket"); if (chmod(ctlname, S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | S_IWGRP | S_IXGRP) == -1) log_warn("main", "unable to chmod control socket"); #endif /* Disable SIGPIPE */ signal(SIGPIPE, SIG_IGN); /* Disable SIGHUP, until handlers are installed */ signal(SIGHUP, SIG_IGN); /* Configuration with lldpcli */ if (lldpcli) { log_debug("main", "invoking lldpcli for configuration"); if (lldpd_configure(debug, lldpcli, ctlname) == -1) fatal("main", "unable to spawn lldpcli"); } /* Daemonization, unless started by upstart, systemd or launchd or debug */ #ifndef HOST_OS_OSX if (!lldpd_started_by_upstart() && !lldpd_started_by_systemd() && !debug) { int pid; char *spid; log_debug("main", "daemonize"); if (daemon(0, 0) != 0) fatal("main", "failed to detach daemon"); if ((pid = open(LLDPD_PID_FILE, O_TRUNC | O_CREAT | O_WRONLY, 0666)) == -1) fatal("main", "unable to open pid file " LLDPD_PID_FILE); if (asprintf(&spid, "%d\n", getpid()) == -1) fatal("main", "unable to create pid file " LLDPD_PID_FILE); if (write(pid, spid, strlen(spid)) == -1) fatal("main", "unable to write pid file " LLDPD_PID_FILE); free(spid); close(pid); } #endif /* Try to read system information from /etc/os-release if possible. Fall back to lsb_release for compatibility. 
*/ log_debug("main", "get OS/LSB release information"); lsb_release = lldpd_get_os_release(); if (!lsb_release) { lsb_release = lldpd_get_lsb_release(); } log_debug("main", "initialize privilege separation"); #ifdef ENABLE_PRIVSEP priv_init(PRIVSEP_CHROOT, ctl, uid, gid); #else priv_init(PRIVSEP_CHROOT, ctl, 0, 0); #endif /* Initialization of global configuration */ if ((cfg = (struct lldpd *) calloc(1, sizeof(struct lldpd))) == NULL) fatal("main", NULL); cfg->g_ctlname = ctlname; cfg->g_ctl = ctl; cfg->g_config.c_mgmt_pattern = mgmtp; cfg->g_config.c_cid_pattern = cidp; cfg->g_config.c_iface_pattern = interfaces; cfg->g_config.c_smart = smart; if (lldpcli) cfg->g_config.c_paused = 1; cfg->g_config.c_receiveonly = receiveonly; cfg->g_config.c_tx_interval = LLDPD_TX_INTERVAL; cfg->g_config.c_tx_hold = LLDPD_TX_HOLD; cfg->g_config.c_max_neighbors = LLDPD_MAX_NEIGHBORS; #ifdef ENABLE_LLDPMED cfg->g_config.c_enable_fast_start = enable_fast_start; cfg->g_config.c_tx_fast_init = LLDPD_FAST_INIT; cfg->g_config.c_tx_fast_interval = LLDPD_FAST_TX_INTERVAL; #endif #ifdef USE_SNMP cfg->g_snmp = snmp; cfg->g_snmp_agentx = agentx; #endif /* USE_SNMP */ cfg->g_config.c_bond_slave_src_mac_type = \ LLDP_BOND_SLAVE_SRC_MAC_TYPE_LOCALLY_ADMINISTERED; /* Get ioctl socket */ log_debug("main", "get an ioctl socket"); if ((cfg->g_sock = socket(AF_INET, SOCK_DGRAM, 0)) == -1) fatal("main", "failed to get ioctl socket"); /* Description */ if (!(cfg->g_config.c_advertise_version = advertise_version) && lsb_release && lsb_release[strlen(lsb_release) - 1] == '\n') lsb_release[strlen(lsb_release) - 1] = '\0'; cfg->g_lsb_release = lsb_release; if (descr_override) cfg->g_config.c_description = descr_override; if (platform_override) cfg->g_config.c_platform = platform_override; /* Set system capabilities */ log_debug("main", "set system capabilities"); if ((lchassis = (struct lldpd_chassis*) calloc(1, sizeof(struct lldpd_chassis))) == NULL) fatal("localchassis", NULL); cfg->g_config.c_cap_advertise = 1; lchassis->c_cap_available = LLDP_CAP_BRIDGE | LLDP_CAP_WLAN | LLDP_CAP_ROUTER | LLDP_CAP_STATION; cfg->g_config.c_mgmt_advertise = 1; TAILQ_INIT(&lchassis->c_mgmt); #ifdef ENABLE_LLDPMED if (lldpmed > 0) { if (lldpmed == LLDP_MED_CLASS_III) lchassis->c_cap_available |= LLDP_CAP_TELEPHONE; lchassis->c_med_type = lldpmed; lchassis->c_med_cap_available = LLDP_MED_CAP_CAP | LLDP_MED_CAP_IV | LLDP_MED_CAP_LOCATION | LLDP_MED_CAP_POLICY | LLDP_MED_CAP_MDI_PSE | LLDP_MED_CAP_MDI_PD; cfg->g_config.c_noinventory = noinventory; } else cfg->g_config.c_noinventory = 1; #endif /* Set TTL */ lchassis->c_ttl = cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold; log_debug("main", "initialize protocols"); cfg->g_protocols = protos; for (i=0; protos[i].mode != 0; i++) { /* With -ll, disable LLDP */ if (protos[i].mode == LLDPD_MODE_LLDP) protos[i].enabled %= 3; /* With -ccc force CDPV2, enable CDPV1 */ if (protos[i].mode == LLDPD_MODE_CDPV1 && protos[i].enabled == 3) { protos[i].enabled = 1; } /* With -cc force CDPV1, enable CDPV2 */ if (protos[i].mode == LLDPD_MODE_CDPV2 && protos[i].enabled == 2) { protos[i].enabled = 1; } /* With -cccc disable CDPV1, enable CDPV2 */ if (protos[i].mode == LLDPD_MODE_CDPV1 && protos[i].enabled >= 4) { protos[i].enabled = 0; } /* With -cccc disable CDPV1, enable CDPV2; -ccccc will force CDPv2 */ if (protos[i].mode == LLDPD_MODE_CDPV2 && protos[i].enabled == 4) { protos[i].enabled = 1; } if (protos[i].enabled > 1) log_info("main", "protocol %s enabled and forced", protos[i].name); else if 
(protos[i].enabled) log_info("main", "protocol %s enabled", protos[i].name); else log_info("main", "protocol %s disabled", protos[i].name); } TAILQ_INIT(&cfg->g_hardware); TAILQ_INIT(&cfg->g_chassis); TAILQ_INSERT_TAIL(&cfg->g_chassis, lchassis, c_entries); lchassis->c_refcount++; /* We should always keep a reference to local chassis */ /* Main loop */ log_debug("main", "start main loop"); levent_loop(cfg); lldpd_exit(cfg); free(cfg); return (0); }
./CrossVul/dataset_final_sorted/CWE-617/c/good_1770_0
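/*
 * A minimal, self-contained sketch (not lldpd code) of the change-detection
 * idea used by lldpd_reset_timer() above: serialize the current state into a
 * flat byte buffer, memcmp it against the previous serialization, and only
 * act (there: reschedule the PDU timer) when the bytes differ. The port_state
 * struct, serialize() and state_changed() below are illustrative stand-ins
 * for lldpd's lldpd_port and lldpd_port_serialize(), not real lldpd APIs.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

struct port_state { int mtu; int enabled; char descr[64]; };

static ssize_t serialize(const struct port_state *p, unsigned char **out)
{
  *out = malloc(sizeof(*p));
  if (*out == NULL) return -1;
  memcpy(*out, p, sizeof(*p));   /* a real marshaler would walk pointers too */
  return (ssize_t) sizeof(*p);
}

/* Returns 1 when the state changed since the last call, 0 otherwise. */
static int state_changed(const struct port_state *p,
                         unsigned char **prev, ssize_t *prev_len)
{
  unsigned char *cur;
  ssize_t cur_len = serialize(p, &cur);
  if (cur_len == -1) return 0;   /* cannot tell; assume unchanged */
  int changed = (*prev == NULL || cur_len != *prev_len ||
                 memcmp(cur, *prev, (size_t) cur_len) != 0);
  free(*prev);
  *prev = cur;                   /* keep the snapshot for the next call */
  *prev_len = cur_len;
  return changed;
}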
crossvul-cpp_data_good_2523_0
/* * kvm eventfd support - use eventfd objects to signal various KVM events * * Copyright 2009 Novell. All Rights Reserved. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Author: * Gregory Haskins <ghaskins@novell.com> * * This file is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/kvm_irqfd.h> #include <linux/workqueue.h> #include <linux/syscalls.h> #include <linux/wait.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/list.h> #include <linux/eventfd.h> #include <linux/kernel.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/seqlock.h> #include <linux/irqbypass.h> #include <trace/events/kvm.h> #include <kvm/iodev.h> #ifdef CONFIG_HAVE_KVM_IRQFD static struct workqueue_struct *irqfd_cleanup_wq; static void irqfd_inject(struct work_struct *work) { struct kvm_kernel_irqfd *irqfd = container_of(work, struct kvm_kernel_irqfd, inject); struct kvm *kvm = irqfd->kvm; if (!irqfd->resampler) { kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, false); kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, false); } else kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, irqfd->gsi, 1, false); } /* * Since resampler irqfds share an IRQ source ID, we de-assert once * then notify all of the resampler irqfds using this GSI. We can't * do multiple de-asserts or we risk racing with incoming re-asserts. */ static void irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian) { struct kvm_kernel_irqfd_resampler *resampler; struct kvm *kvm; struct kvm_kernel_irqfd *irqfd; int idx; resampler = container_of(kian, struct kvm_kernel_irqfd_resampler, notifier); kvm = resampler->kvm; kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, resampler->notifier.gsi, 0, false); idx = srcu_read_lock(&kvm->irq_srcu); list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link) eventfd_signal(irqfd->resamplefd, 1); srcu_read_unlock(&kvm->irq_srcu, idx); } static void irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd) { struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler; struct kvm *kvm = resampler->kvm; mutex_lock(&kvm->irqfds.resampler_lock); list_del_rcu(&irqfd->resampler_link); synchronize_srcu(&kvm->irq_srcu); if (list_empty(&resampler->list)) { list_del(&resampler->link); kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier); kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, resampler->notifier.gsi, 0, false); kfree(resampler); } mutex_unlock(&kvm->irqfds.resampler_lock); } /* * Race-free decouple logic (ordering is critical) */ static void irqfd_shutdown(struct work_struct *work) { struct kvm_kernel_irqfd *irqfd = container_of(work, struct kvm_kernel_irqfd, shutdown); u64 cnt; /* * Synchronize with the wait-queue and unhook ourselves to prevent * further events. 
*/ eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt); /* * We know no new events will be scheduled at this point, so block * until all previously outstanding events have completed */ flush_work(&irqfd->inject); if (irqfd->resampler) { irqfd_resampler_shutdown(irqfd); eventfd_ctx_put(irqfd->resamplefd); } /* * It is now safe to release the object's resources */ #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS irq_bypass_unregister_consumer(&irqfd->consumer); #endif eventfd_ctx_put(irqfd->eventfd); kfree(irqfd); } /* assumes kvm->irqfds.lock is held */ static bool irqfd_is_active(struct kvm_kernel_irqfd *irqfd) { return list_empty(&irqfd->list) ? false : true; } /* * Mark the irqfd as inactive and schedule it for removal * * assumes kvm->irqfds.lock is held */ static void irqfd_deactivate(struct kvm_kernel_irqfd *irqfd) { BUG_ON(!irqfd_is_active(irqfd)); list_del_init(&irqfd->list); queue_work(irqfd_cleanup_wq, &irqfd->shutdown); } int __attribute__((weak)) kvm_arch_set_irq_inatomic( struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm, int irq_source_id, int level, bool line_status) { return -EWOULDBLOCK; } /* * Called with wqh->lock held and interrupts disabled */ static int irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct kvm_kernel_irqfd *irqfd = container_of(wait, struct kvm_kernel_irqfd, wait); unsigned long flags = (unsigned long)key; struct kvm_kernel_irq_routing_entry irq; struct kvm *kvm = irqfd->kvm; unsigned seq; int idx; if (flags & POLLIN) { idx = srcu_read_lock(&kvm->irq_srcu); do { seq = read_seqcount_begin(&irqfd->irq_entry_sc); irq = irqfd->irq_entry; } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq)); /* An event has been signaled, inject an interrupt */ if (kvm_arch_set_irq_inatomic(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false) == -EWOULDBLOCK) schedule_work(&irqfd->inject); srcu_read_unlock(&kvm->irq_srcu, idx); } if (flags & POLLHUP) { /* The eventfd is closing, detach from KVM */ unsigned long flags; spin_lock_irqsave(&kvm->irqfds.lock, flags); /* * We must check if someone deactivated the irqfd before * we could acquire the irqfds.lock since the item is * deactivated from the KVM side before it is unhooked from * the wait-queue. If it is already deactivated, we can * simply return knowing the other side will cleanup for us. 
* We cannot race against the irqfd going away since the * other side is required to acquire wqh->lock, which we hold */ if (irqfd_is_active(irqfd)) irqfd_deactivate(irqfd); spin_unlock_irqrestore(&kvm->irqfds.lock, flags); } return 0; } static void irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, poll_table *pt) { struct kvm_kernel_irqfd *irqfd = container_of(pt, struct kvm_kernel_irqfd, pt); add_wait_queue(wqh, &irqfd->wait); } /* Must be called under irqfds.lock */ static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd) { struct kvm_kernel_irq_routing_entry *e; struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS]; int n_entries; n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi); write_seqcount_begin(&irqfd->irq_entry_sc); e = entries; if (n_entries == 1) irqfd->irq_entry = *e; else irqfd->irq_entry.type = 0; write_seqcount_end(&irqfd->irq_entry_sc); } #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS void __attribute__((weak)) kvm_arch_irq_bypass_stop( struct irq_bypass_consumer *cons) { } void __attribute__((weak)) kvm_arch_irq_bypass_start( struct irq_bypass_consumer *cons) { } int __attribute__((weak)) kvm_arch_update_irqfd_routing( struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set) { return 0; } #endif static int kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) { struct kvm_kernel_irqfd *irqfd, *tmp; struct fd f; struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL; int ret; unsigned int events; int idx; if (!kvm_arch_intc_initialized(kvm)) return -EAGAIN; irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); if (!irqfd) return -ENOMEM; irqfd->kvm = kvm; irqfd->gsi = args->gsi; INIT_LIST_HEAD(&irqfd->list); INIT_WORK(&irqfd->inject, irqfd_inject); INIT_WORK(&irqfd->shutdown, irqfd_shutdown); seqcount_init(&irqfd->irq_entry_sc); f = fdget(args->fd); if (!f.file) { ret = -EBADF; goto out; } eventfd = eventfd_ctx_fileget(f.file); if (IS_ERR(eventfd)) { ret = PTR_ERR(eventfd); goto fail; } irqfd->eventfd = eventfd; if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) { struct kvm_kernel_irqfd_resampler *resampler; resamplefd = eventfd_ctx_fdget(args->resamplefd); if (IS_ERR(resamplefd)) { ret = PTR_ERR(resamplefd); goto fail; } irqfd->resamplefd = resamplefd; INIT_LIST_HEAD(&irqfd->resampler_link); mutex_lock(&kvm->irqfds.resampler_lock); list_for_each_entry(resampler, &kvm->irqfds.resampler_list, link) { if (resampler->notifier.gsi == irqfd->gsi) { irqfd->resampler = resampler; break; } } if (!irqfd->resampler) { resampler = kzalloc(sizeof(*resampler), GFP_KERNEL); if (!resampler) { ret = -ENOMEM; mutex_unlock(&kvm->irqfds.resampler_lock); goto fail; } resampler->kvm = kvm; INIT_LIST_HEAD(&resampler->list); resampler->notifier.gsi = irqfd->gsi; resampler->notifier.irq_acked = irqfd_resampler_ack; INIT_LIST_HEAD(&resampler->link); list_add(&resampler->link, &kvm->irqfds.resampler_list); kvm_register_irq_ack_notifier(kvm, &resampler->notifier); irqfd->resampler = resampler; } list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list); synchronize_srcu(&kvm->irq_srcu); mutex_unlock(&kvm->irqfds.resampler_lock); } /* * Install our own custom wake-up handling so we are notified via * a callback whenever someone signals the underlying eventfd */ init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup); init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc); spin_lock_irq(&kvm->irqfds.lock); ret = 0; list_for_each_entry(tmp, &kvm->irqfds.items, list) { if (irqfd->eventfd != tmp->eventfd) continue; /* This fd is used for another irq already. 
*/ ret = -EBUSY; spin_unlock_irq(&kvm->irqfds.lock); goto fail; } idx = srcu_read_lock(&kvm->irq_srcu); irqfd_update(kvm, irqfd); srcu_read_unlock(&kvm->irq_srcu, idx); list_add_tail(&irqfd->list, &kvm->irqfds.items); spin_unlock_irq(&kvm->irqfds.lock); /* * Check if there was an event already pending on the eventfd * before we registered, and trigger it as if we didn't miss it. */ events = f.file->f_op->poll(f.file, &irqfd->pt); if (events & POLLIN) schedule_work(&irqfd->inject); /* * do not drop the file until the irqfd is fully initialized, otherwise * we might race against the POLLHUP */ fdput(f); #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS if (kvm_arch_has_irq_bypass()) { irqfd->consumer.token = (void *)irqfd->eventfd; irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer; irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer; irqfd->consumer.stop = kvm_arch_irq_bypass_stop; irqfd->consumer.start = kvm_arch_irq_bypass_start; ret = irq_bypass_register_consumer(&irqfd->consumer); if (ret) pr_info("irq bypass consumer (token %p) registration fails: %d\n", irqfd->consumer.token, ret); } #endif return 0; fail: if (irqfd->resampler) irqfd_resampler_shutdown(irqfd); if (resamplefd && !IS_ERR(resamplefd)) eventfd_ctx_put(resamplefd); if (eventfd && !IS_ERR(eventfd)) eventfd_ctx_put(eventfd); fdput(f); out: kfree(irqfd); return ret; } bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) { struct kvm_irq_ack_notifier *kian; int gsi, idx; idx = srcu_read_lock(&kvm->irq_srcu); gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); if (gsi != -1) hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, link) if (kian->gsi == gsi) { srcu_read_unlock(&kvm->irq_srcu, idx); return true; } srcu_read_unlock(&kvm->irq_srcu, idx); return false; } EXPORT_SYMBOL_GPL(kvm_irq_has_notifier); void kvm_notify_acked_gsi(struct kvm *kvm, int gsi) { struct kvm_irq_ack_notifier *kian; hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, link) if (kian->gsi == gsi) kian->irq_acked(kian); } void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) { int gsi, idx; trace_kvm_ack_irq(irqchip, pin); idx = srcu_read_lock(&kvm->irq_srcu); gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); if (gsi != -1) kvm_notify_acked_gsi(kvm, gsi); srcu_read_unlock(&kvm->irq_srcu, idx); } void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian) { mutex_lock(&kvm->irq_lock); hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); mutex_unlock(&kvm->irq_lock); kvm_arch_post_irq_ack_notifier_list_update(kvm); } void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian) { mutex_lock(&kvm->irq_lock); hlist_del_init_rcu(&kian->link); mutex_unlock(&kvm->irq_lock); synchronize_srcu(&kvm->irq_srcu); kvm_arch_post_irq_ack_notifier_list_update(kvm); } #endif void kvm_eventfd_init(struct kvm *kvm) { #ifdef CONFIG_HAVE_KVM_IRQFD spin_lock_init(&kvm->irqfds.lock); INIT_LIST_HEAD(&kvm->irqfds.items); INIT_LIST_HEAD(&kvm->irqfds.resampler_list); mutex_init(&kvm->irqfds.resampler_lock); #endif INIT_LIST_HEAD(&kvm->ioeventfds); } #ifdef CONFIG_HAVE_KVM_IRQFD /* * shutdown any irqfd's that match fd+gsi */ static int kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args) { struct kvm_kernel_irqfd *irqfd, *tmp; struct eventfd_ctx *eventfd; eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { if (irqfd->eventfd == 
eventfd && irqfd->gsi == args->gsi) { /* * This clearing of irq_entry.type is needed for when * another thread calls kvm_irq_routing_update before * we flush workqueue below (we synchronize with * kvm_irq_routing_update using irqfds.lock). */ write_seqcount_begin(&irqfd->irq_entry_sc); irqfd->irq_entry.type = 0; write_seqcount_end(&irqfd->irq_entry_sc); irqfd_deactivate(irqfd); } } spin_unlock_irq(&kvm->irqfds.lock); eventfd_ctx_put(eventfd); /* * Block until we know all outstanding shutdown jobs have completed * so that we guarantee there will not be any more interrupts on this * gsi once this deassign function returns. */ flush_workqueue(irqfd_cleanup_wq); return 0; } int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) { if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE)) return -EINVAL; if (args->gsi >= KVM_MAX_IRQ_ROUTES) return -EINVAL; if (args->flags & KVM_IRQFD_FLAG_DEASSIGN) return kvm_irqfd_deassign(kvm, args); return kvm_irqfd_assign(kvm, args); } /* * This function is called as the kvm VM fd is being released. Shutdown all * irqfds that still remain open */ void kvm_irqfd_release(struct kvm *kvm) { struct kvm_kernel_irqfd *irqfd, *tmp; spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) irqfd_deactivate(irqfd); spin_unlock_irq(&kvm->irqfds.lock); /* * Block until we know all outstanding shutdown jobs have completed * since we do not take a kvm* reference. */ flush_workqueue(irqfd_cleanup_wq); } /* * Take note of a change in irq routing. * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards. */ void kvm_irq_routing_update(struct kvm *kvm) { struct kvm_kernel_irqfd *irqfd; spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry(irqfd, &kvm->irqfds.items, list) { irqfd_update(kvm, irqfd); #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS if (irqfd->producer) { int ret = kvm_arch_update_irqfd_routing( irqfd->kvm, irqfd->producer->irq, irqfd->gsi, 1); WARN_ON(ret); } #endif } spin_unlock_irq(&kvm->irqfds.lock); } /* * create a host-wide workqueue for issuing deferred shutdown requests * aggregated from all vm* instances. We need our own isolated * queue to ease flushing work items when a VM exits. */ int kvm_irqfd_init(void) { irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0); if (!irqfd_cleanup_wq) return -ENOMEM; return 0; } void kvm_irqfd_exit(void) { destroy_workqueue(irqfd_cleanup_wq); } #endif /* * -------------------------------------------------------------------- * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal. * * userspace can register a PIO/MMIO address with an eventfd for receiving * notification when the memory has been touched. 
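* A registration can match any value written (wildcard) or only a specific datamatch value, and a length of zero matches on the address alone.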
* -------------------------------------------------------------------- */ struct _ioeventfd { struct list_head list; u64 addr; int length; struct eventfd_ctx *eventfd; u64 datamatch; struct kvm_io_device dev; u8 bus_idx; bool wildcard; }; static inline struct _ioeventfd * to_ioeventfd(struct kvm_io_device *dev) { return container_of(dev, struct _ioeventfd, dev); } static void ioeventfd_release(struct _ioeventfd *p) { eventfd_ctx_put(p->eventfd); list_del(&p->list); kfree(p); } static bool ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val) { u64 _val; if (addr != p->addr) /* address must be precise for a hit */ return false; if (!p->length) /* length = 0 means only look at the address, so always a hit */ return true; if (len != p->length) /* address-range must be precise for a hit */ return false; if (p->wildcard) /* all else equal, wildcard is always a hit */ return true; /* otherwise, we have to actually compare the data */ BUG_ON(!IS_ALIGNED((unsigned long)val, len)); switch (len) { case 1: _val = *(u8 *)val; break; case 2: _val = *(u16 *)val; break; case 4: _val = *(u32 *)val; break; case 8: _val = *(u64 *)val; break; default: return false; } return _val == p->datamatch ? true : false; } /* MMIO/PIO writes trigger an event if the addr/val match */ static int ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, const void *val) { struct _ioeventfd *p = to_ioeventfd(this); if (!ioeventfd_in_range(p, addr, len, val)) return -EOPNOTSUPP; eventfd_signal(p->eventfd, 1); return 0; } /* * This function is called as KVM is completely shutting down. We do not * need to worry about locking just nuke anything we have as quickly as possible */ static void ioeventfd_destructor(struct kvm_io_device *this) { struct _ioeventfd *p = to_ioeventfd(this); ioeventfd_release(p); } static const struct kvm_io_device_ops ioeventfd_ops = { .write = ioeventfd_write, .destructor = ioeventfd_destructor, }; /* assumes kvm->slots_lock held */ static bool ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p) { struct _ioeventfd *_p; list_for_each_entry(_p, &kvm->ioeventfds, list) if (_p->bus_idx == p->bus_idx && _p->addr == p->addr && (!_p->length || !p->length || (_p->length == p->length && (_p->wildcard || p->wildcard || _p->datamatch == p->datamatch)))) return true; return false; } static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags) { if (flags & KVM_IOEVENTFD_FLAG_PIO) return KVM_PIO_BUS; if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY) return KVM_VIRTIO_CCW_NOTIFY_BUS; return KVM_MMIO_BUS; } static int kvm_assign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_ioeventfd *args) { struct eventfd_ctx *eventfd; struct _ioeventfd *p; int ret; eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { ret = -ENOMEM; goto fail; } INIT_LIST_HEAD(&p->list); p->addr = args->addr; p->bus_idx = bus_idx; p->length = args->len; p->eventfd = eventfd; /* The datamatch feature is optional, otherwise this is a wildcard */ if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH) p->datamatch = args->datamatch; else p->wildcard = true; mutex_lock(&kvm->slots_lock); /* Verify that there isn't a match already */ if (ioeventfd_check_collision(kvm, p)) { ret = -EEXIST; goto unlock_fail; } kvm_iodevice_init(&p->dev, &ioeventfd_ops); ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length, &p->dev); if (ret < 0) goto unlock_fail; kvm_get_bus(kvm, bus_idx)->ioeventfd_count++; 
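/* Track the new device on the VM-wide ioeventfd list so that deassign can find and release it later. */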
list_add_tail(&p->list, &kvm->ioeventfds); mutex_unlock(&kvm->slots_lock); return 0; unlock_fail: mutex_unlock(&kvm->slots_lock); fail: kfree(p); eventfd_ctx_put(eventfd); return ret; } static int kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_ioeventfd *args) { struct _ioeventfd *p, *tmp; struct eventfd_ctx *eventfd; struct kvm_io_bus *bus; int ret = -ENOENT; eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); mutex_lock(&kvm->slots_lock); list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH); if (p->bus_idx != bus_idx || p->eventfd != eventfd || p->addr != args->addr || p->length != args->len || p->wildcard != wildcard) continue; if (!p->wildcard && p->datamatch != args->datamatch) continue; kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); bus = kvm_get_bus(kvm, bus_idx); if (bus) bus->ioeventfd_count--; ioeventfd_release(p); ret = 0; break; } mutex_unlock(&kvm->slots_lock); eventfd_ctx_put(eventfd); return ret; } static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags); int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); if (!args->len && bus_idx == KVM_MMIO_BUS) kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); return ret; } static int kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { enum kvm_bus bus_idx; int ret; bus_idx = ioeventfd_bus_from_flags(args->flags); /* must be natural-word sized, or 0 to ignore length */ switch (args->len) { case 0: case 1: case 2: case 4: case 8: break; default: return -EINVAL; } /* check for range overflow */ if (args->addr + args->len < args->addr) return -EINVAL; /* check for extra flags that we don't understand */ if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK) return -EINVAL; /* ioeventfd with no length can't be combined with DATAMATCH */ if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)) return -EINVAL; ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args); if (ret) goto fail; /* When length is ignored, MMIO is also put on a separate bus, for * faster lookups. */ if (!args->len && bus_idx == KVM_MMIO_BUS) { ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); if (ret < 0) goto fast_fail; } return 0; fast_fail: kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); fail: return ret; } int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN) return kvm_deassign_ioeventfd(kvm, args); return kvm_assign_ioeventfd(kvm, args); }
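/*
 * Illustrative sketch, not part of the kernel sources: a standalone
 * userspace model of the ioeventfd_in_range() matching rules above.
 * The struct and function names here are invented for the example;
 * the value is widened exactly as the kernel's casts do. Build with
 * any C compiler and run directly.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct match_spec {
	uint64_t addr;      /* registered guest-physical address */
	int      length;    /* 0 means match on the address alone */
	bool     wildcard;  /* true: any written value triggers */
	uint64_t datamatch; /* compared when !wildcard && length */
};

static bool model_in_range(const struct match_spec *p, uint64_t addr,
			   int len, const void *val)
{
	uint64_t v;

	if (addr != p->addr)
		return false;   /* address must be an exact hit */
	if (!p->length)
		return true;    /* zero length: address-only registration */
	if (len != p->length)
		return false;   /* access width must match as well */
	if (p->wildcard)
		return true;    /* all else equal, wildcard always hits */

	switch (len) {          /* widen the 1/2/4/8-byte write */
	case 1: v = *(const uint8_t  *)val; break;
	case 2: v = *(const uint16_t *)val; break;
	case 4: v = *(const uint32_t *)val; break;
	case 8: v = *(const uint64_t *)val; break;
	default: return false;
	}
	return v == p->datamatch;
}

int main(void)
{
	struct match_spec s = { .addr = 0x1000, .length = 2,
				.wildcard = false, .datamatch = 0xbeef };
	uint16_t hit = 0xbeef, miss = 0xdead;

	printf("hit:  %d\n", model_in_range(&s, 0x1000, 2, &hit));  /* 1 */
	printf("miss: %d\n", model_in_range(&s, 0x1000, 2, &miss)); /* 0 */
	printf("size: %d\n", model_in_range(&s, 0x1000, 4, &hit));  /* 0 */
	return 0;
}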
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* kdc/do_as_req.c */ /* * Portions Copyright (C) 2007 Apple Inc. * Copyright 1990, 1991, 2007, 2008, 2009, 2013, 2014 by the * Massachusetts Institute of Technology. All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. * * * KDC Routines to deal with AS_REQ's */ /* * Copyright (c) 2006-2008, Novell, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The copyright holder's name is not used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "k5-int.h" #include "com_err.h" #include <syslog.h> #ifdef HAVE_NETINET_IN_H #include <sys/types.h> #include <netinet/in.h> #ifndef hpux #include <arpa/inet.h> #endif /* hpux */ #endif /* HAVE_NETINET_IN_H */ #include "kdc_util.h" #include "kdc_audit.h" #include "policy.h" #include <kadm5/admin.h> #include "adm_proto.h" #include "extern.h" static krb5_error_code prepare_error_as(struct kdc_request_state *, krb5_kdc_req *, krb5_db_entry *, int, krb5_pa_data **, krb5_boolean, krb5_principal, krb5_data **, const char *); /* Determine the key-expiration value according to RFC 4120 section 5.4.2. 
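* The result is the earlier of the principal expiration and the password expiration, a zero field meaning that kind of expiration is unset.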
*/ static krb5_timestamp get_key_exp(krb5_db_entry *entry) { if (entry->expiration == 0) return entry->pw_expiration; if (entry->pw_expiration == 0) return entry->expiration; return ts_min(entry->expiration, entry->pw_expiration); } /* * Find the key in client for the most preferred enctype in req_enctypes. Fill * in *kb_out with the decrypted keyblock (which the caller must free) and set * *kd_out to an alias to that key data entry. Set *kd_out to NULL and leave * *kb_out zeroed if no key is found for any of the requested enctypes. * kb_out->enctype may differ from the enctype of *kd_out for DES enctypes; in * this case, kb_out->enctype is the requested enctype used to match the key * data entry. */ static krb5_error_code select_client_key(krb5_context context, krb5_db_entry *client, krb5_enctype *req_enctypes, int n_req_enctypes, krb5_keyblock *kb_out, krb5_key_data **kd_out) { krb5_error_code ret; krb5_key_data *kd; krb5_enctype etype; int i; memset(kb_out, 0, sizeof(*kb_out)); *kd_out = NULL; for (i = 0; i < n_req_enctypes; i++) { etype = req_enctypes[i]; if (!krb5_c_valid_enctype(etype)) continue; if (krb5_dbe_find_enctype(context, client, etype, -1, 0, &kd) == 0) { /* Decrypt the client key data and set its enctype to the request * enctype (which may differ from the key data enctype for DES). */ ret = krb5_dbe_decrypt_key_data(context, NULL, kd, kb_out, NULL); if (ret) return ret; kb_out->enctype = etype; *kd_out = kd; return 0; } } return 0; } struct as_req_state { loop_respond_fn respond; void *arg; krb5_principal_data client_princ; krb5_enc_tkt_part enc_tkt_reply; krb5_enc_kdc_rep_part reply_encpart; krb5_ticket ticket_reply; krb5_keyblock server_keyblock; krb5_keyblock client_keyblock; krb5_db_entry *client; krb5_db_entry *server; krb5_db_entry *local_tgt; krb5_db_entry *local_tgt_storage; krb5_key_data *client_key; krb5_kdc_req *request; struct krb5_kdcpreauth_rock_st rock; const char *status; krb5_pa_data **e_data; krb5_boolean typed_e_data; krb5_kdc_rep reply; krb5_timestamp kdc_time; krb5_timestamp authtime; krb5_keyblock session_key; unsigned int c_flags; krb5_data *req_pkt; krb5_data *inner_body; struct kdc_request_state *rstate; char *sname, *cname; void *pa_context; const krb5_fulladdr *local_addr; const krb5_fulladdr *remote_addr; krb5_data **auth_indicators; krb5_error_code preauth_err; kdc_realm_t *active_realm; krb5_audit_state *au_state; }; static void finish_process_as_req(struct as_req_state *state, krb5_error_code errcode) { krb5_key_data *server_key; krb5_keyblock *as_encrypting_key = NULL; krb5_data *response = NULL; const char *emsg = 0; int did_log = 0; loop_respond_fn oldrespond; void *oldarg; kdc_realm_t *kdc_active_realm = state->active_realm; krb5_audit_state *au_state = state->au_state; assert(state); oldrespond = state->respond; oldarg = state->arg; if (errcode) goto egress; au_state->stage = ENCR_REP; if ((errcode = validate_forwardable(state->request, *state->client, *state->server, state->kdc_time, &state->status))) { errcode += ERROR_TABLE_BASE_krb5; goto egress; } errcode = check_indicators(kdc_context, state->server, state->auth_indicators); if (errcode) { state->status = "HIGHER_AUTHENTICATION_REQUIRED"; goto egress; } state->ticket_reply.enc_part2 = &state->enc_tkt_reply; /* * Find the server key */ if ((errcode = krb5_dbe_find_enctype(kdc_context, state->server, -1, /* ignore keytype */ -1, /* Ignore salttype */ 0, /* Get highest kvno */ &server_key))) { state->status = "FINDING_SERVER_KEY"; goto egress; } /* * Convert server->key into a real 
key * (it may be encrypted in the database) * * server_keyblock is later used to generate auth data signatures */ if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL, server_key, &state->server_keyblock, NULL))) { state->status = "DECRYPT_SERVER_KEY"; goto egress; } /* Start assembling the response */ state->reply.msg_type = KRB5_AS_REP; state->reply.client = state->enc_tkt_reply.client; /* post canonization */ state->reply.ticket = &state->ticket_reply; state->reply_encpart.session = &state->session_key; if ((errcode = fetch_last_req_info(state->client, &state->reply_encpart.last_req))) { state->status = "FETCH_LAST_REQ"; goto egress; } state->reply_encpart.nonce = state->request->nonce; state->reply_encpart.key_exp = get_key_exp(state->client); state->reply_encpart.flags = state->enc_tkt_reply.flags; state->reply_encpart.server = state->ticket_reply.server; /* copy the time fields EXCEPT for authtime; it's location * is used for ktime */ state->reply_encpart.times = state->enc_tkt_reply.times; state->reply_encpart.times.authtime = state->authtime = state->kdc_time; state->reply_encpart.caddrs = state->enc_tkt_reply.caddrs; state->reply_encpart.enc_padata = NULL; /* Fetch the padata info to be returned (do this before * authdata to handle possible replacement of reply key */ errcode = return_padata(kdc_context, &state->rock, state->req_pkt, state->request, &state->reply, &state->client_keyblock, &state->pa_context); if (errcode) { state->status = "KDC_RETURN_PADATA"; goto egress; } /* If we didn't find a client long-term key and no preauth mechanism * replaced the reply key, error out now. */ if (state->client_keyblock.enctype == ENCTYPE_NULL) { state->status = "CANT_FIND_CLIENT_KEY"; errcode = KRB5KDC_ERR_ETYPE_NOSUPP; goto egress; } errcode = handle_authdata(kdc_context, state->c_flags, state->client, state->server, NULL, state->local_tgt, &state->client_keyblock, &state->server_keyblock, NULL, state->req_pkt, state->request, NULL, /* for_user_princ */ NULL, /* enc_tkt_request */ state->auth_indicators, &state->enc_tkt_reply); if (errcode) { krb5_klog_syslog(LOG_INFO, _("AS_REQ : handle_authdata (%d)"), errcode); state->status = "HANDLE_AUTHDATA"; goto egress; } errcode = krb5_encrypt_tkt_part(kdc_context, &state->server_keyblock, &state->ticket_reply); if (errcode) { state->status = "ENCRYPT_TICKET"; goto egress; } errcode = kau_make_tkt_id(kdc_context, &state->ticket_reply, &au_state->tkt_out_id); if (errcode) { state->status = "GENERATE_TICKET_ID"; goto egress; } state->ticket_reply.enc_part.kvno = server_key->key_data_kvno; errcode = kdc_fast_response_handle_padata(state->rstate, state->request, &state->reply, state->client_keyblock.enctype); if (errcode) { state->status = "MAKE_FAST_RESPONSE"; goto egress; } /* now encode/encrypt the response */ state->reply.enc_part.enctype = state->client_keyblock.enctype; errcode = kdc_fast_handle_reply_key(state->rstate, &state->client_keyblock, &as_encrypting_key); if (errcode) { state->status = "MAKE_FAST_REPLY_KEY"; goto egress; } errcode = return_enc_padata(kdc_context, state->req_pkt, state->request, as_encrypting_key, state->server, &state->reply_encpart, FALSE); if (errcode) { state->status = "KDC_RETURN_ENC_PADATA"; goto egress; } if (kdc_fast_hide_client(state->rstate)) state->reply.client = (krb5_principal)krb5_anonymous_principal(); errcode = krb5_encode_kdc_rep(kdc_context, KRB5_AS_REP, &state->reply_encpart, 0, as_encrypting_key, &state->reply, &response); if (state->client_key != NULL) state->reply.enc_part.kvno = 
state->client_key->key_data_kvno; if (errcode) { state->status = "ENCODE_KDC_REP"; goto egress; } /* these parts are left on as a courtesy from krb5_encode_kdc_rep so we can use them in raw form if needed. But, we don't... */ memset(state->reply.enc_part.ciphertext.data, 0, state->reply.enc_part.ciphertext.length); free(state->reply.enc_part.ciphertext.data); log_as_req(kdc_context, state->local_addr, state->remote_addr, state->request, &state->reply, state->client, state->cname, state->server, state->sname, state->authtime, 0, 0, 0); did_log = 1; egress: if (errcode != 0) assert (state->status != 0); au_state->status = state->status; au_state->reply = &state->reply; kau_as_req(kdc_context, (errcode || state->preauth_err) ? FALSE : TRUE, au_state); kau_free_kdc_req(au_state); free_padata_context(kdc_context, state->pa_context); if (as_encrypting_key) krb5_free_keyblock(kdc_context, as_encrypting_key); if (errcode) emsg = krb5_get_error_message(kdc_context, errcode); if (state->status) { log_as_req(kdc_context, state->local_addr, state->remote_addr, state->request, &state->reply, state->client, state->cname, state->server, state->sname, state->authtime, state->status, errcode, emsg); did_log = 1; } if (errcode) { if (state->status == 0) { state->status = emsg; } if (errcode != KRB5KDC_ERR_DISCARD) { errcode -= ERROR_TABLE_BASE_krb5; if (errcode < 0 || errcode > KRB_ERR_MAX) errcode = KRB_ERR_GENERIC; errcode = prepare_error_as(state->rstate, state->request, state->local_tgt, errcode, state->e_data, state->typed_e_data, ((state->client != NULL) ? state->client->princ : NULL), &response, state->status); state->status = 0; } } if (emsg) krb5_free_error_message(kdc_context, emsg); if (state->enc_tkt_reply.authorization_data != NULL) krb5_free_authdata(kdc_context, state->enc_tkt_reply.authorization_data); if (state->server_keyblock.contents != NULL) krb5_free_keyblock_contents(kdc_context, &state->server_keyblock); if (state->client_keyblock.contents != NULL) krb5_free_keyblock_contents(kdc_context, &state->client_keyblock); if (state->reply.padata != NULL) krb5_free_pa_data(kdc_context, state->reply.padata); if (state->reply_encpart.enc_padata) krb5_free_pa_data(kdc_context, state->reply_encpart.enc_padata); if (state->cname != NULL) free(state->cname); if (state->sname != NULL) free(state->sname); krb5_db_free_principal(kdc_context, state->client); krb5_db_free_principal(kdc_context, state->server); krb5_db_free_principal(kdc_context, state->local_tgt_storage); if (state->session_key.contents != NULL) krb5_free_keyblock_contents(kdc_context, &state->session_key); if (state->ticket_reply.enc_part.ciphertext.data != NULL) { memset(state->ticket_reply.enc_part.ciphertext.data , 0, state->ticket_reply.enc_part.ciphertext.length); free(state->ticket_reply.enc_part.ciphertext.data); } krb5_free_pa_data(kdc_context, state->e_data); krb5_free_data(kdc_context, state->inner_body); kdc_free_rstate(state->rstate); krb5_free_kdc_req(kdc_context, state->request); k5_free_data_ptr_list(state->auth_indicators); assert(did_log != 0); free(state); (*oldrespond)(oldarg, errcode, response); } static void finish_missing_required_preauth(void *arg) { struct as_req_state *state = (struct as_req_state *)arg; finish_process_as_req(state, state->preauth_err); } static void finish_preauth(void *arg, krb5_error_code code) { struct as_req_state *state = arg; krb5_error_code real_code = code; if (code) { if (vague_errors) code = KRB5KRB_ERR_GENERIC; state->status = "PREAUTH_FAILED"; if (real_code == 
KRB5KDC_ERR_PREAUTH_FAILED) { state->preauth_err = code; get_preauth_hint_list(state->request, &state->rock, &state->e_data, finish_missing_required_preauth, state); return; } } else { /* * Final check before handing out ticket: If the client requires * preauthentication, verify that the proper kind of * preauthentication was carried out. */ state->status = missing_required_preauth(state->client, state->server, &state->enc_tkt_reply); if (state->status) { state->preauth_err = KRB5KDC_ERR_PREAUTH_REQUIRED; get_preauth_hint_list(state->request, &state->rock, &state->e_data, finish_missing_required_preauth, state); return; } } finish_process_as_req(state, code); } /*ARGSUSED*/ void process_as_req(krb5_kdc_req *request, krb5_data *req_pkt, const krb5_fulladdr *local_addr, const krb5_fulladdr *remote_addr, kdc_realm_t *kdc_active_realm, verto_ctx *vctx, loop_respond_fn respond, void *arg) { krb5_error_code errcode; unsigned int s_flags = 0; krb5_data encoded_req_body; krb5_enctype useenctype; struct as_req_state *state; krb5_audit_state *au_state = NULL; state = k5alloc(sizeof(*state), &errcode); if (state == NULL) { (*respond)(arg, errcode, NULL); return; } state->respond = respond; state->arg = arg; state->request = request; state->req_pkt = req_pkt; state->local_addr = local_addr; state->remote_addr = remote_addr; state->active_realm = kdc_active_realm; errcode = kdc_make_rstate(kdc_active_realm, &state->rstate); if (errcode != 0) { (*respond)(arg, errcode, NULL); free(state); return; } /* Initialize audit state. */ errcode = kau_init_kdc_req(kdc_context, state->request, remote_addr, &au_state); if (errcode) { (*respond)(arg, errcode, NULL); kdc_free_rstate(state->rstate); free(state); return; } state->au_state = au_state; if (state->request->msg_type != KRB5_AS_REQ) { state->status = "VALIDATE_MESSAGE_TYPE"; errcode = KRB5_BADMSGTYPE; goto errout; } /* Seed the audit trail with the request ID and basic information. */ kau_as_req(kdc_context, TRUE, au_state); if (fetch_asn1_field((unsigned char *) req_pkt->data, 1, 4, &encoded_req_body) != 0) { errcode = ASN1_BAD_ID; state->status = "FETCH_REQ_BODY"; goto errout; } errcode = kdc_find_fast(&state->request, &encoded_req_body, NULL, NULL, state->rstate, &state->inner_body); if (errcode) { state->status = "FIND_FAST"; goto errout; } if (state->inner_body == NULL) { /* Not a FAST request; copy the encoded request body. */ errcode = krb5_copy_data(kdc_context, &encoded_req_body, &state->inner_body); if (errcode) { state->status = "COPY_REQ_BODY"; goto errout; } } au_state->request = state->request; state->rock.request = state->request; state->rock.inner_body = state->inner_body; state->rock.rstate = state->rstate; state->rock.vctx = vctx; state->rock.auth_indicators = &state->auth_indicators; if (!state->request->client) { state->status = "NULL_CLIENT"; errcode = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; goto errout; } if ((errcode = krb5_unparse_name(kdc_context, state->request->client, &state->cname))) { state->status = "UNPARSE_CLIENT"; goto errout; } limit_string(state->cname); if (!state->request->server) { state->status = "NULL_SERVER"; errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto errout; } if ((errcode = krb5_unparse_name(kdc_context, state->request->server, &state->sname))) { state->status = "UNPARSE_SERVER"; goto errout; } limit_string(state->sname); /* * We set KRB5_KDB_FLAG_CLIENT_REFERRALS_ONLY as a hint * to the backend to return naming information in lieu * of cross realm TGS entries. 
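* (A referral entry carries the realm that actually holds the principal, which is turned into KRB5KDC_ERR_WRONG_REALM further down.)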
*/ setflag(state->c_flags, KRB5_KDB_FLAG_CLIENT_REFERRALS_ONLY); /* * Note that according to the referrals draft we should * always canonicalize enterprise principal names. */ if (isflagset(state->request->kdc_options, KDC_OPT_CANONICALIZE) || state->request->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) { setflag(state->c_flags, KRB5_KDB_FLAG_CANONICALIZE); setflag(state->c_flags, KRB5_KDB_FLAG_ALIAS_OK); } if (include_pac_p(kdc_context, state->request)) { setflag(state->c_flags, KRB5_KDB_FLAG_INCLUDE_PAC); } errcode = krb5_db_get_principal(kdc_context, state->request->client, state->c_flags, &state->client); if (errcode == KRB5_KDB_CANTLOCK_DB) errcode = KRB5KDC_ERR_SVC_UNAVAILABLE; if (errcode == KRB5_KDB_NOENTRY) { state->status = "CLIENT_NOT_FOUND"; if (vague_errors) errcode = KRB5KRB_ERR_GENERIC; else errcode = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; goto errout; } else if (errcode) { state->status = "LOOKING_UP_CLIENT"; goto errout; } state->rock.client = state->client; /* * If the backend returned a principal that is not in the local * realm, then we need to refer the client to that realm. */ if (!is_local_principal(kdc_active_realm, state->client->princ)) { /* Entry is a referral to another realm */ state->status = "REFERRAL"; au_state->cl_realm = &state->client->princ->realm; errcode = KRB5KDC_ERR_WRONG_REALM; goto errout; } au_state->stage = SRVC_PRINC; s_flags = 0; setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK); if (isflagset(state->request->kdc_options, KDC_OPT_CANONICALIZE)) { setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE); } errcode = krb5_db_get_principal(kdc_context, state->request->server, s_flags, &state->server); if (errcode == KRB5_KDB_CANTLOCK_DB) errcode = KRB5KDC_ERR_SVC_UNAVAILABLE; if (errcode == KRB5_KDB_NOENTRY) { state->status = "SERVER_NOT_FOUND"; errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto errout; } else if (errcode) { state->status = "LOOKING_UP_SERVER"; goto errout; } errcode = get_local_tgt(kdc_context, &state->request->server->realm, state->server, &state->local_tgt, &state->local_tgt_storage); if (errcode) { state->status = "GET_LOCAL_TGT"; goto errout; } au_state->stage = VALIDATE_POL; if ((errcode = krb5_timeofday(kdc_context, &state->kdc_time))) { state->status = "TIMEOFDAY"; goto errout; } state->authtime = state->kdc_time; /* for audit_as_request() */ if ((errcode = validate_as_request(kdc_active_realm, state->request, *state->client, *state->server, state->kdc_time, &state->status, &state->e_data))) { if (!state->status) state->status = "UNKNOWN_REASON"; errcode += ERROR_TABLE_BASE_krb5; goto errout; } au_state->stage = ISSUE_TKT; /* * Select the keytype for the ticket session key. */ if ((useenctype = select_session_keytype(kdc_active_realm, state->server, state->request->nktypes, state->request->ktype)) == 0) { /* unsupported ktype */ state->status = "BAD_ENCRYPTION_TYPE"; errcode = KRB5KDC_ERR_ETYPE_NOSUPP; goto errout; } if ((errcode = krb5_c_make_random_key(kdc_context, useenctype, &state->session_key))) { state->status = "MAKE_RANDOM_KEY"; goto errout; } /* * Canonicalization is only effective if we are issuing a TGT * (the intention is to allow support for Windows "short" realm * aliases, nothing more). */ if (isflagset(s_flags, KRB5_KDB_FLAG_CANONICALIZE) && krb5_is_tgs_principal(state->request->server) && krb5_is_tgs_principal(state->server->princ)) { state->ticket_reply.server = state->server->princ; } else { state->ticket_reply.server = state->request->server; } /* Copy options that request the corresponding ticket flags. 
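* (OPTS2FLAGS maps each requested KDC_OPT_* bit onto its TKT_FLG_* counterpart.)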
*/ state->enc_tkt_reply.flags = OPTS2FLAGS(state->request->kdc_options); state->enc_tkt_reply.times.authtime = state->authtime; setflag(state->enc_tkt_reply.flags, TKT_FLG_INITIAL); setflag(state->enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP); /* * It should be noted that local policy may affect the * processing of any of these flags. For example, some * realms may refuse to issue renewable tickets */ state->enc_tkt_reply.session = &state->session_key; if (isflagset(state->c_flags, KRB5_KDB_FLAG_CANONICALIZE)) { state->client_princ = *(state->client->princ); } else { state->client_princ = *(state->request->client); /* The realm is always canonicalized */ state->client_princ.realm = state->client->princ->realm; } state->enc_tkt_reply.client = &state->client_princ; state->enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS; state->enc_tkt_reply.transited.tr_contents = empty_string; if (isflagset(state->request->kdc_options, KDC_OPT_POSTDATED)) { setflag(state->enc_tkt_reply.flags, TKT_FLG_INVALID); state->enc_tkt_reply.times.starttime = state->request->from; } else state->enc_tkt_reply.times.starttime = state->kdc_time; kdc_get_ticket_endtime(kdc_active_realm, state->enc_tkt_reply.times.starttime, kdc_infinity, state->request->till, state->client, state->server, &state->enc_tkt_reply.times.endtime); kdc_get_ticket_renewtime(kdc_active_realm, state->request, NULL, state->client, state->server, &state->enc_tkt_reply); /* * starttime is optional, and treated as authtime if not present. * so we can nuke it if it matches */ if (state->enc_tkt_reply.times.starttime == state->enc_tkt_reply.times.authtime) state->enc_tkt_reply.times.starttime = 0; state->enc_tkt_reply.caddrs = state->request->addresses; state->enc_tkt_reply.authorization_data = 0; /* If anonymous requests are being used, adjust the realm of the client * principal. */ if (isflagset(state->request->kdc_options, KDC_OPT_REQUEST_ANONYMOUS)) { if (!krb5_principal_compare_any_realm(kdc_context, state->request->client, krb5_anonymous_principal())) { errcode = KRB5KDC_ERR_BADOPTION; /* Anonymous requested but anonymous principal not used.*/ state->status = "VALIDATE_ANONYMOUS_PRINCIPAL"; goto errout; } krb5_free_principal(kdc_context, state->request->client); state->request->client = NULL; errcode = krb5_copy_principal(kdc_context, krb5_anonymous_principal(), &state->request->client); if (errcode) { state->status = "COPY_ANONYMOUS_PRINCIPAL"; goto errout; } state->enc_tkt_reply.client = state->request->client; setflag(state->client->attributes, KRB5_KDB_REQUIRES_PRE_AUTH); } errcode = select_client_key(kdc_context, state->client, state->request->ktype, state->request->nktypes, &state->client_keyblock, &state->client_key); if (errcode) { state->status = "DECRYPT_CLIENT_KEY"; goto errout; } if (state->client_key != NULL) { state->rock.client_key = state->client_key; state->rock.client_keyblock = &state->client_keyblock; } errcode = kdc_fast_read_cookie(kdc_context, state->rstate, state->request, state->local_tgt); if (errcode) { state->status = "READ_COOKIE"; goto errout; } /* * Check the preauthentication if it is there. 
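* check_padata() completes asynchronously and hands control back through the finish_preauth() callback once the preauth modules are done.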
*/ if (state->request->padata) { check_padata(kdc_context, &state->rock, state->req_pkt, state->request, &state->enc_tkt_reply, &state->pa_context, &state->e_data, &state->typed_e_data, finish_preauth, state); } else finish_preauth(state, 0); return; errout: finish_process_as_req(state, errcode); } static krb5_error_code prepare_error_as(struct kdc_request_state *rstate, krb5_kdc_req *request, krb5_db_entry *local_tgt, int error, krb5_pa_data **e_data_in, krb5_boolean typed_e_data, krb5_principal canon_client, krb5_data **response, const char *status) { krb5_error errpkt; krb5_error_code retval; krb5_data *scratch = NULL, *e_data_asn1 = NULL, *fast_edata = NULL; krb5_pa_data **e_data = NULL, *cookie = NULL; kdc_realm_t *kdc_active_realm = rstate->realm_data; size_t count; errpkt.magic = KV5M_ERROR; if (e_data_in != NULL) { /* Add a PA-FX-COOKIE to e_data_in. e_data is a shallow copy * containing aliases. */ for (count = 0; e_data_in[count] != NULL; count++); e_data = calloc(count + 2, sizeof(*e_data)); if (e_data == NULL) return ENOMEM; memcpy(e_data, e_data_in, count * sizeof(*e_data)); retval = kdc_fast_make_cookie(kdc_context, rstate, local_tgt, request->client, &cookie); e_data[count] = cookie; } errpkt.ctime = request->nonce; errpkt.cusec = 0; retval = krb5_us_timeofday(kdc_context, &errpkt.stime, &errpkt.susec); if (retval) goto cleanup; errpkt.error = error; errpkt.server = request->server; errpkt.client = (error == KDC_ERR_WRONG_REALM) ? canon_client : request->client; errpkt.text = string2data((char *)status); if (e_data != NULL) { if (typed_e_data) retval = encode_krb5_typed_data(e_data, &e_data_asn1); else retval = encode_krb5_padata_sequence(e_data, &e_data_asn1); if (retval) goto cleanup; errpkt.e_data = *e_data_asn1; } else errpkt.e_data = empty_data(); retval = kdc_fast_handle_error(kdc_context, rstate, request, e_data, &errpkt, &fast_edata); if (retval) goto cleanup; if (fast_edata != NULL) errpkt.e_data = *fast_edata; scratch = k5alloc(sizeof(*scratch), &retval); if (scratch == NULL) goto cleanup; if (kdc_fast_hide_client(rstate) && errpkt.client != NULL) errpkt.client = (krb5_principal)krb5_anonymous_principal(); retval = krb5_mk_error(kdc_context, &errpkt, scratch); if (retval) goto cleanup; *response = scratch; scratch = NULL; cleanup: krb5_free_data(kdc_context, fast_edata); krb5_free_data(kdc_context, e_data_asn1); free(scratch); free(e_data); if (cookie != NULL) free(cookie->contents); free(cookie); return retval; }
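/*
 * Illustrative sketch, not from the krb5 sources: the key-expiration
 * rule of get_key_exp() above, restated as a standalone program.
 * krb5_timestamp is modeled as a plain integer and ts_min() as the
 * smaller value; the real ts_min() additionally copes with timestamp
 * wraparound, which this model ignores. Zero means "never expires".
 */
#include <stdint.h>
#include <stdio.h>

typedef int64_t ts; /* stand-in for krb5_timestamp */

static ts key_exp(ts expiration, ts pw_expiration)
{
	if (expiration == 0)
		return pw_expiration;  /* account never expires */
	if (pw_expiration == 0)
		return expiration;     /* password never expires */
	return expiration < pw_expiration ? expiration : pw_expiration;
}

int main(void)
{
	/* Only the password expires. */
	printf("%lld\n", (long long)key_exp(0, 1700000000));
	/* Both set: the earlier one wins. */
	printf("%lld\n", (long long)key_exp(1600000000, 1700000000));
	/* Neither set: no expiration at all. */
	printf("%lld\n", (long long)key_exp(0, 0));
	return 0;
}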
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #ifdef ENABLE_EDP #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> #include <fnmatch.h> #include <assert.h> static int seq = 0; int edp_send(struct lldpd *global, struct lldpd_hardware *hardware) { const u_int8_t mcastaddr[] = EDP_MULTICAST_ADDR; const u_int8_t llcorg[] = LLC_ORG_EXTREME; struct lldpd_chassis *chassis; int length, i, v; u_int8_t *packet, *pos, *pos_llc, *pos_len_eh, *pos_len_edp, *pos_edp, *tlv, *end; u_int16_t checksum; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan; unsigned int state = 0; #endif u_int8_t edp_fakeversion[] = {7, 6, 4, 99}; /* Subsequent XXX can be replaced by other values. We place them here to ensure the position of "" to be a bit invariant with version changes. */ char *deviceslot[] = { "eth", "veth", "XXX", "XXX", "XXX", "XXX", "XXX", "XXX", "", NULL }; log_debug("edp", "send EDP frame on port %s", hardware->h_ifname); chassis = hardware->h_lport.p_chassis; #ifdef ENABLE_DOT1 while (state != 2) { #endif length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; v = 0; /* Ethernet header */ if (!( POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && POKE_SAVE(pos_len_eh) && /* We compute the len later */ POKE_UINT16(0))) goto toobig; /* LLC */ if (!( POKE_SAVE(pos_llc) && /* We need to save our current position to compute ethernet len */ /* SSAP and DSAP */ POKE_UINT8(0xaa) && POKE_UINT8(0xaa) && /* Control field */ POKE_UINT8(0x03) && /* ORG */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_UINT16(LLC_PID_EDP))) goto toobig; /* EDP header */ if ((chassis->c_id_len != ETHER_ADDR_LEN) || (chassis->c_id_subtype != LLDP_CHASSISID_SUBTYPE_LLADDR)) { log_warnx("edp", "local chassis does not use MAC address as chassis ID!?"); free(packet); return EINVAL; } if (!( POKE_SAVE(pos_edp) && /* Save the start of EDP frame */ POKE_UINT8(1) && POKE_UINT8(0) && POKE_SAVE(pos_len_edp) && /* We compute the len and the checksum later */ POKE_UINT32(0) && /* Len + Checksum */ POKE_UINT16(seq) && POKE_UINT16(0) && POKE_BYTES(chassis->c_id, ETHER_ADDR_LEN))) goto toobig; seq++; #ifdef ENABLE_DOT1 switch (state) { case 0: #endif /* Display TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_DISPLAY) && POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) && POKE_UINT8(0) && /* Add a NULL character for better compatibility */ POKE_END_EDP_TLV)) goto toobig; /* Info TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_INFO))) goto toobig; /* We try to emulate the slot thing */ for (i=0; deviceslot[i] != NULL; i++) { if (strncmp(hardware->h_ifname, deviceslot[i], strlen(deviceslot[i])) == 0) { if (!( POKE_UINT16(i) && 
POKE_UINT16(atoi(hardware->h_ifname + strlen(deviceslot[i]))))) goto toobig; break; } } /* If we don't find a "slot", we say that the interface is in slot 8 */ if (deviceslot[i] == NULL) { if (!( POKE_UINT16(8) && POKE_UINT16(hardware->h_ifindex))) goto toobig; } if (!( POKE_UINT16(0) && /* vchassis */ POKE_UINT32(0) && POKE_UINT16(0) && /* Reserved */ /* Version */ POKE_BYTES(edp_fakeversion, sizeof(edp_fakeversion)) && /* Connections, we say that we won't have more interfaces than this mask. */ POKE_UINT32(0xffffffff) && POKE_UINT32(0) && POKE_UINT32(0) && POKE_UINT32(0) && POKE_END_EDP_TLV)) goto toobig; #ifdef ENABLE_DOT1 break; case 1: TAILQ_FOREACH(vlan, &hardware->h_lport.p_vlans, v_entries) { v++; if (!( POKE_START_EDP_TLV(EDP_TLV_VLAN) && POKE_UINT8(0) && /* Flags: no IP address */ POKE_UINT8(0) && /* Reserved */ POKE_UINT16(vlan->v_vid) && POKE_UINT32(0) && /* Reserved */ POKE_UINT32(0) && /* IP address */ /* VLAN name */ POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) && POKE_UINT8(0) && POKE_END_EDP_TLV)) goto toobig; } break; } if ((state == 1) && (v == 0)) { /* No VLAN, no need to send another TLV */ free(packet); break; } #endif /* Null TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_NULL) && POKE_END_EDP_TLV && POKE_SAVE(end))) goto toobig; /* Compute len and checksum */ i = end - pos_llc; /* Ethernet length */ v = end - pos_edp; /* EDP length */ POKE_RESTORE(pos_len_eh); if (!(POKE_UINT16(i))) goto toobig; POKE_RESTORE(pos_len_edp); if (!(POKE_UINT16(v))) goto toobig; checksum = frame_checksum(pos_edp, v, 0); if (!(POKE_UINT16(checksum))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("edp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } free(packet); #ifdef ENABLE_DOT1 state++; } #endif hardware->h_tx_cnt++; return 0; toobig: free(packet); return E2BIG; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_len < (x)) { \ log_warnx("edp", name " EDP TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) int edp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; #ifdef ENABLE_DOT1 struct lldpd_mgmt *mgmt, *mgmt_next, *m; struct lldpd_vlan *lvlan = NULL, *lvlan_next; #endif const unsigned char edpaddr[] = EDP_MULTICAST_ADDR; int length, gotend = 0, gotvlans = 0, edp_len, tlv_len, tlv_type; int edp_port, edp_slot; u_int8_t *pos, *pos_edp, *tlv; u_int8_t version[4]; #ifdef ENABLE_DOT1 struct in_addr address; struct lldpd_port *oport; #endif log_debug("edp", "decode EDP frame on port %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("edp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("edp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) + 8 /* LLC */ + 10 + ETHER_ADDR_LEN /* EDP header */) { log_warnx("edp", "too short EDP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(edpaddr, sizeof(edpaddr)) != 0) { log_info("edp", "frame not targeted at EDP multicast address received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(ETHER_ADDR_LEN); 
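/* The source MAC is skipped above; next come the length/EtherType word and the LLC/SNAP header ending in the EDP protocol ID. */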
PEEK_DISCARD_UINT16; PEEK_DISCARD(6); /* LLC: DSAP + SSAP + control + org */ if (PEEK_UINT16 != LLC_PID_EDP) { log_debug("edp", "incorrect LLC protocol ID received on %s", hardware->h_ifname); goto malformed; } (void)PEEK_SAVE(pos_edp); /* Save the start of EDP packet */ if (PEEK_UINT8 != 1) { log_warnx("edp", "incorrect EDP version for frame received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD_UINT8; /* Reserved */ edp_len = PEEK_UINT16; PEEK_DISCARD_UINT16; /* Checksum */ PEEK_DISCARD_UINT16; /* Sequence */ if (PEEK_UINT16 != 0) { /* ID Type = 0 = MAC */ log_warnx("edp", "incorrect device id type for frame received on %s", hardware->h_ifname); goto malformed; } if (edp_len > length + 10) { log_warnx("edp", "incorrect size for EDP frame received on %s", hardware->h_ifname); goto malformed; } chassis->c_ttl = cfg?cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold:0; chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LLADDR; chassis->c_id_len = ETHER_ADDR_LEN; if ((chassis->c_id = (char *)malloc(ETHER_ADDR_LEN)) == NULL) { log_warn("edp", "unable to allocate memory for chassis ID"); goto malformed; } PEEK_BYTES(chassis->c_id, ETHER_ADDR_LEN); /* Let's check checksum */ if (frame_checksum(pos_edp, edp_len, 0) != 0) { log_warnx("edp", "incorrect EDP checksum for frame received on %s", hardware->h_ifname); goto malformed; } while (length && !gotend) { if (length < 4) { log_warnx("edp", "EDP TLV header is too large for " "frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_UINT8 != EDP_TLV_MARKER) { log_warnx("edp", "incorrect marker starting EDP TLV header for frame " "received on %s", hardware->h_ifname); goto malformed; } tlv_type = PEEK_UINT8; tlv_len = PEEK_UINT16 - 4; (void)PEEK_SAVE(tlv); if ((tlv_len < 0) || (tlv_len > length)) { log_debug("edp", "incorrect size in EDP TLV header for frame " "received on %s", hardware->h_ifname); /* Some poor old Extreme Summit are quite bogus */ gotend = 1; break; } switch (tlv_type) { case EDP_TLV_INFO: CHECK_TLV_SIZE(32, "Info"); port->p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME; edp_slot = PEEK_UINT16; edp_port = PEEK_UINT16; if (asprintf(&port->p_id, "%d/%d", edp_slot + 1, edp_port + 1) == -1) { log_warn("edp", "unable to allocate memory for " "port ID"); goto malformed; } port->p_id_len = strlen(port->p_id); if (asprintf(&port->p_descr, "Slot %d / Port %d", edp_slot + 1, edp_port + 1) == -1) { log_warn("edp", "unable to allocate memory for " "port description"); goto malformed; } PEEK_DISCARD_UINT16; /* vchassis */ PEEK_DISCARD(6); /* Reserved */ PEEK_BYTES(version, 4); if (asprintf(&chassis->c_descr, "EDP enabled device, version %d.%d.%d.%d", version[0], version[1], version[2], version[3]) == -1) { log_warn("edp", "unable to allocate memory for " "chassis description"); goto malformed; } break; case EDP_TLV_DISPLAY: if ((chassis->c_name = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("edp", "unable to allocate memory for chassis " "name"); goto malformed; } /* TLV display contains a lot of garbage */ PEEK_BYTES(chassis->c_name, tlv_len); break; case EDP_TLV_NULL: if (tlv_len != 0) { log_warnx("edp", "null tlv with incorrect size in frame " "received on %s", hardware->h_ifname); goto malformed; } if (length) log_debug("edp", "extra data after edp frame on %s", hardware->h_ifname); gotend = 1; break; case EDP_TLV_VLAN: #ifdef ENABLE_DOT1 CHECK_TLV_SIZE(12, "VLAN"); if ((lvlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("edp", "unable to allocate vlan"); goto malformed; } 
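/* VLAN TLV body: flags (1 byte), reserved (1), VID (2), reserved (4), IP address (4), then the VLAN name in the remaining tlv_len - 12 bytes. */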
PEEK_DISCARD_UINT16; /* Flags + reserved */ lvlan->v_vid = PEEK_UINT16; /* VID */ PEEK_DISCARD(4); /* Reserved */ PEEK_BYTES(&address, sizeof(address)); if (address.s_addr != INADDR_ANY) { mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &address, sizeof(struct in_addr), 0); if (mgmt == NULL) { log_warn("edp", "Out of memory"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); } if ((lvlan->v_name = (char *)calloc(1, tlv_len + 1 - 12)) == NULL) { log_warn("edp", "unable to allocate vlan name"); goto malformed; } PEEK_BYTES(lvlan->v_name, tlv_len - 12); TAILQ_INSERT_TAIL(&port->p_vlans, lvlan, v_entries); lvlan = NULL; #endif gotvlans = 1; break; default: log_debug("edp", "unknown EDP TLV type (%d) received on %s", tlv_type, hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } PEEK_DISCARD(tlv + tlv_len - pos); } if ((chassis->c_id == NULL) || (port->p_id == NULL) || (chassis->c_name == NULL) || (chassis->c_descr == NULL) || (port->p_descr == NULL) || (gotend == 0)) { #ifdef ENABLE_DOT1 if (gotvlans && gotend) { /* VLAN can be sent in a separate frames. We need to add * those vlans to an existing port */ TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if (!((oport->p_protocol == LLDPD_MODE_EDP) && (oport->p_chassis->c_id_subtype == chassis->c_id_subtype) && (oport->p_chassis->c_id_len == chassis->c_id_len) && (memcmp(oport->p_chassis->c_id, chassis->c_id, chassis->c_id_len) == 0))) continue; /* We attach the VLANs to the found port */ lldpd_vlan_cleanup(oport); for (lvlan = TAILQ_FIRST(&port->p_vlans); lvlan != NULL; lvlan = lvlan_next) { lvlan_next = TAILQ_NEXT(lvlan, v_entries); TAILQ_REMOVE(&port->p_vlans, lvlan, v_entries); TAILQ_INSERT_TAIL(&oport->p_vlans, lvlan, v_entries); } /* And the IP addresses */ for (mgmt = TAILQ_FIRST(&chassis->c_mgmt); mgmt != NULL; mgmt = mgmt_next) { mgmt_next = TAILQ_NEXT(mgmt, m_entries); TAILQ_REMOVE(&chassis->c_mgmt, mgmt, m_entries); /* Don't add an address that already exists! */ TAILQ_FOREACH(m, &chassis->c_mgmt, m_entries) if (m->m_family == mgmt->m_family && !memcmp(&m->m_addr, &mgmt->m_addr, sizeof(m->m_addr))) break; if (m == NULL) TAILQ_INSERT_TAIL(&oport->p_chassis->c_mgmt, mgmt, m_entries); } } /* We discard the remaining frame */ goto malformed; } #else if (gotvlans) goto malformed; #endif log_warnx("edp", "some mandatory tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: #ifdef ENABLE_DOT1 free(lvlan); #endif lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #endif /* ENABLE_EDP */
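/*
 * Illustrative sketch, not part of the lldpd sources: frame_checksum()
 * is defined elsewhere in the tree, and a classic 16-bit one's-complement
 * sum (RFC 1071 style), assumed here, has exactly the property both
 * edp_send() and edp_decode() rely on: a frame that embeds its own
 * checksum sums back to zero. Standalone and runnable.
 */
#include <stdint.h>
#include <stdio.h>

/* One's-complement sum over big-endian 16-bit words. */
static uint16_t ones_checksum(const uint8_t *p, int len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)                      /* odd trailing byte, zero-padded */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)             /* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Toy frame with its 2-byte checksum field at offset 4, zeroed. */
	uint8_t frame[8] = { 1, 0, 0, 8, 0, 0, 0, 42 };
	uint16_t c = ones_checksum(frame, (int)sizeof(frame));

	frame[4] = (uint8_t)(c >> 8);
	frame[5] = (uint8_t)(c & 0xff);
	/* As in edp_decode(): verifying the filled-in frame yields 0. */
	printf("verify: %u\n", ones_checksum(frame, (int)sizeof(frame)));
	return 0;
}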
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "trace.h" #include <stdio.h> #include <unistd.h> #include <errno.h> #include <signal.h> #include <sys/stat.h> #include <fcntl.h> #include <time.h> #include <libgen.h> #include <assert.h> #include <sys/utsname.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/socket.h> #include <sys/select.h> #include <sys/time.h> #include <sys/ioctl.h> #include <arpa/inet.h> #include <netinet/if_ether.h> #include <pwd.h> #include <grp.h> static void usage(void); static struct protocol protos[] = { { LLDPD_MODE_LLDP, 1, "LLDP", 'l', lldp_send, lldp_decode, NULL, LLDP_MULTICAST_ADDR }, #ifdef ENABLE_CDP { LLDPD_MODE_CDPV1, 0, "CDPv1", 'c', cdpv1_send, cdp_decode, cdpv1_guess, CDP_MULTICAST_ADDR }, { LLDPD_MODE_CDPV2, 0, "CDPv2", 'c', cdpv2_send, cdp_decode, cdpv2_guess, CDP_MULTICAST_ADDR }, #endif #ifdef ENABLE_SONMP { LLDPD_MODE_SONMP, 0, "SONMP", 's', sonmp_send, sonmp_decode, NULL, SONMP_MULTICAST_ADDR }, #endif #ifdef ENABLE_EDP { LLDPD_MODE_EDP, 0, "EDP", 'e', edp_send, edp_decode, NULL, EDP_MULTICAST_ADDR }, #endif #ifdef ENABLE_FDP { LLDPD_MODE_FDP, 0, "FDP", 'f', fdp_send, cdp_decode, NULL, FDP_MULTICAST_ADDR }, #endif { 0, 0, "any", ' ', NULL, NULL, NULL, {0,0,0,0,0,0} } }; static char **saved_argv; #ifdef HAVE___PROGNAME extern const char *__progname; #else # define __progname "lldpd" #endif static void usage(void) { fprintf(stderr, "Usage: %s [OPTIONS ...]\n", __progname); fprintf(stderr, "Version: %s\n", PACKAGE_STRING); fprintf(stderr, "\n"); fprintf(stderr, "-d Do not daemonize.\n"); fprintf(stderr, "-r Receive-only mode\n"); fprintf(stderr, "-i Disable LLDP-MED inventory TLV transmission.\n"); fprintf(stderr, "-k Disable advertising of kernel release, version, machine.\n"); fprintf(stderr, "-S descr Override the default system description.\n"); fprintf(stderr, "-P name Override the default hardware platform.\n"); fprintf(stderr, "-m IP Specify the IPv4 management addresses of this system.\n"); fprintf(stderr, "-u file Specify the Unix-domain socket used for communication with lldpctl(8).\n"); fprintf(stderr, "-H mode Specify the behaviour when detecting multiple neighbors.\n"); fprintf(stderr, "-I iface Limit interfaces to use.\n"); #ifdef ENABLE_LLDPMED fprintf(stderr, "-M class Enable emission of LLDP-MED frame. 
'class' should be one of:\n"); fprintf(stderr, " 1 Generic Endpoint (Class I)\n"); fprintf(stderr, " 2 Media Endpoint (Class II)\n"); fprintf(stderr, " 3 Communication Device Endpoints (Class III)\n"); fprintf(stderr, " 4 Network Connectivity Device\n"); #endif #ifdef USE_SNMP fprintf(stderr, "-x Enable SNMP subagent.\n"); #endif fprintf(stderr, "\n"); #if defined ENABLE_CDP || defined ENABLE_EDP || defined ENABLE_FDP || defined ENABLE_SONMP fprintf(stderr, "Additional protocol support.\n"); #ifdef ENABLE_CDP fprintf(stderr, "-c Enable the support of CDP protocol. (Cisco)\n"); #endif #ifdef ENABLE_EDP fprintf(stderr, "-e Enable the support of EDP protocol. (Extreme)\n"); #endif #ifdef ENABLE_FDP fprintf(stderr, "-f Enable the support of FDP protocol. (Foundry)\n"); #endif #ifdef ENABLE_SONMP fprintf(stderr, "-s Enable the support of SONMP protocol. (Nortel)\n"); #endif fprintf(stderr, "\n"); #endif fprintf(stderr, "see manual page lldpd(8) for more information\n"); exit(1); } struct lldpd_hardware * lldpd_get_hardware(struct lldpd *cfg, char *name, int index, struct lldpd_ops *ops) { struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { if ((strcmp(hardware->h_ifname, name) == 0) && (hardware->h_ifindex == index) && ((!ops) || (ops == hardware->h_ops))) break; } return hardware; } /** * Allocate the default local port. This port will be cloned each time we need a * new local port. */ static void lldpd_alloc_default_local_port(struct lldpd *cfg) { struct lldpd_port *port; if ((port = (struct lldpd_port *) calloc(1, sizeof(struct lldpd_port))) == NULL) fatal("main", NULL); #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); TAILQ_INIT(&port->p_ppvids); TAILQ_INIT(&port->p_pids); #endif #ifdef ENABLE_CUSTOM TAILQ_INIT(&port->p_custom_list); #endif cfg->g_default_local_port = port; } /** * Clone a given port. The destination needs to be already allocated. 
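* The clone is produced by serializing the source port and unserializing the bytes into a fresh copy, after which the embedded TAILQs are repaired with marshal_repair_tailq().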
*/ static int lldpd_clone_port(struct lldpd_port *destination, struct lldpd_port *source) { u_int8_t *output = NULL; ssize_t output_len; struct lldpd_port *cloned = NULL; output_len = lldpd_port_serialize(source, (void**)&output); if (output_len == -1 || lldpd_port_unserialize(output, output_len, &cloned) <= 0) { log_warnx("alloc", "unable to clone default port"); free(output); return -1; } memcpy(destination, cloned, sizeof(struct lldpd_port)); free(cloned); free(output); #ifdef ENABLE_DOT1 marshal_repair_tailq(lldpd_vlan, &destination->p_vlans, v_entries); marshal_repair_tailq(lldpd_ppvid, &destination->p_ppvids, p_entries); marshal_repair_tailq(lldpd_pi, &destination->p_pids, p_entries); #endif #ifdef ENABLE_CUSTOM marshal_repair_tailq(lldpd_custom, &destination->p_custom_list, next); #endif return 0; } struct lldpd_hardware * lldpd_alloc_hardware(struct lldpd *cfg, char *name, int index) { struct lldpd_hardware *hardware; log_debug("alloc", "allocate a new local port (%s)", name); if ((hardware = (struct lldpd_hardware *) calloc(1, sizeof(struct lldpd_hardware))) == NULL) return NULL; /* Clone default local port */ if (lldpd_clone_port(&hardware->h_lport, cfg->g_default_local_port) == -1) { log_warnx("alloc", "unable to clone default port"); free(hardware); return NULL; } hardware->h_cfg = cfg; strlcpy(hardware->h_ifname, name, sizeof(hardware->h_ifname)); hardware->h_ifindex = index; hardware->h_lport.p_chassis = LOCAL_CHASSIS(cfg); hardware->h_lport.p_chassis->c_refcount++; TAILQ_INIT(&hardware->h_rports); #ifdef ENABLE_LLDPMED if (LOCAL_CHASSIS(cfg)->c_med_cap_available) { hardware->h_lport.p_med_cap_enabled = LLDP_MED_CAP_CAP; if (!cfg->g_config.c_noinventory) hardware->h_lport.p_med_cap_enabled |= LLDP_MED_CAP_IV; } #endif levent_hardware_init(hardware); return hardware; } struct lldpd_mgmt * lldpd_alloc_mgmt(int family, void *addrptr, size_t addrsize, u_int32_t iface) { struct lldpd_mgmt *mgmt; log_debug("alloc", "allocate a new management address (family: %d)", family); if (family <= LLDPD_AF_UNSPEC || family >= LLDPD_AF_LAST) { errno = EAFNOSUPPORT; return NULL; } if (addrsize > LLDPD_MGMT_MAXADDRSIZE) { errno = EOVERFLOW; return NULL; } mgmt = calloc(1, sizeof(struct lldpd_mgmt)); if (mgmt == NULL) { errno = ENOMEM; return NULL; } mgmt->m_family = family; memcpy(&mgmt->m_addr, addrptr, addrsize); mgmt->m_addrsize = addrsize; mgmt->m_iface = iface; return mgmt; } void lldpd_hardware_cleanup(struct lldpd *cfg, struct lldpd_hardware *hardware) { log_debug("alloc", "cleanup hardware port %s", hardware->h_ifname); free(hardware->h_lport_previous); free(hardware->h_lchassis_previous_id); free(hardware->h_lport_previous_id); lldpd_port_cleanup(&hardware->h_lport, 1); if (hardware->h_ops && hardware->h_ops->cleanup) hardware->h_ops->cleanup(cfg, hardware); levent_hardware_release(hardware); free(hardware); } static void lldpd_display_neighbors(struct lldpd *cfg) { if (!cfg->g_config.c_set_ifdescr) return; struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { struct lldpd_port *port; char *description; const char *neighbor = NULL; unsigned neighbors = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (SMART_HIDDEN(port)) continue; neighbors++; neighbor = port->p_chassis->c_name; } if (neighbors == 0) priv_iface_description(hardware->h_ifname, ""); else if (neighbors == 1 && neighbor && *neighbor != '\0') { if (asprintf(&description, "%s", neighbor) != -1) { priv_iface_description(hardware->h_ifname, description); free(description); } } else { 
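			/* More than one (or an unnamed) neighbor: expose only a
			 * count in the interface description. */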
if (asprintf(&description, "%d neighbor%s", neighbors, (neighbors > 1)?"s":"") != -1) { priv_iface_description(hardware->h_ifname, description); free(description); } } } } static void lldpd_count_neighbors(struct lldpd *cfg) { #if HAVE_SETPROCTITLE struct lldpd_chassis *chassis; const char *neighbor; unsigned neighbors = 0; TAILQ_FOREACH(chassis, &cfg->g_chassis, c_entries) { neighbors++; neighbor = chassis->c_name; } neighbors--; if (neighbors == 0) setproctitle("no neighbor"); else if (neighbors == 1 && neighbor && *neighbor != '\0') setproctitle("connected to %s", neighbor); else setproctitle("%d neighbor%s", neighbors, (neighbors > 1)?"s":""); #endif lldpd_display_neighbors(cfg); } static void notify_clients_deletion(struct lldpd_hardware *hardware, struct lldpd_port *rport) { TRACE(LLDPD_NEIGHBOR_DELETE(hardware->h_ifname, rport->p_chassis->c_name, rport->p_descr)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_DELETED, rport); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_DELETED, rport); #endif } static void lldpd_reset_timer(struct lldpd *cfg) { /* Reset timer for ports that have been changed. */ struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { /* We keep a flat copy of the local port to see if there is any * change. To do this, we zero out fields that are not * significant, marshal the port, then restore. */ struct lldpd_port *port = &hardware->h_lport; /* Take the current flags into account to detect a change. */ port->_p_hardware_flags = hardware->h_flags; u_int8_t *output = NULL; ssize_t output_len; char save[LLDPD_PORT_START_MARKER]; memcpy(save, port, sizeof(save)); /* coverity[suspicious_sizeof] We intentionally partially memset port */ memset(port, 0, sizeof(save)); output_len = lldpd_port_serialize(port, (void**)&output); memcpy(port, save, sizeof(save)); if (output_len == -1) { log_warnx("localchassis", "unable to serialize local port %s to check for differences", hardware->h_ifname); continue; } /* Compare with the previous value */ if (hardware->h_lport_previous && output_len == hardware->h_lport_previous_len && !memcmp(output, hardware->h_lport_previous, output_len)) { log_debug("localchassis", "no change detected for port %s", hardware->h_ifname); } else { log_debug("localchassis", "change detected for port %s, resetting its timer", hardware->h_ifname); levent_schedule_pdu(hardware); } /* Update the value */ free(hardware->h_lport_previous); hardware->h_lport_previous = output; hardware->h_lport_previous_len = output_len; } } void lldpd_cleanup(struct lldpd *cfg) { struct lldpd_hardware *hardware, *hardware_next; struct lldpd_chassis *chassis, *chassis_next; log_debug("localchassis", "cleanup all ports"); for (hardware = TAILQ_FIRST(&cfg->g_hardware); hardware != NULL; hardware = hardware_next) { hardware_next = TAILQ_NEXT(hardware, h_entries); if (!hardware->h_flags) { TRACE(LLDPD_INTERFACES_DELETE(hardware->h_ifname)); TAILQ_REMOVE(&cfg->g_hardware, hardware, h_entries); lldpd_remote_cleanup(hardware, notify_clients_deletion, 1); lldpd_hardware_cleanup(cfg, hardware); } else lldpd_remote_cleanup(hardware, notify_clients_deletion, !(hardware->h_flags & IFF_RUNNING)); } log_debug("localchassis", "cleanup all chassis"); for (chassis = TAILQ_FIRST(&cfg->g_chassis); chassis; chassis = chassis_next) { chassis_next = TAILQ_NEXT(chassis, c_entries); if (chassis->c_refcount == 0) { TAILQ_REMOVE(&cfg->g_chassis, chassis, c_entries); lldpd_chassis_cleanup(chassis, 1); } } lldpd_count_neighbors(cfg); 
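	/* Let the event loop schedule the next cleanup pass. */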
	levent_schedule_cleanup(cfg);
}

/* Update chassis `ochassis' with values from `chassis'. The latter one is not
   expected to be part of a list! It will also be wiped from memory. */
static void
lldpd_move_chassis(struct lldpd_chassis *ochassis,
    struct lldpd_chassis *chassis) {
	struct lldpd_mgmt *mgmt, *mgmt_next;

	/* We want to keep refcount, index and list stuff from the current
	 * chassis */
	TAILQ_ENTRY(lldpd_chassis) entries;
	int refcount = ochassis->c_refcount;
	int index = ochassis->c_index;
	memcpy(&entries, &ochassis->c_entries,
	    sizeof(entries));
	lldpd_chassis_cleanup(ochassis, 0);

	/* Make the copy. */
	/* WARNING: this is a kludgy hack, we need in-place copy and cannot use
	 * marshaling. */
	memcpy(ochassis, chassis, sizeof(struct lldpd_chassis));
	TAILQ_INIT(&ochassis->c_mgmt);

	/* Copy of management addresses */
	for (mgmt = TAILQ_FIRST(&chassis->c_mgmt);
	     mgmt != NULL;
	     mgmt = mgmt_next) {
		mgmt_next = TAILQ_NEXT(mgmt, m_entries);
		TAILQ_REMOVE(&chassis->c_mgmt, mgmt, m_entries);
		TAILQ_INSERT_TAIL(&ochassis->c_mgmt, mgmt, m_entries);
	}

	/* Restore saved values */
	ochassis->c_refcount = refcount;
	ochassis->c_index = index;
	memcpy(&ochassis->c_entries, &entries, sizeof(entries));

	/* Get rid of the new chassis */
	free(chassis);
}

static int
lldpd_guess_type(struct lldpd *cfg, char *frame, int s)
{
	int i;
	if (s < ETHER_ADDR_LEN)
		return -1;
	for (i=0; cfg->g_protocols[i].mode != 0; i++) {
		if (!cfg->g_protocols[i].enabled)
			continue;
		if (cfg->g_protocols[i].guess == NULL) {
			if (memcmp(frame, cfg->g_protocols[i].mac, ETHER_ADDR_LEN) == 0) {
				log_debug("decode", "guessed protocol is %s (from MAC address)",
				    cfg->g_protocols[i].name);
				return cfg->g_protocols[i].mode;
			}
		} else {
			if (cfg->g_protocols[i].guess(frame, s)) {
				log_debug("decode", "guessed protocol is %s (from detector function)",
				    cfg->g_protocols[i].name);
				return cfg->g_protocols[i].mode;
			}
		}
	}
	return -1;
}

static void
lldpd_decode(struct lldpd *cfg, char *frame, int s,
    struct lldpd_hardware *hardware)
{
	int i;
	struct lldpd_chassis *chassis, *ochassis = NULL;
	struct lldpd_port *port, *oport = NULL, *aport;
	int guess = LLDPD_MODE_LLDP;

	log_debug("decode", "decode a received frame on %s",
	    hardware->h_ifname);

	if (s < sizeof(struct ether_header) + 4)
		/* Too short, just discard it */
		return;

	/* Decapsulate VLAN frames */
	struct ether_header eheader;
	memcpy(&eheader, frame, sizeof(struct ether_header));
	if (eheader.ether_type == htons(ETHERTYPE_VLAN)) {
		/* VLAN decapsulation means to shift 4 bytes left the frame from
		 * offset 2*ETHER_ADDR_LEN; only the bytes after the tag belong
		 * to the frame, so the tag length is subtracted from the count
		 * to avoid reading past the end of the buffer. */
		memmove(frame + 2*ETHER_ADDR_LEN,
		    frame + 2*ETHER_ADDR_LEN + 4,
		    s - 2*ETHER_ADDR_LEN - 4);
		s -= 4;
	}

	TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) {
		if ((oport->p_lastframe != NULL) &&
		    (oport->p_lastframe->size == s) &&
		    (memcmp(oport->p_lastframe->frame, frame, s) == 0)) {
			/* Already received the same frame */
			log_debug("decode", "duplicate frame, no need to decode");
			oport->p_lastupdate = time(NULL);
			return;
		}
	}

	guess = lldpd_guess_type(cfg, frame, s);
	for (i=0; cfg->g_protocols[i].mode != 0; i++) {
		if (!cfg->g_protocols[i].enabled)
			continue;
		if (cfg->g_protocols[i].mode == guess) {
			log_debug("decode", "using decode function for %s protocol",
			    cfg->g_protocols[i].name);
			if (cfg->g_protocols[i].decode(cfg, frame,
			    s, hardware, &chassis, &port) == -1) {
				log_debug("decode", "function for %s protocol did not decode this frame",
				    cfg->g_protocols[i].name);
				return;
			}
			chassis->c_protocol = port->p_protocol =
			    cfg->g_protocols[i].mode;
			break;
		}
	}
	if (cfg->g_protocols[i].mode == 0) {
		log_debug("decode", "unable to guess 
frame type on %s", hardware->h_ifname); return; } TRACE(LLDPD_FRAME_DECODED( hardware->h_ifname, cfg->g_protocols[i].name, chassis->c_name, port->p_descr)); /* Do we already have the same MSAP somewhere? */ int count = 0; log_debug("decode", "search for the same MSAP"); TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if (port->p_protocol == oport->p_protocol) { count++; if ((port->p_id_subtype == oport->p_id_subtype) && (port->p_id_len == oport->p_id_len) && (memcmp(port->p_id, oport->p_id, port->p_id_len) == 0) && (chassis->c_id_subtype == oport->p_chassis->c_id_subtype) && (chassis->c_id_len == oport->p_chassis->c_id_len) && (memcmp(chassis->c_id, oport->p_chassis->c_id, chassis->c_id_len) == 0)) { ochassis = oport->p_chassis; log_debug("decode", "MSAP is already known"); break; } } } /* Do we have room for a new MSAP? */ if (!oport && cfg->g_config.c_max_neighbors) { if (count == (cfg->g_config.c_max_neighbors - 1)) { log_debug("decode", "max neighbors %d reached for port %s, " "dropping any new ones silently", cfg->g_config.c_max_neighbors, hardware->h_ifname); } else if (count > cfg->g_config.c_max_neighbors - 1) { log_debug("decode", "too many neighbors for port %s, drop this new one", hardware->h_ifname); lldpd_port_cleanup(port, 1); lldpd_chassis_cleanup(chassis, 1); free(port); return; } } /* No, but do we already know the system? */ if (!oport) { log_debug("decode", "MSAP is unknown, search for the chassis"); TAILQ_FOREACH(ochassis, &cfg->g_chassis, c_entries) { if ((chassis->c_protocol == ochassis->c_protocol) && (chassis->c_id_subtype == ochassis->c_id_subtype) && (chassis->c_id_len == ochassis->c_id_len) && (memcmp(chassis->c_id, ochassis->c_id, chassis->c_id_len) == 0)) break; } } if (oport) { /* The port is known, remove it before adding it back */ TAILQ_REMOVE(&hardware->h_rports, oport, p_entries); lldpd_port_cleanup(oport, 1); free(oport); } if (ochassis) { lldpd_move_chassis(ochassis, chassis); chassis = ochassis; } else { /* Chassis not known, add it */ log_debug("decode", "unknown chassis, add it to the list"); chassis->c_index = ++cfg->g_lastrid; chassis->c_refcount = 0; TAILQ_INSERT_TAIL(&cfg->g_chassis, chassis, c_entries); i = 0; TAILQ_FOREACH(ochassis, &cfg->g_chassis, c_entries) i++; log_debug("decode", "%d different systems are known", i); } /* Add port */ port->p_lastchange = port->p_lastupdate = time(NULL); if ((port->p_lastframe = (struct lldpd_frame *)malloc(s + sizeof(struct lldpd_frame))) != NULL) { port->p_lastframe->size = s; memcpy(port->p_lastframe->frame, frame, s); } TAILQ_INSERT_TAIL(&hardware->h_rports, port, p_entries); port->p_chassis = chassis; port->p_chassis->c_refcount++; /* Several cases are possible : 1. chassis is new, its refcount was 0. It is now attached to this port, its refcount is 1. 2. chassis already exists and was attached to another port, we increase its refcount accordingly. 3. chassis already exists and was attached to the same port, its refcount was decreased with lldpd_port_cleanup() and is now increased again. In all cases, if the port already existed, it has been freed with lldpd_port_cleanup() and therefore, the refcount of the chassis that was attached to it is decreased. 
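	   The net effect is that, for a remote chassis, c_refcount always
	   equals the number of remote ports currently referencing it.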
 */
	/* coverity[use_after_free] TAILQ_REMOVE does the right thing */
	i = 0;
	TAILQ_FOREACH(aport, &hardware->h_rports, p_entries)
		i++;
	log_debug("decode", "%d neighbors for %s", i,
	    hardware->h_ifname);
	if (!oport) hardware->h_insert_cnt++;

	/* Notify */
	log_debug("decode", "send notifications for changes on %s",
	    hardware->h_ifname);
	if (oport) {
		TRACE(LLDPD_NEIGHBOR_UPDATE(hardware->h_ifname,
		    chassis->c_name,
		    port->p_descr,
		    i));
		levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_UPDATED, port);
#ifdef USE_SNMP
		agent_notify(hardware, NEIGHBOR_CHANGE_UPDATED, port);
#endif
	} else {
		TRACE(LLDPD_NEIGHBOR_NEW(hardware->h_ifname,
		    chassis->c_name,
		    port->p_descr,
		    i));
		levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_ADDED, port);
#ifdef USE_SNMP
		agent_notify(hardware, NEIGHBOR_CHANGE_ADDED, port);
#endif
	}

#ifdef ENABLE_LLDPMED
	if (!oport && port->p_chassis->c_med_type) {
		/* New neighbor, fast start */
		if (hardware->h_cfg->g_config.c_enable_fast_start &&
		    !hardware->h_tx_fast) {
			log_debug("decode", "%s: entering fast start due to "
			    "new neighbor", hardware->h_ifname);
			hardware->h_tx_fast = hardware->h_cfg->g_config.c_tx_fast_init;
		}
		levent_schedule_pdu(hardware);
	}
#endif

	return;
}

/* Get the output of lsb_release -s -d.  This is a slow function. It should be
   called once. It returns NULL if any problem happens. Otherwise, this is a
   statically allocated buffer. The result includes the trailing \n */
static char *
lldpd_get_lsb_release() {
	static char release[1024];
	char *const command[] = { "lsb_release", "-s", "-d", NULL };
	int pid, status, devnull, count;
	int pipefd[2];

	log_debug("localchassis", "grab LSB release");

	if (pipe(pipefd)) {
		log_warn("localchassis", "unable to get a pair of pipes");
		return NULL;
	}

	pid = vfork();
	switch (pid) {
	case -1:
		log_warn("localchassis", "unable to fork");
		return NULL;
	case 0:
		/* Child, exec lsb_release */
		close(pipefd[0]);
		if ((devnull = open("/dev/null", O_RDWR, 0)) != -1) {
			dup2(devnull,   STDIN_FILENO);
			dup2(devnull,   STDERR_FILENO);
			dup2(pipefd[1], STDOUT_FILENO);

			if (devnull > 2) close(devnull);
			if (pipefd[1] > 2) close(pipefd[1]);
			execvp("lsb_release", command);
		}
		_exit(127);
		break;
	default:
		/* Father, read the output from the child */
		close(pipefd[1]);
		count = 0;
		do {
			status = read(pipefd[0], release+count, sizeof(release)-count);
			if ((status == -1) && (errno == EINTR)) continue;
			if (status > 0)
				count += status;
		} while (count < sizeof(release) && (status > 0));
		if (status < 0) {
			log_info("localchassis", "unable to read from lsb_release");
			close(pipefd[0]);
			waitpid(pid, &status, 0);
			return NULL;
		}
		close(pipefd[0]);
		if (count >= sizeof(release)) {
			log_info("localchassis", "output of lsb_release is too large");
			waitpid(pid, &status, 0);
			return NULL;
		}
		status = -1;
		if (waitpid(pid, &status, 0) != pid)
			return NULL;
		if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
			log_info("localchassis", "lsb_release information not available");
			return NULL;
		}
		if (!count) {
			log_info("localchassis", "lsb_release returned an empty string");
			return NULL;
		}
		release[count] = '\0';
		return release;
	}
	/* Should not be here */
	return NULL;
}

/* Same as lldpd_get_lsb_release but reads /etc/os-release for PRETTY_NAME=.
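   Both /etc/os-release and /usr/lib/os-release are tried, in the search
   order documented in os-release(5).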
 */
static char *
lldpd_get_os_release() {
	static char release[1024];
	char line[1024];
	char *key, *val;
	char *ptr1 = release;

	log_debug("localchassis", "grab OS release");
	FILE *fp = fopen("/etc/os-release", "r");
	if (!fp) {
		log_debug("localchassis", "could not open /etc/os-release");
		fp = fopen("/usr/lib/os-release", "r");
	}
	if (!fp) {
		log_info("localchassis",
		    "could not open either /etc/os-release or /usr/lib/os-release");
		return NULL;
	}

	while ((fgets(line, sizeof(line), fp) != NULL)) {
		key = strtok(line, "=");
		val = strtok(NULL, "=");
		/* A line without `=' leaves strtok() with a NULL result: skip
		 * it instead of dereferencing a NULL pointer. */
		if (key == NULL || val == NULL)
			continue;
		if (strncmp(key, "PRETTY_NAME", sizeof(line)) == 0) {
			strlcpy(release, val, sizeof(release));
			break;
		}
	}
	fclose(fp);

	/* No PRETTY_NAME found: bail out before the trimming loop below walks
	 * backwards from an empty string. */
	if (release[0] == '\0')
		return NULL;

	/* Remove trailing newline and all " in the string. */
	ptr1 = release + strlen(release) - 1;
	while (ptr1 != release &&
	    ((*ptr1 == '"') || (*ptr1 == '\n'))) {
		*ptr1 = '\0';
		ptr1--;
	}

	if (release[0] == '"')
		return release+1;
	return release;
}

static void
lldpd_hide_ports(struct lldpd *cfg, struct lldpd_hardware *hardware, int mask) {
	struct lldpd_port *port;
	int protocols[LLDPD_MODE_MAX+1];
	char buffer[256];
	int i, j, k, found;
	unsigned int min;

	log_debug("smartfilter", "apply smart filter for port %s",
	    hardware->h_ifname);

	/* Compute the number of occurrences of each protocol */
	for (i = 0; i <= LLDPD_MODE_MAX; i++) protocols[i] = 0;
	TAILQ_FOREACH(port, &hardware->h_rports, p_entries)
		protocols[port->p_protocol]++;

	/* Turn the protocols[] array into an array of enabled/disabled
	 * protocols. 1 means enabled, 0 means disabled. */
	min = (unsigned int)-1;
	for (i = 0; i <= LLDPD_MODE_MAX; i++)
		if (protocols[i] && (protocols[i] < min))
			min = protocols[i];
	found = 0;
	for (i = 0; i <= LLDPD_MODE_MAX; i++)
		if ((protocols[i] == min) && !found) {
			/* If we need a tie breaker, we take
			 * the first protocol only */
			if (cfg->g_config.c_smart & mask &
			    (SMART_OUTGOING_ONE_PROTO | SMART_INCOMING_ONE_PROTO))
				found = 1;
			protocols[i] = 1;
		} else protocols[i] = 0;

	/* We set the p_hidden flag to 1 if the protocol is disabled */
	TAILQ_FOREACH(port, &hardware->h_rports, p_entries) {
		if (mask == SMART_OUTGOING)
			port->p_hidden_out = protocols[port->p_protocol]?0:1;
		else
			port->p_hidden_in = protocols[port->p_protocol]?0:1;
	}

	/* If we want only one neighbor, we take the first one */
	if (cfg->g_config.c_smart & mask &
	    (SMART_OUTGOING_ONE_NEIGH | SMART_INCOMING_ONE_NEIGH)) {
		found = 0;
		TAILQ_FOREACH(port, &hardware->h_rports, p_entries) {
			if (mask == SMART_OUTGOING) {
				if (found) port->p_hidden_out = 1;
				if (!port->p_hidden_out)
					found = 1;
			}
			if (mask == SMART_INCOMING) {
				if (found) port->p_hidden_in = 1;
				if (!port->p_hidden_in)
					found = 1;
			}
		}
	}

	/* Print a debug message summarizing the operation */
	for (i = 0; i <= LLDPD_MODE_MAX; i++) protocols[i] = 0;
	k = j = 0;
	TAILQ_FOREACH(port, &hardware->h_rports, p_entries) {
		if (!(((mask == SMART_OUTGOING) && port->p_hidden_out) ||
			((mask == SMART_INCOMING) && port->p_hidden_in))) {
			k++;
			protocols[port->p_protocol] = 1;
		}
		j++;
	}
	buffer[0] = '\0';
	for (i=0; cfg->g_protocols[i].mode != 0; i++) {
		if (cfg->g_protocols[i].enabled &&
		    protocols[cfg->g_protocols[i].mode]) {
			if (strlen(buffer) +
			    strlen(cfg->g_protocols[i].name) + 3 > sizeof(buffer)) {
				/* Unlikely, our buffer is too small */
				memcpy(buffer + sizeof(buffer) - 4, "...", 4);
				break;
			}
			if (buffer[0]) strncat(buffer, ", ", 2);
			strncat(buffer, cfg->g_protocols[i].name,
			    strlen(cfg->g_protocols[i].name));
		}
	}
	log_debug("smartfilter", "%s: %s: %d visible neighbors (out of %d)",
	    hardware->h_ifname,
	    (mask == SMART_OUTGOING)?"out filter":"in filter",
	    k, j);
	log_debug("smartfilter", "%s: protocols: 
%s", hardware->h_ifname, buffer[0]?buffer:"(none)");
}

/* Hide unwanted ports depending on smart mode set by the user */
static void
lldpd_hide_all(struct lldpd *cfg)
{
	struct lldpd_hardware *hardware;

	if (!cfg->g_config.c_smart)
		return;
	log_debug("smartfilter", "apply smart filter results on all ports");
	TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) {
		if (cfg->g_config.c_smart & SMART_INCOMING_FILTER)
			lldpd_hide_ports(cfg, hardware, SMART_INCOMING);
		if (cfg->g_config.c_smart & SMART_OUTGOING_FILTER)
			lldpd_hide_ports(cfg, hardware, SMART_OUTGOING);
	}
}

void
lldpd_recv(struct lldpd *cfg,
    struct lldpd_hardware *hardware,
    int fd)
{
	char *buffer = NULL;
	int n;
	log_debug("receive", "receive a frame on %s",
	    hardware->h_ifname);
	if ((buffer = (char *)malloc(hardware->h_mtu)) == NULL) {
		log_warn("receive", "failed to alloc reception buffer");
		return;
	}
	if ((n = hardware->h_ops->recv(cfg, hardware,
		    fd, buffer,
		    hardware->h_mtu)) == -1) {
		log_debug("receive", "discard frame received on %s",
		    hardware->h_ifname);
		free(buffer);
		return;
	}
	if (hardware->h_lport.p_disable_rx) {
		log_debug("receive", "RX disabled, ignore the frame on %s",
		    hardware->h_ifname);
		free(buffer);
		return;
	}
	if (cfg->g_config.c_paused) {
		log_debug("receive", "paused, ignore the frame on %s",
		    hardware->h_ifname);
		free(buffer);
		return;
	}
	hardware->h_rx_cnt++;
	log_debug("receive", "decode received frame on %s",
	    hardware->h_ifname);
	TRACE(LLDPD_FRAME_RECEIVED(hardware->h_ifname, buffer, (size_t)n));
	lldpd_decode(cfg, buffer, n, hardware);
	lldpd_hide_all(cfg); /* Immediately hide */
	lldpd_count_neighbors(cfg);
	free(buffer);
}

static void
lldpd_send_shutdown(struct lldpd_hardware *hardware)
{
	struct lldpd *cfg = hardware->h_cfg;
	if (cfg->g_config.c_receiveonly || cfg->g_config.c_paused) return;
	if (hardware->h_lport.p_disable_tx) return;
	if ((hardware->h_flags & IFF_RUNNING) == 0)
		return;

	/* It's safe to call `lldp_send_shutdown()` because a shutdown LLDPDU
	 * will only be emitted if LLDP was sent on that port. */
	if (lldp_send_shutdown(hardware->h_cfg, hardware) != 0)
		log_warnx("send", "unable to send shutdown LLDPDU on %s",
		    hardware->h_ifname);
}

void
lldpd_send(struct lldpd_hardware *hardware)
{
	struct lldpd *cfg = hardware->h_cfg;
	struct lldpd_port *port;
	int i, sent;

	if (cfg->g_config.c_receiveonly || cfg->g_config.c_paused) return;
	if (hardware->h_lport.p_disable_tx) return;
	if ((hardware->h_flags & IFF_RUNNING) == 0)
		return;

	log_debug("send", "send PDU on %s", hardware->h_ifname);
	sent = 0;
	for (i=0; cfg->g_protocols[i].mode != 0; i++) {
		if (!cfg->g_protocols[i].enabled)
			continue;
		/* We send only if we have at least one remote system
		 * speaking this protocol or if the protocol is forced */
		if (cfg->g_protocols[i].enabled > 1) {
			cfg->g_protocols[i].send(cfg, hardware);
			sent++;
			continue;
		}
		TAILQ_FOREACH(port, &hardware->h_rports, p_entries) {
			/* If this remote port is disabled, we don't
			 * consider it */
			if (port->p_hidden_out)
				continue;
			if (port->p_protocol ==
			    cfg->g_protocols[i].mode) {
				TRACE(LLDPD_FRAME_SEND(hardware->h_ifname,
					cfg->g_protocols[i].name));
				log_debug("send", "send PDU on %s with protocol %s",
				    hardware->h_ifname,
				    cfg->g_protocols[i].name);
				cfg->g_protocols[i].send(cfg,
				    hardware);
				sent++;
				break;
			}
		}
	}

	if (!sent) {
		/* Nothing was sent for this port, let's speak the first
		 * available protocol.
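		 * That way at least one protocol is always announced on an
		 * active port, even before any neighbor has been heard.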
*/ for (i = 0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; TRACE(LLDPD_FRAME_SEND(hardware->h_ifname, cfg->g_protocols[i].name)); log_debug("send", "fallback to protocol %s for %s", cfg->g_protocols[i].name, hardware->h_ifname); cfg->g_protocols[i].send(cfg, hardware); break; } if (cfg->g_protocols[i].mode == 0) log_warnx("send", "no protocol enabled, dunno what to send"); } } #ifdef ENABLE_LLDPMED static void lldpd_med(struct lldpd_chassis *chassis) { static short int once = 0; if (!once) { chassis->c_med_hw = dmi_hw(); chassis->c_med_fw = dmi_fw(); chassis->c_med_sn = dmi_sn(); chassis->c_med_manuf = dmi_manuf(); chassis->c_med_model = dmi_model(); chassis->c_med_asset = dmi_asset(); once = 1; } } #endif static int lldpd_routing_enabled(struct lldpd *cfg) { int routing; if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_ROUTER) == 0) return 0; if ((routing = interfaces_routing_enabled(cfg)) == -1) { log_debug("localchassis", "unable to check if routing is enabled"); return 0; } return routing; } static void lldpd_update_localchassis(struct lldpd *cfg) { struct utsname un; char *hp; log_debug("localchassis", "update information for local chassis"); assert(LOCAL_CHASSIS(cfg) != NULL); /* Set system name and description */ if (uname(&un) < 0) fatal("localchassis", "failed to get system information"); if (cfg->g_config.c_hostname) { log_debug("localchassis", "use overridden system name `%s`", cfg->g_config.c_hostname); hp = cfg->g_config.c_hostname; } else { if ((hp = priv_gethostname()) == NULL) fatal("localchassis", "failed to get system name"); } free(LOCAL_CHASSIS(cfg)->c_name); free(LOCAL_CHASSIS(cfg)->c_descr); if ((LOCAL_CHASSIS(cfg)->c_name = strdup(hp)) == NULL) fatal("localchassis", NULL); if (cfg->g_config.c_description) { log_debug("localchassis", "use overridden description `%s`", cfg->g_config.c_description); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s", cfg->g_config.c_description) == -1) fatal("localchassis", "failed to set full system description"); } else { if (cfg->g_config.c_advertise_version) { log_debug("localchassis", "advertise system version"); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s %s %s %s %s", cfg->g_lsb_release?cfg->g_lsb_release:"", un.sysname, un.release, un.version, un.machine) == -1) fatal("localchassis", "failed to set full system description"); } else { log_debug("localchassis", "do not advertise system version"); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s", cfg->g_lsb_release?cfg->g_lsb_release:un.sysname) == -1) fatal("localchassis", "failed to set minimal system description"); } } if (cfg->g_config.c_platform == NULL) cfg->g_config.c_platform = strdup(un.sysname); /* Check routing */ if (lldpd_routing_enabled(cfg)) { log_debug("localchassis", "routing is enabled, enable router capability"); LOCAL_CHASSIS(cfg)->c_cap_enabled |= LLDP_CAP_ROUTER; } else LOCAL_CHASSIS(cfg)->c_cap_enabled &= ~LLDP_CAP_ROUTER; #ifdef ENABLE_LLDPMED if (LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_TELEPHONE) LOCAL_CHASSIS(cfg)->c_cap_enabled |= LLDP_CAP_TELEPHONE; lldpd_med(LOCAL_CHASSIS(cfg)); free(LOCAL_CHASSIS(cfg)->c_med_sw); if (cfg->g_config.c_advertise_version) LOCAL_CHASSIS(cfg)->c_med_sw = strdup(un.release); else LOCAL_CHASSIS(cfg)->c_med_sw = strdup("Unknown"); #endif if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_STATION) && (LOCAL_CHASSIS(cfg)->c_cap_enabled == 0)) LOCAL_CHASSIS(cfg)->c_cap_enabled = LLDP_CAP_STATION; /* Set chassis ID if needed. 
This is only done if chassis ID has not been set previously (with the MAC address of an interface for example) */ if (LOCAL_CHASSIS(cfg)->c_id == NULL) { log_debug("localchassis", "no chassis ID is currently set, use chassis name"); if (!(LOCAL_CHASSIS(cfg)->c_id = strdup(LOCAL_CHASSIS(cfg)->c_name))) fatal("localchassis", NULL); LOCAL_CHASSIS(cfg)->c_id_len = strlen(LOCAL_CHASSIS(cfg)->c_name); LOCAL_CHASSIS(cfg)->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LOCAL; } } void lldpd_update_localports(struct lldpd *cfg) { struct lldpd_hardware *hardware; log_debug("localchassis", "update information for local ports"); /* h_flags is set to 0 for each port. If the port is updated, h_flags * will be set to a non-zero value. This will allow us to clean up any * non up-to-date port */ TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) hardware->h_flags = 0; TRACE(LLDPD_INTERFACES_UPDATE()); interfaces_update(cfg); lldpd_cleanup(cfg); lldpd_reset_timer(cfg); } void lldpd_loop(struct lldpd *cfg) { /* Main loop. 1. Update local ports information 2. Update local chassis information */ log_debug("loop", "start new loop"); LOCAL_CHASSIS(cfg)->c_cap_enabled = 0; /* Information for local ports is triggered even when it is possible to * update them on some other event because we want to refresh them if we * missed something. */ log_debug("loop", "update information for local ports"); lldpd_update_localports(cfg); log_debug("loop", "update information for local chassis"); lldpd_update_localchassis(cfg); lldpd_count_neighbors(cfg); } static void lldpd_exit(struct lldpd *cfg) { struct lldpd_hardware *hardware, *hardware_next; log_debug("main", "exit lldpd"); TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) lldpd_send_shutdown(hardware); close(cfg->g_ctl); priv_ctl_cleanup(cfg->g_ctlname); log_debug("main", "cleanup hardware information"); for (hardware = TAILQ_FIRST(&cfg->g_hardware); hardware != NULL; hardware = hardware_next) { hardware_next = TAILQ_NEXT(hardware, h_entries); log_debug("main", "cleanup interface %s", hardware->h_ifname); lldpd_remote_cleanup(hardware, NULL, 1); lldpd_hardware_cleanup(cfg, hardware); } interfaces_cleanup(cfg); free(cfg->g_config.c_platform); } /** * Run lldpcli to configure lldpd. * * @return PID of running lldpcli or -1 if error. 
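 * lldpcli is run with one `d' per accumulated debug level, the control socket
 * name, the system-wide lldpd.conf and lldpd.d configuration, and a final
 * `resume' command so that lldpd gets unpaused once configuration is loaded.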
*/ static pid_t lldpd_configure(int debug, const char *path, const char *ctlname) { pid_t lldpcli = vfork(); int devnull; char sdebug[debug + 3]; memset(sdebug, 'd', debug + 3); sdebug[debug + 2] = '\0'; sdebug[0] = '-'; sdebug[1] = 's'; log_debug("main", "invoke %s %s", path, sdebug); switch (lldpcli) { case -1: log_warn("main", "unable to fork"); return -1; case 0: /* Child, exec lldpcli */ if ((devnull = open("/dev/null", O_RDWR, 0)) != -1) { dup2(devnull, STDIN_FILENO); dup2(devnull, STDOUT_FILENO); if (devnull > 2) close(devnull); execl(path, "lldpcli", sdebug, "-u", ctlname, "-c", SYSCONFDIR "/lldpd.conf", "-c", SYSCONFDIR "/lldpd.d", "resume", (char *)NULL); log_warn("main", "unable to execute %s", path); log_warnx("main", "configuration is incomplete, lldpd needs to be unpaused"); } _exit(127); break; default: /* Father, don't do anything stupid */ return lldpcli; } /* Should not be here */ return -1; } struct intint { int a; int b; }; static const struct intint filters[] = { { 0, 0 }, { 1, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 2, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO }, { 3, SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 4, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER }, { 5, SMART_INCOMING_FILTER }, { 6, SMART_OUTGOING_FILTER }, { 7, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 8, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH }, { 9, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 10, SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 11, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH }, { 12, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 13, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER }, { 14, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 15, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER }, { 16, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 17, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER }, { 18, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 19, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { -1, 0 } }; #ifndef HOST_OS_OSX /** * Tell if we have been started by upstart. */ static int lldpd_started_by_upstart() { #ifdef HOST_OS_LINUX const char *upstartjob = getenv("UPSTART_JOB"); if (!(upstartjob && !strcmp(upstartjob, "lldpd"))) return 0; log_debug("main", "running with upstart, don't fork but stop"); raise(SIGSTOP); unsetenv("UPSTART_JOB"); return 1; #else return 0; #endif } /** * Tell if we have been started by systemd. 
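 * Detection relies on the NOTIFY_SOCKET environment variable; when present,
 * READY=1 is written to that socket (the sd_notify protocol) instead of
 * daemonizing.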
 */
static int
lldpd_started_by_systemd()
{
#ifdef HOST_OS_LINUX
	int fd = -1;
	const char *notifysocket = getenv("NOTIFY_SOCKET");
	if (!notifysocket ||
	    !strchr("@/", notifysocket[0]) || strlen(notifysocket) < 2)
		return 0;

	log_debug("main", "running with systemd, don't fork but signal ready");
	if ((fd = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) {
		log_warn("main", "unable to open systemd notification socket %s",
		    notifysocket);
		return 0;
	}

	struct sockaddr_un su = { .sun_family = AF_UNIX };
	strlcpy(su.sun_path, notifysocket, sizeof(su.sun_path));
	if (notifysocket[0] == '@')
		su.sun_path[0] = 0;

	struct iovec iov = {
		.iov_base = "READY=1",
		.iov_len = strlen("READY=1")
	};
	struct msghdr hdr = {
		.msg_name = &su,
		.msg_namelen = offsetof(struct sockaddr_un, sun_path) + strlen(notifysocket),
		.msg_iov = &iov,
		.msg_iovlen = 1
	};
	unsetenv("NOTIFY_SOCKET");
	if (sendmsg(fd, &hdr, MSG_NOSIGNAL) < 0) {
		log_warn("main", "unable to send notification to systemd");
		close(fd);
		return 0;
	}
	close(fd);
	return 1;
#else
	return 0;
#endif
}
#endif

#ifdef HOST_OS_LINUX
static void
version_convert(const char *sversion, unsigned iversion[], size_t n)
{
	const char *p = sversion;
	char *end;
	for (size_t i = 0; i < n; i++) {
		iversion[i] = strtol(p, &end, 10);
		if (*end != '.') break;
		p = end + 1;
	}
}

static void
version_check(void)
{
	struct utsname uts;
	if (uname(&uts) == -1) return;
	unsigned version_min[3] = {};
	unsigned version_cur[3] = {};
	version_convert(uts.release, version_cur, 3);
	version_convert(MIN_LINUX_KERNEL_VERSION, version_min, 3);
	/* Lexicographic comparison: a later component only matters when all
	 * earlier components are equal. */
	if (version_min[0] > version_cur[0] ||
	    (version_min[0] == version_cur[0] && version_min[1] > version_cur[1]) ||
	    (version_min[0] == version_cur[0] && version_min[1] == version_cur[1] &&
		version_min[2] > version_cur[2])) {
		log_warnx("lldpd", "minimal kernel version required is %s, got %s",
		    MIN_LINUX_KERNEL_VERSION, uts.release);
		log_warnx("lldpd", "lldpd may be unable to detect bonds and bridges correctly");
#ifndef ENABLE_OLDIES
		log_warnx("lldpd", "consider recompiling with --enable-oldies option");
#endif
	}
}
#else
static void version_check(void) {}
#endif

int
lldpd_main(int argc, char *argv[], char *envp[])
{
	struct lldpd *cfg;
	struct lldpd_chassis *lchassis;
	int ch, debug = 0;
#ifdef USE_SNMP
	int snmp = 0;
	const char *agentx = NULL;	/* AgentX socket */
#endif
	const char *ctlname = NULL;
	char *mgmtp = NULL;
	char *cidp = NULL;
	char *interfaces = NULL;
	/* We do not want more options here. Please add them in lldpcli instead
	 * unless there is a very good reason. Most command-line options will
	 * get deprecated at some point.
*/ char *popt, opts[] = "H:vhkrdD:xX:m:u:4:6:I:C:p:M:P:S:iL:@ "; int i, found, advertise_version = 1; #ifdef ENABLE_LLDPMED int lldpmed = 0, noinventory = 0; int enable_fast_start = 1; #endif char *descr_override = NULL; char *platform_override = NULL; char *lsb_release = NULL; const char *lldpcli = LLDPCLI_PATH; int smart = 15; int receiveonly = 0; int ctl; #ifdef ENABLE_PRIVSEP /* Non privileged user */ struct passwd *user; struct group *group; uid_t uid; gid_t gid; #endif saved_argv = argv; #if HAVE_SETPROCTITLE_INIT setproctitle_init(argc, argv, envp); #endif /* * Get and parse command line options */ if ((popt = strchr(opts, '@')) != NULL) { for (i=0; protos[i].mode != 0 && *popt != '\0'; i++) *(popt++) = protos[i].arg; *popt = '\0'; } while ((ch = getopt(argc, argv, opts)) != -1) { switch (ch) { case 'h': usage(); break; case 'v': fprintf(stdout, "%s\n", PACKAGE_VERSION); exit(0); break; case 'd': debug++; break; case 'D': log_accept(optarg); break; case 'r': receiveonly = 1; break; case 'm': if (mgmtp) { fprintf(stderr, "-m can only be used once\n"); usage(); } mgmtp = strdup(optarg); break; case 'u': if (ctlname) { fprintf(stderr, "-u can only be used once\n"); usage(); } ctlname = optarg; break; case 'I': if (interfaces) { fprintf(stderr, "-I can only be used once\n"); usage(); } interfaces = strdup(optarg); break; case 'C': if (cidp) { fprintf(stderr, "-C can only be used once\n"); usage(); } cidp = strdup(optarg); break; case 'L': if (strlen(optarg)) lldpcli = optarg; else lldpcli = NULL; break; case 'k': advertise_version = 0; break; #ifdef ENABLE_LLDPMED case 'M': lldpmed = atoi(optarg); if ((lldpmed < 1) || (lldpmed > 4)) { fprintf(stderr, "-M requires an argument between 1 and 4\n"); usage(); } break; case 'i': noinventory = 1; break; #else case 'M': case 'i': fprintf(stderr, "LLDP-MED support is not built-in\n"); usage(); break; #endif #ifdef USE_SNMP case 'x': snmp = 1; break; case 'X': if (agentx) { fprintf(stderr, "-X can only be used once\n"); usage(); } snmp = 1; agentx = optarg; break; #else case 'x': case 'X': fprintf(stderr, "SNMP support is not built-in\n"); usage(); #endif break; case 'S': if (descr_override) { fprintf(stderr, "-S can only be used once\n"); usage(); } descr_override = strdup(optarg); break; case 'P': if (platform_override) { fprintf(stderr, "-P can only be used once\n"); usage(); } platform_override = strdup(optarg); break; case 'H': smart = atoi(optarg); break; default: found = 0; for (i=0; protos[i].mode != 0; i++) { if (ch == protos[i].arg) { found = 1; protos[i].enabled++; } } if (!found) usage(); } } if (ctlname == NULL) ctlname = LLDPD_CTL_SOCKET; /* Set correct smart mode */ for (i=0; (filters[i].a != -1) && (filters[i].a != smart); i++); if (filters[i].a == -1) { fprintf(stderr, "Incorrect mode for -H\n"); usage(); } smart = filters[i].b; log_init(debug, __progname); tzset(); /* Get timezone info before chroot */ log_debug("main", "lldpd " PACKAGE_VERSION " starting..."); version_check(); /* Grab uid and gid to use for priv sep */ #ifdef ENABLE_PRIVSEP if ((user = getpwnam(PRIVSEP_USER)) == NULL) fatal("main", "no " PRIVSEP_USER " user for privilege separation"); uid = user->pw_uid; if ((group = getgrnam(PRIVSEP_GROUP)) == NULL) fatal("main", "no " PRIVSEP_GROUP " group for privilege separation"); gid = group->gr_gid; #endif /* Create and setup socket */ int retry = 1; log_debug("main", "creating control socket"); while ((ctl = ctl_create(ctlname)) == -1) { if (retry-- && errno == EADDRINUSE) { /* Check if a daemon is really listening */ 
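				/* A successful connect() means a live daemon owns the
				   socket; ECONNREFUSED means the socket file is stale
				   and can be removed safely. */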
int tfd; log_info("main", "unable to create control socket because it already exists"); log_info("main", "check if another instance is running"); if ((tfd = ctl_connect(ctlname)) != -1) { /* Another instance is running */ close(tfd); log_warnx("main", "another instance is running, please stop it"); fatalx("main", "giving up"); } else if (errno == ECONNREFUSED) { /* Nobody is listening */ log_info("main", "old control socket is present, clean it"); ctl_cleanup(ctlname); continue; } log_warn("main", "cannot determine if another daemon is already running"); fatalx("main", "giving up"); } log_warn("main", "unable to create control socket"); fatalx("main", "giving up"); } #ifdef ENABLE_PRIVSEP if (chown(ctlname, uid, gid) == -1) log_warn("main", "unable to chown control socket"); if (chmod(ctlname, S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | S_IWGRP | S_IXGRP) == -1) log_warn("main", "unable to chmod control socket"); #endif /* Disable SIGPIPE */ signal(SIGPIPE, SIG_IGN); /* Disable SIGHUP, until handlers are installed */ signal(SIGHUP, SIG_IGN); /* Configuration with lldpcli */ if (lldpcli) { log_debug("main", "invoking lldpcli for configuration"); if (lldpd_configure(debug, lldpcli, ctlname) == -1) fatal("main", "unable to spawn lldpcli"); } /* Daemonization, unless started by upstart, systemd or launchd or debug */ #ifndef HOST_OS_OSX if (!lldpd_started_by_upstart() && !lldpd_started_by_systemd() && !debug) { int pid; char *spid; log_debug("main", "daemonize"); if (daemon(0, 0) != 0) fatal("main", "failed to detach daemon"); if ((pid = open(LLDPD_PID_FILE, O_TRUNC | O_CREAT | O_WRONLY, 0666)) == -1) fatal("main", "unable to open pid file " LLDPD_PID_FILE); if (asprintf(&spid, "%d\n", getpid()) == -1) fatal("main", "unable to create pid file " LLDPD_PID_FILE); if (write(pid, spid, strlen(spid)) == -1) fatal("main", "unable to write pid file " LLDPD_PID_FILE); free(spid); close(pid); } #endif /* Try to read system information from /etc/os-release if possible. Fall back to lsb_release for compatibility. 
*/ log_debug("main", "get OS/LSB release information"); lsb_release = lldpd_get_os_release(); if (!lsb_release) { lsb_release = lldpd_get_lsb_release(); } log_debug("main", "initialize privilege separation"); #ifdef ENABLE_PRIVSEP priv_init(PRIVSEP_CHROOT, ctl, uid, gid); #else priv_init(PRIVSEP_CHROOT, ctl, 0, 0); #endif /* Initialization of global configuration */ if ((cfg = (struct lldpd *) calloc(1, sizeof(struct lldpd))) == NULL) fatal("main", NULL); lldpd_alloc_default_local_port(cfg); cfg->g_ctlname = ctlname; cfg->g_ctl = ctl; cfg->g_config.c_mgmt_pattern = mgmtp; cfg->g_config.c_cid_pattern = cidp; cfg->g_config.c_iface_pattern = interfaces; cfg->g_config.c_smart = smart; if (lldpcli) cfg->g_config.c_paused = 1; cfg->g_config.c_receiveonly = receiveonly; cfg->g_config.c_tx_interval = LLDPD_TX_INTERVAL; cfg->g_config.c_tx_hold = LLDPD_TX_HOLD; cfg->g_config.c_max_neighbors = LLDPD_MAX_NEIGHBORS; #ifdef ENABLE_LLDPMED cfg->g_config.c_enable_fast_start = enable_fast_start; cfg->g_config.c_tx_fast_init = LLDPD_FAST_INIT; cfg->g_config.c_tx_fast_interval = LLDPD_FAST_TX_INTERVAL; #endif #ifdef USE_SNMP cfg->g_snmp = snmp; cfg->g_snmp_agentx = agentx; #endif /* USE_SNMP */ cfg->g_config.c_bond_slave_src_mac_type = \ LLDP_BOND_SLAVE_SRC_MAC_TYPE_LOCALLY_ADMINISTERED; /* Get ioctl socket */ log_debug("main", "get an ioctl socket"); if ((cfg->g_sock = socket(AF_INET, SOCK_DGRAM, 0)) == -1) fatal("main", "failed to get ioctl socket"); /* Description */ if (!(cfg->g_config.c_advertise_version = advertise_version) && lsb_release && lsb_release[strlen(lsb_release) - 1] == '\n') lsb_release[strlen(lsb_release) - 1] = '\0'; cfg->g_lsb_release = lsb_release; if (descr_override) cfg->g_config.c_description = descr_override; if (platform_override) cfg->g_config.c_platform = platform_override; /* Set system capabilities */ log_debug("main", "set system capabilities"); if ((lchassis = (struct lldpd_chassis*) calloc(1, sizeof(struct lldpd_chassis))) == NULL) fatal("localchassis", NULL); cfg->g_config.c_cap_advertise = 1; lchassis->c_cap_available = LLDP_CAP_BRIDGE | LLDP_CAP_WLAN | LLDP_CAP_ROUTER | LLDP_CAP_STATION; cfg->g_config.c_mgmt_advertise = 1; TAILQ_INIT(&lchassis->c_mgmt); #ifdef ENABLE_LLDPMED if (lldpmed > 0) { if (lldpmed == LLDP_MED_CLASS_III) lchassis->c_cap_available |= LLDP_CAP_TELEPHONE; lchassis->c_med_type = lldpmed; lchassis->c_med_cap_available = LLDP_MED_CAP_CAP | LLDP_MED_CAP_IV | LLDP_MED_CAP_LOCATION | LLDP_MED_CAP_POLICY | LLDP_MED_CAP_MDI_PSE | LLDP_MED_CAP_MDI_PD; cfg->g_config.c_noinventory = noinventory; } else cfg->g_config.c_noinventory = 1; #endif /* Set TTL */ lchassis->c_ttl = cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold; log_debug("main", "initialize protocols"); cfg->g_protocols = protos; for (i=0; protos[i].mode != 0; i++) { /* With -ll, disable LLDP */ if (protos[i].mode == LLDPD_MODE_LLDP) protos[i].enabled %= 3; /* With -ccc force CDPV2, enable CDPV1 */ if (protos[i].mode == LLDPD_MODE_CDPV1 && protos[i].enabled == 3) { protos[i].enabled = 1; } /* With -cc force CDPV1, enable CDPV2 */ if (protos[i].mode == LLDPD_MODE_CDPV2 && protos[i].enabled == 2) { protos[i].enabled = 1; } /* With -cccc disable CDPV1, enable CDPV2 */ if (protos[i].mode == LLDPD_MODE_CDPV1 && protos[i].enabled >= 4) { protos[i].enabled = 0; } /* With -cccc disable CDPV1, enable CDPV2; -ccccc will force CDPv2 */ if (protos[i].mode == LLDPD_MODE_CDPV2 && protos[i].enabled == 4) { protos[i].enabled = 1; } if (protos[i].enabled > 1) log_info("main", "protocol %s enabled and forced", 
protos[i].name); else if (protos[i].enabled) log_info("main", "protocol %s enabled", protos[i].name); else log_info("main", "protocol %s disabled", protos[i].name); } TAILQ_INIT(&cfg->g_hardware); TAILQ_INIT(&cfg->g_chassis); TAILQ_INSERT_TAIL(&cfg->g_chassis, lchassis, c_entries); lchassis->c_refcount++; /* We should always keep a reference to local chassis */ /* Main loop */ log_debug("main", "start main loop"); levent_loop(cfg); lldpd_exit(cfg); free(cfg); return (0); }
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % M M AAA TTTTT L AAA BBBB % % MM MM A A T L A A B B % % M M M AAAAA T L AAAAA BBBB % % M M A A T L A A B B % % M M A A T LLLLL A A BBBB % % % % % % Read MATLAB Image Format % % % % Software Design % % Jaroslav Fojtik % % 2001-2008 % % % % % % Permission is hereby granted, free of charge, to any person obtaining a % % copy of this software and associated documentation files ("ImageMagick"), % % to deal in ImageMagick without restriction, including without limitation % % the rights to use, copy, modify, merge, publish, distribute, sublicense, % % and/or sell copies of ImageMagick, and to permit persons to whom the % % ImageMagick is furnished to do so, subject to the following conditions: % % % % The above copyright notice and this permission notice shall be included in % % all copies or substantial portions of ImageMagick. % % % % The software is provided "as is", without warranty of any kind, express or % % implied, including but not limited to the warranties of merchantability, % % fitness for a particular purpose and noninfringement. In no event shall % % ImageMagick Studio be liable for any claim, damages or other liability, % % whether in an action of contract, tort or otherwise, arising from, out of % % or in connection with ImageMagick or the use or other dealings in % % ImageMagick. % % % % Except as contained in this notice, the name of the ImageMagick Studio % % shall not be used in advertising or otherwise to promote the sale, use or % % other dealings in ImageMagick without prior written authorization from the % % ImageMagick Studio. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace-private.h" #include "magick/distort.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/module.h" #include "magick/transform.h" #include "magick/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Forward declaration. 
*/ static MagickBooleanType WriteMATImage(const ImageInfo *,Image *); /* Auto coloring method, sorry this creates some artefact inside data MinReal+j*MaxComplex = red MaxReal+j*MaxComplex = black MinReal+j*0 = white MaxReal+j*0 = black MinReal+j*MinComplex = blue MaxReal+j*MinComplex = black */ typedef struct { char identific[124]; unsigned short Version; char EndianIndicator[2]; unsigned long DataType; unsigned int ObjectSize; unsigned long unknown1; unsigned long unknown2; unsigned short unknown5; unsigned char StructureFlag; unsigned char StructureClass; unsigned long unknown3; unsigned long unknown4; unsigned long DimFlag; unsigned long SizeX; unsigned long SizeY; unsigned short Flag1; unsigned short NameFlag; } MATHeader; static const char *MonthsTab[12]={"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"}; static const char *DayOfWTab[7]={"Sun","Mon","Tue","Wed","Thu","Fri","Sat"}; static const char *OsDesc= #if defined(MAGICKCORE_WINDOWS_SUPPORT) "PCWIN"; #else #ifdef __APPLE__ "MAC"; #else "LNX86"; #endif #endif typedef enum { miINT8 = 1, /* 8 bit signed */ miUINT8, /* 8 bit unsigned */ miINT16, /* 16 bit signed */ miUINT16, /* 16 bit unsigned */ miINT32, /* 32 bit signed */ miUINT32, /* 32 bit unsigned */ miSINGLE, /* IEEE 754 single precision float */ miRESERVE1, miDOUBLE, /* IEEE 754 double precision float */ miRESERVE2, miRESERVE3, miINT64, /* 64 bit signed */ miUINT64, /* 64 bit unsigned */ miMATRIX, /* MATLAB array */ miCOMPRESSED, /* Compressed Data */ miUTF8, /* Unicode UTF-8 Encoded Character Data */ miUTF16, /* Unicode UTF-16 Encoded Character Data */ miUTF32 /* Unicode UTF-32 Encoded Character Data */ } mat5_data_type; typedef enum { mxCELL_CLASS=1, /* cell array */ mxSTRUCT_CLASS, /* structure */ mxOBJECT_CLASS, /* object */ mxCHAR_CLASS, /* character array */ mxSPARSE_CLASS, /* sparse array */ mxDOUBLE_CLASS, /* double precision array */ mxSINGLE_CLASS, /* single precision floating point */ mxINT8_CLASS, /* 8 bit signed integer */ mxUINT8_CLASS, /* 8 bit unsigned integer */ mxINT16_CLASS, /* 16 bit signed integer */ mxUINT16_CLASS, /* 16 bit unsigned integer */ mxINT32_CLASS, /* 32 bit signed integer */ mxUINT32_CLASS, /* 32 bit unsigned integer */ mxINT64_CLASS, /* 64 bit signed integer */ mxUINT64_CLASS, /* 64 bit unsigned integer */ mxFUNCTION_CLASS /* Function handle */ } arrayclasstype; #define FLAG_COMPLEX 0x8 #define FLAG_GLOBAL 0x4 #define FLAG_LOGICAL 0x2 static const QuantumType z2qtype[4] = {GrayQuantum, BlueQuantum, GreenQuantum, RedQuantum}; static void InsertComplexDoubleRow(double *p, int y, Image * image, double MinVal, double MaxVal) { ExceptionInfo *exception; double f; int x; register PixelPacket *q; if (MinVal == 0) MinVal = -1; if (MaxVal == 0) MaxVal = 1; exception=(&image->exception); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) return; for (x = 0; x < (ssize_t) image->columns; x++) { if (*p > 0) { f = (*p / MaxVal) * (QuantumRange - GetPixelRed(q)); if (f + GetPixelRed(q) > QuantumRange) SetPixelRed(q,QuantumRange); else SetPixelRed(q,GetPixelRed(q)+(int) f); if ((int) f / 2.0 > GetPixelGreen(q)) { SetPixelGreen(q,0); SetPixelBlue(q,0); } else { SetPixelBlue(q,GetPixelBlue(q)-(int) (f/2.0)); SetPixelGreen(q,GetPixelBlue(q)); } } if (*p < 0) { f = (*p / MaxVal) * (QuantumRange - GetPixelBlue(q)); if (f + GetPixelBlue(q) > QuantumRange) SetPixelBlue(q,QuantumRange); else SetPixelBlue(q,GetPixelBlue(q)+(int) f); if ((int) f / 2.0 > q->green) { SetPixelRed(q,0); SetPixelGreen(q,0); 
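          /* Overflow case: drop the other two channels completely. */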
} else { SetPixelRed(q,GetPixelRed(q)-(int) (f/2.0)); SetPixelGreen(q,GetPixelRed(q)); } } p++; q++; } if (!SyncAuthenticPixels(image,exception)) return; return; } static void InsertComplexFloatRow(float *p, int y, Image * image, double MinVal, double MaxVal) { ExceptionInfo *exception; double f; int x; register PixelPacket *q; if (MinVal == 0) MinVal = -1; if (MaxVal == 0) MaxVal = 1; exception=(&image->exception); q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (PixelPacket *) NULL) return; for (x = 0; x < (ssize_t) image->columns; x++) { if (*p > 0) { f = (*p / MaxVal) * (QuantumRange - GetPixelRed(q)); if (f + GetPixelRed(q) > QuantumRange) SetPixelRed(q,QuantumRange); else SetPixelRed(q,GetPixelRed(q)+(int) f); if ((int) f / 2.0 > GetPixelGreen(q)) { SetPixelGreen(q,0); SetPixelBlue(q,0); } else { SetPixelBlue(q,GetPixelBlue(q)-(int) (f/2.0)); SetPixelGreen(q,GetPixelBlue(q)); } } if (*p < 0) { f = (*p / MaxVal) * (QuantumRange - GetPixelBlue(q)); if (f + GetPixelBlue(q) > QuantumRange) SetPixelBlue(q,QuantumRange); else SetPixelBlue(q,GetPixelBlue(q)+(int) f); if ((int) f / 2.0 > q->green) { SetPixelGreen(q,0); SetPixelRed(q,0); } else { SetPixelRed(q,GetPixelRed(q)-(int) (f/2.0)); SetPixelGreen(q,GetPixelRed(q)); } } p++; q++; } if (!SyncAuthenticPixels(image,exception)) return; return; } /************** READERS ******************/ /* This function reads one block of floats*/ static void ReadBlobFloatsLSB(Image * image, size_t len, float *data) { while (len >= 4) { *data++ = ReadBlobFloat(image); len -= sizeof(float); } if (len > 0) (void) SeekBlob(image, len, SEEK_CUR); } static void ReadBlobFloatsMSB(Image * image, size_t len, float *data) { while (len >= 4) { *data++ = ReadBlobFloat(image); len -= sizeof(float); } if (len > 0) (void) SeekBlob(image, len, SEEK_CUR); } /* This function reads one block of doubles*/ static void ReadBlobDoublesLSB(Image * image, size_t len, double *data) { while (len >= 8) { *data++ = ReadBlobDouble(image); len -= sizeof(double); } if (len > 0) (void) SeekBlob(image, len, SEEK_CUR); } static void ReadBlobDoublesMSB(Image * image, size_t len, double *data) { while (len >= 8) { *data++ = ReadBlobDouble(image); len -= sizeof(double); } if (len > 0) (void) SeekBlob(image, len, SEEK_CUR); } /* Calculate minimum and maximum from a given block of data */ static void CalcMinMax(Image *image, int endian_indicator, int SizeX, int SizeY, size_t CellType, unsigned ldblk, void *BImgBuff, double *Min, double *Max) { MagickOffsetType filepos; int i, x; void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data); void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data); double *dblrow; float *fltrow; if (endian_indicator == LSBEndian) { ReadBlobDoublesXXX = ReadBlobDoublesLSB; ReadBlobFloatsXXX = ReadBlobFloatsLSB; } else /* MI */ { ReadBlobDoublesXXX = ReadBlobDoublesMSB; ReadBlobFloatsXXX = ReadBlobFloatsMSB; } filepos = TellBlob(image); /* Please note that file seeking occurs only in the case of doubles */ for (i = 0; i < SizeY; i++) { if (CellType==miDOUBLE) { ReadBlobDoublesXXX(image, ldblk, (double *)BImgBuff); dblrow = (double *)BImgBuff; if (i == 0) { *Min = *Max = *dblrow; } for (x = 0; x < SizeX; x++) { if (*Min > *dblrow) *Min = *dblrow; if (*Max < *dblrow) *Max = *dblrow; dblrow++; } } if (CellType==miSINGLE) { ReadBlobFloatsXXX(image, ldblk, (float *)BImgBuff); fltrow = (float *)BImgBuff; if (i == 0) { *Min = *Max = *fltrow; } for (x = 0; x < (ssize_t) SizeX; x++) { if (*Min > *fltrow) *Min = *fltrow; if 
(*Max < *fltrow)
          *Max = *fltrow;
        fltrow++;
      }
    }
  }
  (void) SeekBlob(image, filepos, SEEK_SET);
}

static void FixSignedValues(PixelPacket *q, int y)
{
  while(y-->0)
  {
     /* Please note that negative values will overflow
        Q=8; QuantumRange=255: <0;127> + 127+1 = <128; 255>
                           <-1;-128> + 127+1 = <0; 127> */
    SetPixelRed(q,GetPixelRed(q)+QuantumRange/2+1);
    SetPixelGreen(q,GetPixelGreen(q)+QuantumRange/2+1);
    SetPixelBlue(q,GetPixelBlue(q)+QuantumRange/2+1);
    q++;
  }
}

/** Fix whole row of logical/binary data. It means pack it into bits. */
static void FixLogical(unsigned char *Buff,int ldblk)
{
  unsigned char mask=128;
  unsigned char *BuffL = Buff;
  unsigned char val = 0;

  while(ldblk-->0)
  {
    if(*Buff++ != 0)
      val |= mask;

    mask >>= 1;
    if(mask==0)
    {
      *BuffL++ = val;
      val = 0;
      mask = 128;
    }
  }
  *BuffL = val;
}

#if defined(MAGICKCORE_ZLIB_DELEGATE)
static voidpf AcquireZIPMemory(voidpf context,unsigned int items,
  unsigned int size)
{
  (void) context;
  return((voidpf) AcquireQuantumMemory(items,size));
}

static void RelinquishZIPMemory(voidpf context,voidpf memory)
{
  (void) context;
  memory=RelinquishMagickMemory(memory);
}
#endif

#if defined(MAGICKCORE_ZLIB_DELEGATE)
/** This procedure decompresses an image block for a new MATLAB format. */
static Image *decompress_block(Image *orig, unsigned int *Size, ImageInfo *clone_info, ExceptionInfo *exception)
{
  Image *image2;
  void *cache_block, *decompress_block;
  z_stream zip_info;
  FILE *mat_file;
  size_t magick_size;
  size_t extent;
  int file;

  int status;
  int zip_status;
  ssize_t TotalSize = 0;

  if(clone_info==NULL) return NULL;
  if(clone_info->file)    /* Close file opened from previous transaction. */
  {
    fclose(clone_info->file);
    clone_info->file = NULL;
    (void) remove_utf8(clone_info->filename);
  }

  cache_block = AcquireQuantumMemory((size_t) ((*Size < 16384) ? *Size : 16384),sizeof(unsigned char *));
  if(cache_block==NULL) return NULL;
  decompress_block = AcquireQuantumMemory((size_t)(4096),sizeof(unsigned char *));
  if(decompress_block==NULL)
  {
    RelinquishMagickMemory(cache_block);
    return NULL;
  }

  mat_file=0;
  file = AcquireUniqueFileResource(clone_info->filename);
  if (file != -1)
    mat_file = fdopen(file,"w");
  if(!mat_file)
  {
    RelinquishMagickMemory(cache_block);
    RelinquishMagickMemory(decompress_block);
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Cannot create file stream for decompressed image");
    return NULL;
  }

  zip_info.zalloc=AcquireZIPMemory;
  zip_info.zfree=RelinquishZIPMemory;
  zip_info.opaque = (voidpf) NULL;
  zip_status = inflateInit(&zip_info);
  if (zip_status != Z_OK)
  {
    RelinquishMagickMemory(cache_block);
    RelinquishMagickMemory(decompress_block);
    (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
      "UnableToUncompressImage","`%s'",clone_info->filename);
    (void) fclose(mat_file);
    RelinquishUniqueFileResource(clone_info->filename);
    return NULL;
  }
  /* zip_info.next_out = 8*4;*/

  zip_info.avail_in = 0;
  zip_info.total_out = 0;
  while(*Size>0 && !EOFBlob(orig))
  {
    magick_size = ReadBlob(orig, (*Size < 16384) ? 
*Size : 16384, (unsigned char *) cache_block); zip_info.next_in = (Bytef *) cache_block; zip_info.avail_in = (uInt) magick_size; while(zip_info.avail_in>0) { zip_info.avail_out = 4096; zip_info.next_out = (Bytef *) decompress_block; zip_status = inflate(&zip_info,Z_NO_FLUSH); if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END)) break; extent=fwrite(decompress_block, 4096-zip_info.avail_out, 1, mat_file); (void) extent; TotalSize += 4096-zip_info.avail_out; if(zip_status == Z_STREAM_END) goto DblBreak; } if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END)) break; *Size -= magick_size; } DblBreak: inflateEnd(&zip_info); (void)fclose(mat_file); RelinquishMagickMemory(cache_block); RelinquishMagickMemory(decompress_block); *Size = TotalSize; if((clone_info->file=fopen(clone_info->filename,"rb"))==NULL) goto UnlinkFile; if( (image2 = AcquireImage(clone_info))==NULL ) goto EraseFile; status = OpenBlob(clone_info,image2,ReadBinaryBlobMode,exception); if (status == MagickFalse) { DeleteImageFromList(&image2); EraseFile: fclose(clone_info->file); clone_info->file = NULL; UnlinkFile: RelinquishUniqueFileResource(clone_info->filename); return NULL; } return image2; } #endif static Image *ReadMATImageV4(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { typedef struct { unsigned char Type[4]; unsigned int nRows; unsigned int nCols; unsigned int imagf; unsigned int nameLen; } MAT4_HDR; long ldblk; EndianType endian; Image *rotate_image; MagickBooleanType status; MAT4_HDR HDR; QuantumInfo *quantum_info; QuantumFormatType format_type; register ssize_t i; ssize_t count, y; unsigned char *pixels; unsigned int depth; (void) SeekBlob(image,0,SEEK_SET); while (EOFBlob(image) != MagickFalse) { /* Object parser. */ ldblk=ReadBlobLSBLong(image); if (EOFBlob(image) != MagickFalse) break; if ((ldblk > 9999) || (ldblk < 0)) break; HDR.Type[3]=ldblk % 10; ldblk /= 10; /* T digit */ HDR.Type[2]=ldblk % 10; ldblk /= 10; /* P digit */ HDR.Type[1]=ldblk % 10; ldblk /= 10; /* O digit */ HDR.Type[0]=ldblk; /* M digit */ if (HDR.Type[3] != 0) break; /* Data format */ if (HDR.Type[2] != 0) break; /* Always 0 */ if (HDR.Type[0] == 0) { HDR.nRows=ReadBlobLSBLong(image); HDR.nCols=ReadBlobLSBLong(image); HDR.imagf=ReadBlobLSBLong(image); HDR.nameLen=ReadBlobLSBLong(image); endian=LSBEndian; } else { HDR.nRows=ReadBlobMSBLong(image); HDR.nCols=ReadBlobMSBLong(image); HDR.imagf=ReadBlobMSBLong(image); HDR.nameLen=ReadBlobMSBLong(image); endian=MSBEndian; } if ((HDR.imagf !=0) && (HDR.imagf !=1)) break; if (HDR.nameLen > 0xFFFF) break; for (i=0; i < (ssize_t) HDR.nameLen; i++) { int byte; /* Skip matrix name. 
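(consumed one byte at a time so that a premature EOF inside the name field
can be detected and reported)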
*/ byte=ReadBlobByte(image); if (byte == EOF) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } } image->columns=(size_t) HDR.nRows; image->rows=(size_t) HDR.nCols; SetImageColorspace(image,GRAYColorspace); if (image_info->ping != MagickFalse) { Swap(image->columns,image->rows); return(image); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) return((Image *) NULL); quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) return((Image *) NULL); switch(HDR.Type[1]) { case 0: format_type=FloatingPointQuantumFormat; depth=64; break; case 1: format_type=FloatingPointQuantumFormat; depth=32; break; case 2: format_type=UnsignedQuantumFormat; depth=16; break; case 3: format_type=SignedQuantumFormat; depth=16; break; case 4: format_type=UnsignedQuantumFormat; depth=8; break; default: format_type=UnsignedQuantumFormat; depth=8; break; } image->depth=depth; if (HDR.Type[0] != 0) SetQuantumEndian(image,quantum_info,MSBEndian); status=SetQuantumFormat(image,quantum_info,format_type); status=SetQuantumDepth(image,quantum_info,depth); status=SetQuantumEndian(image,quantum_info,endian); SetQuantumScale(quantum_info,1.0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; count=ReadBlob(image,depth/8*image->columns,(unsigned char *) pixels); if (count == -1) break; q=QueueAuthenticPixels(image,0,image->rows-y-1,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayQuantum,pixels,exception); if ((HDR.Type[1] == 2) || (HDR.Type[1] == 3)) FixSignedValues(q,image->columns); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if (HDR.imagf == 1) for (y=0; y < (ssize_t) image->rows; y++) { /* Read complex pixels. */ count=ReadBlob(image,depth/8*image->columns,(unsigned char *) pixels); if (count == -1) break; if (HDR.Type[1] == 0) InsertComplexDoubleRow((double *) pixels,y,image,0,0); else InsertComplexFloatRow((float *) pixels,y,image,0,0); } quantum_info=DestroyQuantumInfo(quantum_info); rotate_image=RotateImage(image,90.0,exception); if (rotate_image != (Image *) NULL) { image=DestroyImage(image); image=rotate_image; } if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d M A T L A B i m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadMATImage() reads an MAT X image file and returns it. 
It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadMATImage method is: % % Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Method ReadMATImage returns a pointer to the image after % reading. A null image is returned if there is a memory shortage or if % the image cannot be read. % % o image_info: Specifies a pointer to a ImageInfo structure. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image, *image2=NULL, *rotated_image; PixelPacket *q; unsigned int status; MATHeader MATLAB_HDR; size_t size; size_t CellType; QuantumInfo *quantum_info; ImageInfo *clone_info; int i; ssize_t ldblk; unsigned char *BImgBuff = NULL; double MinVal, MaxVal; size_t Unknown6; unsigned z, z2; unsigned Frames; int logging; int sample_size; MagickOffsetType filepos=0x80; BlobInfo *blob; size_t one; unsigned int (*ReadBlobXXXLong)(Image *image); unsigned short (*ReadBlobXXXShort)(Image *image); void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data); void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data); assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter"); /* Open image file. */ quantum_info=(QuantumInfo *) NULL; image = AcquireImage(image_info); status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read MATLAB image. 
*/
  clone_info=(ImageInfo *) NULL;
  if(ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (strncmp(MATLAB_HDR.identific,"MATLAB",6) != 0)
  {
    image2=ReadMATImageV4(image_info,image,exception);
    if (image2 == NULL)
      goto MATLAB_KO;
    image=image2;
    goto END_OF_READING;
  }
  MATLAB_HDR.Version = ReadBlobLSBShort(image);
  if(ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (logging)
    (void)LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c",
      MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]);
  if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2))
  {
    ReadBlobXXXLong = ReadBlobLSBLong;
    ReadBlobXXXShort = ReadBlobLSBShort;
    ReadBlobDoublesXXX = ReadBlobDoublesLSB;
    ReadBlobFloatsXXX = ReadBlobFloatsLSB;
    image->endian = LSBEndian;
  }
  else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2))
  {
    ReadBlobXXXLong = ReadBlobMSBLong;
    ReadBlobXXXShort = ReadBlobMSBShort;
    ReadBlobDoublesXXX = ReadBlobDoublesMSB;
    ReadBlobFloatsXXX = ReadBlobFloatsMSB;
    image->endian = MSBEndian;
  }
  else
    goto MATLAB_KO;    /* unsupported endian */

  if (strncmp(MATLAB_HDR.identific, "MATLAB", 6))
  {
MATLAB_KO:
    if (clone_info != (ImageInfo *) NULL)
      clone_info=DestroyImageInfo(clone_info);
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  }

  filepos = TellBlob(image);
  while(!EOFBlob(image)) /* object parser loop */
  {
    Frames = 1;
    (void) SeekBlob(image,filepos,SEEK_SET);
    /* printf("pos=%X\n",TellBlob(image)); */

    MATLAB_HDR.DataType = ReadBlobXXXLong(image);
    if(EOFBlob(image))
      break;
    MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image);
    if(EOFBlob(image))
      break;
    if((MagickSizeType) (MATLAB_HDR.ObjectSize+filepos) > GetBlobSize(image))
      goto MATLAB_KO;
    filepos += MATLAB_HDR.ObjectSize + 4 + 4;

    clone_info=CloneImageInfo(image_info);
    image2 = image;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
    if(MATLAB_HDR.DataType == miCOMPRESSED)
    {
      image2 = decompress_block(image,&MATLAB_HDR.ObjectSize,clone_info,
        exception);
      if(image2==NULL)
        continue;
      MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */
    }
#endif

    if(MATLAB_HDR.DataType!=miMATRIX)
      continue;  /* skip other objects. */

    MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2);
    MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2);

    MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2);
    MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF;
    MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF;

    MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2);
    if(image!=image2)
      MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* ??? don't understand why ?? */
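    /* (Editorial guess, unverified: when the matrix was inflated from a
       miCOMPRESSED element, the temporary stream appears to carry one extra
       32-bit word at this point, so the unconditional read below keeps the
       parser aligned.) */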
    MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2);
    MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2);
    MATLAB_HDR.SizeX = ReadBlobXXXLong(image2);
    MATLAB_HDR.SizeY = ReadBlobXXXLong(image2);

    switch(MATLAB_HDR.DimFlag)
    {
      case 8:
        z2=z=1;
        break;                         /* 2D matrix */
      case 12:
        z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB */
        Unknown6 = ReadBlobXXXLong(image2);
        (void) Unknown6;
        if(z!=3)
          ThrowReaderException(CoderError,
            "MultidimensionalMatricesAreNotSupported");
        break;
      case 16:
        z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */
        if(z!=3 && z!=1)
          ThrowReaderException(CoderError,
            "MultidimensionalMatricesAreNotSupported");
        Frames = ReadBlobXXXLong(image2);
        if (Frames == 0)
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        break;
      default:
        ThrowReaderException(CoderError,
          "MultidimensionalMatricesAreNotSupported");
    }

    MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2);
    MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2);

    if (logging)
      (void)LogMagickEvent(CoderEvent,GetMagickModule(),
        "MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass);
    if (MATLAB_HDR.StructureClass != mxCHAR_CLASS &&
        MATLAB_HDR.StructureClass != mxSINGLE_CLASS &&  /* float + complex float */
        MATLAB_HDR.StructureClass != mxDOUBLE_CLASS &&  /* double + complex double */
        MATLAB_HDR.StructureClass != mxINT8_CLASS &&
        MATLAB_HDR.StructureClass != mxUINT8_CLASS &&   /* uint8 + uint8 3D */
        MATLAB_HDR.StructureClass != mxINT16_CLASS &&
        MATLAB_HDR.StructureClass != mxUINT16_CLASS &&  /* uint16 + uint16 3D */
        MATLAB_HDR.StructureClass != mxINT32_CLASS &&
        MATLAB_HDR.StructureClass != mxUINT32_CLASS &&  /* uint32 + uint32 3D */
        MATLAB_HDR.StructureClass != mxINT64_CLASS &&
        MATLAB_HDR.StructureClass != mxUINT64_CLASS)    /* uint64 + uint64 3D */
      ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix");

    switch (MATLAB_HDR.NameFlag)
    {
      case 0:
        size = ReadBlobXXXLong(image2);  /* Object name string size */
        size = 4 * (ssize_t) ((size + 3 + 1) / 4);
        (void) SeekBlob(image2, size, SEEK_CUR);
        break;
      case 1:
      case 2:
      case 3:
      case 4:
        (void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */
        break;
      default:
        goto MATLAB_KO;
    }

    CellType = ReadBlobXXXLong(image2);    /* Additional object type */
    if (logging)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "MATLAB_HDR.CellType: %.20g",(double) CellType);

    (void) ReadBlob(image2, 4, (unsigned char *) &size);     /* data size */

NEXT_FRAME:
    switch (CellType)
    {
      case miINT8:
      case miUINT8:
        sample_size = 8;
        if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL)
          image->depth = 1;
        else
          image->depth = 8;         /* Byte type cell */
        ldblk = (ssize_t) MATLAB_HDR.SizeX;
        break;
      case miINT16:
      case miUINT16:
        sample_size = 16;
        image->depth = 16;          /* Word type cell */
        ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX);
        break;
      case miINT32:
      case miUINT32:
        sample_size = 32;
        image->depth = 32;          /* Dword type cell */
        ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
        break;
      case miINT64:
      case miUINT64:
        sample_size = 64;
        image->depth = 64;          /* Qword type cell */
        ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX);
        break;
      case miSINGLE:
        sample_size = 32;
        image->depth = 32;          /* float type cell */
        (void) SetImageOption(clone_info,"quantum:format","floating-point");
        if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX)
        {                           /* complex float type cell */
        }
        ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX);
        break;
      case miDOUBLE:
        sample_size = 64;
        image->depth = 64;          /* double type cell */
        (void) SetImageOption(clone_info,"quantum:format","floating-point");
        DisableMSCWarning(4127)
        if (sizeof(double) != 8)
          RestoreMSCWarning
            ThrowReaderException(CoderError, "IncompatibleSizeOfDouble");
        if
(MATLAB_HDR.StructureFlag & FLAG_COMPLEX) { /* complex double type cell */ } ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX); break; default: if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); if (clone_info) clone_info=DestroyImageInfo(clone_info); ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix"); } (void) sample_size; image->columns = MATLAB_HDR.SizeX; image->rows = MATLAB_HDR.SizeY; one=1; image->colors = one << image->depth; if (image->columns == 0 || image->rows == 0) goto MATLAB_KO; if((unsigned long)ldblk*MATLAB_HDR.SizeY > MATLAB_HDR.ObjectSize) goto MATLAB_KO; /* Image is gray when no complex flag is set and 2D Matrix */ if ((MATLAB_HDR.DimFlag == 8) && ((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0)) { SetImageColorspace(image,GRAYColorspace); image->type=GrayscaleType; } /* If ping is true, then only set image size and colors without reading any image data. */ if (image_info->ping) { size_t temp = image->columns; image->columns = image->rows; image->rows = temp; goto done_reading; /* !!!!!! BAD !!!! */ } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } quantum_info=AcquireQuantumInfo(clone_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* ----- Load raster data ----- */ BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */ if (BImgBuff == NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(BImgBuff,0,ldblk*sizeof(double)); MinVal = 0; MaxVal = 0; if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */ { CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum); } /* Main loop for reading all scanlines */ if(z==1) z=0; /* read grey scanlines */ /* else read color scanlines */ do { for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++) { q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception); if (q == (PixelPacket *) NULL) { if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1)); goto done_reading; /* Skip image rotation, when cannot set image pixels */ } if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk) { if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1)); goto ExitLoop; } if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL)) { FixLogical((unsigned char *)BImgBuff,ldblk); if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0) { ImportQuantumPixelsFailed: if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1)); break; } } else { if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0) goto ImportQuantumPixelsFailed; if (z<=1 && /* fix only during a last pass z==0 || z==1 */ (CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64)) FixSignedValues(q,MATLAB_HDR.SizeX); } if (!SyncAuthenticPixels(image,exception)) { if (logging) 
(void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1)); goto ExitLoop; } } } while(z-- >= 2); ExitLoop: /* Read complex part of numbers here */ if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX) { /* Find Min and Max Values for complex parts of floats */ CellType = ReadBlobXXXLong(image2); /* Additional object type */ i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/ if (CellType==miDOUBLE || CellType==miSINGLE) { CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal); } if (CellType==miDOUBLE) for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++) { ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff); InsertComplexDoubleRow((double *)BImgBuff, i, image, MinVal, MaxVal); } if (CellType==miSINGLE) for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++) { ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff); InsertComplexFloatRow((float *)BImgBuff, i, image, MinVal, MaxVal); } } /* Image is gray when no complex flag is set and 2D Matrix AGAIN!!! */ if ((MATLAB_HDR.DimFlag == 8) && ((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0)) image->type=GrayscaleType; if (image->depth == 1) image->type=BilevelType; if(image2==image) image2 = NULL; /* Remove shadow copy to an image before rotation. */ /* Rotate image. */ rotated_image = RotateImage(image, 90.0, exception); if (rotated_image != (Image *) NULL) { /* Remove page offsets added by RotateImage */ rotated_image->page.x=0; rotated_image->page.y=0; blob = rotated_image->blob; rotated_image->blob = image->blob; rotated_image->colors = image->colors; image->blob = blob; AppendImageToList(&image,rotated_image); DeleteImageFromList(&image); } done_reading: if(image2!=NULL) if(image2!=image) { DeleteImageFromList(&image2); if(clone_info) { if(clone_info->file) { fclose(clone_info->file); clone_info->file = NULL; (void) remove_utf8(clone_info->filename); } } } /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (image->next == (Image *) NULL) break; image=SyncNextImageInList(image); image->columns=image->rows=0; image->colors=0; /* row scan buffer is no longer needed */ RelinquishMagickMemory(BImgBuff); BImgBuff = NULL; if(--Frames>0) { z = z2; if(image2==NULL) image2 = image; goto NEXT_FRAME; } if(image2!=NULL) if(image2!=image) /* Does shadow temporary decompressed image exist? */ { /* CloseBlob(image2); */ DeleteImageFromList(&image2); if(clone_info) { if(clone_info->file) { fclose(clone_info->file); clone_info->file = NULL; (void) unlink(clone_info->filename); } } } } RelinquishMagickMemory(BImgBuff); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); END_OF_READING: if (clone_info) clone_info=DestroyImageInfo(clone_info); CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. 
*/
    p=image;
    image=NULL;
    while (p != (Image *) NULL)
    {
      Image *tmp=p;
      if ((p->rows == 0) || (p->columns == 0))
      {
        p=p->previous;
        DeleteImageFromList(&tmp);
      }
      else
      {
        image=p;
        p=p->previous;
      }
    }

    /* Fix scene numbers */
    for (p=image; p != (Image *) NULL; p=p->next)
      p->scene=scene++;
  }

  if(clone_info != NULL)  /* cleanup garbage file from compression */
  {
    if(clone_info->file)
    {
      fclose(clone_info->file);
      clone_info->file = NULL;
      (void) remove_utf8(clone_info->filename);
    }
    DestroyImageInfo(clone_info);
    clone_info = NULL;
  }
  if (logging)
    (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return");
  if ((image != image2) && (image2 != (Image *) NULL))
    image2=DestroyImage(image2);
  if(image==NULL)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  return (image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r M A T I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Method RegisterMATImage adds attributes for the MAT image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterMATImage method is:
%
%      size_t RegisterMATImage(void)
%
*/
ModuleExport size_t RegisterMATImage(void)
{
  MagickInfo *entry;

  entry=SetMagickInfo("MAT");
  entry->decoder=(DecodeImageHandler *) ReadMATImage;
  entry->encoder=(EncodeImageHandler *) WriteMATImage;
  entry->blob_support=MagickFalse;
  entry->seekable_stream=MagickTrue;
  entry->description=AcquireString("MATLAB level 5 image format");
  entry->module=AcquireString("MAT");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r M A T I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Method UnregisterMATImage removes format registrations made by the
%  MAT module from the list of supported formats.
%
%  The format of the UnregisterMATImage method is:
%
%      UnregisterMATImage(void)
%
*/
ModuleExport void UnregisterMATImage(void)
{
  (void) UnregisterMagickInfo("MAT");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e M A T L A B I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Function WriteMATImage writes a MATLAB matrix to a file.
%
%  The format of the WriteMATImage method is:
%
%      unsigned int WriteMATImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o status: Function WriteMATImage returns True if the image is written.
%      False is returned if there is a memory shortage or if the image file
%      fails to write.
%
%    o image_info: Specifies a pointer to an ImageInfo structure.
%
%    o image:  A pointer to an Image structure.
%
*/
static MagickBooleanType WriteMATImage(const ImageInfo *image_info,Image *image)
{
  char MATLAB_HDR[0x80];
  ExceptionInfo *exception;
  MagickBooleanType status;
  MagickOffsetType scene;
  struct tm local_time;
  time_t current_time;

  /* Open output image file.
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"enter MAT"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(MagickFalse); image->depth=8; current_time=time((time_t *) NULL); #if defined(MAGICKCORE_HAVE_LOCALTIME_R) (void) localtime_r(&current_time,&local_time); #else (void) memcpy(&local_time,localtime(&current_time),sizeof(local_time)); #endif (void) memset(MATLAB_HDR,' ',MagickMin(sizeof(MATLAB_HDR),124)); FormatLocaleString(MATLAB_HDR,sizeof(MATLAB_HDR), "MATLAB 5.0 MAT-file, Platform: %s, Created on: %s %s %2d %2d:%2d:%2d %d", OsDesc,DayOfWTab[local_time.tm_wday],MonthsTab[local_time.tm_mon], local_time.tm_mday,local_time.tm_hour,local_time.tm_min, local_time.tm_sec,local_time.tm_year+1900); MATLAB_HDR[0x7C]=0; MATLAB_HDR[0x7D]=1; MATLAB_HDR[0x7E]='I'; MATLAB_HDR[0x7F]='M'; (void) WriteBlob(image,sizeof(MATLAB_HDR),(unsigned char *) MATLAB_HDR); scene=0; do { char padding; MagickBooleanType is_gray; QuantumInfo *quantum_info; size_t data_size; unsigned char *pixels; unsigned int z; (void) TransformImageColorspace(image,sRGBColorspace); is_gray=SetImageGray(image,&image->exception); z=(is_gray != MagickFalse) ? 0 : 3; /* Store MAT header. */ data_size=image->rows*image->columns; if (is_gray == MagickFalse) data_size*=3; padding=((unsigned char)(data_size-1) & 0x7) ^ 0x7; (void) WriteBlobLSBLong(image,miMATRIX); (void) WriteBlobLSBLong(image,(unsigned int) data_size+padding+ ((is_gray != MagickFalse) ? 48 : 56)); (void) WriteBlobLSBLong(image,0x6); /* 0x88 */ (void) WriteBlobLSBLong(image,0x8); /* 0x8C */ (void) WriteBlobLSBLong(image,0x6); /* 0x90 */ (void) WriteBlobLSBLong(image,0); (void) WriteBlobLSBLong(image,0x5); /* 0x98 */ (void) WriteBlobLSBLong(image,(is_gray != MagickFalse) ? 0x8 : 0xC); /* 0x9C - DimFlag */ (void) WriteBlobLSBLong(image,(unsigned int) image->rows); /* x: 0xA0 */ (void) WriteBlobLSBLong(image,(unsigned int) image->columns); /* y: 0xA4 */ if (is_gray == MagickFalse) { (void) WriteBlobLSBLong(image,3); /* z: 0xA8 */ (void) WriteBlobLSBLong(image,0); } (void) WriteBlobLSBShort(image,1); /* 0xB0 */ (void) WriteBlobLSBShort(image,1); /* 0xB2 */ (void) WriteBlobLSBLong(image,'M'); /* 0xB4 */ (void) WriteBlobLSBLong(image,0x2); /* 0xB8 */ (void) WriteBlobLSBLong(image,(unsigned int) data_size); /* 0xBC */ /* Store image data. 
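Pixels are exported one column at a time: the loop below iterates y over
image->columns and each GetVirtualPixels() call fetches a 1-pixel-wide,
image->rows-tall strip, which matches MATLAB's column-major layout.  The
padding bytes written after the pixel data round the data element up to an
8-byte boundary; e.g. data_size = 10 gives
padding = ((10-1) & 0x7) ^ 0x7 = 6, i.e. 16 bytes in total.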
*/ exception=(&image->exception); quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); pixels=GetQuantumPixels(quantum_info); do { const PixelPacket *p; ssize_t y; for (y=0; y < (ssize_t)image->columns; y++) { p=GetVirtualPixels(image,y,0,1,image->rows,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info, z2qtype[z],pixels,exception); (void) WriteBlob(image,image->rows,pixels); } if (!SyncAuthenticPixels(image,exception)) break; } while (z-- >= 2); while (padding-- > 0) (void) WriteBlobByte(image,0); quantum_info=DestroyQuantumInfo(quantum_info); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++, GetImageListLength(image)); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); (void) CloseBlob(image); return(status); }
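/*
  Editorial sketch (not part of the original coder): ReadMATImageV4() above
  decodes the MAT v4 "MOPT" type code by peeling decimal digits from a value
  already validated to lie in 0..9999.  The same decomposition in isolation,
  with hypothetical variable names:

      int t = mopt % 10; mopt /= 10;   (units     - T digit)
      int p = mopt % 10; mopt /= 10;   (tens      - P digit)
      int o = mopt % 10; mopt /= 10;   (hundreds  - O digit)
      int m = mopt;                    (thousands - M digit; 0 selects
                                        little-endian in the reader above)

  e.g. mopt = 1000 yields m=1, o=0, p=0, t=0.
*/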
./CrossVul/dataset_final_sorted/CWE-617/c/good_2742_0
crossvul-cpp_data_bad_487_1
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* lib/krb5/krb/s4u_creds.c */ /* * Copyright (C) 2009 by the Massachusetts Institute of Technology. * All rights reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ #include "k5-int.h" #include "int-proto.h" /* Convert ticket flags to necessary KDC options */ #define FLAGS2OPTS(flags) (flags & KDC_TKT_COMMON_MASK) /* * Implements S4U2Self, by which a service can request a ticket to * itself on behalf of an arbitrary principal. */ static krb5_error_code krb5_get_as_key_noop( krb5_context context, krb5_principal client, krb5_enctype etype, krb5_prompter_fct prompter, void *prompter_data, krb5_data *salt, krb5_data *params, krb5_keyblock *as_key, void *gak_data, k5_response_items *ritems) { /* force a hard error, we don't actually have the key */ return KRB5_PREAUTH_FAILED; } static krb5_error_code s4u_identify_user(krb5_context context, krb5_creds *in_creds, krb5_data *subject_cert, krb5_principal *canon_user) { krb5_error_code code; krb5_preauthtype ptypes[1] = { KRB5_PADATA_S4U_X509_USER }; krb5_creds creds; int use_master = 0; krb5_get_init_creds_opt *opts = NULL; krb5_principal_data client; krb5_s4u_userid userid; *canon_user = NULL; if (in_creds->client == NULL && subject_cert == NULL) { return EINVAL; } if (in_creds->client != NULL && in_creds->client->type != KRB5_NT_ENTERPRISE_PRINCIPAL) { int anonymous; anonymous = krb5_principal_compare(context, in_creds->client, krb5_anonymous_principal()); return krb5_copy_principal(context, anonymous ? in_creds->server : in_creds->client, canon_user); } memset(&creds, 0, sizeof(creds)); memset(&userid, 0, sizeof(userid)); if (subject_cert != NULL) userid.subject_cert = *subject_cert; code = krb5_get_init_creds_opt_alloc(context, &opts); if (code != 0) goto cleanup; krb5_get_init_creds_opt_set_tkt_life(opts, 15); krb5_get_init_creds_opt_set_renew_life(opts, 0); krb5_get_init_creds_opt_set_forwardable(opts, 0); krb5_get_init_creds_opt_set_proxiable(opts, 0); krb5_get_init_creds_opt_set_canonicalize(opts, 1); krb5_get_init_creds_opt_set_preauth_list(opts, ptypes, 1); if (in_creds->client != NULL) { client = *in_creds->client; client.realm = in_creds->server->realm; } else { client.magic = KV5M_PRINCIPAL; client.realm = in_creds->server->realm; /* should this be NULL, empty or a fixed string? 
XXX */ client.data = NULL; client.length = 0; client.type = KRB5_NT_ENTERPRISE_PRINCIPAL; } code = k5_get_init_creds(context, &creds, &client, NULL, NULL, 0, NULL, opts, krb5_get_as_key_noop, &userid, &use_master, NULL); if (code == 0 || code == KRB5_PREAUTH_FAILED) { *canon_user = userid.user; userid.user = NULL; code = 0; } cleanup: krb5_free_cred_contents(context, &creds); if (opts != NULL) krb5_get_init_creds_opt_free(context, opts); if (userid.user != NULL) krb5_free_principal(context, userid.user); return code; } static krb5_error_code make_pa_for_user_checksum(krb5_context context, krb5_keyblock *key, krb5_pa_for_user *req, krb5_checksum *cksum) { krb5_error_code code; int i; char *p; krb5_data data; data.length = 4; for (i = 0; i < req->user->length; i++) data.length += req->user->data[i].length; data.length += req->user->realm.length; data.length += req->auth_package.length; p = data.data = malloc(data.length); if (data.data == NULL) return ENOMEM; p[0] = (req->user->type >> 0) & 0xFF; p[1] = (req->user->type >> 8) & 0xFF; p[2] = (req->user->type >> 16) & 0xFF; p[3] = (req->user->type >> 24) & 0xFF; p += 4; for (i = 0; i < req->user->length; i++) { if (req->user->data[i].length > 0) memcpy(p, req->user->data[i].data, req->user->data[i].length); p += req->user->data[i].length; } if (req->user->realm.length > 0) memcpy(p, req->user->realm.data, req->user->realm.length); p += req->user->realm.length; if (req->auth_package.length > 0) memcpy(p, req->auth_package.data, req->auth_package.length); /* Per spec, use hmac-md5 checksum regardless of key type. */ code = krb5_c_make_checksum(context, CKSUMTYPE_HMAC_MD5_ARCFOUR, key, KRB5_KEYUSAGE_APP_DATA_CKSUM, &data, cksum); free(data.data); return code; } static krb5_error_code build_pa_for_user(krb5_context context, krb5_creds *tgt, krb5_s4u_userid *userid, krb5_pa_data **out_padata) { krb5_error_code code; krb5_pa_data *padata; krb5_pa_for_user for_user; krb5_data *for_user_data = NULL; char package[] = "Kerberos"; if (userid->user == NULL) return EINVAL; memset(&for_user, 0, sizeof(for_user)); for_user.user = userid->user; for_user.auth_package.data = package; for_user.auth_package.length = sizeof(package) - 1; code = make_pa_for_user_checksum(context, &tgt->keyblock, &for_user, &for_user.cksum); if (code != 0) goto cleanup; code = encode_krb5_pa_for_user(&for_user, &for_user_data); if (code != 0) goto cleanup; padata = malloc(sizeof(*padata)); if (padata == NULL) { code = ENOMEM; goto cleanup; } padata->magic = KV5M_PA_DATA; padata->pa_type = KRB5_PADATA_FOR_USER; padata->length = for_user_data->length; padata->contents = (krb5_octet *)for_user_data->data; free(for_user_data); for_user_data = NULL; *out_padata = padata; cleanup: if (for_user.cksum.contents != NULL) krb5_free_checksum_contents(context, &for_user.cksum); krb5_free_data(context, for_user_data); return code; } /* * This function is invoked by krb5int_make_tgs_request_ext() just before the * request is encoded; it gives us access to the nonce and subkey without * requiring them to be generated by the caller. 
*/ static krb5_error_code build_pa_s4u_x509_user(krb5_context context, krb5_keyblock *subkey, krb5_kdc_req *tgsreq, void *gcvt_data) { krb5_error_code code; krb5_pa_s4u_x509_user *s4u_user = (krb5_pa_s4u_x509_user *)gcvt_data; krb5_data *data = NULL; krb5_pa_data **padata; krb5_cksumtype cksumtype; int i; assert(s4u_user->cksum.contents == NULL); s4u_user->user_id.nonce = tgsreq->nonce; code = encode_krb5_s4u_userid(&s4u_user->user_id, &data); if (code != 0) goto cleanup; /* [MS-SFU] 2.2.2: unusual to say the least, but enc_padata secures it */ if (subkey->enctype == ENCTYPE_ARCFOUR_HMAC || subkey->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) { cksumtype = CKSUMTYPE_RSA_MD4; } else { code = krb5int_c_mandatory_cksumtype(context, subkey->enctype, &cksumtype); } if (code != 0) goto cleanup; code = krb5_c_make_checksum(context, cksumtype, subkey, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, data, &s4u_user->cksum); if (code != 0) goto cleanup; krb5_free_data(context, data); data = NULL; code = encode_krb5_pa_s4u_x509_user(s4u_user, &data); if (code != 0) goto cleanup; assert(tgsreq->padata != NULL); for (i = 0; tgsreq->padata[i] != NULL; i++) ; padata = realloc(tgsreq->padata, (i + 2) * sizeof(krb5_pa_data *)); if (padata == NULL) { code = ENOMEM; goto cleanup; } tgsreq->padata = padata; padata[i] = malloc(sizeof(krb5_pa_data)); if (padata[i] == NULL) { code = ENOMEM; goto cleanup; } padata[i]->magic = KV5M_PA_DATA; padata[i]->pa_type = KRB5_PADATA_S4U_X509_USER; padata[i]->length = data->length; padata[i]->contents = (krb5_octet *)data->data; padata[i + 1] = NULL; free(data); data = NULL; cleanup: if (code != 0 && s4u_user->cksum.contents != NULL) { krb5_free_checksum_contents(context, &s4u_user->cksum); s4u_user->cksum.contents = NULL; } krb5_free_data(context, data); return code; } static krb5_error_code verify_s4u2self_reply(krb5_context context, krb5_keyblock *subkey, krb5_pa_s4u_x509_user *req_s4u_user, krb5_pa_data **rep_padata, krb5_pa_data **enc_padata) { krb5_error_code code; krb5_pa_data *rep_s4u_padata, *enc_s4u_padata; krb5_pa_s4u_x509_user *rep_s4u_user = NULL; krb5_data data, *datap = NULL; krb5_keyusage usage; krb5_boolean valid; krb5_boolean not_newer; assert(req_s4u_user != NULL); switch (subkey->enctype) { case ENCTYPE_DES_CBC_CRC: case ENCTYPE_DES_CBC_MD4: case ENCTYPE_DES_CBC_MD5: case ENCTYPE_DES3_CBC_SHA1: case ENCTYPE_DES3_CBC_RAW: case ENCTYPE_ARCFOUR_HMAC: case ENCTYPE_ARCFOUR_HMAC_EXP : not_newer = TRUE; break; default: not_newer = FALSE; break; } enc_s4u_padata = krb5int_find_pa_data(context, enc_padata, KRB5_PADATA_S4U_X509_USER); /* XXX this will break newer enctypes with a MIT 1.7 KDC */ rep_s4u_padata = krb5int_find_pa_data(context, rep_padata, KRB5_PADATA_S4U_X509_USER); if (rep_s4u_padata == NULL) { if (not_newer == FALSE || enc_s4u_padata != NULL) return KRB5_KDCREP_MODIFIED; else return 0; } data.length = rep_s4u_padata->length; data.data = (char *)rep_s4u_padata->contents; code = decode_krb5_pa_s4u_x509_user(&data, &rep_s4u_user); if (code != 0) goto cleanup; if (rep_s4u_user->user_id.nonce != req_s4u_user->user_id.nonce) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } code = encode_krb5_s4u_userid(&rep_s4u_user->user_id, &datap); if (code != 0) goto cleanup; if (rep_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY; else usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST; code = krb5_c_verify_checksum(context, subkey, usage, datap, &rep_s4u_user->cksum, &valid); if (code != 0) goto cleanup; if (valid == FALSE) { 
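        /* The checksum over the re-encoded user ID failed to verify: treat
         * the KDC reply as having been modified in transit. */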
code = KRB5_KDCREP_MODIFIED; goto cleanup; } /* * KDCs that support KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE also return * S4U enc_padata for older (pre-AES) encryption types only. */ if (not_newer) { if (enc_s4u_padata == NULL) { if (rep_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } } else { if (enc_s4u_padata->length != req_s4u_user->cksum.length + rep_s4u_user->cksum.length) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } if (memcmp(enc_s4u_padata->contents, req_s4u_user->cksum.contents, req_s4u_user->cksum.length) || memcmp(&enc_s4u_padata->contents[req_s4u_user->cksum.length], rep_s4u_user->cksum.contents, rep_s4u_user->cksum.length)) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } } } else if (!krb5_c_is_keyed_cksum(rep_s4u_user->cksum.checksum_type)) { code = KRB5KRB_AP_ERR_INAPP_CKSUM; goto cleanup; } cleanup: krb5_free_pa_s4u_x509_user(context, rep_s4u_user); krb5_free_data(context, datap); return code; } /* Unparse princ and re-parse it as an enterprise principal. */ static krb5_error_code convert_to_enterprise(krb5_context context, krb5_principal princ, krb5_principal *eprinc_out) { krb5_error_code code; char *str; *eprinc_out = NULL; code = krb5_unparse_name(context, princ, &str); if (code != 0) return code; code = krb5_parse_name_flags(context, str, KRB5_PRINCIPAL_PARSE_ENTERPRISE | KRB5_PRINCIPAL_PARSE_IGNORE_REALM, eprinc_out); krb5_free_unparsed_name(context, str); return code; } static krb5_error_code krb5_get_self_cred_from_kdc(krb5_context context, krb5_flags options, krb5_ccache ccache, krb5_creds *in_creds, krb5_data *subject_cert, krb5_data *user_realm, krb5_creds **out_creds) { krb5_error_code code; krb5_principal tgs = NULL, eprinc = NULL; krb5_principal_data sprinc; krb5_creds tgtq, s4u_creds, *tgt = NULL, *tgtptr; krb5_creds *referral_tgts[KRB5_REFERRAL_MAXHOPS]; krb5_pa_s4u_x509_user s4u_user; int referral_count = 0, i; krb5_flags kdcopt; memset(&tgtq, 0, sizeof(tgtq)); memset(referral_tgts, 0, sizeof(referral_tgts)); *out_creds = NULL; memset(&s4u_user, 0, sizeof(s4u_user)); if (in_creds->client != NULL && in_creds->client->length > 0) { if (in_creds->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) { code = krb5_build_principal_ext(context, &s4u_user.user_id.user, user_realm->length, user_realm->data, in_creds->client->data[0].length, in_creds->client->data[0].data, 0); if (code != 0) goto cleanup; s4u_user.user_id.user->type = KRB5_NT_ENTERPRISE_PRINCIPAL; } else { code = krb5_copy_principal(context, in_creds->client, &s4u_user.user_id.user); if (code != 0) goto cleanup; } } else { code = krb5_build_principal_ext(context, &s4u_user.user_id.user, user_realm->length, user_realm->data); if (code != 0) goto cleanup; s4u_user.user_id.user->type = KRB5_NT_ENTERPRISE_PRINCIPAL; } if (subject_cert != NULL) s4u_user.user_id.subject_cert = *subject_cert; s4u_user.user_id.options = KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE; /* First, acquire a TGT to the user's realm. */ code = krb5int_tgtname(context, user_realm, &in_creds->server->realm, &tgs); if (code != 0) goto cleanup; tgtq.client = in_creds->server; tgtq.server = tgs; code = krb5_get_credentials(context, options, ccache, &tgtq, &tgt); if (code != 0) goto cleanup; tgtptr = tgt; /* Convert the server principal to an enterprise principal, for use with * foreign realms. */ code = convert_to_enterprise(context, in_creds->server, &eprinc); if (code != 0) goto cleanup; /* Make a shallow copy of in_creds with client pointing to the server * principal. 
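     * (A shallow copy suffices: the loop below only reassigns the client and
     * server pointers per referral hop and never frees memory through this
     * copy.)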
We will set s4u_creds.server for each request. */ s4u_creds = *in_creds; s4u_creds.client = in_creds->server; /* Then, walk back the referral path to S4U2Self for user */ kdcopt = 0; if (options & KRB5_GC_CANONICALIZE) kdcopt |= KDC_OPT_CANONICALIZE; if (options & KRB5_GC_FORWARDABLE) kdcopt |= KDC_OPT_FORWARDABLE; if (options & KRB5_GC_NO_TRANSIT_CHECK) kdcopt |= KDC_OPT_DISABLE_TRANSITED_CHECK; for (referral_count = 0; referral_count < KRB5_REFERRAL_MAXHOPS; referral_count++) { krb5_pa_data **in_padata = NULL; krb5_pa_data **out_padata = NULL; krb5_pa_data **enc_padata = NULL; krb5_keyblock *subkey = NULL; if (s4u_user.user_id.user != NULL && s4u_user.user_id.user->length) { in_padata = calloc(2, sizeof(krb5_pa_data *)); if (in_padata == NULL) { code = ENOMEM; goto cleanup; } code = build_pa_for_user(context, tgtptr, &s4u_user.user_id, &in_padata[0]); if (code != 0) { krb5_free_pa_data(context, in_padata); goto cleanup; } } if (data_eq(tgtptr->server->data[1], in_creds->server->realm)) { /* When asking the server realm, use the real principal. */ s4u_creds.server = in_creds->server; } else { /* When asking a foreign realm, use the enterprise principal, with * the realm set to the TGS realm. */ sprinc = *eprinc; sprinc.realm = tgtptr->server->data[1]; s4u_creds.server = &sprinc; } code = krb5_get_cred_via_tkt_ext(context, tgtptr, KDC_OPT_CANONICALIZE | FLAGS2OPTS(tgtptr->ticket_flags) | kdcopt, tgtptr->addresses, in_padata, &s4u_creds, build_pa_s4u_x509_user, &s4u_user, &out_padata, &enc_padata, out_creds, &subkey); if (code != 0) { krb5_free_checksum_contents(context, &s4u_user.cksum); krb5_free_pa_data(context, in_padata); goto cleanup; } code = verify_s4u2self_reply(context, subkey, &s4u_user, out_padata, enc_padata); krb5_free_checksum_contents(context, &s4u_user.cksum); krb5_free_pa_data(context, in_padata); krb5_free_pa_data(context, out_padata); krb5_free_pa_data(context, enc_padata); krb5_free_keyblock(context, subkey); if (code != 0) goto cleanup; if (krb5_principal_compare(context, in_creds->server, (*out_creds)->server)) { code = 0; goto cleanup; } else if (IS_TGS_PRINC((*out_creds)->server)) { krb5_data *r1 = &tgtptr->server->data[1]; krb5_data *r2 = &(*out_creds)->server->data[1]; if (data_eq(*r1, *r2)) { krb5_free_creds(context, *out_creds); *out_creds = NULL; code = KRB5_ERR_HOST_REALM_UNKNOWN; break; } for (i = 0; i < referral_count; i++) { if (krb5_principal_compare(context, (*out_creds)->server, referral_tgts[i]->server)) { code = KRB5_KDC_UNREACH; goto cleanup; } } tgtptr = *out_creds; referral_tgts[referral_count] = *out_creds; *out_creds = NULL; } else { krb5_free_creds(context, *out_creds); *out_creds = NULL; code = KRB5KRB_AP_WRONG_PRINC; /* XXX */ break; } } cleanup: for (i = 0; i < KRB5_REFERRAL_MAXHOPS; i++) { if (referral_tgts[i] != NULL) krb5_free_creds(context, referral_tgts[i]); } krb5_free_principal(context, tgs); krb5_free_principal(context, eprinc); krb5_free_creds(context, tgt); krb5_free_principal(context, s4u_user.user_id.user); krb5_free_checksum_contents(context, &s4u_user.cksum); return code; } krb5_error_code KRB5_CALLCONV krb5_get_credentials_for_user(krb5_context context, krb5_flags options, krb5_ccache ccache, krb5_creds *in_creds, krb5_data *subject_cert, krb5_creds **out_creds) { krb5_error_code code; krb5_principal realm = NULL; *out_creds = NULL; if (options & KRB5_GC_CONSTRAINED_DELEGATION) { code = EINVAL; goto cleanup; } if (in_creds->client != NULL) { /* Uncanonicalised check */ code = krb5_get_credentials(context, options | 
KRB5_GC_CACHED, ccache, in_creds, out_creds); if (code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE) goto cleanup; if ((options & KRB5_GC_CACHED) && !(options & KRB5_GC_CANONICALIZE)) goto cleanup; } code = s4u_identify_user(context, in_creds, subject_cert, &realm); if (code != 0) goto cleanup; if (in_creds->client != NULL && in_creds->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) { /* Post-canonicalisation check for enterprise principals */ krb5_creds mcreds = *in_creds; mcreds.client = realm; code = krb5_get_credentials(context, options | KRB5_GC_CACHED, ccache, &mcreds, out_creds); if ((code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE) || (options & KRB5_GC_CACHED)) goto cleanup; } code = krb5_get_self_cred_from_kdc(context, options, ccache, in_creds, subject_cert, &realm->realm, out_creds); if (code != 0) goto cleanup; assert(*out_creds != NULL); if ((options & KRB5_GC_NO_STORE) == 0) { code = krb5_cc_store_cred(context, ccache, *out_creds); if (code != 0) goto cleanup; } cleanup: if (code != 0 && *out_creds != NULL) { krb5_free_creds(context, *out_creds); *out_creds = NULL; } krb5_free_principal(context, realm); return code; } /* * Exported API for constrained delegation (S4U2Proxy). * * This is preferable to using krb5_get_credentials directly because * it can perform some additional checks. */ krb5_error_code KRB5_CALLCONV krb5_get_credentials_for_proxy(krb5_context context, krb5_flags options, krb5_ccache ccache, krb5_creds *in_creds, krb5_ticket *evidence_tkt, krb5_creds **out_creds) { krb5_error_code code; krb5_creds mcreds; krb5_creds *ncreds = NULL; krb5_flags fields; krb5_data *evidence_tkt_data = NULL; krb5_creds s4u_creds; *out_creds = NULL; if (in_creds == NULL || in_creds->client == NULL || evidence_tkt == NULL || evidence_tkt->enc_part2 == NULL) { code = EINVAL; goto cleanup; } /* * Caller should have set in_creds->client to match evidence * ticket client */ if (!krb5_principal_compare(context, evidence_tkt->enc_part2->client, in_creds->client)) { code = EINVAL; goto cleanup; } if ((evidence_tkt->enc_part2->flags & TKT_FLG_FORWARDABLE) == 0) { code = KRB5_TKT_NOT_FORWARDABLE; goto cleanup; } code = krb5int_construct_matching_creds(context, options, in_creds, &mcreds, &fields); if (code != 0) goto cleanup; ncreds = calloc(1, sizeof(*ncreds)); if (ncreds == NULL) { code = ENOMEM; goto cleanup; } ncreds->magic = KV5M_CRED; code = krb5_cc_retrieve_cred(context, ccache, fields, &mcreds, ncreds); if (code != 0) { free(ncreds); ncreds = in_creds; } else { *out_creds = ncreds; } if ((code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE) || options & KRB5_GC_CACHED) goto cleanup; code = encode_krb5_ticket(evidence_tkt, &evidence_tkt_data); if (code != 0) goto cleanup; s4u_creds = *in_creds; s4u_creds.client = evidence_tkt->server; s4u_creds.second_ticket = *evidence_tkt_data; code = krb5_get_credentials(context, options | KRB5_GC_CONSTRAINED_DELEGATION, ccache, &s4u_creds, out_creds); if (code != 0) goto cleanup; /* * Check client name because we couldn't compare that inside * krb5_get_credentials() (enc_part2 is unavailable in clear) */ if (!krb5_principal_compare(context, evidence_tkt->enc_part2->client, (*out_creds)->client)) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } cleanup: if (*out_creds != NULL && code != 0) { krb5_free_creds(context, *out_creds); *out_creds = NULL; } if (evidence_tkt_data != NULL) krb5_free_data(context, evidence_tkt_data); return code; }
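/*
 * Editorial sketch (hedged; not from the original source): a minimal
 * S4U2Self caller, assuming an initialized context, a ccache that holds the
 * impersonating service's own TGT, and already-parsed principals
 * user_princ / service_princ:
 *
 *     krb5_creds in_creds, *out_creds = NULL;
 *     memset(&in_creds, 0, sizeof(in_creds));
 *     in_creds.client = user_princ;      (principal to impersonate)
 *     in_creds.server = service_princ;   (the service itself)
 *     code = krb5_get_credentials_for_user(context, 0, ccache,
 *                                          &in_creds, NULL, &out_creds);
 *     if (code == 0)
 *         krb5_free_creds(context, out_creds);
 */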
./CrossVul/dataset_final_sorted/CWE-617/c/bad_487_1
crossvul-cpp_data_good_219_2
/* * MPEG-4 decoder * Copyright (c) 2000,2001 Fabrice Bellard * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #define UNCHECKED_BITSTREAM_READER 1 #include "libavutil/internal.h" #include "libavutil/opt.h" #include "error_resilience.h" #include "hwaccel.h" #include "idctdsp.h" #include "internal.h" #include "mpegutils.h" #include "mpegvideo.h" #include "mpegvideodata.h" #include "mpeg4video.h" #include "h263.h" #include "profiles.h" #include "thread.h" #include "xvididct.h" /* The defines below define the number of bits that are read at once for * reading vlc values. Changing these may improve speed and data cache needs * be aware though that decreasing them may need the number of stages that is * passed to get_vlc* to be increased. */ #define SPRITE_TRAJ_VLC_BITS 6 #define DC_VLC_BITS 9 #define MB_TYPE_B_VLC_BITS 4 #define STUDIO_INTRA_BITS 9 static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb); static VLC dc_lum, dc_chrom; static VLC sprite_trajectory; static VLC mb_type_b_vlc; static const int mb_type_b_map[4] = { MB_TYPE_DIRECT2 | MB_TYPE_L0L1, MB_TYPE_L0L1 | MB_TYPE_16x16, MB_TYPE_L1 | MB_TYPE_16x16, MB_TYPE_L0 | MB_TYPE_16x16, }; /** * Predict the ac. * @param n block index (0-3 are luma, 4-5 are chroma) * @param dir the ac prediction direction */ void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir) { int i; int16_t *ac_val, *ac_val1; int8_t *const qscale_table = s->current_picture.qscale_table; /* find prediction */ ac_val = &s->ac_val[0][0][0] + s->block_index[n] * 16; ac_val1 = ac_val; if (s->ac_pred) { if (dir == 0) { const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride; /* left prediction */ ac_val -= 16; if (s->mb_x == 0 || s->qscale == qscale_table[xy] || n == 1 || n == 3) { /* same qscale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i << 3]] += ac_val[i]; } else { /* different qscale, we must rescale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i << 3]] += ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale); } } else { const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride; /* top prediction */ ac_val -= 16 * s->block_wrap[n]; if (s->mb_y == 0 || s->qscale == qscale_table[xy] || n == 2 || n == 3) { /* same qscale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i]] += ac_val[i + 8]; } else { /* different qscale, we must rescale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale); } } } /* left copy */ for (i = 1; i < 8; i++) ac_val1[i] = block[s->idsp.idct_permutation[i << 3]]; /* top copy */ for (i = 1; i < 8; i++) ac_val1[8 + i] = block[s->idsp.idct_permutation[i]]; } /** * check if the next stuff is a resync marker or the end. 
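 * (Hedged editorial note: a video-packet resync marker is a byte-aligned run
 * of zero bits terminated by a single one bit; the minimum run length depends
 * on the picture type and f-codes, which is what
 * ff_mpeg4_get_video_packet_prefix_length() computes.)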
* @return 0 if not */ static inline int mpeg4_is_resync(Mpeg4DecContext *ctx) { MpegEncContext *s = &ctx->m; int bits_count = get_bits_count(&s->gb); int v = show_bits(&s->gb, 16); if (s->workaround_bugs & FF_BUG_NO_PADDING && !ctx->resync_marker) return 0; while (v <= 0xFF) { if (s->pict_type == AV_PICTURE_TYPE_B || (v >> (8 - s->pict_type) != 1) || s->partitioned_frame) break; skip_bits(&s->gb, 8 + s->pict_type); bits_count += 8 + s->pict_type; v = show_bits(&s->gb, 16); } if (bits_count + 8 >= s->gb.size_in_bits) { v >>= 8; v |= 0x7F >> (7 - (bits_count & 7)); if (v == 0x7F) return s->mb_num; } else { if (v == ff_mpeg4_resync_prefix[bits_count & 7]) { int len, mb_num; int mb_num_bits = av_log2(s->mb_num - 1) + 1; GetBitContext gb = s->gb; skip_bits(&s->gb, 1); align_get_bits(&s->gb); for (len = 0; len < 32; len++) if (get_bits1(&s->gb)) break; mb_num = get_bits(&s->gb, mb_num_bits); if (!mb_num || mb_num > s->mb_num || get_bits_count(&s->gb)+6 > s->gb.size_in_bits) mb_num= -1; s->gb = gb; if (len >= ff_mpeg4_get_video_packet_prefix_length(s)) return mb_num; } } return 0; } static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int a = 2 << s->sprite_warping_accuracy; int rho = 3 - s->sprite_warping_accuracy; int r = 16 / a; int alpha = 1; int beta = 0; int w = s->width; int h = s->height; int min_ab, i, w2, h2, w3, h3; int sprite_ref[4][2]; int virtual_ref[2][2]; int64_t sprite_offset[2][2]; int64_t sprite_delta[2][2]; // only true for rectangle shapes const int vop_ref[4][2] = { { 0, 0 }, { s->width, 0 }, { 0, s->height }, { s->width, s->height } }; int d[4][2] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }; if (w <= 0 || h <= 0) return AVERROR_INVALIDDATA; /* the decoder was not properly initialized and we cannot continue */ if (sprite_trajectory.table == NULL) return AVERROR_INVALIDDATA; for (i = 0; i < ctx->num_sprite_warping_points; i++) { int length; int x = 0, y = 0; length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3); if (length > 0) x = get_xbits(gb, length); if (!(ctx->divx_version == 500 && ctx->divx_build == 413)) check_marker(s->avctx, gb, "before sprite_trajectory"); length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3); if (length > 0) y = get_xbits(gb, length); check_marker(s->avctx, gb, "after sprite_trajectory"); ctx->sprite_traj[i][0] = d[i][0] = x; ctx->sprite_traj[i][1] = d[i][1] = y; } for (; i < 4; i++) ctx->sprite_traj[i][0] = ctx->sprite_traj[i][1] = 0; while ((1 << alpha) < w) alpha++; while ((1 << beta) < h) beta++; /* typo in the MPEG-4 std for the definition of w' and h' */ w2 = 1 << alpha; h2 = 1 << beta; // Note, the 4th point isn't used for GMC if (ctx->divx_version == 500 && ctx->divx_build == 413) { sprite_ref[0][0] = a * vop_ref[0][0] + d[0][0]; sprite_ref[0][1] = a * vop_ref[0][1] + d[0][1]; sprite_ref[1][0] = a * vop_ref[1][0] + d[0][0] + d[1][0]; sprite_ref[1][1] = a * vop_ref[1][1] + d[0][1] + d[1][1]; sprite_ref[2][0] = a * vop_ref[2][0] + d[0][0] + d[2][0]; sprite_ref[2][1] = a * vop_ref[2][1] + d[0][1] + d[2][1]; } else { sprite_ref[0][0] = (a >> 1) * (2 * vop_ref[0][0] + d[0][0]); sprite_ref[0][1] = (a >> 1) * (2 * vop_ref[0][1] + d[0][1]); sprite_ref[1][0] = (a >> 1) * (2 * vop_ref[1][0] + d[0][0] + d[1][0]); sprite_ref[1][1] = (a >> 1) * (2 * vop_ref[1][1] + d[0][1] + d[1][1]); sprite_ref[2][0] = (a >> 1) * (2 * vop_ref[2][0] + d[0][0] + d[2][0]); sprite_ref[2][1] = (a >> 1) * (2 * vop_ref[2][1] + d[0][1] + d[2][1]); } /* sprite_ref[3][0] = (a >> 1) 
* (2 * vop_ref[3][0] + d[0][0] + d[1][0] + d[2][0] + d[3][0]); * sprite_ref[3][1] = (a >> 1) * (2 * vop_ref[3][1] + d[0][1] + d[1][1] + d[2][1] + d[3][1]); */ /* This is mostly identical to the MPEG-4 std (and is totally unreadable * because of that...). Perhaps it should be reordered to be more readable. * The idea behind this virtual_ref mess is to be able to use shifts later * per pixel instead of divides so the distance between points is converted * from w&h based to w2&h2 based which are of the 2^x form. */ virtual_ref[0][0] = 16 * (vop_ref[0][0] + w2) + ROUNDED_DIV(((w - w2) * (r * sprite_ref[0][0] - 16LL * vop_ref[0][0]) + w2 * (r * sprite_ref[1][0] - 16LL * vop_ref[1][0])), w); virtual_ref[0][1] = 16 * vop_ref[0][1] + ROUNDED_DIV(((w - w2) * (r * sprite_ref[0][1] - 16LL * vop_ref[0][1]) + w2 * (r * sprite_ref[1][1] - 16LL * vop_ref[1][1])), w); virtual_ref[1][0] = 16 * vop_ref[0][0] + ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][0] - 16LL * vop_ref[0][0]) + h2 * (r * sprite_ref[2][0] - 16LL * vop_ref[2][0])), h); virtual_ref[1][1] = 16 * (vop_ref[0][1] + h2) + ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][1] - 16LL * vop_ref[0][1]) + h2 * (r * sprite_ref[2][1] - 16LL * vop_ref[2][1])), h); switch (ctx->num_sprite_warping_points) { case 0: sprite_offset[0][0] = sprite_offset[0][1] = sprite_offset[1][0] = sprite_offset[1][1] = 0; sprite_delta[0][0] = a; sprite_delta[0][1] = sprite_delta[1][0] = 0; sprite_delta[1][1] = a; ctx->sprite_shift[0] = ctx->sprite_shift[1] = 0; break; case 1: // GMC only sprite_offset[0][0] = sprite_ref[0][0] - a * vop_ref[0][0]; sprite_offset[0][1] = sprite_ref[0][1] - a * vop_ref[0][1]; sprite_offset[1][0] = ((sprite_ref[0][0] >> 1) | (sprite_ref[0][0] & 1)) - a * (vop_ref[0][0] / 2); sprite_offset[1][1] = ((sprite_ref[0][1] >> 1) | (sprite_ref[0][1] & 1)) - a * (vop_ref[0][1] / 2); sprite_delta[0][0] = a; sprite_delta[0][1] = sprite_delta[1][0] = 0; sprite_delta[1][1] = a; ctx->sprite_shift[0] = ctx->sprite_shift[1] = 0; break; case 2: sprite_offset[0][0] = ((int64_t) sprite_ref[0][0] * (1 << alpha + rho)) + ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) * ((int64_t) -vop_ref[0][0]) + ((int64_t) r * sprite_ref[0][1] - virtual_ref[0][1]) * ((int64_t) -vop_ref[0][1]) + (1 << (alpha + rho - 1)); sprite_offset[0][1] = ((int64_t) sprite_ref[0][1] * (1 << alpha + rho)) + ((int64_t) -r * sprite_ref[0][1] + virtual_ref[0][1]) * ((int64_t) -vop_ref[0][0]) + ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) * ((int64_t) -vop_ref[0][1]) + (1 << (alpha + rho - 1)); sprite_offset[1][0] = (((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * ((int64_t)-2 * vop_ref[0][0] + 1) + ((int64_t) r * sprite_ref[0][1] - virtual_ref[0][1]) * ((int64_t)-2 * vop_ref[0][1] + 1) + 2 * w2 * r * (int64_t) sprite_ref[0][0] - 16 * w2 + (1 << (alpha + rho + 1))); sprite_offset[1][1] = (((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * ((int64_t)-2 * vop_ref[0][0] + 1) + ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * ((int64_t)-2 * vop_ref[0][1] + 1) + 2 * w2 * r * (int64_t) sprite_ref[0][1] - 16 * w2 + (1 << (alpha + rho + 1))); sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]); sprite_delta[0][1] = (+r * sprite_ref[0][1] - virtual_ref[0][1]); sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]); sprite_delta[1][1] = (-r * sprite_ref[0][0] + virtual_ref[0][0]); ctx->sprite_shift[0] = alpha + rho; ctx->sprite_shift[1] = alpha + rho + 2; break; case 3: min_ab = FFMIN(alpha, beta); w3 = w2 >> min_ab; h3 = h2 >> min_ab; sprite_offset[0][0] = 
((int64_t)sprite_ref[0][0] * (1 << (alpha + beta + rho - min_ab))) + ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-vop_ref[0][0]) + ((int64_t)-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-vop_ref[0][1]) + ((int64_t)1 << (alpha + beta + rho - min_ab - 1)); sprite_offset[0][1] = ((int64_t)sprite_ref[0][1] * (1 << (alpha + beta + rho - min_ab))) + ((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-vop_ref[0][0]) + ((int64_t)-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-vop_ref[0][1]) + ((int64_t)1 << (alpha + beta + rho - min_ab - 1)); sprite_offset[1][0] = ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-2 * vop_ref[0][0] + 1) + ((int64_t)-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-2 * vop_ref[0][1] + 1) + (int64_t)2 * w2 * h3 * r * sprite_ref[0][0] - 16 * w2 * h3 + ((int64_t)1 << (alpha + beta + rho - min_ab + 1)); sprite_offset[1][1] = ((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-2 * vop_ref[0][0] + 1) + ((int64_t)-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-2 * vop_ref[0][1] + 1) + (int64_t)2 * w2 * h3 * r * sprite_ref[0][1] - 16 * w2 * h3 + ((int64_t)1 << (alpha + beta + rho - min_ab + 1)); sprite_delta[0][0] = (-r * (int64_t)sprite_ref[0][0] + virtual_ref[0][0]) * h3; sprite_delta[0][1] = (-r * (int64_t)sprite_ref[0][0] + virtual_ref[1][0]) * w3; sprite_delta[1][0] = (-r * (int64_t)sprite_ref[0][1] + virtual_ref[0][1]) * h3; sprite_delta[1][1] = (-r * (int64_t)sprite_ref[0][1] + virtual_ref[1][1]) * w3; ctx->sprite_shift[0] = alpha + beta + rho - min_ab; ctx->sprite_shift[1] = alpha + beta + rho - min_ab + 2; break; } /* try to simplify the situation */ if (sprite_delta[0][0] == a << ctx->sprite_shift[0] && sprite_delta[0][1] == 0 && sprite_delta[1][0] == 0 && sprite_delta[1][1] == a << ctx->sprite_shift[0]) { sprite_offset[0][0] >>= ctx->sprite_shift[0]; sprite_offset[0][1] >>= ctx->sprite_shift[0]; sprite_offset[1][0] >>= ctx->sprite_shift[1]; sprite_offset[1][1] >>= ctx->sprite_shift[1]; sprite_delta[0][0] = a; sprite_delta[0][1] = 0; sprite_delta[1][0] = 0; sprite_delta[1][1] = a; ctx->sprite_shift[0] = 0; ctx->sprite_shift[1] = 0; s->real_sprite_warping_points = 1; } else { int shift_y = 16 - ctx->sprite_shift[0]; int shift_c = 16 - ctx->sprite_shift[1]; for (i = 0; i < 2; i++) { if (shift_c < 0 || shift_y < 0 || FFABS( sprite_offset[0][i]) >= INT_MAX >> shift_y || FFABS( sprite_offset[1][i]) >= INT_MAX >> shift_c || FFABS( sprite_delta[0][i]) >= INT_MAX >> shift_y || FFABS( sprite_delta[1][i]) >= INT_MAX >> shift_y ) { avpriv_request_sample(s->avctx, "Too large sprite shift, delta or offset"); goto overflow; } } for (i = 0; i < 2; i++) { sprite_offset[0][i] *= 1 << shift_y; sprite_offset[1][i] *= 1 << shift_c; sprite_delta[0][i] *= 1 << shift_y; sprite_delta[1][i] *= 1 << shift_y; ctx->sprite_shift[i] = 16; } for (i = 0; i < 2; i++) { int64_t sd[2] = { sprite_delta[i][0] - a * (1LL<<16), sprite_delta[i][1] - a * (1LL<<16) }; if (llabs(sprite_offset[0][i] + sprite_delta[i][0] * (w+16LL)) >= INT_MAX || llabs(sprite_offset[0][i] + sprite_delta[i][1] * (h+16LL)) >= INT_MAX || llabs(sprite_offset[0][i] + sprite_delta[i][0] * (w+16LL) + sprite_delta[i][1] * (h+16LL)) >= INT_MAX || llabs(sprite_delta[i][0] * (w+16LL)) >= INT_MAX || llabs(sprite_delta[i][1] * (w+16LL)) >= INT_MAX || llabs(sd[0]) >= INT_MAX || llabs(sd[1]) >= INT_MAX || llabs(sprite_offset[0][i] + sd[0] * (w+16LL)) >= INT_MAX || llabs(sprite_offset[0][i] + sd[1] * (h+16LL)) >= INT_MAX || llabs(sprite_offset[0][i] + sd[0] * (w+16LL) + sd[1] 
                  * (h + 16LL)) >= INT_MAX) {
                avpriv_request_sample(s->avctx, "Overflow on sprite points");
                goto overflow;
            }
        }
        s->real_sprite_warping_points = ctx->num_sprite_warping_points;
    }

    for (i = 0; i < 4; i++) {
        s->sprite_offset[i & 1][i >> 1] = sprite_offset[i & 1][i >> 1];
        s->sprite_delta [i & 1][i >> 1] = sprite_delta [i & 1][i >> 1];
    }

    return 0;
overflow:
    memset(s->sprite_offset, 0, sizeof(s->sprite_offset));
    memset(s->sprite_delta,  0, sizeof(s->sprite_delta));
    return AVERROR_PATCHWELCOME;
}

static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->m;
    int len = FFMIN(ctx->time_increment_bits + 3, 15);

    get_bits(gb, len);
    if (get_bits1(gb))
        get_bits(gb, len);
    check_marker(s->avctx, gb, "after new_pred");

    return 0;
}

/**
 * Decode the next video packet.
 * @return <0 if something went wrong
 */
int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx)
{
    MpegEncContext *s = &ctx->m;

    int mb_num_bits      = av_log2(s->mb_num - 1) + 1;
    int header_extension = 0, mb_num, len;

    /* is there enough space left for a video packet + header */
    if (get_bits_count(&s->gb) > s->gb.size_in_bits - 20)
        return AVERROR_INVALIDDATA;

    for (len = 0; len < 32; len++)
        if (get_bits1(&s->gb))
            break;

    if (len != ff_mpeg4_get_video_packet_prefix_length(s)) {
        av_log(s->avctx, AV_LOG_ERROR, "marker does not match f_code\n");
        return AVERROR_INVALIDDATA;
    }

    if (ctx->shape != RECT_SHAPE) {
        header_extension = get_bits1(&s->gb);
        // FIXME more stuff here
    }

    mb_num = get_bits(&s->gb, mb_num_bits);
    if (mb_num >= s->mb_num || !mb_num) {
        av_log(s->avctx, AV_LOG_ERROR,
               "illegal mb_num in video packet (%d %d)\n", mb_num, s->mb_num);
        return AVERROR_INVALIDDATA;
    }

    s->mb_x = mb_num % s->mb_width;
    s->mb_y = mb_num / s->mb_width;

    if (ctx->shape != BIN_ONLY_SHAPE) {
        int qscale = get_bits(&s->gb, s->quant_precision);
        if (qscale)
            s->chroma_qscale = s->qscale = qscale;
    }

    if (ctx->shape == RECT_SHAPE)
        header_extension = get_bits1(&s->gb);

    if (header_extension) {
        int time_incr = 0;

        while (get_bits1(&s->gb) != 0)
            time_incr++;

        check_marker(s->avctx, &s->gb, "before time_increment in video packet header");
        skip_bits(&s->gb, ctx->time_increment_bits);      /* time_increment */
        check_marker(s->avctx, &s->gb, "before vop_coding_type in video packet header");

        skip_bits(&s->gb, 2); /* vop coding type */
        // FIXME not rect stuff here

        if (ctx->shape != BIN_ONLY_SHAPE) {
            skip_bits(&s->gb, 3); /* intra dc vlc threshold */
            // FIXME don't just ignore everything
            if (s->pict_type == AV_PICTURE_TYPE_S &&
                ctx->vol_sprite_usage == GMC_SPRITE) {
                if (mpeg4_decode_sprite_trajectory(ctx, &s->gb) < 0)
                    return AVERROR_INVALIDDATA;
                av_log(s->avctx, AV_LOG_ERROR, "untested\n");
            }

            // FIXME reduced res stuff here

            if (s->pict_type != AV_PICTURE_TYPE_I) {
                int f_code = get_bits(&s->gb, 3);       /* fcode_for */
                if (f_code == 0)
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Error, video packet header damaged (f_code=0)\n");
            }
            if (s->pict_type == AV_PICTURE_TYPE_B) {
                int b_code = get_bits(&s->gb, 3);
                if (b_code == 0)
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Error, video packet header damaged (b_code=0)\n");
            }
        }
    }
    if (ctx->new_pred)
        decode_new_pred(ctx, &s->gb);

    return 0;
}

static void reset_studio_dc_predictors(MpegEncContext *s)
{
    /* Reset DC Predictors */
    s->last_dc[0] =
    s->last_dc[1] =
    s->last_dc[2] = 1 << (s->avctx->bits_per_raw_sample +
                          s->dct_precision + s->intra_dc_precision - 1);
}

/**
 * Decode the next studio slice header.
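 * A studio slice begins with SLICE_START_CODE, then carries the first
 * macroblock number and the quantiser, and resets the DC predictors.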
* @return <0 if something went wrong */ int ff_mpeg4_decode_studio_slice_header(Mpeg4DecContext *ctx) { MpegEncContext *s = &ctx->m; GetBitContext *gb = &s->gb; unsigned vlc_len; uint16_t mb_num; if (get_bits_left(gb) >= 32 && get_bits_long(gb, 32) == SLICE_START_CODE) { vlc_len = av_log2(s->mb_width * s->mb_height) + 1; mb_num = get_bits(gb, vlc_len); if (mb_num >= s->mb_num) return AVERROR_INVALIDDATA; s->mb_x = mb_num % s->mb_width; s->mb_y = mb_num / s->mb_width; if (ctx->shape != BIN_ONLY_SHAPE) s->qscale = mpeg_get_qscale(s); if (get_bits1(gb)) { /* slice_extension_flag */ skip_bits1(gb); /* intra_slice */ skip_bits1(gb); /* slice_VOP_id_enable */ skip_bits(gb, 6); /* slice_VOP_id */ while (get_bits1(gb)) /* extra_bit_slice */ skip_bits(gb, 8); /* extra_information_slice */ } reset_studio_dc_predictors(s); } else { return AVERROR_INVALIDDATA; } return 0; } /** * Get the average motion vector for a GMC MB. * @param n either 0 for the x component or 1 for y * @return the average MV for a GMC MB */ static inline int get_amv(Mpeg4DecContext *ctx, int n) { MpegEncContext *s = &ctx->m; int x, y, mb_v, sum, dx, dy, shift; int len = 1 << (s->f_code + 4); const int a = s->sprite_warping_accuracy; if (s->workaround_bugs & FF_BUG_AMV) len >>= s->quarter_sample; if (s->real_sprite_warping_points == 1) { if (ctx->divx_version == 500 && ctx->divx_build == 413) sum = s->sprite_offset[0][n] / (1 << (a - s->quarter_sample)); else sum = RSHIFT(s->sprite_offset[0][n] * (1 << s->quarter_sample), a); } else { dx = s->sprite_delta[n][0]; dy = s->sprite_delta[n][1]; shift = ctx->sprite_shift[0]; if (n) dy -= 1 << (shift + a + 1); else dx -= 1 << (shift + a + 1); mb_v = s->sprite_offset[0][n] + dx * s->mb_x * 16 + dy * s->mb_y * 16; sum = 0; for (y = 0; y < 16; y++) { int v; v = mb_v + dy * y; // FIXME optimize for (x = 0; x < 16; x++) { sum += v >> shift; v += dx; } } sum = RSHIFT(sum, a + 8 - s->quarter_sample); } if (sum < -len) sum = -len; else if (sum >= len) sum = len - 1; return sum; } /** * Decode the dc value. * @param n block index (0-3 are luma, 4-5 are chroma) * @param dir_ptr the prediction direction will be stored here * @return the quantized dc */ static inline int mpeg4_decode_dc(MpegEncContext *s, int n, int *dir_ptr) { int level, code; if (n < 4) code = get_vlc2(&s->gb, dc_lum.table, DC_VLC_BITS, 1); else code = get_vlc2(&s->gb, dc_chrom.table, DC_VLC_BITS, 1); if (code < 0 || code > 9 /* && s->nbit < 9 */) { av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n"); return AVERROR_INVALIDDATA; } if (code == 0) { level = 0; } else { if (IS_3IV1) { if (code == 1) level = 2 * get_bits1(&s->gb) - 1; else { if (get_bits1(&s->gb)) level = get_bits(&s->gb, code - 1) + (1 << (code - 1)); else level = -get_bits(&s->gb, code - 1) - (1 << (code - 1)); } } else { level = get_xbits(&s->gb, code); } if (code > 8) { if (get_bits1(&s->gb) == 0) { /* marker */ if (s->avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)) { av_log(s->avctx, AV_LOG_ERROR, "dc marker bit missing\n"); return AVERROR_INVALIDDATA; } } } } return ff_mpeg4_pred_dc(s, n, level, dir_ptr, 0); } /** * Decode first partition. 
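 * For I-VOPs, partition A carries mcbpc and the DC coefficients; for
 * P/S-VOPs it carries the MB types and motion vectors, terminated by
 * the DC or motion marker.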
* @return number of MBs decoded or <0 if an error occurred */ static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx) { MpegEncContext *s = &ctx->m; int mb_num = 0; static const int8_t quant_tab[4] = { -1, -2, 1, 2 }; /* decode first partition */ s->first_slice_line = 1; for (; s->mb_y < s->mb_height; s->mb_y++) { ff_init_block_index(s); for (; s->mb_x < s->mb_width; s->mb_x++) { const int xy = s->mb_x + s->mb_y * s->mb_stride; int cbpc; int dir = 0; mb_num++; ff_update_block_index(s); if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1) s->first_slice_line = 0; if (s->pict_type == AV_PICTURE_TYPE_I) { int i; do { if (show_bits_long(&s->gb, 19) == DC_MARKER) return mb_num - 1; cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); if (cbpc < 0) { av_log(s->avctx, AV_LOG_ERROR, "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } } while (cbpc == 8); s->cbp_table[xy] = cbpc & 3; s->current_picture.mb_type[xy] = MB_TYPE_INTRA; s->mb_intra = 1; if (cbpc & 4) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); s->current_picture.qscale_table[xy] = s->qscale; s->mbintra_table[xy] = 1; for (i = 0; i < 6; i++) { int dc_pred_dir; int dc = mpeg4_decode_dc(s, i, &dc_pred_dir); if (dc < 0) { av_log(s->avctx, AV_LOG_ERROR, "DC corrupted at %d %d\n", s->mb_x, s->mb_y); return dc; } dir <<= 1; if (dc_pred_dir) dir |= 1; } s->pred_dir_table[xy] = dir; } else { /* P/S_TYPE */ int mx, my, pred_x, pred_y, bits; int16_t *const mot_val = s->current_picture.motion_val[0][s->block_index[0]]; const int stride = s->b8_stride * 2; try_again: bits = show_bits(&s->gb, 17); if (bits == MOTION_MARKER) return mb_num - 1; skip_bits1(&s->gb); if (bits & 0x10000) { /* skip mb */ if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE) { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0; mx = get_amv(ctx, 0); my = get_amv(ctx, 1); } else { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; mx = my = 0; } mot_val[0] = mot_val[2] = mot_val[0 + stride] = mot_val[2 + stride] = mx; mot_val[1] = mot_val[3] = mot_val[1 + stride] = mot_val[3 + stride] = my; if (s->mbintra_table[xy]) ff_clean_intra_table_entries(s); continue; } cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); if (cbpc < 0) { av_log(s->avctx, AV_LOG_ERROR, "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } if (cbpc == 20) goto try_again; s->cbp_table[xy] = cbpc & (8 + 3); // 8 is dquant s->mb_intra = ((cbpc & 4) != 0); if (s->mb_intra) { s->current_picture.mb_type[xy] = MB_TYPE_INTRA; s->mbintra_table[xy] = 1; mot_val[0] = mot_val[2] = mot_val[0 + stride] = mot_val[2 + stride] = 0; mot_val[1] = mot_val[3] = mot_val[1 + stride] = mot_val[3 + stride] = 0; } else { if (s->mbintra_table[xy]) ff_clean_intra_table_entries(s); if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE && (cbpc & 16) == 0) s->mcsel = get_bits1(&s->gb); else s->mcsel = 0; if ((cbpc & 16) == 0) { /* 16x16 motion prediction */ ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); if (!s->mcsel) { mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return AVERROR_INVALIDDATA; my = ff_h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return AVERROR_INVALIDDATA; s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; } else { mx = get_amv(ctx, 0); my = get_amv(ctx, 1); s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_GMC | 
MB_TYPE_L0; } mot_val[0] = mot_val[2] = mot_val[0 + stride] = mot_val[2 + stride] = mx; mot_val[1] = mot_val[3] = mot_val[1 + stride] = mot_val[3 + stride] = my; } else { int i; s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; for (i = 0; i < 4; i++) { int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return AVERROR_INVALIDDATA; my = ff_h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return AVERROR_INVALIDDATA; mot_val[0] = mx; mot_val[1] = my; } } } } } s->mb_x = 0; } return mb_num; } /** * decode second partition. * @return <0 if an error occurred */ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count) { int mb_num = 0; static const int8_t quant_tab[4] = { -1, -2, 1, 2 }; s->mb_x = s->resync_mb_x; s->first_slice_line = 1; for (s->mb_y = s->resync_mb_y; mb_num < mb_count; s->mb_y++) { ff_init_block_index(s); for (; mb_num < mb_count && s->mb_x < s->mb_width; s->mb_x++) { const int xy = s->mb_x + s->mb_y * s->mb_stride; mb_num++; ff_update_block_index(s); if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1) s->first_slice_line = 0; if (s->pict_type == AV_PICTURE_TYPE_I) { int ac_pred = get_bits1(&s->gb); int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } s->cbp_table[xy] |= cbpy << 2; s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED; } else { /* P || S_TYPE */ if (IS_INTRA(s->current_picture.mb_type[xy])) { int i; int dir = 0; int ac_pred = get_bits1(&s->gb); int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "I cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } if (s->cbp_table[xy] & 8) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); s->current_picture.qscale_table[xy] = s->qscale; for (i = 0; i < 6; i++) { int dc_pred_dir; int dc = mpeg4_decode_dc(s, i, &dc_pred_dir); if (dc < 0) { av_log(s->avctx, AV_LOG_ERROR, "DC corrupted at %d %d\n", s->mb_x, s->mb_y); return dc; } dir <<= 1; if (dc_pred_dir) dir |= 1; } s->cbp_table[xy] &= 3; // remove dquant s->cbp_table[xy] |= cbpy << 2; s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED; s->pred_dir_table[xy] = dir; } else if (IS_SKIP(s->current_picture.mb_type[xy])) { s->current_picture.qscale_table[xy] = s->qscale; s->cbp_table[xy] = 0; } else { int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "P cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } if (s->cbp_table[xy] & 8) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); s->current_picture.qscale_table[xy] = s->qscale; s->cbp_table[xy] &= 3; // remove dquant s->cbp_table[xy] |= (cbpy ^ 0xf) << 2; } } } if (mb_num >= mb_count) return 0; s->mb_x = 0; } return 0; } /** * Decode the first and second partition. * @return <0 if error (and sets error type in the error_status_table) */ int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx) { MpegEncContext *s = &ctx->m; int mb_num; int ret; const int part_a_error = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_ERROR | ER_MV_ERROR) : ER_MV_ERROR; const int part_a_end = s->pict_type == AV_PICTURE_TYPE_I ? 
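                               /* I-VOP partition A ends both the DC and MV error classes */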
                                 (ER_DC_END | ER_MV_END) : ER_MV_END;

    mb_num = mpeg4_decode_partition_a(ctx);
    if (mb_num <= 0) {
        ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                        s->mb_x, s->mb_y, part_a_error);
        return mb_num ? mb_num : AVERROR_INVALIDDATA;
    }

    if (s->resync_mb_x + s->resync_mb_y * s->mb_width + mb_num > s->mb_num) {
        av_log(s->avctx, AV_LOG_ERROR, "slice extends beyond the end of the frame\n");
        ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                        s->mb_x, s->mb_y, part_a_error);
        return AVERROR_INVALIDDATA;
    }

    s->mb_num_left = mb_num;

    if (s->pict_type == AV_PICTURE_TYPE_I) {
        while (show_bits(&s->gb, 9) == 1)
            skip_bits(&s->gb, 9);
        if (get_bits_long(&s->gb, 19) != DC_MARKER) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "marker missing after first I partition at %d %d\n",
                   s->mb_x, s->mb_y);
            return AVERROR_INVALIDDATA;
        }
    } else {
        while (show_bits(&s->gb, 10) == 1)
            skip_bits(&s->gb, 10);
        if (get_bits(&s->gb, 17) != MOTION_MARKER) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "marker missing after first P partition at %d %d\n",
                   s->mb_x, s->mb_y);
            return AVERROR_INVALIDDATA;
        }
    }
    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                    s->mb_x - 1, s->mb_y, part_a_end);

    ret = mpeg4_decode_partition_b(s, mb_num);
    if (ret < 0) {
        if (s->pict_type == AV_PICTURE_TYPE_P)
            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                            s->mb_x, s->mb_y, ER_DC_ERROR);
        return ret;
    } else {
        if (s->pict_type == AV_PICTURE_TYPE_P)
            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
                            s->mb_x - 1, s->mb_y, ER_DC_END);
    }

    return 0;
}

/**
 * Decode a block.
 * @return <0 if an error occurred
 */
static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
                                     int n, int coded, int intra, int rvlc)
{
    MpegEncContext *s = &ctx->m;
    int level, i, last, run, qmul, qadd;
    int av_uninit(dc_pred_dir);
    RLTable *rl;
    RL_VLC_ELEM *rl_vlc;
    const uint8_t *scan_table;

    // Note intra & rvlc should be optimized away if this is inlined
    if (intra) {
        if (ctx->use_intra_dc_vlc) {
            /* DC coef */
            if (s->partitioned_frame) {
                level = s->dc_val[0][s->block_index[n]];
                if (n < 4)
                    level = FASTDIV((level + (s->y_dc_scale >> 1)), s->y_dc_scale);
                else
                    level = FASTDIV((level + (s->c_dc_scale >> 1)), s->c_dc_scale);
                dc_pred_dir = (s->pred_dir_table[s->mb_x + s->mb_y * s->mb_stride] << n) & 32;
            } else {
                level = mpeg4_decode_dc(s, n, &dc_pred_dir);
                if (level < 0)
                    return level;
            }
            block[0] = level;
            i        = 0;
        } else {
            i = -1;
            ff_mpeg4_pred_dc(s, n, 0, &dc_pred_dir, 0);
        }
        if (!coded)
            goto not_coded;

        if (rvlc) {
            rl     = &ff_rvlc_rl_intra;
            rl_vlc = ff_rvlc_rl_intra.rl_vlc[0];
        } else {
            rl     = &ff_mpeg4_rl_intra;
            rl_vlc = ff_mpeg4_rl_intra.rl_vlc[0];
        }
        if (s->ac_pred) {
            if (dc_pred_dir == 0)
                scan_table = s->intra_v_scantable.permutated;  /* left */
            else
                scan_table = s->intra_h_scantable.permutated;  /* top */
        } else {
            scan_table = s->intra_scantable.permutated;
        }
        qmul = 1;
        qadd = 0;
    } else {
        i = -1;
        if (!coded) {
            s->block_last_index[n] = i;
            return 0;
        }
        if (rvlc)
            rl = &ff_rvlc_rl_inter;
        else
            rl = &ff_h263_rl_inter;

        scan_table = s->intra_scantable.permutated;

        if (s->mpeg_quant) {
            qmul = 1;
            qadd = 0;
            if (rvlc)
                rl_vlc = ff_rvlc_rl_inter.rl_vlc[0];
            else
                rl_vlc = ff_h263_rl_inter.rl_vlc[0];
        } else {
            qmul = s->qscale << 1;
            qadd = (s->qscale - 1) | 1;
            if (rvlc)
                rl_vlc = ff_rvlc_rl_inter.rl_vlc[s->qscale];
            else
                rl_vlc = ff_h263_rl_inter.rl_vlc[s->qscale];
        }
    }
    {
        OPEN_READER(re, &s->gb);
        for (;;) {
            UPDATE_CACHE(re, &s->gb);
            GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 0);
            if (level == 0) {
                /* escape */
                if (rvlc) {
                    if (SHOW_UBITS(re, &s->gb, 1) == 0) {
                        av_log(s->avctx, AV_LOG_ERROR, "1.
marker bit missing in rvlc esc\n"); return AVERROR_INVALIDDATA; } SKIP_CACHE(re, &s->gb, 1); last = SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1); run = SHOW_UBITS(re, &s->gb, 6); SKIP_COUNTER(re, &s->gb, 1 + 1 + 6); UPDATE_CACHE(re, &s->gb); if (SHOW_UBITS(re, &s->gb, 1) == 0) { av_log(s->avctx, AV_LOG_ERROR, "2. marker bit missing in rvlc esc\n"); return AVERROR_INVALIDDATA; } SKIP_CACHE(re, &s->gb, 1); level = SHOW_UBITS(re, &s->gb, 11); SKIP_CACHE(re, &s->gb, 11); if (SHOW_UBITS(re, &s->gb, 5) != 0x10) { av_log(s->avctx, AV_LOG_ERROR, "reverse esc missing\n"); return AVERROR_INVALIDDATA; } SKIP_CACHE(re, &s->gb, 5); level = level * qmul + qadd; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); SKIP_COUNTER(re, &s->gb, 1 + 11 + 5 + 1); i += run + 1; if (last) i += 192; } else { int cache; cache = GET_CACHE(re, &s->gb); if (IS_3IV1) cache ^= 0xC0000000; if (cache & 0x80000000) { if (cache & 0x40000000) { /* third escape */ SKIP_CACHE(re, &s->gb, 2); last = SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1); run = SHOW_UBITS(re, &s->gb, 6); SKIP_COUNTER(re, &s->gb, 2 + 1 + 6); UPDATE_CACHE(re, &s->gb); if (IS_3IV1) { level = SHOW_SBITS(re, &s->gb, 12); LAST_SKIP_BITS(re, &s->gb, 12); } else { if (SHOW_UBITS(re, &s->gb, 1) == 0) { av_log(s->avctx, AV_LOG_ERROR, "1. marker bit missing in 3. esc\n"); if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR)) return AVERROR_INVALIDDATA; } SKIP_CACHE(re, &s->gb, 1); level = SHOW_SBITS(re, &s->gb, 12); SKIP_CACHE(re, &s->gb, 12); if (SHOW_UBITS(re, &s->gb, 1) == 0) { av_log(s->avctx, AV_LOG_ERROR, "2. marker bit missing in 3. esc\n"); if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR)) return AVERROR_INVALIDDATA; } SKIP_COUNTER(re, &s->gb, 1 + 12 + 1); } #if 0 if (s->error_recognition >= FF_ER_COMPLIANT) { const int abs_level= FFABS(level); if (abs_level<=MAX_LEVEL && run<=MAX_RUN) { const int run1= run - rl->max_run[last][abs_level] - 1; if (abs_level <= rl->max_level[last][run]) { av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, vlc encoding possible\n"); return AVERROR_INVALIDDATA; } if (s->error_recognition > FF_ER_COMPLIANT) { if (abs_level <= rl->max_level[last][run]*2) { av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 1 encoding possible\n"); return AVERROR_INVALIDDATA; } if (run1 >= 0 && abs_level <= rl->max_level[last][run1]) { av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 2 encoding possible\n"); return AVERROR_INVALIDDATA; } } } } #endif if (level > 0) level = level * qmul + qadd; else level = level * qmul - qadd; if ((unsigned)(level + 2048) > 4095) { if (s->avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_AGGRESSIVE)) { if (level > 2560 || level < -2560) { av_log(s->avctx, AV_LOG_ERROR, "|level| overflow in 3. esc, qp=%d\n", s->qscale); return AVERROR_INVALIDDATA; } } level = level < 0 ? 
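                                /* saturate the third-escape level to the legal 12-bit signed range */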
-2048 : 2047; } i += run + 1; if (last) i += 192; } else { /* second escape */ SKIP_BITS(re, &s->gb, 2); GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1); i += run + rl->max_run[run >> 7][level / qmul] + 1; // FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } } else { /* first escape */ SKIP_BITS(re, &s->gb, 1); GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1); i += run; level = level + rl->max_level[run >> 7][(run - 1) & 63] * qmul; // FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } } } else { i += run; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } ff_tlog(s->avctx, "dct[%d][%d] = %- 4d end?:%d\n", scan_table[i&63]&7, scan_table[i&63] >> 3, level, i>62); if (i > 62) { i -= 192; if (i & (~63)) { av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } block[scan_table[i]] = level; break; } block[scan_table[i]] = level; } CLOSE_READER(re, &s->gb); } not_coded: if (intra) { if (!ctx->use_intra_dc_vlc) { block[0] = ff_mpeg4_pred_dc(s, n, block[0], &dc_pred_dir, 0); i -= i >> 31; // if (i == -1) i = 0; } ff_mpeg4_pred_ac(s, block, n, dc_pred_dir); if (s->ac_pred) i = 63; // FIXME not optimal } s->block_last_index[n] = i; return 0; } /** * decode partition C of one MB. * @return <0 if an error occurred */ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64]) { Mpeg4DecContext *ctx = s->avctx->priv_data; int cbp, mb_type; const int xy = s->mb_x + s->mb_y * s->mb_stride; av_assert2(s == (void*)ctx); mb_type = s->current_picture.mb_type[xy]; cbp = s->cbp_table[xy]; ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold; if (s->current_picture.qscale_table[xy] != s->qscale) ff_set_qscale(s, s->current_picture.qscale_table[xy]); if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_S) { int i; for (i = 0; i < 4; i++) { s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; } s->mb_intra = IS_INTRA(mb_type); if (IS_SKIP(mb_type)) { /* skip mb */ for (i = 0; i < 6; i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE) { s->mcsel = 1; s->mb_skipped = 0; } else { s->mcsel = 0; s->mb_skipped = 1; } } else if (s->mb_intra) { s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]); } else if (!s->mb_intra) { // s->mcsel = 0; // FIXME do we need to init that? s->mv_dir = MV_DIR_FORWARD; if (IS_8X8(mb_type)) { s->mv_type = MV_TYPE_8X8; } else { s->mv_type = MV_TYPE_16X16; } } } else { /* I-Frame */ s->mb_intra = 1; s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]); } if (!IS_SKIP(mb_type)) { int i; s->bdsp.clear_blocks(s->block[0]); /* decode each block */ for (i = 0; i < 6; i++) { if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, s->mb_intra, ctx->rvlc) < 0) { av_log(s->avctx, AV_LOG_ERROR, "texture corrupted at %d %d %d\n", s->mb_x, s->mb_y, s->mb_intra); return AVERROR_INVALIDDATA; } cbp += cbp; } } /* per-MB end of slice check */ if (--s->mb_num_left <= 0) { if (mpeg4_is_resync(ctx)) return SLICE_END; else return SLICE_NOEND; } else { if (mpeg4_is_resync(ctx)) { const int delta = s->mb_x + 1 == s->mb_width ? 
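                              /* mb_stride is mb_width + 1, so stepping past a row end takes 2 */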
2 : 1; if (s->cbp_table[xy + delta]) return SLICE_END; } return SLICE_OK; } } static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64]) { Mpeg4DecContext *ctx = s->avctx->priv_data; int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant; int16_t *mot_val; static const int8_t quant_tab[4] = { -1, -2, 1, 2 }; const int xy = s->mb_x + s->mb_y * s->mb_stride; av_assert2(s == (void*)ctx); av_assert2(s->h263_pred); if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_S) { do { if (get_bits1(&s->gb)) { /* skip mb */ s->mb_intra = 0; for (i = 0; i < 6; i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE) { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; s->mcsel = 1; s->mv[0][0][0] = get_amv(ctx, 0); s->mv[0][0][1] = get_amv(ctx, 1); s->mb_skipped = 0; } else { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mcsel = 0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skipped = 1; } goto end; } cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); if (cbpc < 0) { av_log(s->avctx, AV_LOG_ERROR, "mcbpc damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } } while (cbpc == 20); s->bdsp.clear_blocks(s->block[0]); dquant = cbpc & 8; s->mb_intra = ((cbpc & 4) != 0); if (s->mb_intra) goto intra; if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE && (cbpc & 16) == 0) s->mcsel = get_bits1(&s->gb); else s->mcsel = 0; cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F; if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "P cbpy damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } cbp = (cbpc & 3) | (cbpy << 2); if (dquant) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); if ((!s->progressive_sequence) && (cbp || (s->workaround_bugs & FF_BUG_XVID_ILACE))) s->interlaced_dct = get_bits1(&s->gb); s->mv_dir = MV_DIR_FORWARD; if ((cbpc & 16) == 0) { if (s->mcsel) { s->current_picture.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 global motion prediction */ s->mv_type = MV_TYPE_16X16; mx = get_amv(ctx, 0); my = get_amv(ctx, 1); s->mv[0][0][0] = mx; s->mv[0][0][1] = my; } else if ((!s->progressive_sequence) && get_bits1(&s->gb)) { s->current_picture.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED; /* 16x8 field motion prediction */ s->mv_type = MV_TYPE_FIELD; s->field_select[0][0] = get_bits1(&s->gb); s->field_select[0][1] = get_bits1(&s->gb); ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); for (i = 0; i < 2; i++) { mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return AVERROR_INVALIDDATA; my = ff_h263_decode_motion(s, pred_y / 2, s->f_code); if (my >= 0xffff) return AVERROR_INVALIDDATA; s->mv[0][i][0] = mx; s->mv[0][i][1] = my; } } else { s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 motion prediction */ s->mv_type = MV_TYPE_16X16; ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return AVERROR_INVALIDDATA; my = ff_h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return AVERROR_INVALIDDATA; s->mv[0][0][0] = mx; s->mv[0][0][1] = my; } } else { s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; s->mv_type = MV_TYPE_8X8; for (i = 0; i < 4; i++) { mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); mx = ff_h263_decode_motion(s, 
                                            pred_x, s->f_code);
                if (mx >= 0xffff)
                    return AVERROR_INVALIDDATA;

                my = ff_h263_decode_motion(s, pred_y, s->f_code);
                if (my >= 0xffff)
                    return AVERROR_INVALIDDATA;
                s->mv[0][i][0] = mx;
                s->mv[0][i][1] = my;
                mot_val[0]     = mx;
                mot_val[1]     = my;
            }
        }
    } else if (s->pict_type == AV_PICTURE_TYPE_B) {
        int modb1;   // first bit of modb
        int modb2;   // second bit of modb
        int mb_type;

        s->mb_intra = 0;  // B-frames never contain intra blocks
        s->mcsel    = 0;  // ... nor true GMC blocks

        if (s->mb_x == 0) {
            for (i = 0; i < 2; i++) {
                s->last_mv[i][0][0] =
                s->last_mv[i][0][1] =
                s->last_mv[i][1][0] =
                s->last_mv[i][1][1] = 0;
            }

            ff_thread_await_progress(&s->next_picture_ptr->tf, s->mb_y, 0);
        }

        /* if we skipped it in the future P-frame then skip it now too */
        s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x];  // Note, skiptab=0 if last was GMC

        if (s->mb_skipped) {
            /* skip mb */
            for (i = 0; i < 6; i++)
                s->block_last_index[i] = -1;

            s->mv_dir      = MV_DIR_FORWARD;
            s->mv_type     = MV_TYPE_16X16;
            s->mv[0][0][0] =
            s->mv[0][0][1] =
            s->mv[1][0][0] =
            s->mv[1][0][1] = 0;
            s->current_picture.mb_type[xy] = MB_TYPE_SKIP  |
                                             MB_TYPE_16x16 |
                                             MB_TYPE_L0;
            goto end;
        }

        modb1 = get_bits1(&s->gb);
        if (modb1) {
            // like MB_TYPE_B_DIRECT but no vectors coded
            mb_type = MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_L0L1;
            cbp     = 0;
        } else {
            modb2   = get_bits1(&s->gb);
            mb_type = get_vlc2(&s->gb, mb_type_b_vlc.table, MB_TYPE_B_VLC_BITS, 1);
            if (mb_type < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "illegal MB_type\n");
                return AVERROR_INVALIDDATA;
            }
            mb_type = mb_type_b_map[mb_type];
            if (modb2) {
                cbp = 0;
            } else {
                s->bdsp.clear_blocks(s->block[0]);
                cbp = get_bits(&s->gb, 6);
            }

            if ((!IS_DIRECT(mb_type)) && cbp) {
                if (get_bits1(&s->gb))
                    ff_set_qscale(s, s->qscale + get_bits1(&s->gb) * 4 - 2);
            }

            if (!s->progressive_sequence) {
                if (cbp)
                    s->interlaced_dct = get_bits1(&s->gb);

                if (!IS_DIRECT(mb_type) && get_bits1(&s->gb)) {
                    mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
                    mb_type &= ~MB_TYPE_16x16;

                    if (USES_LIST(mb_type, 0)) {
                        s->field_select[0][0] = get_bits1(&s->gb);
                        s->field_select[0][1] = get_bits1(&s->gb);
                    }
                    if (USES_LIST(mb_type, 1)) {
                        s->field_select[1][0] = get_bits1(&s->gb);
                        s->field_select[1][1] = get_bits1(&s->gb);
                    }
                }
            }

            s->mv_dir = 0;
            if ((mb_type & (MB_TYPE_DIRECT2 | MB_TYPE_INTERLACED)) == 0) {
                s->mv_type = MV_TYPE_16X16;

                if (USES_LIST(mb_type, 0)) {
                    s->mv_dir = MV_DIR_FORWARD;

                    mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
                    my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
                    s->last_mv[0][1][0] =
                    s->last_mv[0][0][0] =
                    s->mv[0][0][0]      = mx;
                    s->last_mv[0][1][1] =
                    s->last_mv[0][0][1] =
                    s->mv[0][0][1]      = my;
                }

                if (USES_LIST(mb_type, 1)) {
                    s->mv_dir |= MV_DIR_BACKWARD;

                    mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
                    my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
                    s->last_mv[1][1][0] =
                    s->last_mv[1][0][0] =
                    s->mv[1][0][0]      = mx;
                    s->last_mv[1][1][1] =
                    s->last_mv[1][0][1] =
                    s->mv[1][0][1]      = my;
                }
            } else if (!IS_DIRECT(mb_type)) {
                s->mv_type = MV_TYPE_FIELD;

                if (USES_LIST(mb_type, 0)) {
                    s->mv_dir = MV_DIR_FORWARD;

                    for (i = 0; i < 2; i++) {
                        mx = ff_h263_decode_motion(s, s->last_mv[0][i][0], s->f_code);
                        my = ff_h263_decode_motion(s, s->last_mv[0][i][1] / 2, s->f_code);
                        s->last_mv[0][i][0] = s->mv[0][i][0] = mx;
                        s->last_mv[0][i][1] = (s->mv[0][i][1] = my) * 2;
                    }
                }

                if (USES_LIST(mb_type, 1)) {
                    s->mv_dir |= MV_DIR_BACKWARD;

                    for (i = 0; i < 2; i++) {
                        mx = ff_h263_decode_motion(s, s->last_mv[1][i][0], s->b_code);
                        my = ff_h263_decode_motion(s, s->last_mv[1][i][1] / 2, s->b_code);
                        s->last_mv[1][i][0] =
s->mv[1][i][0] = mx; s->last_mv[1][i][1] = (s->mv[1][i][1] = my) * 2; } } } } if (IS_DIRECT(mb_type)) { if (IS_SKIP(mb_type)) { mx = my = 0; } else { mx = ff_h263_decode_motion(s, 0, 1); my = ff_h263_decode_motion(s, 0, 1); } s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; mb_type |= ff_mpeg4_set_direct_mv(s, mx, my); } s->current_picture.mb_type[xy] = mb_type; } else { /* I-Frame */ do { cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); if (cbpc < 0) { av_log(s->avctx, AV_LOG_ERROR, "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } } while (cbpc == 8); dquant = cbpc & 4; s->mb_intra = 1; intra: s->ac_pred = get_bits1(&s->gb); if (s->ac_pred) s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED; else s->current_picture.mb_type[xy] = MB_TYPE_INTRA; cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } cbp = (cbpc & 3) | (cbpy << 2); ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold; if (dquant) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); if (!s->progressive_sequence) s->interlaced_dct = get_bits1(&s->gb); s->bdsp.clear_blocks(s->block[0]); /* decode each block */ for (i = 0; i < 6; i++) { if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 1, 0) < 0) return AVERROR_INVALIDDATA; cbp += cbp; } goto end; } /* decode each block */ for (i = 0; i < 6; i++) { if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 0, 0) < 0) return AVERROR_INVALIDDATA; cbp += cbp; } end: /* per-MB end of slice check */ if (s->codec_id == AV_CODEC_ID_MPEG4) { int next = mpeg4_is_resync(ctx); if (next) { if (s->mb_x + s->mb_y*s->mb_width + 1 > next && (s->avctx->err_recognition & AV_EF_AGGRESSIVE)) { return AVERROR_INVALIDDATA; } else if (s->mb_x + s->mb_y*s->mb_width + 1 >= next) return SLICE_END; if (s->pict_type == AV_PICTURE_TYPE_B) { const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1; ff_thread_await_progress(&s->next_picture_ptr->tf, (s->mb_x + delta >= s->mb_width) ? 
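                                        /* the next coded MB may lie at the start of the following row */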
FFMIN(s->mb_y + 1, s->mb_height - 1) : s->mb_y, 0); if (s->next_picture.mbskip_table[xy + delta]) return SLICE_OK; } return SLICE_END; } } return SLICE_OK; } /* As per spec, studio start code search isn't the same as the old type of start code */ static void next_start_code_studio(GetBitContext *gb) { align_get_bits(gb); while (get_bits_left(gb) >= 24 && show_bits_long(gb, 24) != 0x1) { get_bits(gb, 8); } } /* additional_code, vlc index */ static const uint8_t ac_state_tab[22][2] = { {0, 0}, {0, 1}, {1, 1}, {2, 1}, {3, 1}, {4, 1}, {5, 1}, {1, 2}, {2, 2}, {3, 2}, {4, 2}, {5, 2}, {6, 2}, {1, 3}, {2, 4}, {3, 5}, {4, 6}, {5, 7}, {6, 8}, {7, 9}, {8, 10}, {0, 11} }; static int mpeg4_decode_studio_block(MpegEncContext *s, int32_t block[64], int n) { Mpeg4DecContext *ctx = s->avctx->priv_data; int cc, dct_dc_size, dct_diff, code, j, idx = 1, group = 0, run = 0, additional_code_len, sign, mismatch; VLC *cur_vlc = &ctx->studio_intra_tab[0]; uint8_t *const scantable = s->intra_scantable.permutated; const uint16_t *quant_matrix; uint32_t flc; const int min = -1 * (1 << (s->avctx->bits_per_raw_sample + 6)); const int max = ((1 << (s->avctx->bits_per_raw_sample + 6)) - 1); mismatch = 1; memset(block, 0, 64 * sizeof(int32_t)); if (n < 4) { cc = 0; dct_dc_size = get_vlc2(&s->gb, ctx->studio_luma_dc.table, STUDIO_INTRA_BITS, 2); quant_matrix = s->intra_matrix; } else { cc = (n & 1) + 1; if (ctx->rgb) dct_dc_size = get_vlc2(&s->gb, ctx->studio_luma_dc.table, STUDIO_INTRA_BITS, 2); else dct_dc_size = get_vlc2(&s->gb, ctx->studio_chroma_dc.table, STUDIO_INTRA_BITS, 2); quant_matrix = s->chroma_intra_matrix; } if (dct_dc_size < 0) { av_log(s->avctx, AV_LOG_ERROR, "illegal dct_dc_size vlc\n"); return AVERROR_INVALIDDATA; } else if (dct_dc_size == 0) { dct_diff = 0; } else { dct_diff = get_xbits(&s->gb, dct_dc_size); if (dct_dc_size > 8) { if(!check_marker(s->avctx, &s->gb, "dct_dc_size > 8")) return AVERROR_INVALIDDATA; } } s->last_dc[cc] += dct_diff; if (s->mpeg_quant) block[0] = s->last_dc[cc] * (8 >> s->intra_dc_precision); else block[0] = s->last_dc[cc] * (8 >> s->intra_dc_precision) * (8 >> s->dct_precision); /* TODO: support mpeg_quant for AC coefficients */ block[0] = av_clip(block[0], min, max); mismatch ^= block[0]; /* AC Coefficients */ while (1) { group = get_vlc2(&s->gb, cur_vlc->table, STUDIO_INTRA_BITS, 2); if (group < 0) { av_log(s->avctx, AV_LOG_ERROR, "illegal ac coefficient group vlc\n"); return AVERROR_INVALIDDATA; } additional_code_len = ac_state_tab[group][0]; cur_vlc = &ctx->studio_intra_tab[ac_state_tab[group][1]]; if (group == 0) { /* End of Block */ break; } else if (group >= 1 && group <= 6) { /* Zero run length (Table B.47) */ run = 1 << additional_code_len; if (additional_code_len) run += get_bits(&s->gb, additional_code_len); idx += run; continue; } else if (group >= 7 && group <= 12) { /* Zero run length and +/-1 level (Table B.48) */ code = get_bits(&s->gb, additional_code_len); sign = code & 1; code >>= 1; run = (1 << (additional_code_len - 1)) + code; idx += run; j = scantable[idx++]; block[j] = sign ? 
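                       /* Table B.48: the LSB of the additional code is the sign of the +/-1 level */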
1 : -1; } else if (group >= 13 && group <= 20) { /* Level value (Table B.49) */ j = scantable[idx++]; block[j] = get_xbits(&s->gb, additional_code_len); } else if (group == 21) { /* Escape */ j = scantable[idx++]; additional_code_len = s->avctx->bits_per_raw_sample + s->dct_precision + 4; flc = get_bits(&s->gb, additional_code_len); if (flc >> (additional_code_len-1)) block[j] = -1 * (( flc ^ ((1 << additional_code_len) -1)) + 1); else block[j] = flc; } block[j] = ((8 * 2 * block[j] * quant_matrix[j] * s->qscale) >> s->dct_precision) / 32; block[j] = av_clip(block[j], min, max); mismatch ^= block[j]; } block[63] ^= mismatch & 1; return 0; } static int mpeg4_decode_studio_mb(MpegEncContext *s, int16_t block_[12][64]) { int i; /* StudioMacroblock */ /* Assumes I-VOP */ s->mb_intra = 1; if (get_bits1(&s->gb)) { /* compression_mode */ /* DCT */ /* macroblock_type, 1 or 2-bit VLC */ if (!get_bits1(&s->gb)) { skip_bits1(&s->gb); s->qscale = mpeg_get_qscale(s); } for (i = 0; i < mpeg4_block_count[s->chroma_format]; i++) { if (mpeg4_decode_studio_block(s, (*s->block32)[i], i) < 0) return AVERROR_INVALIDDATA; } } else { /* DPCM */ check_marker(s->avctx, &s->gb, "DPCM block start"); avpriv_request_sample(s->avctx, "DPCM encoded block"); next_start_code_studio(&s->gb); return SLICE_ERROR; } if (get_bits_left(&s->gb) >= 24 && show_bits(&s->gb, 23) == 0) { next_start_code_studio(&s->gb); return SLICE_END; } return SLICE_OK; } static int mpeg4_decode_gop_header(MpegEncContext *s, GetBitContext *gb) { int hours, minutes, seconds; if (!show_bits(gb, 23)) { av_log(s->avctx, AV_LOG_WARNING, "GOP header invalid\n"); return AVERROR_INVALIDDATA; } hours = get_bits(gb, 5); minutes = get_bits(gb, 6); check_marker(s->avctx, gb, "in gop_header"); seconds = get_bits(gb, 6); s->time_base = seconds + 60*(minutes + 60*hours); skip_bits1(gb); skip_bits1(gb); return 0; } static int mpeg4_decode_profile_level(MpegEncContext *s, GetBitContext *gb, int *profile, int *level) { *profile = get_bits(gb, 4); *level = get_bits(gb, 4); // for Simple profile, level 0 if (*profile == 0 && *level == 8) { *level = 0; } return 0; } static int mpeg4_decode_visual_object(MpegEncContext *s, GetBitContext *gb) { int visual_object_type; int is_visual_object_identifier = get_bits1(gb); if (is_visual_object_identifier) { skip_bits(gb, 4+3); } visual_object_type = get_bits(gb, 4); if (visual_object_type == VOT_VIDEO_ID || visual_object_type == VOT_STILL_TEXTURE_ID) { int video_signal_type = get_bits1(gb); if (video_signal_type) { int video_range, color_description; skip_bits(gb, 3); // video_format video_range = get_bits1(gb); color_description = get_bits1(gb); s->avctx->color_range = video_range ? 
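                                       /* video_range set means full-range (JPEG-style) samples */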
                                       AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
            if (color_description) {
                s->avctx->color_primaries = get_bits(gb, 8);
                s->avctx->color_trc       = get_bits(gb, 8);
                s->avctx->colorspace      = get_bits(gb, 8);
            }
        }
    }
    return 0;
}

static void mpeg4_load_default_matrices(MpegEncContext *s)
{
    int i, v;

    /* load default matrices */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];

        v = ff_mpeg4_default_intra_matrix[i];
        s->intra_matrix[j]        = v;
        s->chroma_intra_matrix[j] = v;

        v = ff_mpeg4_default_non_intra_matrix[i];
        s->inter_matrix[j]        = v;
        s->chroma_inter_matrix[j] = v;
    }
}

static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->m;
    int width, height, vo_ver_id;

    /* vol header */
    skip_bits(gb, 1);                   /* random access */
    s->vo_type = get_bits(gb, 8);

    /* If we are in studio profile (per vo_type), check that everything is
     * consistent and, if so, pass control to decode_studio_vol_header().
     * If something is inconsistent, error out;
     * otherwise continue with (non-studio) VOL header decoding. */
    if (s->vo_type == CORE_STUDIO_VO_TYPE ||
        s->vo_type == SIMPLE_STUDIO_VO_TYPE) {
        if (s->avctx->profile != FF_PROFILE_UNKNOWN &&
            s->avctx->profile != FF_PROFILE_MPEG4_SIMPLE_STUDIO)
            return AVERROR_INVALIDDATA;
        s->studio_profile = 1;
        s->avctx->profile = FF_PROFILE_MPEG4_SIMPLE_STUDIO;
        return decode_studio_vol_header(ctx, gb);
    } else if (s->studio_profile) {
        return AVERROR_PATCHWELCOME;
    }

    if (get_bits1(gb) != 0) {           /* is_ol_id */
        vo_ver_id = get_bits(gb, 4);    /* vo_ver_id */
        skip_bits(gb, 3);               /* vo_priority */
    } else {
        vo_ver_id = 1;
    }
    s->aspect_ratio_info = get_bits(gb, 4);
    if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
        s->avctx->sample_aspect_ratio.num = get_bits(gb, 8);  // par_width
        s->avctx->sample_aspect_ratio.den = get_bits(gb, 8);  // par_height
    } else {
        s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
    }

    if ((ctx->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */
        int chroma_format = get_bits(gb, 2);
        if (chroma_format != CHROMA_420)
            av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n");

        s->low_delay = get_bits1(gb);
        if (get_bits1(gb)) {    /* vbv parameters */
            get_bits(gb, 15);   /* first_half_bitrate */
            check_marker(s->avctx, gb, "after first_half_bitrate");
            get_bits(gb, 15);   /* latter_half_bitrate */
            check_marker(s->avctx, gb, "after latter_half_bitrate");
            get_bits(gb, 15);   /* first_half_vbv_buffer_size */
            check_marker(s->avctx, gb, "after first_half_vbv_buffer_size");
            get_bits(gb, 3);    /* latter_half_vbv_buffer_size */
            get_bits(gb, 11);   /* first_half_vbv_occupancy */
            check_marker(s->avctx, gb, "after first_half_vbv_occupancy");
            get_bits(gb, 15);   /* latter_half_vbv_occupancy */
            check_marker(s->avctx, gb, "after latter_half_vbv_occupancy");
        }
    } else {
        /* Is setting the low_delay flag only once the smartest thing to do?
         * The low_delay detection below will not be overridden.
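         * Guessing from vo_type on the first picture only keeps later
         * headers from toggling low_delay mid-stream.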
*/ if (s->picture_number == 0) { switch(s->vo_type) { case SIMPLE_VO_TYPE: case ADV_SIMPLE_VO_TYPE: s->low_delay = 1; break; default: s->low_delay = 0; } } } ctx->shape = get_bits(gb, 2); /* vol shape */ if (ctx->shape != RECT_SHAPE) av_log(s->avctx, AV_LOG_ERROR, "only rectangular vol supported\n"); if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) { av_log(s->avctx, AV_LOG_ERROR, "Gray shape not supported\n"); skip_bits(gb, 4); /* video_object_layer_shape_extension */ } check_marker(s->avctx, gb, "before time_increment_resolution"); s->avctx->framerate.num = get_bits(gb, 16); if (!s->avctx->framerate.num) { av_log(s->avctx, AV_LOG_ERROR, "framerate==0\n"); return AVERROR_INVALIDDATA; } ctx->time_increment_bits = av_log2(s->avctx->framerate.num - 1) + 1; if (ctx->time_increment_bits < 1) ctx->time_increment_bits = 1; check_marker(s->avctx, gb, "before fixed_vop_rate"); if (get_bits1(gb) != 0) /* fixed_vop_rate */ s->avctx->framerate.den = get_bits(gb, ctx->time_increment_bits); else s->avctx->framerate.den = 1; s->avctx->time_base = av_inv_q(av_mul_q(s->avctx->framerate, (AVRational){s->avctx->ticks_per_frame, 1})); ctx->t_frame = 0; if (ctx->shape != BIN_ONLY_SHAPE) { if (ctx->shape == RECT_SHAPE) { check_marker(s->avctx, gb, "before width"); width = get_bits(gb, 13); check_marker(s->avctx, gb, "before height"); height = get_bits(gb, 13); check_marker(s->avctx, gb, "after height"); if (width && height && /* they should be non zero but who knows */ !(s->width && s->codec_tag == AV_RL32("MP4S"))) { if (s->width && s->height && (s->width != width || s->height != height)) s->context_reinit = 1; s->width = width; s->height = height; } } s->progressive_sequence = s->progressive_frame = get_bits1(gb) ^ 1; s->interlaced_dct = 0; if (!get_bits1(gb) && (s->avctx->debug & FF_DEBUG_PICT_INFO)) av_log(s->avctx, AV_LOG_INFO, /* OBMC Disable */ "MPEG-4 OBMC not supported (very likely buggy encoder)\n"); if (vo_ver_id == 1) ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */ else ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */ if (ctx->vol_sprite_usage == STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "Static Sprites not supported\n"); if (ctx->vol_sprite_usage == STATIC_SPRITE || ctx->vol_sprite_usage == GMC_SPRITE) { if (ctx->vol_sprite_usage == STATIC_SPRITE) { skip_bits(gb, 13); // sprite_width check_marker(s->avctx, gb, "after sprite_width"); skip_bits(gb, 13); // sprite_height check_marker(s->avctx, gb, "after sprite_height"); skip_bits(gb, 13); // sprite_left check_marker(s->avctx, gb, "after sprite_left"); skip_bits(gb, 13); // sprite_top check_marker(s->avctx, gb, "after sprite_top"); } ctx->num_sprite_warping_points = get_bits(gb, 6); if (ctx->num_sprite_warping_points > 3) { av_log(s->avctx, AV_LOG_ERROR, "%d sprite_warping_points\n", ctx->num_sprite_warping_points); ctx->num_sprite_warping_points = 0; return AVERROR_INVALIDDATA; } s->sprite_warping_accuracy = get_bits(gb, 2); ctx->sprite_brightness_change = get_bits1(gb); if (ctx->vol_sprite_usage == STATIC_SPRITE) skip_bits1(gb); // low_latency_sprite } // FIXME sadct disable bit if verid!=1 && shape not rect if (get_bits1(gb) == 1) { /* not_8_bit */ s->quant_precision = get_bits(gb, 4); /* quant_precision */ if (get_bits(gb, 4) != 8) /* bits_per_pixel */ av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n"); if (s->quant_precision != 5) av_log(s->avctx, AV_LOG_ERROR, "quant precision %d\n", s->quant_precision); if (s->quant_precision<3 || s->quant_precision>9) { s->quant_precision = 5; } } else { s->quant_precision 
= 5; } // FIXME a bunch of grayscale shape things if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */ int i, v; mpeg4_load_default_matrices(s); /* load custom intra matrix */ if (get_bits1(gb)) { int last = 0; for (i = 0; i < 64; i++) { int j; if (get_bits_left(gb) < 8) { av_log(s->avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n"); return AVERROR_INVALIDDATA; } v = get_bits(gb, 8); if (v == 0) break; last = v; j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->intra_matrix[j] = last; s->chroma_intra_matrix[j] = last; } /* replicate last value */ for (; i < 64; i++) { int j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->intra_matrix[j] = last; s->chroma_intra_matrix[j] = last; } } /* load custom non intra matrix */ if (get_bits1(gb)) { int last = 0; for (i = 0; i < 64; i++) { int j; if (get_bits_left(gb) < 8) { av_log(s->avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n"); return AVERROR_INVALIDDATA; } v = get_bits(gb, 8); if (v == 0) break; last = v; j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->inter_matrix[j] = v; s->chroma_inter_matrix[j] = v; } /* replicate last value */ for (; i < 64; i++) { int j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->inter_matrix[j] = last; s->chroma_inter_matrix[j] = last; } } // FIXME a bunch of grayscale shape things } if (vo_ver_id != 1) s->quarter_sample = get_bits1(gb); else s->quarter_sample = 0; if (get_bits_left(gb) < 4) { av_log(s->avctx, AV_LOG_ERROR, "VOL Header truncated\n"); return AVERROR_INVALIDDATA; } if (!get_bits1(gb)) { int pos = get_bits_count(gb); int estimation_method = get_bits(gb, 2); if (estimation_method < 2) { if (!get_bits1(gb)) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* opaque */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* transparent */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_cae */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* inter_cae */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* no_update */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* upsampling */ } if (!get_bits1(gb)) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_blocks */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter_blocks */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter4v_blocks */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* not coded blocks */ } if (!check_marker(s->avctx, gb, "in complexity estimation part 1")) { skip_bits_long(gb, pos - get_bits_count(gb)); goto no_cplx_est; } if (!get_bits1(gb)) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_coeffs */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_lines */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* vlc_syms */ ctx->cplx_estimation_trash_i += 4 * get_bits1(gb); /* vlc_bits */ } if (!get_bits1(gb)) { ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* apm */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* npm */ ctx->cplx_estimation_trash_b += 8 * get_bits1(gb); /* interpolate_mc_q */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* forwback_mc_q */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel2 */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel4 */ } if (!check_marker(s->avctx, gb, "in complexity estimation part 2")) { skip_bits_long(gb, pos - get_bits_count(gb)); goto no_cplx_est; } if (estimation_method == 1) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* sadct */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* qpel */ } } else av_log(s->avctx, 
            AV_LOG_ERROR,
                   "Invalid Complexity estimation method %d\n",
                   estimation_method);
    } else {

no_cplx_est:
        ctx->cplx_estimation_trash_i =
        ctx->cplx_estimation_trash_p =
        ctx->cplx_estimation_trash_b = 0;
    }

    ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */

    s->data_partitioning = get_bits1(gb);
    if (s->data_partitioning)
        ctx->rvlc = get_bits1(gb);

    if (vo_ver_id != 1) {
        ctx->new_pred = get_bits1(gb);
        if (ctx->new_pred) {
            av_log(s->avctx, AV_LOG_ERROR, "new pred not supported\n");
            skip_bits(gb, 2); /* requested upstream message type */
            skip_bits1(gb);   /* newpred segment type */
        }
        if (get_bits1(gb)) // reduced_res_vop
            av_log(s->avctx, AV_LOG_ERROR,
                   "reduced resolution VOP not supported\n");
    } else {
        ctx->new_pred = 0;
    }

    ctx->scalability = get_bits1(gb);

    if (ctx->scalability) {
        GetBitContext bak = *gb;
        int h_sampling_factor_n;
        int h_sampling_factor_m;
        int v_sampling_factor_n;
        int v_sampling_factor_m;

        skip_bits1(gb);    // hierarchy_type
        skip_bits(gb, 4);  /* ref_layer_id */
        skip_bits1(gb);    /* ref_layer_sampling_dir */
        h_sampling_factor_n = get_bits(gb, 5);
        h_sampling_factor_m = get_bits(gb, 5);
        v_sampling_factor_n = get_bits(gb, 5);
        v_sampling_factor_m = get_bits(gb, 5);
        ctx->enhancement_type = get_bits1(gb);

        if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 ||
            v_sampling_factor_n == 0 || v_sampling_factor_m == 0) {
            /* illegal scalability header (VERY broken encoder),
             * trying to work around it */
            ctx->scalability = 0;
            *gb = bak;
        } else
            av_log(s->avctx, AV_LOG_ERROR, "scalability not supported\n");

        // bin shape stuff FIXME
    }

    if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
        av_log(s->avctx, AV_LOG_DEBUG,
               "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, low_delay:%d %s%s%s%s\n",
               s->avctx->framerate.den, s->avctx->framerate.num,
               ctx->time_increment_bits,
               s->quant_precision,
               s->progressive_sequence,
               s->low_delay,
               ctx->scalability ? "scalability " : "",
               s->quarter_sample ? "qpel " : "",
               s->data_partitioning ? "partition " : "",
               ctx->rvlc ? "rvlc " : "");
    }

    return 0;
}

/**
 * Decode the user data string in the header.
 * Also initializes divx/xvid/lavc_version/build.
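 * A typical string looks like "DivX503b1393p" or "Lavc57.48.101"
 * (illustrative values only, not taken from any particular stream).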
*/ static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; char buf[256]; int i; int e; int ver = 0, build = 0, ver2 = 0, ver3 = 0; char last; for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) { if (show_bits(gb, 23) == 0) break; buf[i] = get_bits(gb, 8); } buf[i] = 0; /* divx detection */ e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last); if (e < 2) e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last); if (e >= 2) { ctx->divx_version = ver; ctx->divx_build = build; s->divx_packed = e == 3 && last == 'p'; } /* libavcodec detection */ e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3; if (e != 4) e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build); if (e != 4) { e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1; if (e > 1) { if (ver > 0xFFU || ver2 > 0xFFU || ver3 > 0xFFU) { av_log(s->avctx, AV_LOG_WARNING, "Unknown Lavc version string encountered, %d.%d.%d; " "clamping sub-version values to 8-bits.\n", ver, ver2, ver3); } build = ((ver & 0xFF) << 16) + ((ver2 & 0xFF) << 8) + (ver3 & 0xFF); } } if (e != 4) { if (strcmp(buf, "ffmpeg") == 0) ctx->lavc_build = 4600; } if (e == 4) ctx->lavc_build = build; /* Xvid detection */ e = sscanf(buf, "XviD%d", &build); if (e == 1) ctx->xvid_build = build; return 0; } int ff_mpeg4_workaround_bugs(AVCodecContext *avctx) { Mpeg4DecContext *ctx = avctx->priv_data; MpegEncContext *s = &ctx->m; if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) { if (s->codec_tag == AV_RL32("XVID") || s->codec_tag == AV_RL32("XVIX") || s->codec_tag == AV_RL32("RMP4") || s->codec_tag == AV_RL32("ZMP4") || s->codec_tag == AV_RL32("SIPP")) ctx->xvid_build = 0; } if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) if (s->codec_tag == AV_RL32("DIVX") && s->vo_type == 0 && ctx->vol_control_parameters == 0) ctx->divx_version = 400; // divx 4 if (ctx->xvid_build >= 0 && ctx->divx_version >= 0) { ctx->divx_version = ctx->divx_build = -1; } if (s->workaround_bugs & FF_BUG_AUTODETECT) { if (s->codec_tag == AV_RL32("XVIX")) s->workaround_bugs |= FF_BUG_XVID_ILACE; if (s->codec_tag == AV_RL32("UMP4")) s->workaround_bugs |= FF_BUG_UMP4; if (ctx->divx_version >= 500 && ctx->divx_build < 1814) s->workaround_bugs |= FF_BUG_QPEL_CHROMA; if (ctx->divx_version > 502 && ctx->divx_build < 1814) s->workaround_bugs |= FF_BUG_QPEL_CHROMA2; if (ctx->xvid_build <= 3U) s->padding_bug_score = 256 * 256 * 256 * 64; if (ctx->xvid_build <= 1U) s->workaround_bugs |= FF_BUG_QPEL_CHROMA; if (ctx->xvid_build <= 12U) s->workaround_bugs |= FF_BUG_EDGE; if (ctx->xvid_build <= 32U) s->workaround_bugs |= FF_BUG_DC_CLIP; #define SET_QPEL_FUNC(postfix1, postfix2) \ s->qdsp.put_ ## postfix1 = ff_put_ ## postfix2; \ s->qdsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2; \ s->qdsp.avg_ ## postfix1 = ff_avg_ ## postfix2; if (ctx->lavc_build < 4653U) s->workaround_bugs |= FF_BUG_STD_QPEL; if (ctx->lavc_build < 4655U) s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE; if (ctx->lavc_build < 4670U) s->workaround_bugs |= FF_BUG_EDGE; if (ctx->lavc_build <= 4712U) s->workaround_bugs |= FF_BUG_DC_CLIP; if ((ctx->lavc_build&0xFF) >= 100) { if (ctx->lavc_build > 3621476 && ctx->lavc_build < 3752552 && (ctx->lavc_build < 3752037 || ctx->lavc_build > 3752191) // 3.2.1+ ) s->workaround_bugs |= FF_BUG_IEDGE; } if (ctx->divx_version >= 0) s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE; if (ctx->divx_version == 501 && ctx->divx_build == 20020416) s->padding_bug_score = 256 * 256 
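                                          /* 256 * 256 * 256 * 64 == 1 << 30: always assume the padding bug */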
* 256 * 64; if (ctx->divx_version < 500U) s->workaround_bugs |= FF_BUG_EDGE; if (ctx->divx_version >= 0) s->workaround_bugs |= FF_BUG_HPEL_CHROMA; } if (s->workaround_bugs & FF_BUG_STD_QPEL) { SET_QPEL_FUNC(qpel_pixels_tab[0][5], qpel16_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][7], qpel16_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][9], qpel16_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][5], qpel8_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][7], qpel8_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][9], qpel8_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c) } if (avctx->debug & FF_DEBUG_BUGS) av_log(s->avctx, AV_LOG_DEBUG, "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n", s->workaround_bugs, ctx->lavc_build, ctx->xvid_build, ctx->divx_version, ctx->divx_build, s->divx_packed ? "p" : ""); if (CONFIG_MPEG4_DECODER && ctx->xvid_build >= 0 && s->codec_id == AV_CODEC_ID_MPEG4 && avctx->idct_algo == FF_IDCT_AUTO) { avctx->idct_algo = FF_IDCT_XVID; ff_mpv_idct_init(s); return 1; } return 0; } static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int time_incr, time_increment; int64_t pts; s->mcsel = 0; s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */ if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay && ctx->vol_control_parameters == 0 && !(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)) { av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set incorrectly, clearing it\n"); s->low_delay = 0; } s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B; if (s->partitioned_frame) s->decode_mb = mpeg4_decode_partitioned_mb; else s->decode_mb = mpeg4_decode_mb; time_incr = 0; while (get_bits1(gb) != 0) time_incr++; check_marker(s->avctx, gb, "before time_increment"); if (ctx->time_increment_bits == 0 || !(show_bits(gb, ctx->time_increment_bits + 1) & 1)) { av_log(s->avctx, AV_LOG_WARNING, "time_increment_bits %d is invalid in relation to the current bitstream, this is likely caused by a missing VOL header\n", ctx->time_increment_bits); for (ctx->time_increment_bits = 1; ctx->time_increment_bits < 16; ctx->time_increment_bits++) { if (s->pict_type == AV_PICTURE_TYPE_P || (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE)) { if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30) break; } else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18) break; } av_log(s->avctx, AV_LOG_WARNING, "time_increment_bits set to %d bits, based on bitstream analysis\n", ctx->time_increment_bits); if (s->avctx->framerate.num && 4*s->avctx->framerate.num < 1<<ctx->time_increment_bits) { s->avctx->framerate.num = 1<<ctx->time_increment_bits; s->avctx->time_base = av_inv_q(av_mul_q(s->avctx->framerate, (AVRational){s->avctx->ticks_per_frame, 1})); } } if (IS_3IV1) time_increment = get_bits1(gb); // FIXME investigate further else time_increment = get_bits(gb, ctx->time_increment_bits); if (s->pict_type != AV_PICTURE_TYPE_B) { s->last_time_base = s->time_base; s->time_base += time_incr; s->time = s->time_base * (int64_t)s->avctx->framerate.num + time_increment; if (s->workaround_bugs & FF_BUG_UMP4) { if (s->time < s->last_non_b_time) { /* header is not 
mpeg-4-compatible, broken encoder, * trying to workaround */ s->time_base++; s->time += s->avctx->framerate.num; } } s->pp_time = s->time - s->last_non_b_time; s->last_non_b_time = s->time; } else { s->time = (s->last_time_base + time_incr) * (int64_t)s->avctx->framerate.num + time_increment; s->pb_time = s->pp_time - (s->last_non_b_time - s->time); if (s->pp_time <= s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time <= 0) { /* messed up order, maybe after seeking? skipping current B-frame */ return FRAME_SKIPPED; } ff_mpeg4_init_direct_mv(s); if (ctx->t_frame == 0) ctx->t_frame = s->pb_time; if (ctx->t_frame == 0) ctx->t_frame = 1; // 1/0 protection s->pp_field_time = (ROUNDED_DIV(s->last_non_b_time, ctx->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2; s->pb_field_time = (ROUNDED_DIV(s->time, ctx->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2; if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) { s->pb_field_time = 2; s->pp_field_time = 4; if (!s->progressive_sequence) return FRAME_SKIPPED; } } if (s->avctx->framerate.den) pts = ROUNDED_DIV(s->time, s->avctx->framerate.den); else pts = AV_NOPTS_VALUE; ff_dlog(s->avctx, "MPEG4 PTS: %"PRId64"\n", pts); check_marker(s->avctx, gb, "before vop_coded"); /* vop coded */ if (get_bits1(gb) != 1) { if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n"); return FRAME_SKIPPED; } if (ctx->new_pred) decode_new_pred(ctx, gb); if (ctx->shape != BIN_ONLY_SHAPE && (s->pict_type == AV_PICTURE_TYPE_P || (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE))) { /* rounding type for motion estimation */ s->no_rounding = get_bits1(gb); } else { s->no_rounding = 0; } // FIXME reduced res stuff if (ctx->shape != RECT_SHAPE) { if (ctx->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) { skip_bits(gb, 13); /* width */ check_marker(s->avctx, gb, "after width"); skip_bits(gb, 13); /* height */ check_marker(s->avctx, gb, "after height"); skip_bits(gb, 13); /* hor_spat_ref */ check_marker(s->avctx, gb, "after hor_spat_ref"); skip_bits(gb, 13); /* ver_spat_ref */ } skip_bits1(gb); /* change_CR_disable */ if (get_bits1(gb) != 0) skip_bits(gb, 8); /* constant_alpha_value */ } // FIXME complexity estimation stuff if (ctx->shape != BIN_ONLY_SHAPE) { skip_bits_long(gb, ctx->cplx_estimation_trash_i); if (s->pict_type != AV_PICTURE_TYPE_I) skip_bits_long(gb, ctx->cplx_estimation_trash_p); if (s->pict_type == AV_PICTURE_TYPE_B) skip_bits_long(gb, ctx->cplx_estimation_trash_b); if (get_bits_left(gb) < 3) { av_log(s->avctx, AV_LOG_ERROR, "Header truncated\n"); return AVERROR_INVALIDDATA; } ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)]; if (!s->progressive_sequence) { s->top_field_first = get_bits1(gb); s->alternate_scan = get_bits1(gb); } else s->alternate_scan = 0; } if (s->alternate_scan) { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } else { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, 
ff_alternate_horizontal_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } if (s->pict_type == AV_PICTURE_TYPE_S) { if((ctx->vol_sprite_usage == STATIC_SPRITE || ctx->vol_sprite_usage == GMC_SPRITE)) { if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0) return AVERROR_INVALIDDATA; if (ctx->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, "sprite_brightness_change not supported\n"); if (ctx->vol_sprite_usage == STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n"); } else { memset(s->sprite_offset, 0, sizeof(s->sprite_offset)); memset(s->sprite_delta, 0, sizeof(s->sprite_delta)); } } if (ctx->shape != BIN_ONLY_SHAPE) { s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision); if (s->qscale == 0) { av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG-4 header (qscale=0)\n"); return AVERROR_INVALIDDATA; // makes no sense to continue, as there is nothing left from the image then } if (s->pict_type != AV_PICTURE_TYPE_I) { s->f_code = get_bits(gb, 3); /* fcode_for */ if (s->f_code == 0) { av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG-4 header (f_code=0)\n"); s->f_code = 1; return AVERROR_INVALIDDATA; // makes no sense to continue, as there is nothing left from the image then } } else s->f_code = 1; if (s->pict_type == AV_PICTURE_TYPE_B) { s->b_code = get_bits(gb, 3); if (s->b_code == 0) { av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (b_code=0)\n"); s->b_code=1; return AVERROR_INVALIDDATA; // makes no sense to continue, as the MV decoding will break very quickly } } else s->b_code = 1; if (s->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%"PRId64" tincr:%d\n", s->qscale, s->f_code, s->b_code, s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")), gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first, s->quarter_sample ? "q" : "h", s->data_partitioning, ctx->resync_marker, ctx->num_sprite_warping_points, s->sprite_warping_accuracy, 1 - s->no_rounding, s->vo_type, ctx->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold, ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p, ctx->cplx_estimation_trash_b, s->time, time_increment ); } if (!ctx->scalability) { if (ctx->shape != RECT_SHAPE && s->pict_type != AV_PICTURE_TYPE_I) skip_bits1(gb); // vop shape coding type } else { if (ctx->enhancement_type) { int load_backward_shape = get_bits1(gb); if (load_backward_shape) av_log(s->avctx, AV_LOG_ERROR, "load backward shape isn't supported\n"); } skip_bits(gb, 2); // ref_select_code } } /* detect buggy encoders which don't set the low_delay flag * (divx4/xvid/opendivx). 
Note we cannot detect divx5 without B-frames * easily (although it's buggy too) */ if (s->vo_type == 0 && ctx->vol_control_parameters == 0 && ctx->divx_version == -1 && s->picture_number == 0) { av_log(s->avctx, AV_LOG_WARNING, "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n"); s->low_delay = 1; } s->picture_number++; // better than pic number==0 always ;) // FIXME add short header support s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table; s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table; if (s->workaround_bugs & FF_BUG_EDGE) { s->h_edge_pos = s->width; s->v_edge_pos = s->height; } return 0; } static void read_quant_matrix_ext(MpegEncContext *s, GetBitContext *gb) { int i, j, v; if (get_bits1(gb)) { /* intra_quantiser_matrix */ for (i = 0; i < 64; i++) { v = get_bits(gb, 8); j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; } } if (get_bits1(gb)) { /* non_intra_quantiser_matrix */ for (i = 0; i < 64; i++) { get_bits(gb, 8); } } if (get_bits1(gb)) { /* chroma_intra_quantiser_matrix */ for (i = 0; i < 64; i++) { v = get_bits(gb, 8); j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->chroma_intra_matrix[j] = v; } } if (get_bits1(gb)) { /* chroma_non_intra_quantiser_matrix */ for (i = 0; i < 64; i++) { get_bits(gb, 8); } } next_start_code_studio(gb); } static void extension_and_user_data(MpegEncContext *s, GetBitContext *gb, int id) { uint32_t startcode; uint8_t extension_type; startcode = show_bits_long(gb, 32); if (startcode == USER_DATA_STARTCODE || startcode == EXT_STARTCODE) { if ((id == 2 || id == 4) && startcode == EXT_STARTCODE) { skip_bits_long(gb, 32); extension_type = get_bits(gb, 4); if (extension_type == QUANT_MATRIX_EXT_ID) read_quant_matrix_ext(s, gb); } } } static void decode_smpte_tc(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; skip_bits(gb, 16); /* Time_code[63..48] */ check_marker(s->avctx, gb, "after Time_code[63..48]"); skip_bits(gb, 16); /* Time_code[47..32] */ check_marker(s->avctx, gb, "after Time_code[47..32]"); skip_bits(gb, 16); /* Time_code[31..16] */ check_marker(s->avctx, gb, "after Time_code[31..16]"); skip_bits(gb, 16); /* Time_code[15..0] */ check_marker(s->avctx, gb, "after Time_code[15..0]"); skip_bits(gb, 4); /* reserved_bits */ } /** * Decode the next studio vop header. 
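 *
 * Parses the SMPTE time code, temporal reference and vop_coding_type, then
 * the per-picture coding parameters (DC-predictor reset, alternate scan,
 * frame_pred_frame_dct, DCT and intra-DC precision) before reloading the
 * scan tables and default matrices.
 *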
* @return <0 if something went wrong */ static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; if (get_bits_left(gb) <= 32) return 0; s->decode_mb = mpeg4_decode_studio_mb; decode_smpte_tc(ctx, gb); skip_bits(gb, 10); /* temporal_reference */ skip_bits(gb, 2); /* vop_structure */ s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* vop_coding_type */ if (get_bits1(gb)) { /* vop_coded */ skip_bits1(gb); /* top_field_first */ skip_bits1(gb); /* repeat_first_field */ s->progressive_frame = get_bits1(gb) ^ 1; /* progressive_frame */ } if (s->pict_type == AV_PICTURE_TYPE_I) { if (get_bits1(gb)) reset_studio_dc_predictors(s); } if (ctx->shape != BIN_ONLY_SHAPE) { s->alternate_scan = get_bits1(gb); s->frame_pred_frame_dct = get_bits1(gb); s->dct_precision = get_bits(gb, 2); s->intra_dc_precision = get_bits(gb, 2); s->q_scale_type = get_bits1(gb); } if (s->alternate_scan) { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } else { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } mpeg4_load_default_matrices(s); next_start_code_studio(gb); extension_and_user_data(s, gb, 4); return 0; } static int decode_studiovisualobject(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int visual_object_type; skip_bits(gb, 4); /* visual_object_verid */ visual_object_type = get_bits(gb, 4); if (visual_object_type != VOT_VIDEO_ID) { avpriv_request_sample(s->avctx, "VO type %u", visual_object_type); return AVERROR_PATCHWELCOME; } next_start_code_studio(gb); extension_and_user_data(s, gb, 1); return 0; } static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int width, height; int bits_per_raw_sample; // random_accessible_vol and video_object_type_indication have already // been read by the caller decode_vol_header() skip_bits(gb, 4); /* video_object_layer_verid */ ctx->shape = get_bits(gb, 2); /* video_object_layer_shape */ skip_bits(gb, 4); /* video_object_layer_shape_extension */ skip_bits1(gb); /* progressive_sequence */ if (ctx->shape != BIN_ONLY_SHAPE) { ctx->rgb = get_bits1(gb); /* rgb_components */ s->chroma_format = get_bits(gb, 2); /* chroma_format */ if (!s->chroma_format) { av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n"); return AVERROR_INVALIDDATA; } bits_per_raw_sample = get_bits(gb, 4); /* bit_depth */ if (bits_per_raw_sample == 10) { if (ctx->rgb) { s->avctx->pix_fmt = AV_PIX_FMT_GBRP10; } else { s->avctx->pix_fmt = s->chroma_format == CHROMA_422 ? 
AV_PIX_FMT_YUV422P10 : AV_PIX_FMT_YUV444P10; } } else { avpriv_request_sample(s->avctx, "MPEG-4 Studio profile bit-depth %u", bits_per_raw_sample); return AVERROR_PATCHWELCOME; } s->avctx->bits_per_raw_sample = bits_per_raw_sample; } if (ctx->shape == RECT_SHAPE) { check_marker(s->avctx, gb, "before video_object_layer_width"); width = get_bits(gb, 14); /* video_object_layer_width */ check_marker(s->avctx, gb, "before video_object_layer_height"); height = get_bits(gb, 14); /* video_object_layer_height */ check_marker(s->avctx, gb, "after video_object_layer_height"); /* Do the same check as non-studio profile */ if (width && height) { if (s->width && s->height && (s->width != width || s->height != height)) s->context_reinit = 1; s->width = width; s->height = height; } } s->aspect_ratio_info = get_bits(gb, 4); if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) { s->avctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width s->avctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height } else { s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info]; } skip_bits(gb, 4); /* frame_rate_code */ skip_bits(gb, 15); /* first_half_bit_rate */ check_marker(s->avctx, gb, "after first_half_bit_rate"); skip_bits(gb, 15); /* latter_half_bit_rate */ check_marker(s->avctx, gb, "after latter_half_bit_rate"); skip_bits(gb, 15); /* first_half_vbv_buffer_size */ check_marker(s->avctx, gb, "after first_half_vbv_buffer_size"); skip_bits(gb, 3); /* latter_half_vbv_buffer_size */ skip_bits(gb, 11); /* first_half_vbv_buffer_size */ check_marker(s->avctx, gb, "after first_half_vbv_buffer_size"); skip_bits(gb, 15); /* latter_half_vbv_occupancy */ check_marker(s->avctx, gb, "after latter_half_vbv_occupancy"); s->low_delay = get_bits1(gb); s->mpeg_quant = get_bits1(gb); /* mpeg2_stream */ next_start_code_studio(gb); extension_and_user_data(s, gb, 2); return 0; } /** * Decode MPEG-4 headers. 
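 *
 * Scans the bitstream for MPEG-4 start codes (32-bit values of the form
 * 0x000001xx) and dispatches VOL, user-data, GOP, VOS and visual-object
 * headers as they appear, until a VOP start code is reached.
 *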
 * @return <0 if no VOP found (or a damaged one)
 *         FRAME_SKIPPED if a not coded VOP is found
 *         0 if a VOP is found
 */
int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->m;
    unsigned startcode, v;
    int ret;
    int vol = 0;

    /* search next start code */
    align_get_bits(gb);

    // If we have not switched to studio profile, then we also did not switch
    // bits_per_raw_sample; that means something else (like a previous
    // instance) set it from outside, which would be inconsistent with the
    // current state, so reset it.
    if (!s->studio_profile && s->avctx->bits_per_raw_sample != 8)
        s->avctx->bits_per_raw_sample = 0;

    if (s->codec_tag == AV_RL32("WV1F") &&
        show_bits(gb, 24) == 0x575630) {
        skip_bits(gb, 24);
        if (get_bits(gb, 8) == 0xF0)
            goto end;
    }

    startcode = 0xff;
    for (;;) {
        if (get_bits_count(gb) >= gb->size_in_bits) {
            if (gb->size_in_bits == 8 &&
                (ctx->divx_version >= 0 || ctx->xvid_build >= 0) ||
                s->codec_tag == AV_RL32("QMP4")) {
                av_log(s->avctx, AV_LOG_VERBOSE, "frame skip %d\n",
                       gb->size_in_bits);
                return FRAME_SKIPPED;  // divx bug
            } else
                return AVERROR_INVALIDDATA;  // end of stream
        }

        /* use the bits after the test */
        v = get_bits(gb, 8);
        startcode = ((startcode << 8) | v) & 0xffffffff;

        if ((startcode & 0xFFFFFF00) != 0x100)
            continue;  // no startcode

        if (s->avctx->debug & FF_DEBUG_STARTCODE) {
            av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode);
            if (startcode <= 0x11F)
                av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start");
            else if (startcode <= 0x12F)
                av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start");
            else if (startcode <= 0x13F)
                av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
            else if (startcode <= 0x15F)
                av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start");
            else if (startcode <= 0x1AF)
                av_log(s->avctx, AV_LOG_DEBUG, "Reserved");
            else if (startcode == 0x1B0)
                av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start");
            else if (startcode == 0x1B1)
                av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End");
            else if (startcode == 0x1B2)
                av_log(s->avctx, AV_LOG_DEBUG, "User Data");
            else if (startcode == 0x1B3)
                av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start");
            else if (startcode == 0x1B4)
                av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error");
            else if (startcode == 0x1B5)
                av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start");
            else if (startcode == 0x1B6)
                av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start");
            else if (startcode == 0x1B7)
                av_log(s->avctx, AV_LOG_DEBUG, "slice start");
            else if (startcode == 0x1B8)
                av_log(s->avctx, AV_LOG_DEBUG, "extension start");
            else if (startcode == 0x1B9)
                av_log(s->avctx, AV_LOG_DEBUG, "fgs start");
            else if (startcode == 0x1BA)
                av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start");
            else if (startcode == 0x1BB)
                av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start");
            else if (startcode == 0x1BC)
                av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start");
            else if (startcode == 0x1BD)
                av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start");
            else if (startcode == 0x1BE)
                av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start");
            else if (startcode == 0x1BF)
                av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start");
            else if (startcode == 0x1C0)
                av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start");
            else if (startcode == 0x1C1)
                av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start");
            else if (startcode == 0x1C2)
                av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start");
            else if (startcode == 0x1C3)
                av_log(s->avctx, AV_LOG_DEBUG, "stuffing start");
            else if (startcode <= 0x1C5)
                av_log(s->avctx, AV_LOG_DEBUG, "reserved");
            else if (startcode <=
0x1FF) av_log(s->avctx, AV_LOG_DEBUG, "System start"); av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb)); } if (startcode >= 0x120 && startcode <= 0x12F) { if (vol) { av_log(s->avctx, AV_LOG_WARNING, "Ignoring multiple VOL headers\n"); continue; } vol++; if ((ret = decode_vol_header(ctx, gb)) < 0) return ret; } else if (startcode == USER_DATA_STARTCODE) { decode_user_data(ctx, gb); } else if (startcode == GOP_STARTCODE) { mpeg4_decode_gop_header(s, gb); } else if (startcode == VOS_STARTCODE) { int profile, level; mpeg4_decode_profile_level(s, gb, &profile, &level); if (profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO && (level > 0 && level < 9)) { s->studio_profile = 1; next_start_code_studio(gb); extension_and_user_data(s, gb, 0); } else if (s->studio_profile) { avpriv_request_sample(s->avctx, "Mixes studio and non studio profile\n"); return AVERROR_PATCHWELCOME; } s->avctx->profile = profile; s->avctx->level = level; } else if (startcode == VISUAL_OBJ_STARTCODE) { if (s->studio_profile) { if ((ret = decode_studiovisualobject(ctx, gb)) < 0) return ret; } else mpeg4_decode_visual_object(s, gb); } else if (startcode == VOP_STARTCODE) { break; } align_get_bits(gb); startcode = 0xff; } end: if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) s->low_delay = 1; s->avctx->has_b_frames = !s->low_delay; if (s->studio_profile) { if (!s->avctx->bits_per_raw_sample) { av_log(s->avctx, AV_LOG_ERROR, "Missing VOL header\n"); return AVERROR_INVALIDDATA; } return decode_studio_vop_header(ctx, gb); } else return decode_vop_header(ctx, gb); } av_cold void ff_mpeg4videodec_static_init(void) { static int done = 0; if (!done) { ff_rl_init(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]); ff_rl_init(&ff_rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]); ff_rl_init(&ff_rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]); INIT_VLC_RL(ff_mpeg4_rl_intra, 554); INIT_VLC_RL(ff_rvlc_rl_inter, 1072); INIT_VLC_RL(ff_rvlc_rl_intra, 1072); INIT_VLC_STATIC(&dc_lum, DC_VLC_BITS, 10 /* 13 */, &ff_mpeg4_DCtab_lum[0][1], 2, 1, &ff_mpeg4_DCtab_lum[0][0], 2, 1, 512); INIT_VLC_STATIC(&dc_chrom, DC_VLC_BITS, 10 /* 13 */, &ff_mpeg4_DCtab_chrom[0][1], 2, 1, &ff_mpeg4_DCtab_chrom[0][0], 2, 1, 512); INIT_VLC_STATIC(&sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15, &ff_sprite_trajectory_tab[0][1], 4, 2, &ff_sprite_trajectory_tab[0][0], 4, 2, 128); INIT_VLC_STATIC(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4, &ff_mb_type_b_tab[0][1], 2, 1, &ff_mb_type_b_tab[0][0], 2, 1, 16); done = 1; } } int ff_mpeg4_frame_end(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { Mpeg4DecContext *ctx = avctx->priv_data; MpegEncContext *s = &ctx->m; /* divx 5.01+ bitstream reorder stuff */ /* Since this clobbers the input buffer and hwaccel codecs still need the * data during hwaccel->end_frame we should not do this any earlier */ if (s->divx_packed) { int current_pos = s->gb.buffer == s->bitstream_buffer ? 0 : (get_bits_count(&s->gb) >> 3); int startcode_found = 0; if (buf_size - current_pos > 7) { int i; for (i = current_pos; i < buf_size - 4; i++) if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1 && buf[i + 3] == 0xB6) { startcode_found = !(buf[i + 4] & 0x40); break; } } if (startcode_found) { if (!ctx->showed_packed_warning) { av_log(s->avctx, AV_LOG_INFO, "Video uses a non-standard and " "wasteful way to store B-frames ('packed B-frames'). 
" "Consider using the mpeg4_unpack_bframes bitstream filter without encoding but stream copy to fix it.\n"); ctx->showed_packed_warning = 1; } av_fast_padded_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, buf_size - current_pos); if (!s->bitstream_buffer) { s->bitstream_buffer_size = 0; return AVERROR(ENOMEM); } memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos); s->bitstream_buffer_size = buf_size - current_pos; } } return 0; } #if HAVE_THREADS static int mpeg4_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) { Mpeg4DecContext *s = dst->priv_data; const Mpeg4DecContext *s1 = src->priv_data; int init = s->m.context_initialized; int ret = ff_mpeg_update_thread_context(dst, src); if (ret < 0) return ret; memcpy(((uint8_t*)s) + sizeof(MpegEncContext), ((uint8_t*)s1) + sizeof(MpegEncContext), sizeof(Mpeg4DecContext) - sizeof(MpegEncContext)); if (CONFIG_MPEG4_DECODER && !init && s1->xvid_build >= 0) ff_xvid_idct_init(&s->m.idsp, dst); return 0; } #endif static av_cold int init_studio_vlcs(Mpeg4DecContext *ctx) { int i, ret; for (i = 0; i < 12; i++) { ret = init_vlc(&ctx->studio_intra_tab[i], STUDIO_INTRA_BITS, 22, &ff_mpeg4_studio_intra[i][0][1], 4, 2, &ff_mpeg4_studio_intra[i][0][0], 4, 2, 0); if (ret < 0) return ret; } ret = init_vlc(&ctx->studio_luma_dc, STUDIO_INTRA_BITS, 19, &ff_mpeg4_studio_dc_luma[0][1], 4, 2, &ff_mpeg4_studio_dc_luma[0][0], 4, 2, 0); if (ret < 0) return ret; ret = init_vlc(&ctx->studio_chroma_dc, STUDIO_INTRA_BITS, 19, &ff_mpeg4_studio_dc_chroma[0][1], 4, 2, &ff_mpeg4_studio_dc_chroma[0][0], 4, 2, 0); if (ret < 0) return ret; return 0; } static av_cold int decode_init(AVCodecContext *avctx) { Mpeg4DecContext *ctx = avctx->priv_data; MpegEncContext *s = &ctx->m; int ret; ctx->divx_version = ctx->divx_build = ctx->xvid_build = ctx->lavc_build = -1; if ((ret = ff_h263_decode_init(avctx)) < 0) return ret; ff_mpeg4videodec_static_init(); if ((ret = init_studio_vlcs(ctx)) < 0) return ret; s->h263_pred = 1; s->low_delay = 0; /* default, might be overridden in the vol header during header parsing */ s->decode_mb = mpeg4_decode_mb; ctx->time_increment_bits = 4; /* default value for broken headers */ avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; avctx->internal->allocate_progress = 1; return 0; } static av_cold int decode_end(AVCodecContext *avctx) { Mpeg4DecContext *ctx = avctx->priv_data; int i; if (!avctx->internal->is_copy) { for (i = 0; i < 12; i++) ff_free_vlc(&ctx->studio_intra_tab[i]); ff_free_vlc(&ctx->studio_luma_dc); ff_free_vlc(&ctx->studio_chroma_dc); } return ff_h263_decode_end(avctx); } static const AVOption mpeg4_options[] = { {"quarter_sample", "1/4 subpel MC", offsetof(MpegEncContext, quarter_sample), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0}, {"divx_packed", "divx style packed b frames", offsetof(MpegEncContext, divx_packed), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0}, {NULL} }; static const AVClass mpeg4_class = { .class_name = "MPEG4 Video Decoder", .item_name = av_default_item_name, .option = mpeg4_options, .version = LIBAVUTIL_VERSION_INT, }; AVCodec ff_mpeg4_decoder = { .name = "mpeg4", .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_MPEG4, .priv_data_size = sizeof(Mpeg4DecContext), .init = decode_init, .close = decode_end, .decode = ff_h263_decode_frame, .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_FRAME_THREADS, .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, 
    .flush                 = ff_mpeg_flush,
    .max_lowres            = 3,
    .pix_fmts              = ff_h263_hwaccel_pixfmt_list_420,
    .profiles              = NULL_IF_CONFIG_SMALL(ff_mpeg4_video_profiles),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg4_update_thread_context),
    .priv_class            = &mpeg4_class,
    .hw_configs            = (const AVCodecHWConfigInternal *[]) {
#if CONFIG_MPEG4_NVDEC_HWACCEL
                               HWACCEL_NVDEC(mpeg4),
#endif
#if CONFIG_MPEG4_VAAPI_HWACCEL
                               HWACCEL_VAAPI(mpeg4),
#endif
#if CONFIG_MPEG4_VDPAU_HWACCEL
                               HWACCEL_VDPAU(mpeg4),
#endif
#if CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL
                               HWACCEL_VIDEOTOOLBOX(mpeg4),
#endif
                               NULL
                           },
};
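/*
 * A minimal usage sketch (illustrative only, not part of the decoder): how
 * this codec is typically driven through the public libavcodec API. "pkt"
 * and "frame" are assumed to be an AVPacket/AVFrame pair supplied by the
 * demuxing side; error handling is omitted.
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);
 *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *     avcodec_open2(avctx, codec, NULL);       // runs decode_init() above
 *     avcodec_send_packet(avctx, pkt);         // feeds ff_h263_decode_frame()
 *     while (avcodec_receive_frame(avctx, frame) == 0) {
 *         // consume one decoded frame
 *     }
 *     avcodec_free_context(&avctx);
 */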
/* Copyright (c) 2001 Matej Pfajfar.
 * Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2016, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file connection_edge.c
 * \brief Handle edge streams.
 *
 * An edge_connection_t is a subtype of a connection_t, and represents two
 * critical concepts in Tor: a stream, and an edge connection. From the Tor
 * protocol's point of view, a stream is a bi-directional channel that is
 * multiplexed on a single circuit. Each stream on a circuit is identified
 * with a separate 16-bit stream ID, local to the (circuit,exit) pair.
 * Streams are created in response to client requests.
 *
 * An edge connection is one thing that can implement a stream: it is either
 * a TCP application socket that has arrived via (e.g.) a SOCKS request, or
 * an exit connection.
 *
 * Not every instance of edge_connection_t truly represents an edge
 * connection, however. (Sorry!) We also create edge_connection_t objects
 * for streams that we will not be handling with TCP. The types of these
 * streams are:
 * <ul>
 * <li>DNS lookup streams, created on the client side in response to
 *   a UDP DNS request received on a DNSPort, or a RESOLVE command
 *   on a controller.
 * <li>DNS lookup streams, created on the exit side in response to
 *   a RELAY_RESOLVE cell from a client.
 * <li>Tunneled directory streams, created on the directory cache side
 *   in response to a RELAY_BEGINDIR cell. These streams attach directly
 *   to a dir_connection_t object without ever using TCP.
 * </ul>
 *
 * This module handles general-purpose functionality having to do with
 * edge_connection_t. On the client side, it accepts various types of
 * application requests on SocksPorts, TransPorts, and NATDPorts, and
 * creates streams appropriately.
 *
 * This module is also responsible for implementing stream isolation:
 * ensuring that streams that should not be linkable to one another are
 * kept to different circuits.
 *
 * On the exit side, this module handles the various stream-creating
 * types of RELAY cells by launching appropriate outgoing connections,
 * DNS requests, or directory connection objects.
 *
 * And for all edge connections, this module is responsible for handling
 * incoming and outgoing data as it arrives or leaves in the relay.c
 * module. (Outgoing data will be packaged in
 * connection_edge_process_inbuf() as it calls
 * connection_edge_package_raw_inbuf(); incoming data from RELAY_DATA
 * cells is applied in connection_edge_process_relay_cell().)
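 *
 * (For example, bytes that a client application writes to its SOCKS socket
 * are read by connection_edge_process_inbuf(), packaged into RELAY_DATA
 * cells, and sent down the stream's circuit; RELAY_DATA cells arriving from
 * the circuit are unpacked and written back out to that same socket.)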
**/ #define CONNECTION_EDGE_PRIVATE #include "or.h" #include "backtrace.h" #include "addressmap.h" #include "buffers.h" #include "channel.h" #include "circpathbias.h" #include "circuitlist.h" #include "circuituse.h" #include "config.h" #include "connection.h" #include "connection_edge.h" #include "connection_or.h" #include "control.h" #include "dns.h" #include "dnsserv.h" #include "directory.h" #include "dirserv.h" #include "hibernate.h" #include "hs_common.h" #include "main.h" #include "nodelist.h" #include "policies.h" #include "reasons.h" #include "relay.h" #include "rendclient.h" #include "rendcommon.h" #include "rendservice.h" #include "rephist.h" #include "router.h" #include "routerlist.h" #include "routerset.h" #include "circuitbuild.h" #ifdef HAVE_LINUX_TYPES_H #include <linux/types.h> #endif #ifdef HAVE_LINUX_NETFILTER_IPV4_H #include <linux/netfilter_ipv4.h> #define TRANS_NETFILTER #define TRANS_NETFILTER_IPV4 #endif #ifdef HAVE_LINUX_IF_H #include <linux/if.h> #endif #ifdef HAVE_LINUX_NETFILTER_IPV6_IP6_TABLES_H #include <linux/netfilter_ipv6/ip6_tables.h> #if defined(IP6T_SO_ORIGINAL_DST) #define TRANS_NETFILTER #define TRANS_NETFILTER_IPV6 #endif #endif #if defined(HAVE_NET_IF_H) && defined(HAVE_NET_PFVAR_H) #include <net/if.h> #include <net/pfvar.h> #define TRANS_PF #endif #ifdef IP_TRANSPARENT #define TRANS_TPROXY #endif #define SOCKS4_GRANTED 90 #define SOCKS4_REJECT 91 static int connection_ap_handshake_process_socks(entry_connection_t *conn); static int connection_ap_process_natd(entry_connection_t *conn); static int connection_exit_connect_dir(edge_connection_t *exitconn); static int consider_plaintext_ports(entry_connection_t *conn, uint16_t port); static int connection_ap_supports_optimistic_data(const entry_connection_t *); /** An AP stream has failed/finished. If it hasn't already sent back * a socks reply, send one now (based on endreason). Also set * has_sent_end to 1, and mark the conn. */ MOCK_IMPL(void, connection_mark_unattached_ap_,(entry_connection_t *conn, int endreason, int line, const char *file)) { connection_t *base_conn = ENTRY_TO_CONN(conn); edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn); tor_assert(base_conn->type == CONN_TYPE_AP); ENTRY_TO_EDGE_CONN(conn)->edge_has_sent_end = 1; /* no circ yet */ /* If this is a rendezvous stream and it is failing without ever * being attached to a circuit, assume that an attempt to connect to * the destination hidden service has just ended. * * XXXX This condition doesn't limit to only streams failing * without ever being attached. That sloppiness should be harmless, * but we should fix it someday anyway. */ if ((edge_conn->on_circuit != NULL || edge_conn->edge_has_sent_end) && connection_edge_is_rendezvous_stream(edge_conn)) { rend_client_note_connection_attempt_ended(edge_conn->rend_data); } if (base_conn->marked_for_close) { /* This call will warn as appropriate. */ connection_mark_for_close_(base_conn, line, file); return; } if (!conn->socks_request->has_finished) { if (endreason & END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED) log_warn(LD_BUG, "stream (marked at %s:%d) sending two socks replies?", file, line); if (SOCKS_COMMAND_IS_CONNECT(conn->socks_request->command)) connection_ap_handshake_socks_reply(conn, NULL, 0, endreason); else if (SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command)) connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_ERROR_TRANSIENT, 0, NULL, -1, -1); else /* unknown or no handshake at all. send no response. 
*/ conn->socks_request->has_finished = 1; } connection_mark_and_flush_(base_conn, line, file); ENTRY_TO_EDGE_CONN(conn)->end_reason = endreason; } /** There was an EOF. Send an end and mark the connection for close. */ int connection_edge_reached_eof(edge_connection_t *conn) { if (connection_get_inbuf_len(TO_CONN(conn)) && connection_state_is_open(TO_CONN(conn))) { /* it still has stuff to process. don't let it die yet. */ return 0; } log_info(LD_EDGE,"conn (fd "TOR_SOCKET_T_FORMAT") reached eof. Closing.", conn->base_.s); if (!conn->base_.marked_for_close) { /* only mark it if not already marked. it's possible to * get the 'end' right around when the client hangs up on us. */ connection_edge_end(conn, END_STREAM_REASON_DONE); if (conn->base_.type == CONN_TYPE_AP) { /* eof, so don't send a socks reply back */ if (EDGE_TO_ENTRY_CONN(conn)->socks_request) EDGE_TO_ENTRY_CONN(conn)->socks_request->has_finished = 1; } connection_mark_for_close(TO_CONN(conn)); } return 0; } /** Handle new bytes on conn->inbuf based on state: * - If it's waiting for socks info, try to read another step of the * socks handshake out of conn->inbuf. * - If it's waiting for the original destination, fetch it. * - If it's open, then package more relay cells from the stream. * - Else, leave the bytes on inbuf alone for now. * * Mark and return -1 if there was an unexpected error with the conn, * else return 0. */ int connection_edge_process_inbuf(edge_connection_t *conn, int package_partial) { tor_assert(conn); switch (conn->base_.state) { case AP_CONN_STATE_SOCKS_WAIT: if (connection_ap_handshake_process_socks(EDGE_TO_ENTRY_CONN(conn)) <0) { /* already marked */ return -1; } return 0; case AP_CONN_STATE_NATD_WAIT: if (connection_ap_process_natd(EDGE_TO_ENTRY_CONN(conn)) < 0) { /* already marked */ return -1; } return 0; case AP_CONN_STATE_OPEN: case EXIT_CONN_STATE_OPEN: if (connection_edge_package_raw_inbuf(conn, package_partial, NULL) < 0) { /* (We already sent an end cell if possible) */ connection_mark_for_close(TO_CONN(conn)); return -1; } return 0; case AP_CONN_STATE_CONNECT_WAIT: if (connection_ap_supports_optimistic_data(EDGE_TO_ENTRY_CONN(conn))) { log_info(LD_EDGE, "data from edge while in '%s' state. Sending it anyway. " "package_partial=%d, buflen=%ld", conn_state_to_string(conn->base_.type, conn->base_.state), package_partial, (long)connection_get_inbuf_len(TO_CONN(conn))); if (connection_edge_package_raw_inbuf(conn, package_partial, NULL)<0) { /* (We already sent an end cell if possible) */ connection_mark_for_close(TO_CONN(conn)); return -1; } return 0; } /* Fall through if the connection is on a circuit without optimistic * data support. */ case EXIT_CONN_STATE_CONNECTING: case AP_CONN_STATE_RENDDESC_WAIT: case AP_CONN_STATE_CIRCUIT_WAIT: case AP_CONN_STATE_RESOLVE_WAIT: case AP_CONN_STATE_CONTROLLER_WAIT: log_info(LD_EDGE, "data from edge while in '%s' state. Leaving it on buffer.", conn_state_to_string(conn->base_.type, conn->base_.state)); return 0; } log_warn(LD_BUG,"Got unexpected state %d. Closing.",conn->base_.state); tor_fragile_assert(); connection_edge_end(conn, END_STREAM_REASON_INTERNAL); connection_mark_for_close(TO_CONN(conn)); return -1; } /** This edge needs to be closed, because its circuit has closed. * Mark it for close and return 0. */ int connection_edge_destroy(circid_t circ_id, edge_connection_t *conn) { if (!conn->base_.marked_for_close) { log_info(LD_EDGE, "CircID %u: At an edge. 
Marking connection for close.", (unsigned) circ_id);
    if (conn->base_.type == CONN_TYPE_AP) {
      entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
      connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_DESTROY);
      control_event_stream_bandwidth(conn);
      control_event_stream_status(entry_conn, STREAM_EVENT_CLOSED,
                                  END_STREAM_REASON_DESTROY);
      conn->end_reason |= END_STREAM_REASON_FLAG_ALREADY_SENT_CLOSED;
    } else {
      /* closing the circuit, nothing to send an END to */
      conn->edge_has_sent_end = 1;
      conn->end_reason = END_STREAM_REASON_DESTROY;
      conn->end_reason |= END_STREAM_REASON_FLAG_ALREADY_SENT_CLOSED;
      connection_mark_and_flush(TO_CONN(conn));
    }
  }
  conn->cpath_layer = NULL;
  conn->on_circuit = NULL;
  return 0;
}

/** Send a raw end cell to the stream with ID <b>stream_id</b> out over the
 * <b>circ</b> towards the hop identified with <b>cpath_layer</b>. If this
 * is not a client connection, set the relay end cell's reason for closing
 * as <b>reason</b> */
static int
relay_send_end_cell_from_edge(streamid_t stream_id, circuit_t *circ,
                              uint8_t reason, crypt_path_t *cpath_layer)
{
  char payload[1];

  if (CIRCUIT_PURPOSE_IS_CLIENT(circ->purpose)) {
    /* Never send the server an informative reason code; it doesn't need to
     * know why the client stream is failing. */
    reason = END_STREAM_REASON_MISC;
  }

  payload[0] = (char) reason;

  return relay_send_command_from_edge(stream_id, circ, RELAY_COMMAND_END,
                                      payload, 1, cpath_layer);
}

/* If the connection <b>conn</b> is attempting to connect to an external
 * destination that is a hidden service, and the reason is a connection
 * refused or timeout, log it so the operator can take appropriate actions.
 * The log statement is a rate limited warning. */
static void
warn_if_hs_unreachable(const edge_connection_t *conn, uint8_t reason)
{
  tor_assert(conn);

  if (conn->base_.type == CONN_TYPE_EXIT &&
      connection_edge_is_rendezvous_stream(conn) &&
      (reason == END_STREAM_REASON_CONNECTREFUSED ||
       reason == END_STREAM_REASON_TIMEOUT)) {
#define WARN_FAILED_HS_CONNECTION 300
    static ratelim_t warn_limit = RATELIM_INIT(WARN_FAILED_HS_CONNECTION);
    char *m;
    if ((m = rate_limit_log(&warn_limit, approx_time()))) {
      log_warn(LD_EDGE, "Onion service connection to %s failed (%s)",
               (conn->base_.socket_family == AF_UNIX) ?
               safe_str(conn->base_.address) :
               safe_str(fmt_addrport(&conn->base_.addr, conn->base_.port)),
               stream_end_reason_to_string(reason));
      tor_free(m);
    }
  }
}

/** Send a relay end cell from stream <b>conn</b> down conn's circuit, and
 * remember that we've done so. If this is not a client connection, set the
 * relay end cell's reason for closing as <b>reason</b>.
 *
 * Return -1 if this function has already been called on this conn,
 * else return 0.
 */
int
connection_edge_end(edge_connection_t *conn, uint8_t reason)
{
  char payload[RELAY_PAYLOAD_SIZE];
  size_t payload_len=1;
  circuit_t *circ;
  uint8_t control_reason = reason;

  if (conn->edge_has_sent_end) {
    log_warn(LD_BUG,"(Harmless.) Calling connection_edge_end (reason %d) "
             "on an already ended stream?", reason);
    tor_fragile_assert();
    return -1;
  }

  if (conn->base_.marked_for_close) {
    log_warn(LD_BUG,
             "called on conn that's already marked for close at %s:%d.",
             conn->base_.marked_for_close_file, conn->base_.marked_for_close);
    return 0;
  }

  circ = circuit_get_by_edge_conn(conn);
  if (circ && CIRCUIT_PURPOSE_IS_CLIENT(circ->purpose)) {
    /* If this is a client circuit, don't send the server an informative
     * reason code; it doesn't need to know why the client stream is
     * failing. */
    reason = END_STREAM_REASON_MISC;
  }

  payload[0] = (char)reason;
  if (reason == END_STREAM_REASON_EXITPOLICY &&
      !connection_edge_is_rendezvous_stream(conn)) {
    int addrlen;
    if (tor_addr_family(&conn->base_.addr) == AF_INET) {
      set_uint32(payload+1, tor_addr_to_ipv4n(&conn->base_.addr));
      addrlen = 4;
    } else {
      memcpy(payload+1, tor_addr_to_in6_addr8(&conn->base_.addr), 16);
      addrlen = 16;
    }
    set_uint32(payload+1+addrlen, htonl(dns_clip_ttl(conn->address_ttl)));
    payload_len += 4+addrlen;
  }

  if (circ && !circ->marked_for_close) {
    log_debug(LD_EDGE,"Sending end on conn (fd "TOR_SOCKET_T_FORMAT").",
              conn->base_.s);
    connection_edge_send_command(conn, RELAY_COMMAND_END,
                                 payload, payload_len);
    /* We log a warning if the connection was to a hidden service and
     * couldn't be made because the service wasn't available. */
    warn_if_hs_unreachable(conn, control_reason);
  } else {
    log_debug(LD_EDGE,"No circ to send end on conn "
              "(fd "TOR_SOCKET_T_FORMAT").",
              conn->base_.s);
  }

  conn->edge_has_sent_end = 1;
  conn->end_reason = control_reason;
  return 0;
}

/** An error has just occurred on an operation on an edge connection
 * <b>conn</b>. Extract the errno; convert it to an end reason, and send an
 * appropriate relay end cell to the other end of the connection's circuit.
 **/
int
connection_edge_end_errno(edge_connection_t *conn)
{
  uint8_t reason;
  tor_assert(conn);
  reason = errno_to_stream_end_reason(tor_socket_errno(conn->base_.s));
  return connection_edge_end(conn, reason);
}

/** We just wrote some data to <b>conn</b>; act appropriately.
 *
 * (That is, if it's open, consider sending a stream-level sendme cell if we
 * have just flushed enough.)
 */
int
connection_edge_flushed_some(edge_connection_t *conn)
{
  switch (conn->base_.state) {
    case AP_CONN_STATE_OPEN:
    case EXIT_CONN_STATE_OPEN:
      connection_edge_consider_sending_sendme(conn);
      break;
  }
  return 0;
}

/** Connection <b>conn</b> has finished writing and has no bytes left on
 * its outbuf.
 *
 * If it's in state 'open', stop writing, consider responding with a
 * sendme, and return.
 * Otherwise, stop writing and return.
 *
 * If <b>conn</b> is broken, mark it for close and return -1, else
 * return 0.
 */
int
connection_edge_finished_flushing(edge_connection_t *conn)
{
  tor_assert(conn);

  switch (conn->base_.state) {
    case AP_CONN_STATE_OPEN:
    case EXIT_CONN_STATE_OPEN:
      connection_edge_consider_sending_sendme(conn);
      return 0;
    case AP_CONN_STATE_SOCKS_WAIT:
    case AP_CONN_STATE_NATD_WAIT:
    case AP_CONN_STATE_RENDDESC_WAIT:
    case AP_CONN_STATE_CIRCUIT_WAIT:
    case AP_CONN_STATE_CONNECT_WAIT:
    case AP_CONN_STATE_CONTROLLER_WAIT:
    case AP_CONN_STATE_RESOLVE_WAIT:
      return 0;
    default:
      log_warn(LD_BUG, "Called in unexpected state %d.",conn->base_.state);
      tor_fragile_assert();
      return -1;
  }
  return 0;
}

/** Longest size for the relay payload of a RELAY_CONNECTED cell that we're
 * able to generate. */
/* 4 zero bytes; 1 type byte; 16 byte IPv6 address; 4 byte TTL. */
#define MAX_CONNECTED_CELL_PAYLOAD_LEN 25

/** Set the buffer at <b>payload_out</b> -- which must have at least
 * MAX_CONNECTED_CELL_PAYLOAD_LEN bytes available -- to the body of a
 * RELAY_CONNECTED cell indicating that we have connected to <b>addr</b>, and
 * that the name resolution that led us to <b>addr</b> will be valid for
 * <b>ttl</b> seconds. Return -1 on error, or the number of bytes used on
 * success.
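 *
 * As a worked example of the layout built below: for an IPv4 address the
 * payload is the 4 address bytes followed by a 4-byte clipped TTL (8 bytes
 * in total); for IPv6 it is 4 zero bytes, an address-type byte of 6, the 16
 * address bytes, and then the TTL (25 bytes, the maximum).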
*/ STATIC int connected_cell_format_payload(uint8_t *payload_out, const tor_addr_t *addr, uint32_t ttl) { const sa_family_t family = tor_addr_family(addr); int connected_payload_len; /* should be needless */ memset(payload_out, 0, MAX_CONNECTED_CELL_PAYLOAD_LEN); if (family == AF_INET) { set_uint32(payload_out, tor_addr_to_ipv4n(addr)); connected_payload_len = 4; } else if (family == AF_INET6) { set_uint32(payload_out, 0); set_uint8(payload_out + 4, 6); memcpy(payload_out + 5, tor_addr_to_in6_addr8(addr), 16); connected_payload_len = 21; } else { return -1; } set_uint32(payload_out + connected_payload_len, htonl(dns_clip_ttl(ttl))); connected_payload_len += 4; tor_assert(connected_payload_len <= MAX_CONNECTED_CELL_PAYLOAD_LEN); return connected_payload_len; } /** Connected handler for exit connections: start writing pending * data, deliver 'CONNECTED' relay cells as appropriate, and check * any pending data that may have been received. */ int connection_edge_finished_connecting(edge_connection_t *edge_conn) { connection_t *conn; tor_assert(edge_conn); tor_assert(edge_conn->base_.type == CONN_TYPE_EXIT); conn = TO_CONN(edge_conn); tor_assert(conn->state == EXIT_CONN_STATE_CONNECTING); log_info(LD_EXIT,"Exit connection to %s:%u (%s) established.", escaped_safe_str(conn->address), conn->port, safe_str(fmt_and_decorate_addr(&conn->addr))); rep_hist_note_exit_stream_opened(conn->port); conn->state = EXIT_CONN_STATE_OPEN; connection_watch_events(conn, READ_EVENT); /* stop writing, keep reading */ if (connection_get_outbuf_len(conn)) /* in case there are any queued relay * cells */ connection_start_writing(conn); /* deliver a 'connected' relay cell back through the circuit. */ if (connection_edge_is_rendezvous_stream(edge_conn)) { if (connection_edge_send_command(edge_conn, RELAY_COMMAND_CONNECTED, NULL, 0) < 0) return 0; /* circuit is closed, don't continue */ } else { uint8_t connected_payload[MAX_CONNECTED_CELL_PAYLOAD_LEN]; int connected_payload_len = connected_cell_format_payload(connected_payload, &conn->addr, edge_conn->address_ttl); if (connected_payload_len < 0) return -1; if (connection_edge_send_command(edge_conn, RELAY_COMMAND_CONNECTED, (char*)connected_payload, connected_payload_len) < 0) return 0; /* circuit is closed, don't continue */ } tor_assert(edge_conn->package_window > 0); /* in case the server has written anything */ return connection_edge_process_inbuf(edge_conn, 1); } /** A list of all the entry_connection_t * objects that are not marked * for close, and are in AP_CONN_STATE_CIRCUIT_WAIT. * * (Right now, we check in several places to make sure that this list is * correct. When it's incorrect, we'll fix it, and log a BUG message.) */ static smartlist_t *pending_entry_connections = NULL; static int untried_pending_connections = 0; /** Common code to connection_(ap|exit)_about_to_close. */ static void connection_edge_about_to_close(edge_connection_t *edge_conn) { if (!edge_conn->edge_has_sent_end) { connection_t *conn = TO_CONN(edge_conn); log_warn(LD_BUG, "(Harmless.) 
Edge connection (marked at %s:%d) " "hasn't sent end yet?", conn->marked_for_close_file, conn->marked_for_close); tor_fragile_assert(); } } /** Called when we're about to finally unlink and free an AP (client) * connection: perform necessary accounting and cleanup */ void connection_ap_about_to_close(entry_connection_t *entry_conn) { circuit_t *circ; edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(entry_conn); connection_t *conn = ENTRY_TO_CONN(entry_conn); connection_edge_about_to_close(edge_conn); if (entry_conn->socks_request->has_finished == 0) { /* since conn gets removed right after this function finishes, * there's no point trying to send back a reply at this point. */ log_warn(LD_BUG,"Closing stream (marked at %s:%d) without sending" " back a socks reply.", conn->marked_for_close_file, conn->marked_for_close); } if (!edge_conn->end_reason) { log_warn(LD_BUG,"Closing stream (marked at %s:%d) without having" " set end_reason.", conn->marked_for_close_file, conn->marked_for_close); } if (entry_conn->dns_server_request) { log_warn(LD_BUG,"Closing stream (marked at %s:%d) without having" " replied to DNS request.", conn->marked_for_close_file, conn->marked_for_close); dnsserv_reject_request(entry_conn); } if (TO_CONN(edge_conn)->state == AP_CONN_STATE_CIRCUIT_WAIT) { smartlist_remove(pending_entry_connections, entry_conn); } #if 1 /* Check to make sure that this isn't in pending_entry_connections if it * didn't actually belong there. */ if (TO_CONN(edge_conn)->type == CONN_TYPE_AP) { connection_ap_warn_and_unmark_if_pending_circ(entry_conn, "about_to_close"); } #endif control_event_stream_bandwidth(edge_conn); control_event_stream_status(entry_conn, STREAM_EVENT_CLOSED, edge_conn->end_reason); circ = circuit_get_by_edge_conn(edge_conn); if (circ) circuit_detach_stream(circ, edge_conn); } /** Called when we're about to finally unlink and free an exit * connection: perform necessary accounting and cleanup */ void connection_exit_about_to_close(edge_connection_t *edge_conn) { circuit_t *circ; connection_t *conn = TO_CONN(edge_conn); connection_edge_about_to_close(edge_conn); circ = circuit_get_by_edge_conn(edge_conn); if (circ) circuit_detach_stream(circ, edge_conn); if (conn->state == EXIT_CONN_STATE_RESOLVING) { connection_dns_remove(edge_conn); } } /** Define a schedule for how long to wait between retrying * application connections. Rather than waiting a fixed amount of * time between each retry, we wait 10 seconds each for the first * two tries, and 15 seconds for each retry after * that. Hopefully this will improve the expected user experience. */ static int compute_retry_timeout(entry_connection_t *conn) { int timeout = get_options()->CircuitStreamTimeout; if (timeout) /* if our config options override the default, use them */ return timeout; if (conn->num_socks_retries < 2) /* try 0 and try 1 */ return 10; return 15; } /** Find all general-purpose AP streams waiting for a response that sent their * begin/resolve cell too long ago. Detach from their current circuit, and * mark their current circuit as unsuitable for new streams. Then call * connection_ap_handshake_attach_circuit() to attach to a new circuit (if * available) or launch a new one. * * For rendezvous streams, simply give up after SocksTimeout seconds (with no * retry attempt). 
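 *
 * (Concretely, when CircuitStreamTimeout is unset, compute_retry_timeout()
 * above gives a stream 10 seconds for each of its first two attempts and
 * 15 seconds for every attempt after that.)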
*/ void connection_ap_expire_beginning(void) { edge_connection_t *conn; entry_connection_t *entry_conn; circuit_t *circ; time_t now = time(NULL); const or_options_t *options = get_options(); int severity; int cutoff; int seconds_idle, seconds_since_born; smartlist_t *conns = get_connection_array(); SMARTLIST_FOREACH_BEGIN(conns, connection_t *, base_conn) { if (base_conn->type != CONN_TYPE_AP || base_conn->marked_for_close) continue; entry_conn = TO_ENTRY_CONN(base_conn); conn = ENTRY_TO_EDGE_CONN(entry_conn); /* if it's an internal linked connection, don't yell its status. */ severity = (tor_addr_is_null(&base_conn->addr) && !base_conn->port) ? LOG_INFO : LOG_NOTICE; seconds_idle = (int)( now - base_conn->timestamp_lastread ); seconds_since_born = (int)( now - base_conn->timestamp_created ); if (base_conn->state == AP_CONN_STATE_OPEN) continue; /* We already consider SocksTimeout in * connection_ap_handshake_attach_circuit(), but we need to consider * it here too because controllers that put streams in controller_wait * state never ask Tor to attach the circuit. */ if (AP_CONN_STATE_IS_UNATTACHED(base_conn->state)) { if (seconds_since_born >= options->SocksTimeout) { log_fn(severity, LD_APP, "Tried for %d seconds to get a connection to %s:%d. " "Giving up. (%s)", seconds_since_born, safe_str_client(entry_conn->socks_request->address), entry_conn->socks_request->port, conn_state_to_string(CONN_TYPE_AP, base_conn->state)); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT); } continue; } /* We're in state connect_wait or resolve_wait now -- waiting for a * reply to our relay cell. See if we want to retry/give up. */ cutoff = compute_retry_timeout(entry_conn); if (seconds_idle < cutoff) continue; circ = circuit_get_by_edge_conn(conn); if (!circ) { /* it's vanished? */ log_info(LD_APP,"Conn is waiting (address %s), but lost its circ.", safe_str_client(entry_conn->socks_request->address)); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT); continue; } if (circ->purpose == CIRCUIT_PURPOSE_C_REND_JOINED) { if (seconds_idle >= options->SocksTimeout) { log_fn(severity, LD_REND, "Rend stream is %d seconds late. Giving up on address" " '%s.onion'.", seconds_idle, safe_str_client(entry_conn->socks_request->address)); /* Roll back path bias use state so that we probe the circuit * if nothing else succeeds on it */ pathbias_mark_use_rollback(TO_ORIGIN_CIRCUIT(circ)); connection_edge_end(conn, END_STREAM_REASON_TIMEOUT); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT); } continue; } if (circ->purpose != CIRCUIT_PURPOSE_C_GENERAL && circ->purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT && circ->purpose != CIRCUIT_PURPOSE_PATH_BIAS_TESTING) { log_warn(LD_BUG, "circuit->purpose == CIRCUIT_PURPOSE_C_GENERAL failed. " "The purpose on the circuit was %s; it was in state %s, " "path_state %s.", circuit_purpose_to_string(circ->purpose), circuit_state_to_string(circ->state), CIRCUIT_IS_ORIGIN(circ) ? pathbias_state_to_string(TO_ORIGIN_CIRCUIT(circ)->path_state) : "none"); } log_fn(cutoff < 15 ? LOG_INFO : severity, LD_APP, "We tried for %d seconds to connect to '%s' using exit %s." " Retrying on a new circuit.", seconds_idle, safe_str_client(entry_conn->socks_request->address), conn->cpath_layer ? 
extend_info_describe(conn->cpath_layer->extend_info): "*unnamed*"); /* send an end down the circuit */ connection_edge_end(conn, END_STREAM_REASON_TIMEOUT); /* un-mark it as ending, since we're going to reuse it */ conn->edge_has_sent_end = 0; conn->end_reason = 0; /* make us not try this circuit again, but allow * current streams on it to survive if they can */ mark_circuit_unusable_for_new_conns(TO_ORIGIN_CIRCUIT(circ)); /* give our stream another 'cutoff' seconds to try */ conn->base_.timestamp_lastread += cutoff; if (entry_conn->num_socks_retries < 250) /* avoid overflow */ entry_conn->num_socks_retries++; /* move it back into 'pending' state, and try to attach. */ if (connection_ap_detach_retriable(entry_conn, TO_ORIGIN_CIRCUIT(circ), END_STREAM_REASON_TIMEOUT)<0) { if (!base_conn->marked_for_close) connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_CANT_ATTACH); } } SMARTLIST_FOREACH_END(base_conn); } /** * As connection_ap_attach_pending, but first scans the entire connection * array to see if any elements are missing. */ void connection_ap_rescan_and_attach_pending(void) { entry_connection_t *entry_conn; smartlist_t *conns = get_connection_array(); if (PREDICT_UNLIKELY(NULL == pending_entry_connections)) pending_entry_connections = smartlist_new(); SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) { if (conn->marked_for_close || conn->type != CONN_TYPE_AP || conn->state != AP_CONN_STATE_CIRCUIT_WAIT) continue; entry_conn = TO_ENTRY_CONN(conn); tor_assert(entry_conn); if (! smartlist_contains(pending_entry_connections, entry_conn)) { log_warn(LD_BUG, "Found a connection %p that was supposed to be " "in pending_entry_connections, but wasn't. No worries; " "adding it.", pending_entry_connections); untried_pending_connections = 1; connection_ap_mark_as_pending_circuit(entry_conn); } } SMARTLIST_FOREACH_END(conn); connection_ap_attach_pending(1); } #ifdef DEBUGGING_17659 #define UNMARK() do { \ entry_conn->marked_pending_circ_line = 0; \ entry_conn->marked_pending_circ_file = 0; \ } while (0) #else #define UNMARK() do { } while (0) #endif /** Tell any AP streams that are listed as waiting for a new circuit to try * again. If there is an available circuit for a stream, attach it. Otherwise, * launch a new circuit. * * If <b>retry</b> is false, only check the list if it contains at least one * streams that we have not yet tried to attach to a circuit. */ void connection_ap_attach_pending(int retry) { if (PREDICT_UNLIKELY(!pending_entry_connections)) { return; } if (untried_pending_connections == 0 && !retry) return; /* Don't allow any modifications to list while we are iterating over * it. We'll put streams back on this list if we can't attach them * immediately. */ smartlist_t *pending = pending_entry_connections; pending_entry_connections = smartlist_new(); SMARTLIST_FOREACH_BEGIN(pending, entry_connection_t *, entry_conn) { connection_t *conn = ENTRY_TO_CONN(entry_conn); tor_assert(conn && entry_conn); if (conn->marked_for_close) { UNMARK(); continue; } if (conn->magic != ENTRY_CONNECTION_MAGIC) { log_warn(LD_BUG, "%p has impossible magic value %u.", entry_conn, (unsigned)conn->magic); UNMARK(); continue; } if (conn->state != AP_CONN_STATE_CIRCUIT_WAIT) { log_warn(LD_BUG, "%p is no longer in circuit_wait. Its current state " "is %s. Why is it on pending_entry_connections?", entry_conn, conn_state_to_string(conn->type, conn->state)); UNMARK(); continue; } /* Okay, we're through the sanity checks. Try to handle this stream. 
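     * If the attach attempt fails outright, the stream is marked
     * unattached; if it is still in circuit-wait afterwards, it is put
     * back onto the pending list below.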
    if (connection_ap_handshake_attach_circuit(entry_conn) < 0) {
      if (!conn->marked_for_close)
        connection_mark_unattached_ap(entry_conn,
                                      END_STREAM_REASON_CANT_ATTACH);
    }

    if (! conn->marked_for_close &&
        conn->type == CONN_TYPE_AP &&
        conn->state == AP_CONN_STATE_CIRCUIT_WAIT) {
      /* Is it still waiting for a circuit? If so, we didn't attach it,
       * so it's still pending.  Put it back on the list. */
      if (!smartlist_contains(pending_entry_connections, entry_conn)) {
        smartlist_add(pending_entry_connections, entry_conn);
        continue;
      }
    }

    /* If we got here, then we either closed the connection, or
     * we attached it. */
    UNMARK();
  } SMARTLIST_FOREACH_END(entry_conn);

  smartlist_free(pending);
  untried_pending_connections = 0;
}

/** Mark <b>entry_conn</b> as needing to get attached to a circuit.
 *
 * The <b>entry_conn</b> must be in AP_CONN_STATE_CIRCUIT_WAIT, and must
 * not already be pending a circuit.  The circuit will get launched or the
 * connection will get attached the next time we call
 * connection_ap_attach_pending().
 */
void
connection_ap_mark_as_pending_circuit_(entry_connection_t *entry_conn,
                                       const char *fname, int lineno)
{
  connection_t *conn = ENTRY_TO_CONN(entry_conn);
  tor_assert(conn->state == AP_CONN_STATE_CIRCUIT_WAIT);
  tor_assert(conn->magic == ENTRY_CONNECTION_MAGIC);
  if (conn->marked_for_close)
    return;

  if (PREDICT_UNLIKELY(NULL == pending_entry_connections))
    pending_entry_connections = smartlist_new();

  if (PREDICT_UNLIKELY(smartlist_contains(pending_entry_connections,
                                          entry_conn))) {
    log_warn(LD_BUG, "What?? pending_entry_connections already contains %p! "
             "(Called from %s:%d.)",
             entry_conn, fname, lineno);
#ifdef DEBUGGING_17659
    const char *f2 = entry_conn->marked_pending_circ_file;
    log_warn(LD_BUG, "(Previously called from %s:%d.)\n",
             f2 ? f2 : "<NULL>",
             entry_conn->marked_pending_circ_line);
#endif
    log_backtrace(LOG_WARN, LD_BUG, "To debug, this may help");
    return;
  }

#ifdef DEBUGGING_17659
  entry_conn->marked_pending_circ_line = (uint16_t) lineno;
  entry_conn->marked_pending_circ_file = fname;
#endif

  untried_pending_connections = 1;
  smartlist_add(pending_entry_connections, entry_conn);

  /* Work-around for bug 19969: we handle pending_entry_connections at
   * the end of run_main_loop_once(), but in many cases that function will
   * take a very long time, if ever, to finish its call to event_base_loop().
   *
   * So the fix is to tell it right now that it ought to finish its loop at
   * its next available opportunity.
   */
  tell_event_loop_to_finish();
}

/** Mark <b>entry_conn</b> as no longer waiting for a circuit. */
void
connection_ap_mark_as_non_pending_circuit(entry_connection_t *entry_conn)
{
  if (PREDICT_UNLIKELY(NULL == pending_entry_connections))
    return;
  UNMARK();
  smartlist_remove(pending_entry_connections, entry_conn);
}

/** If <b>entry_conn</b> is still listed in pending_entry_connections --
 * which it should not be by the time we reach <b>where</b> -- warn about
 * the bug and remove it from the list. */
void
connection_ap_warn_and_unmark_if_pending_circ(entry_connection_t *entry_conn,
                                              const char *where)
{
  if (pending_entry_connections &&
      smartlist_contains(pending_entry_connections, entry_conn)) {
    log_warn(LD_BUG, "What was %p doing in pending_entry_connections in %s?",
             entry_conn, where);
    connection_ap_mark_as_non_pending_circuit(entry_conn);
  }
}
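/* Usage sketch for the pending-circuit machinery above.  The caller shown
 * here is hypothetical; it only illustrates the intended call order, which
 * the real entry points in this file follow:
 *
 *   ENTRY_TO_CONN(entry_conn)->state = AP_CONN_STATE_CIRCUIT_WAIT;
 *   connection_ap_mark_as_pending_circuit(entry_conn);
 *   ...
 *   connection_ap_attach_pending(0);   (attach or re-queue everything)
 *
 * Marking sets untried_pending_connections, so a later non-retry call to
 * connection_ap_attach_pending() knows there is work to do. */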
/** Tell any AP streams that are waiting for a one-hop tunnel to
 * <b>failed_digest</b> that they are going to fail. */
/* XXXX We should get rid of this function, and instead attach
 * one-hop streams to circ->p_streams so they get marked in
 * circuit_mark_for_close like normal p_streams. */
void
connection_ap_fail_onehop(const char *failed_digest,
                          cpath_build_state_t *build_state)
{
  entry_connection_t *entry_conn;
  char digest[DIGEST_LEN];
  smartlist_t *conns = get_connection_array();
  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
    if (conn->marked_for_close ||
        conn->type != CONN_TYPE_AP ||
        conn->state != AP_CONN_STATE_CIRCUIT_WAIT)
      continue;
    entry_conn = TO_ENTRY_CONN(conn);
    if (!entry_conn->want_onehop)
      continue;
    if (hexdigest_to_digest(entry_conn->chosen_exit_name, digest) < 0 ||
        tor_memneq(digest, failed_digest, DIGEST_LEN))
      continue;
    if (tor_digest_is_zero(digest)) {
      /* we don't know the digest; have to compare addr:port */
      tor_addr_t addr;
      if (!build_state || !build_state->chosen_exit ||
          !entry_conn->socks_request) {
        continue;
      }
      if (tor_addr_parse(&addr, entry_conn->socks_request->address) < 0 ||
          !tor_addr_eq(&build_state->chosen_exit->addr, &addr) ||
          build_state->chosen_exit->port != entry_conn->socks_request->port)
        continue;
    }
    log_info(LD_APP, "Closing one-hop stream to '%s/%s' because the OR conn "
             "just failed.",
             entry_conn->chosen_exit_name,
             entry_conn->socks_request->address);
    connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT);
  } SMARTLIST_FOREACH_END(conn);
}

/** A circuit failed to finish on its last hop <b>info</b>. If there
 * are any streams waiting with this exit node in mind, but they
 * don't absolutely require it, make them give up on it.
 */
void
circuit_discard_optional_exit_enclaves(extend_info_t *info)
{
  entry_connection_t *entry_conn;
  const node_t *r1, *r2;

  smartlist_t *conns = get_connection_array();
  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
    if (conn->marked_for_close ||
        conn->type != CONN_TYPE_AP ||
        conn->state != AP_CONN_STATE_CIRCUIT_WAIT)
      continue;
    entry_conn = TO_ENTRY_CONN(conn);
    if (!entry_conn->chosen_exit_optional &&
        !entry_conn->chosen_exit_retries)
      continue;
    r1 = node_get_by_nickname(entry_conn->chosen_exit_name, 0);
    r2 = node_get_by_id(info->identity_digest);
    if (!r1 || !r2 || r1 != r2)
      continue;
    tor_assert(entry_conn->socks_request);
    if (entry_conn->chosen_exit_optional) {
      log_info(LD_APP, "Giving up on enclave exit '%s' for destination %s.",
               safe_str_client(entry_conn->chosen_exit_name),
               escaped_safe_str_client(entry_conn->socks_request->address));
      entry_conn->chosen_exit_optional = 0;
      tor_free(entry_conn->chosen_exit_name); /* clears it */
      /* if this port is dangerous, warn or reject it now that we don't
       * think it'll be using an enclave. */
      consider_plaintext_ports(entry_conn, entry_conn->socks_request->port);
    }
    if (entry_conn->chosen_exit_retries) {
      if (--entry_conn->chosen_exit_retries == 0) { /* give up! */
        clear_trackexithost_mappings(entry_conn->chosen_exit_name);
        tor_free(entry_conn->chosen_exit_name); /* clears it */
        /* if this port is dangerous, warn or reject it now that we don't
         * think it'll be using an enclave. */
        consider_plaintext_ports(entry_conn, entry_conn->socks_request->port);
      }
    }
  } SMARTLIST_FOREACH_END(conn);
}

/** The AP connection <b>conn</b> has just failed while attaching or
 * sending a BEGIN or resolving on <b>circ</b>, but another circuit
 * might work. Detach the circuit, and either reattach it, launch a
 * new circuit, tell the controller, or give up as appropriate.
 *
 * Returns -1 on err, 1 on success, 0 on not-yet-sure.
 */
int
connection_ap_detach_retriable(entry_connection_t *conn,
                               origin_circuit_t *circ,
                               int reason)
{
  control_event_stream_status(conn, STREAM_EVENT_FAILED_RETRIABLE, reason);
  ENTRY_TO_CONN(conn)->timestamp_lastread = time(NULL);

  /* Roll back path bias use state so that we probe the circuit
   * if nothing else succeeds on it */
  pathbias_mark_use_rollback(circ);

  if (conn->pending_optimistic_data) {
    buf_set_to_copy(&conn->sending_optimistic_data,
                    conn->pending_optimistic_data);
  }

  if (!get_options()->LeaveStreamsUnattached || conn->use_begindir) {
    /* If we're attaching streams ourself, or if this connection is
     * a tunneled directory connection, then just attach it. */
    ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CIRCUIT_WAIT;
    circuit_detach_stream(TO_CIRCUIT(circ), ENTRY_TO_EDGE_CONN(conn));
    connection_ap_mark_as_pending_circuit(conn);
  } else {
    CONNECTION_AP_EXPECT_NONPENDING(conn);
    ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CONTROLLER_WAIT;
    circuit_detach_stream(TO_CIRCUIT(circ), ENTRY_TO_EDGE_CONN(conn));
  }
  return 0;
}

/** Check if <b>conn</b> is using a dangerous port. Then warn and/or
 * reject depending on our config options. */
static int
consider_plaintext_ports(entry_connection_t *conn, uint16_t port)
{
  const or_options_t *options = get_options();
  int reject = smartlist_contains_int_as_string(
                                     options->RejectPlaintextPorts, port);

  if (smartlist_contains_int_as_string(options->WarnPlaintextPorts, port)) {
    log_warn(LD_APP, "Application request to port %d: this port is "
             "commonly used for unencrypted protocols. Please make sure "
             "you don't send anything you would mind the rest of the "
             "Internet reading!%s", port, reject ? " Closing." : "");
    control_event_client_status(LOG_WARN, "DANGEROUS_PORT PORT=%d RESULT=%s",
                                port, reject ? "REJECT" : "WARN");
  }

  if (reject) {
    log_info(LD_APP, "Port %d listed in RejectPlaintextPorts. Closing.",
             port);
    connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
    return -1;
  }

  return 0;
}

/** How many times do we try connecting with an exit configured via
 * TrackHostExits before concluding that it won't work any more and trying a
 * different one? */
#define TRACKHOSTEXITS_RETRIES 5
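/* Configuration sketch for consider_plaintext_ports() above.  The option
 * names are real torrc options; the values are just an example (the shipped
 * default for WarnPlaintextPorts is believed to be 23,109,110,143, and
 * RejectPlaintextPorts defaults to empty):
 *
 *   WarnPlaintextPorts 23,109,110,143
 *   RejectPlaintextPorts 109,110
 *
 * With that, a SOCKS request to port 110 logs the warning, emits the
 * DANGEROUS_PORT control event with RESULT=REJECT, and closes the stream;
 * a request to port 23 only warns. */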
/** Call connection_ap_handshake_rewrite_and_attach() unless a controller
 * asked us to leave streams unattached. Return 0 in that case.
 *
 * See connection_ap_handshake_rewrite_and_attach()'s
 * documentation for arguments and return value.
 */
int
connection_ap_rewrite_and_attach_if_allowed(entry_connection_t *conn,
                                            origin_circuit_t *circ,
                                            crypt_path_t *cpath)
{
  const or_options_t *options = get_options();

  if (options->LeaveStreamsUnattached) {
    CONNECTION_AP_EXPECT_NONPENDING(conn);
    ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CONTROLLER_WAIT;
    return 0;
  }
  return connection_ap_handshake_rewrite_and_attach(conn, circ, cpath);
}

/* Try to perform any map-based rewriting of the target address in
 * <b>conn</b>, filling in the fields of <b>out</b> as we go, and modifying
 * conn->socks_request.address as appropriate.
 */
STATIC void
connection_ap_handshake_rewrite(entry_connection_t *conn,
                                rewrite_result_t *out)
{
  socks_request_t *socks = conn->socks_request;
  const or_options_t *options = get_options();
  tor_addr_t addr_tmp;

  /* Initialize all the fields of 'out' to reasonable defaults */
  out->automap = 0;
  out->exit_source = ADDRMAPSRC_NONE;
  out->map_expires = TIME_MAX;
  out->end_reason = 0;
  out->should_close = 0;
  out->orig_address[0] = 0;

  /* We convert all incoming addresses to lowercase. */
  tor_strlower(socks->address);

  /* Remember the original address. */
  strlcpy(out->orig_address, socks->address, sizeof(out->orig_address));
  log_debug(LD_APP, "Client asked for %s:%d",
            safe_str_client(socks->address),
            socks->port);

  /* Check for whether this is a .exit address.  By default, those are
   * disallowed when they're coming straight from the client, but you're
   * allowed to have them in MapAddress commands and so forth. */
  if (!strcmpend(socks->address, ".exit") && !options->AllowDotExit) {
    log_warn(LD_APP, "The \".exit\" notation is disabled in Tor due to "
             "security risks. Set AllowDotExit in your torrc to enable "
             "it (at your own risk).");
    control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
                                escaped(socks->address));
    out->end_reason = END_STREAM_REASON_TORPROTOCOL;
    out->should_close = 1;
    return;
  }

  /* Remember the original address so we can tell the user about what
   * they actually said, not just what it turned into. */
  /* XXX yes, this is the same as out->orig_address above. One is
   * in the output, and one is in the connection. */
  if (! conn->original_dest_address) {
    /* Is the 'if' necessary here? XXXX */
    conn->original_dest_address = tor_strdup(conn->socks_request->address);
  }

  /* First, apply MapAddress and MAPADDRESS mappings. We need to do
   * these only for non-reverse lookups, since they don't exist for those.
   * We also need to do this before we consider automapping, since we might
   * e.g. resolve irc.oftc.net into irconionaddress.onion, at which point
   * we'd need to automap it. */
  if (socks->command != SOCKS_COMMAND_RESOLVE_PTR) {
    const unsigned rewrite_flags = AMR_FLAG_USE_MAPADDRESS;
    if (addressmap_rewrite(socks->address, sizeof(socks->address),
                           rewrite_flags, &out->map_expires,
                           &out->exit_source)) {
      control_event_stream_status(conn, STREAM_EVENT_REMAP,
                                  REMAP_STREAM_SOURCE_CACHE);
    }
  }

  /* Now see if we need to create or return an existing Hostname->IP
   * automapping.  Automapping happens when we're asked to resolve a
   * hostname, and AutomapHostsOnResolve is set, and the hostname has a
   * suffix listed in AutomapHostsSuffixes.  It's a handy feature
   * that lets you have Tor assign e.g. IPv6 addresses for .onion
   * names, and return them safely from DNSPort.
   */
  if (socks->command == SOCKS_COMMAND_RESOLVE &&
      tor_addr_parse(&addr_tmp, socks->address) < 0 &&
      options->AutomapHostsOnResolve) {
    /* Check the suffix... */
    out->automap = addressmap_address_should_automap(socks->address,
                                                     options);
    if (out->automap) {
      /* If we get here, then we should apply an automapping for this. */
      const char *new_addr;
      /* We return an IPv4 address by default, or an IPv6 address if we
       * are allowed to do so. */
      int addr_type = RESOLVED_TYPE_IPV4;
      if (conn->socks_request->socks_version != 4) {
        if (!conn->entry_cfg.ipv4_traffic ||
            (conn->entry_cfg.ipv6_traffic && conn->entry_cfg.prefer_ipv6) ||
            conn->entry_cfg.prefer_ipv6_virtaddr)
          addr_type = RESOLVED_TYPE_IPV6;
      }
      /* Okay, register the target address as automapped, and find the new
       * address we're supposed to give as a resolve answer.  (Return a
       * cached value if we've looked up this address before.) */
      new_addr = addressmap_register_virtual_address(
                                    addr_type, tor_strdup(socks->address));
      if (! new_addr) {
        log_warn(LD_APP, "Unable to automap address %s",
                 escaped_safe_str(socks->address));
        out->end_reason = END_STREAM_REASON_INTERNAL;
        out->should_close = 1;
        return;
      }
      log_info(LD_APP, "Automapping %s to %s",
               escaped_safe_str_client(socks->address),
               safe_str_client(new_addr));
      strlcpy(socks->address, new_addr, sizeof(socks->address));
    }
  }
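  /* Illustrative torrc for the automapping path above.  The option names
   * are real; the values are only an example setup (127.192.0.0/10 and
   * [FE80::]/10 are believed to be the defaults for the virtual ranges):
   *
   *   AutomapHostsOnResolve 1
   *   AutomapHostsSuffixes .onion,.exit
   *   VirtualAddrNetworkIPv4 127.192.0.0/10
   *   VirtualAddrNetworkIPv6 [FE80::]/10
   *   DNSPort 5353
   *
   * A RESOLVE of "foo.onion" through DNSPort then gets an answer from the
   * virtual range, and a later CONNECT to that address is mapped back to
   * foo.onion by the AMR_FLAG_USE_AUTOMAP rewrite further down. */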
  /* Now handle reverse lookups, if they're in the cache.  This doesn't
   * happen too often, since client-side DNS caching is off by default,
   * and deprecated anyway. */
  if (socks->command == SOCKS_COMMAND_RESOLVE_PTR) {
    unsigned rewrite_flags = 0;
    if (conn->entry_cfg.use_cached_ipv4_answers)
      rewrite_flags |= AMR_FLAG_USE_IPV4_DNS;
    if (conn->entry_cfg.use_cached_ipv6_answers)
      rewrite_flags |= AMR_FLAG_USE_IPV6_DNS;

    if (addressmap_rewrite_reverse(socks->address, sizeof(socks->address),
                                   rewrite_flags, &out->map_expires)) {
      char *result = tor_strdup(socks->address);
      /* remember _what_ is supposed to have been resolved. */
      tor_snprintf(socks->address, sizeof(socks->address), "REVERSE[%s]",
                   out->orig_address);
      connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_HOSTNAME,
                                             strlen(result),
                                             (uint8_t*)result,
                                             -1, out->map_expires);
      tor_free(result);
      out->end_reason = END_STREAM_REASON_DONE |
                        END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED;
      out->should_close = 1;
      return;
    }

    /* Hang on, did we find an answer saying that this is a reverse lookup
     * for an internal address?  If so, we should reject it if we're
     * configured to do so. */
    if (options->ClientDNSRejectInternalAddresses) {
      /* Don't let clients try to do a reverse lookup on 10.0.0.1. */
      tor_addr_t addr;
      int ok;
      ok = tor_addr_parse_PTR_name(&addr, socks->address, AF_UNSPEC, 1);
      if (ok == 1 && tor_addr_is_internal(&addr, 0)) {
        connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_ERROR,
                                               0, NULL, -1, TIME_MAX);
        out->end_reason = END_STREAM_REASON_SOCKSPROTOCOL |
                          END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED;
        out->should_close = 1;
        return;
      }
    }
  }

  /* If we didn't automap it before, then this is still the address that
   * came straight from the user, mapped according to any
   * MapAddress/MAPADDRESS commands.  Now apply other mappings,
   * including previously registered Automap entries (IP back to
   * hostname), TrackHostExits entries, and client-side DNS cache
   * entries (if they're turned on).
   */
  if (socks->command != SOCKS_COMMAND_RESOLVE_PTR &&
      !out->automap) {
    unsigned rewrite_flags = AMR_FLAG_USE_AUTOMAP | AMR_FLAG_USE_TRACKEXIT;
    addressmap_entry_source_t exit_source2;
    if (conn->entry_cfg.use_cached_ipv4_answers)
      rewrite_flags |= AMR_FLAG_USE_IPV4_DNS;
    if (conn->entry_cfg.use_cached_ipv6_answers)
      rewrite_flags |= AMR_FLAG_USE_IPV6_DNS;
    if (addressmap_rewrite(socks->address, sizeof(socks->address),
                           rewrite_flags, &out->map_expires,
                           &exit_source2)) {
      control_event_stream_status(conn, STREAM_EVENT_REMAP,
                                  REMAP_STREAM_SOURCE_CACHE);
    }
    if (out->exit_source == ADDRMAPSRC_NONE) {
      /* If it wasn't a .exit before, maybe it turned into a .exit. Remember
       * the original source of a .exit. */
      out->exit_source = exit_source2;
    }
  }

  /* Check to see whether we're about to use an address in the virtual
   * range without actually having gotten it from an Automap. */
  if (!out->automap && address_is_in_virtual_range(socks->address)) {
    /* This address was probably handed out by
     * client_dns_get_unmapped_address, but the mapping was discarded for
     * some reason.  Or the user typed in a virtual address range manually.
     * We *don't* want to send the address through Tor; that's likely to
     * fail, and may leak information. */
    log_warn(LD_APP, "Missing mapping for virtual address '%s'. Refusing.",
             safe_str_client(socks->address));
    out->end_reason = END_STREAM_REASON_INTERNAL;
    out->should_close = 1;
    return;
  }
}
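/* Worked example for the RESOLVE_PTR path handled above: a reverse lookup
 * of 10.0.0.1 arrives as the PTR name "1.0.0.10.in-addr.arpa";
 * tor_addr_parse_PTR_name() turns that back into 10.0.0.1, and since that
 * address is internal, ClientDNSRejectInternalAddresses makes us answer
 * RESOLVED_TYPE_ERROR locally instead of sending the query anywhere. */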
/** Connection <b>conn</b> just finished its socks handshake, or the
 * controller asked us to take care of it. If <b>circ</b> is defined,
 * then that's where we'll want to attach it. Otherwise we have to
 * figure it out ourselves.
 *
 * First, parse whether it's a .exit address, remap it, and so on. Then
 * if it's for a general circuit, try to attach it to a circuit (or launch
 * one as needed), else if it's for a rendezvous circuit, fetch a
 * rendezvous descriptor first (or attach/launch a circuit if the
 * rendezvous descriptor is already here and fresh enough).
 *
 * The stream will exit from the hop
 * indicated by <b>cpath</b>, or from the last hop in circ's cpath if
 * <b>cpath</b> is NULL.
 */
int
connection_ap_handshake_rewrite_and_attach(entry_connection_t *conn,
                                           origin_circuit_t *circ,
                                           crypt_path_t *cpath)
{
  socks_request_t *socks = conn->socks_request;
  const or_options_t *options = get_options();
  connection_t *base_conn = ENTRY_TO_CONN(conn);
  time_t now = time(NULL);
  rewrite_result_t rr;

  /* First we'll do the rewrite part.  Let's see if we get a reasonable
   * answer. */
  memset(&rr, 0, sizeof(rr));
  connection_ap_handshake_rewrite(conn, &rr);

  if (rr.should_close) {
    /* connection_ap_handshake_rewrite told us to close the connection:
     * either because it sent back an answer, or because it sent back an
     * error */
    connection_mark_unattached_ap(conn, rr.end_reason);
    if (END_STREAM_REASON_DONE == (rr.end_reason & END_STREAM_REASON_MASK))
      return 0;
    else
      return -1;
  }

  const time_t map_expires = rr.map_expires;
  const int automap = rr.automap;
  const addressmap_entry_source_t exit_source = rr.exit_source;

  /* Now, we parse the address to see if it's an .onion or .exit or
   * other special address. */
  const hostname_type_t addresstype = parse_extended_hostname(socks->address);

  /* Now see whether the hostname is bogus.  This could happen because of an
   * onion hostname whose format we don't recognize. */
  if (addresstype == BAD_HOSTNAME) {
    control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
                                escaped(socks->address));
    connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
    return -1;
  }

  /* If this is a .exit hostname, strip off the .name.exit part, and
   * see whether we're willing to connect there, and otherwise handle the
   * .exit address.
   *
   * We'll set chosen_exit_name and/or close the connection as appropriate.
   */
  if (addresstype == EXIT_HOSTNAME) {
    /* If StrictNodes is not set, then .exit overrides ExcludeNodes but
     * not ExcludeExitNodes. */
    routerset_t *excludeset = options->StrictNodes ?
      options->ExcludeExitNodesUnion_ : options->ExcludeExitNodes;
    const node_t *node = NULL;

    /* If this .exit was added by an AUTOMAP, then it came straight from
     * a user.  Make sure that options->AllowDotExit permits that! */
    if (exit_source == ADDRMAPSRC_AUTOMAP && !options->AllowDotExit) {
      /* Whoops; this one is stale.  It must have gotten added earlier,
       * when AllowDotExit was on. */
      log_warn(LD_APP, "Stale automapped address for '%s.exit', with "
               "AllowDotExit disabled. Refusing.",
               safe_str_client(socks->address));
      control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
                                  escaped(socks->address));
      connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
      return -1;
    }

    /* Double-check to make sure there are no .exits coming from
     * impossible/weird sources. */
    if (exit_source == ADDRMAPSRC_DNS ||
        (exit_source == ADDRMAPSRC_NONE && !options->AllowDotExit)) {
      /* It shouldn't be possible to get a .exit address from any of these
       * sources. */
      log_warn(LD_BUG, "Address '%s.exit', with impossible source for the "
               ".exit part. Refusing.",
Refusing.", safe_str_client(socks->address)); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } tor_assert(!automap); /* Now, find the character before the .(name) part. * (The ".exit" part got stripped off by "parse_extended_hostname"). * * We're going to put the exit name into conn->chosen_exit_name, and * look up a node correspondingly. */ char *s = strrchr(socks->address,'.'); if (s) { /* The address was of the form "(stuff).(name).exit */ if (s[1] != '\0') { /* Looks like a real .exit one. */ conn->chosen_exit_name = tor_strdup(s+1); node = node_get_by_nickname(conn->chosen_exit_name, 1); if (exit_source == ADDRMAPSRC_TRACKEXIT) { /* We 5 tries before it expires the addressmap */ conn->chosen_exit_retries = TRACKHOSTEXITS_RETRIES; } *s = 0; } else { /* Oops, the address was (stuff)..exit. That's not okay. */ log_warn(LD_APP,"Malformed exit address '%s.exit'. Refusing.", safe_str_client(socks->address)); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } } else { /* It looks like they just asked for "foo.exit". That's a special * form that means (foo's address).foo.exit. */ conn->chosen_exit_name = tor_strdup(socks->address); node = node_get_by_nickname(conn->chosen_exit_name, 1); if (node) { *socks->address = 0; node_get_address_string(node, socks->address, sizeof(socks->address)); } } /* Now make sure that the chosen exit exists... */ if (!node) { log_warn(LD_APP, "Unrecognized relay in exit address '%s.exit'. Refusing.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* ...and make sure that it isn't excluded. */ if (routerset_contains_node(excludeset, node)) { log_warn(LD_APP, "Excluded relay in exit address '%s.exit'. Refusing.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* XXXX-1090 Should we also allow foo.bar.exit if ExitNodes is set and Bar is not listed in it? I say yes, but our revised manpage branch implies no. */ } /* Now, we handle everything that isn't a .onion address. */ if (addresstype != ONION_HOSTNAME) { /* Not a hidden-service request. It's either a hostname or an IP, * possibly with a .exit that we stripped off. We're going to check * if we're allowed to connect/resolve there, and then launch the * appropriate request. */ /* Check for funny characters in the address. */ if (address_is_invalid_destination(socks->address, 1)) { control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); log_warn(LD_APP, "Destination '%s' seems to be an invalid hostname. Failing.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } #ifdef ENABLE_TOR2WEB_MODE /* If we're running in Tor2webMode, we don't allow anything BUT .onion * addresses. */ if (options->Tor2webMode) { log_warn(LD_APP, "Refusing to connect to non-hidden-service hostname " "or IP address %s because tor2web mode is enabled.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } #endif /* socks->address is a non-onion hostname or IP address. * If we can't do any non-onion requests, refuse the connection. 
  /* Now, we handle everything that isn't a .onion address. */
  if (addresstype != ONION_HOSTNAME) {
    /* Not a hidden-service request.  It's either a hostname or an IP,
     * possibly with a .exit that we stripped off.  We're going to check
     * if we're allowed to connect/resolve there, and then launch the
     * appropriate request. */

    /* Check for funny characters in the address. */
    if (address_is_invalid_destination(socks->address, 1)) {
      control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s",
                                  escaped(socks->address));
      log_warn(LD_APP,
               "Destination '%s' seems to be an invalid hostname. Failing.",
               safe_str_client(socks->address));
      connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
      return -1;
    }

#ifdef ENABLE_TOR2WEB_MODE
    /* If we're running in Tor2webMode, we don't allow anything BUT .onion
     * addresses. */
    if (options->Tor2webMode) {
      log_warn(LD_APP, "Refusing to connect to non-hidden-service hostname "
               "or IP address %s because tor2web mode is enabled.",
               safe_str_client(socks->address));
      connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
      return -1;
    }
#endif

    /* socks->address is a non-onion hostname or IP address.
     * If we can't do any non-onion requests, refuse the connection.
     * If we have a hostname but can't do DNS, refuse the connection.
     * If we have an IP address, but we can't use that address family,
     * refuse the connection.
     *
     * If we can do DNS requests, and we can use at least one address
     * family, then we have to resolve the address first. Then we'll know
     * if it resolves to a usable address family. */

    /* First, check if all non-onion traffic is disabled */
    if (!conn->entry_cfg.dns_request && !conn->entry_cfg.ipv4_traffic
        && !conn->entry_cfg.ipv6_traffic) {
      log_warn(LD_APP, "Refusing to connect to non-hidden-service hostname "
               "or IP address %s because Port has OnionTrafficOnly set (or "
               "NoDNSRequest, NoIPv4Traffic, and NoIPv6Traffic).",
               safe_str_client(socks->address));
      connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
      return -1;
    }

    /* Then check if we have a hostname or IP address, and whether DNS or
     * the IP address family are permitted.  Reject if not. */
    tor_addr_t dummy_addr;
    int socks_family = tor_addr_parse(&dummy_addr, socks->address);
    /* family will be -1 for a non-onion hostname that's not an IP */
    if (socks_family == -1) {
      if (!conn->entry_cfg.dns_request) {
        log_warn(LD_APP, "Refusing to connect to hostname %s "
                 "because Port has NoDNSRequest set.",
                 safe_str_client(socks->address));
        connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
        return -1;
      }
    } else if (socks_family == AF_INET) {
      if (!conn->entry_cfg.ipv4_traffic) {
        log_warn(LD_APP, "Refusing to connect to IPv4 address %s because "
                 "Port has NoIPv4Traffic set.",
                 safe_str_client(socks->address));
        connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
        return -1;
      }
    } else if (socks_family == AF_INET6) {
      if (!conn->entry_cfg.ipv6_traffic) {
        log_warn(LD_APP, "Refusing to connect to IPv6 address %s because "
                 "Port has NoIPv6Traffic set.",
                 safe_str_client(socks->address));
        connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
        return -1;
      }
    } else {
      tor_assert_nonfatal_unreached_once();
    }

    /* See if this is a hostname lookup that we can answer immediately.
     * (For example, an attempt to look up the IP address for an IP
     * address.) */
    if (socks->command == SOCKS_COMMAND_RESOLVE) {
      tor_addr_t answer;
      /* Reply to resolves immediately if we can. */
      if (tor_addr_parse(&answer, socks->address) >= 0) { /* is it an IP? */
        /* remember _what_ is supposed to have been resolved. */
        strlcpy(socks->address, rr.orig_address, sizeof(socks->address));
        connection_ap_handshake_socks_resolved_addr(conn, &answer, -1,
                                                    map_expires);
        connection_mark_unattached_ap(conn,
                                END_STREAM_REASON_DONE |
                                END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
        return 0;
      }
      tor_assert(!automap);
      rep_hist_note_used_resolve(now); /* help predict this next time */
    } else if (socks->command == SOCKS_COMMAND_CONNECT) {
      /* Now see if this is a connect request that we can reject
       * immediately */

      tor_assert(!automap);
      /* Don't allow connections to port 0. */
      if (socks->port == 0) {
        log_notice(LD_APP, "Application asked to connect to port 0. "
                   "Refusing.");
        connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
        return -1;
      }

      /* You can't make connections to internal addresses, by default.
       * Exceptions are begindir requests (where the address is
       * meaningless), or cases where you've hand-configured a particular
       * exit, thereby making the local address meaningful. */
      if (options->ClientRejectInternalAddresses &&
          !conn->use_begindir && !conn->chosen_exit_name && !circ) {
        /* If we reach this point then we don't want to allow internal
         * addresses.  Check if we got one. */
        tor_addr_t addr;
        if (tor_addr_hostname_is_local(socks->address) ||
            (tor_addr_parse(&addr, socks->address) >= 0 &&
             tor_addr_is_internal(&addr, 0))) {
          /* If this is an explicit private address with no chosen exit
           * node, then we really don't want to try to connect to it.
           * That's probably an error. */
          if (conn->is_transparent_ap) {
#define WARN_INTRVL_LOOP 300
            static ratelim_t loop_warn_limit = RATELIM_INIT(WARN_INTRVL_LOOP);
            char *m;
            if ((m = rate_limit_log(&loop_warn_limit, approx_time()))) {
              log_warn(LD_NET,
                       "Rejecting request for anonymous connection to "
                       "private address %s on a TransPort or NATDPort. "
                       "Possible loop in your NAT rules?%s",
                       safe_str_client(socks->address), m);
              tor_free(m);
            }
          } else {
#define WARN_INTRVL_PRIV 300
            static ratelim_t priv_warn_limit = RATELIM_INIT(WARN_INTRVL_PRIV);
            char *m;
            if ((m = rate_limit_log(&priv_warn_limit, approx_time()))) {
              log_warn(LD_NET,
                       "Rejecting SOCKS request for anonymous connection to "
                       "private address %s.%s",
                       safe_str_client(socks->address), m);
              tor_free(m);
            }
          }
          connection_mark_unattached_ap(conn, END_STREAM_REASON_PRIVATE_ADDR);
          return -1;
        }
      } /* end "if we should check for internal addresses" */

      /* Okay.  We're still doing a CONNECT, and it wasn't a private
       * address.  Here we do special handling for literal IP addresses,
       * to see if we should reject this preemptively, and to set up
       * fields in conn->entry_cfg to tell the exit what AF we want. */
      {
        tor_addr_t addr;
        /* XXX Duplicate call to tor_addr_parse. */
        if (tor_addr_parse(&addr, socks->address) >= 0) {
          /* If we reach this point, it's an IPv4 or an IPv6 address. */
          sa_family_t family = tor_addr_family(&addr);

          if ((family == AF_INET && ! conn->entry_cfg.ipv4_traffic) ||
              (family == AF_INET6 && ! conn->entry_cfg.ipv6_traffic)) {
            /* You can't do an IPv4 address on a v6-only socks listener,
             * or vice versa. */
            log_warn(LD_NET, "Rejecting SOCKS request for an IP address "
                     "family that this listener does not support.");
            connection_mark_unattached_ap(conn,
                                          END_STREAM_REASON_ENTRYPOLICY);
            return -1;
          } else if (family == AF_INET6 && socks->socks_version == 4) {
            /* You can't make a socks4 request to an IPv6 address. Socks4
             * doesn't support that. */
            log_warn(LD_NET, "Rejecting SOCKS4 request for an IPv6 "
                     "address.");
            connection_mark_unattached_ap(conn,
                                          END_STREAM_REASON_ENTRYPOLICY);
            return -1;
          } else if (socks->socks_version == 4 &&
                     !conn->entry_cfg.ipv4_traffic) {
            /* You can't do any kind of Socks4 request when IPv4 is
             * forbidden.
             *
             * XXX raise this check outside the enclosing block? */
            log_warn(LD_NET, "Rejecting SOCKS4 request on a listener with "
                     "no IPv4 traffic supported.");
            connection_mark_unattached_ap(conn,
                                          END_STREAM_REASON_ENTRYPOLICY);
            return -1;
          } else if (family == AF_INET6) {
            /* Tell the exit: we won't accept any ipv4 connection to an
             * IPv6 address. */
            conn->entry_cfg.ipv4_traffic = 0;
          } else if (family == AF_INET) {
            /* Tell the exit: we won't accept any ipv6 connection to an
             * IPv4 address. */
            conn->entry_cfg.ipv6_traffic = 0;
          }
        }
      }

      /* we never allow IPv6 answers on socks4. (TODO: Is this smart?) */
      if (socks->socks_version == 4)
        conn->entry_cfg.ipv6_traffic = 0;
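      /* Quick reference for the literal-IP gating above (derived from the
       * checks in this function, not from an external spec):
       *
       *   request        | listener/request config | result
       *   ---------------+-------------------------+----------------------
       *   IPv4 literal   | NoIPv4Traffic           | reject (ENTRYPOLICY)
       *   IPv6 literal   | NoIPv6Traffic           | reject (ENTRYPOLICY)
       *   IPv6 literal   | SOCKS4 request          | reject (no v6 in v4)
       *   IPv4 literal   | allowed                 | clear ipv6_traffic
       *   IPv6 literal   | allowed                 | clear ipv4_traffic
       */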
      /* Still handling CONNECT.  Now, check for exit enclaves.  (Which we
       * don't do on BEGINDIR, or when there is a chosen exit.)
       *
       * TODO: Should we remove this?  Exit enclaves are nutty and don't
       * work very well. */
      if (!conn->use_begindir && !conn->chosen_exit_name && !circ) {
        /* see if we can find a suitable enclave exit */
        const node_t *r =
          router_find_exact_exit_enclave(socks->address, socks->port);
        if (r) {
          log_info(LD_APP,
                   "Redirecting address %s to exit at enclave router %s",
                   safe_str_client(socks->address), node_describe(r));
          /* use the hex digest, not nickname, in case there are two
             routers with this nickname */
          conn->chosen_exit_name =
            tor_strdup(hex_str(r->identity, DIGEST_LEN));
          conn->chosen_exit_optional = 1;
        }
      }

      /* Still handling CONNECT: warn or reject if it's using a dangerous
       * port. */
      if (!conn->use_begindir && !conn->chosen_exit_name && !circ)
        if (consider_plaintext_ports(conn, socks->port) < 0)
          return -1;

      /* Remember the port so that we will predict that more requests
         there will happen in the future. */
      if (!conn->use_begindir) {
        /* help predict this next time */
        rep_hist_note_used_port(now, socks->port);
      }
    } else if (socks->command == SOCKS_COMMAND_RESOLVE_PTR) {
      rep_hist_note_used_resolve(now); /* help predict this next time */
      /* no extra processing needed */
    } else {
      /* We should only be doing CONNECT, RESOLVE, or RESOLVE_PTR! */
      tor_fragile_assert();
    }

    /* Okay.  At this point we've set chosen_exit_name if needed, rewritten
     * the address, and decided not to reject it for any number of reasons.
     * Now mark the connection as waiting for a circuit, and try to attach
     * it! */
    base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT;

    /* If we were given a circuit to attach to, try to attach.  Otherwise,
     * try to find a good one and attach to that. */
    int rv;
    if (circ) {
      rv = connection_ap_handshake_attach_chosen_circuit(conn, circ, cpath);
    } else {
      /* We'll try to attach it at the next event loop, or whenever
       * we call connection_ap_attach_pending() */
      connection_ap_mark_as_pending_circuit(conn);
      rv = 0;
    }

    /* If the above function returned 0 then we're waiting for a circuit.
     * If it returned 1, we're attached.  Both are okay.  But if it
     * returned -1, there was an error, so make sure the connection is
     * marked, and return -1. */
    if (rv < 0) {
      if (!base_conn->marked_for_close)
        connection_mark_unattached_ap(conn, END_STREAM_REASON_CANT_ATTACH);
      return -1;
    }

    return 0;
  } else {
    /* If we get here, it's a request for a .onion address! */
    tor_assert(!automap);

    /* If .onion address requests are disabled, refuse the request */
    if (!conn->entry_cfg.onion_traffic) {
      log_warn(LD_APP, "Onion address %s requested from a port with .onion "
               "disabled", safe_str_client(socks->address));
      connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY);
      return -1;
    }

    /* Check whether it's RESOLVE or RESOLVE_PTR.  We don't handle those
     * for hidden service addresses. */
    if (SOCKS_COMMAND_IS_RESOLVE(socks->command)) {
      /* if it's a resolve request, fail it right now, rather than
       * building all the circuits and then realizing it won't work. */
      log_warn(LD_APP,
               "Resolve requests to hidden services not allowed. Failing.");
      connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_ERROR,
                                             0, NULL, -1, TIME_MAX);
      connection_mark_unattached_ap(conn,
                              END_STREAM_REASON_SOCKSPROTOCOL |
                              END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
      return -1;
    }

    /* If we were passed a circuit, then we need to fail.  .onion addresses
     * only work when we launch our own circuits for now. */
    if (circ) {
      log_warn(LD_CONTROL, "Attachstream to a circuit is not "
               "supported for .onion addresses currently. Failing.");
Failing."); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* Look up if we have client authorization configured for this hidden * service. If we do, associate it with the rend_data. */ rend_service_authorization_t *client_auth = rend_client_lookup_service_authorization(socks->address); const uint8_t *cookie = NULL; rend_auth_type_t auth_type = REND_NO_AUTH; if (client_auth) { log_info(LD_REND, "Using previously configured client authorization " "for hidden service request."); auth_type = client_auth->auth_type; cookie = client_auth->descriptor_cookie; } /* Fill in the rend_data field so we can start doing a connection to * a hidden service. */ rend_data_t *rend_data = ENTRY_TO_EDGE_CONN(conn)->rend_data = rend_data_client_create(socks->address, NULL, (char *) cookie, auth_type); if (rend_data == NULL) { return -1; } const char *onion_address = rend_data_get_address(rend_data); log_info(LD_REND,"Got a hidden service request for ID '%s'", safe_str_client(onion_address)); /* Lookup the given onion address. If invalid, stop right now. * Otherwise, we might have it in the cache or not. */ unsigned int refetch_desc = 0; rend_cache_entry_t *entry = NULL; const int rend_cache_lookup_result = rend_cache_lookup_entry(onion_address, -1, &entry); if (rend_cache_lookup_result < 0) { switch (-rend_cache_lookup_result) { case EINVAL: /* We should already have rejected this address! */ log_warn(LD_BUG,"Invalid service name '%s'", safe_str_client(onion_address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; case ENOENT: /* We didn't have this; we should look it up. */ refetch_desc = 1; break; default: log_warn(LD_BUG, "Unknown cache lookup error %d", rend_cache_lookup_result); return -1; } } /* Help predict that we'll want to do hidden service circuits in the * future. We're not sure if it will need a stable circuit yet, but * we know we'll need *something*. */ rep_hist_note_used_internal(now, 0, 1); /* Now we have a descriptor but is it usable or not? If not, refetch. * Also, a fetch could have been requested if the onion address was not * found in the cache previously. */ if (refetch_desc || !rend_client_any_intro_points_usable(entry)) { connection_ap_mark_as_non_pending_circuit(conn); base_conn->state = AP_CONN_STATE_RENDDESC_WAIT; log_info(LD_REND, "Unknown descriptor %s. Fetching.", safe_str_client(onion_address)); rend_client_refetch_v2_renddesc(rend_data); return 0; } /* We have the descriptor! So launch a connection to the HS. */ base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT; log_info(LD_REND, "Descriptor is here. Great."); /* We'll try to attach it at the next event loop, or whenever * we call connection_ap_attach_pending() */ connection_ap_mark_as_pending_circuit(conn); return 0; } return 0; /* unreached but keeps the compiler happy */ } #ifdef TRANS_PF static int pf_socket = -1; int get_pf_socket(void) { int pf; /* This should be opened before dropping privileges. */ if (pf_socket >= 0) return pf_socket; #if defined(OpenBSD) /* only works on OpenBSD */ pf = tor_open_cloexec("/dev/pf", O_RDONLY, 0); #else /* works on NetBSD and FreeBSD */ pf = tor_open_cloexec("/dev/pf", O_RDWR, 0); #endif if (pf < 0) { log_warn(LD_NET, "open(\"/dev/pf\") failed: %s", strerror(errno)); return -1; } pf_socket = pf; return pf_socket; } #endif #if defined(TRANS_NETFILTER) || defined(TRANS_PF) || defined(TRANS_TPROXY) /** Try fill in the address of <b>req</b> from the socket configured * with <b>conn</b>. 
#if defined(TRANS_NETFILTER) || defined(TRANS_PF) || defined(TRANS_TPROXY)
/** Try to fill in the address of <b>req</b> from the socket configured
 * with <b>conn</b>. */
static int
destination_from_socket(entry_connection_t *conn, socks_request_t *req)
{
  struct sockaddr_storage orig_dst;
  socklen_t orig_dst_len = sizeof(orig_dst);
  tor_addr_t addr;

#ifdef TRANS_TPROXY
  if (get_options()->TransProxyType_parsed == TPT_TPROXY) {
    if (getsockname(ENTRY_TO_CONN(conn)->s, (struct sockaddr*)&orig_dst,
                    &orig_dst_len) < 0) {
      int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s);
      log_warn(LD_NET, "getsockname() failed: %s", tor_socket_strerror(e));
      return -1;
    }
    goto done;
  }
#endif

#ifdef TRANS_NETFILTER
  int rv = -1;
  switch (ENTRY_TO_CONN(conn)->socket_family) {
#ifdef TRANS_NETFILTER_IPV4
    case AF_INET:
      rv = getsockopt(ENTRY_TO_CONN(conn)->s, SOL_IP, SO_ORIGINAL_DST,
                      (struct sockaddr*)&orig_dst, &orig_dst_len);
      break;
#endif
#ifdef TRANS_NETFILTER_IPV6
    case AF_INET6:
      rv = getsockopt(ENTRY_TO_CONN(conn)->s, SOL_IPV6,
                      IP6T_SO_ORIGINAL_DST,
                      (struct sockaddr*)&orig_dst, &orig_dst_len);
      break;
#endif
    default:
      log_warn(LD_BUG, "Received transparent data from an unsupported "
               "socket family %d",
               ENTRY_TO_CONN(conn)->socket_family);
      return -1;
  }
  if (rv < 0) {
    int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s);
    log_warn(LD_NET, "getsockopt() failed: %s", tor_socket_strerror(e));
    return -1;
  }
  goto done;
#elif defined(TRANS_PF)
  if (getsockname(ENTRY_TO_CONN(conn)->s, (struct sockaddr*)&orig_dst,
                  &orig_dst_len) < 0) {
    int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s);
    log_warn(LD_NET, "getsockname() failed: %s", tor_socket_strerror(e));
    return -1;
  }
  goto done;
#else
  (void)conn;
  (void)req;
  log_warn(LD_BUG, "Unable to determine destination from socket.");
  return -1;
#endif

 done:
  tor_addr_from_sockaddr(&addr, (struct sockaddr*)&orig_dst, &req->port);
  tor_addr_to_str(req->address, &addr, sizeof(req->address), 1);
  return 0;
}
#endif

#ifdef TRANS_PF
static int
destination_from_pf(entry_connection_t *conn, socks_request_t *req)
{
  struct sockaddr_storage proxy_addr;
  socklen_t proxy_addr_len = sizeof(proxy_addr);
  struct sockaddr *proxy_sa = (struct sockaddr*) &proxy_addr;
  struct pfioc_natlook pnl;
  tor_addr_t addr;
  int pf = -1;

  if (getsockname(ENTRY_TO_CONN(conn)->s, (struct sockaddr*)&proxy_addr,
                  &proxy_addr_len) < 0) {
    int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s);
    log_warn(LD_NET, "getsockname() to determine transocks destination "
             "failed: %s", tor_socket_strerror(e));
    return -1;
  }

#ifdef __FreeBSD__
  if (get_options()->TransProxyType_parsed == TPT_IPFW) {
    /* ipfw(8) is used and in this case getsockname returned the original
       destination */
    if (tor_addr_from_sockaddr(&addr, proxy_sa, &req->port) < 0) {
      tor_fragile_assert();
      return -1;
    }
    tor_addr_to_str(req->address, &addr, sizeof(req->address), 0);
    return 0;
  }
#endif

  memset(&pnl, 0, sizeof(pnl));
  pnl.proto = IPPROTO_TCP;
  pnl.direction = PF_OUT;
  if (proxy_sa->sa_family == AF_INET) {
    struct sockaddr_in *sin = (struct sockaddr_in *)proxy_sa;
    pnl.af = AF_INET;
    pnl.saddr.v4.s_addr = tor_addr_to_ipv4n(&ENTRY_TO_CONN(conn)->addr);
    pnl.sport = htons(ENTRY_TO_CONN(conn)->port);
    pnl.daddr.v4.s_addr = sin->sin_addr.s_addr;
    pnl.dport = sin->sin_port;
  } else if (proxy_sa->sa_family == AF_INET6) {
    struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)proxy_sa;
    pnl.af = AF_INET6;
    memcpy(&pnl.saddr.v6, tor_addr_to_in6(&ENTRY_TO_CONN(conn)->addr),
           sizeof(struct in6_addr));
    pnl.sport = htons(ENTRY_TO_CONN(conn)->port);
    memcpy(&pnl.daddr.v6, &sin6->sin6_addr, sizeof(struct in6_addr));
    pnl.dport = sin6->sin6_port;
  } else {
    log_warn(LD_NET, "getsockname() gave an unexpected address family (%d)",
             (int)proxy_sa->sa_family);
    return -1;
  }

  pf = get_pf_socket();
  if (pf < 0)
    return -1;
  if (ioctl(pf, DIOCNATLOOK, &pnl) < 0) {
    log_warn(LD_NET, "ioctl(DIOCNATLOOK) failed: %s", strerror(errno));
    return -1;
  }

  if (pnl.af == AF_INET) {
    tor_addr_from_ipv4n(&addr, pnl.rdaddr.v4.s_addr);
  } else if (pnl.af == AF_INET6) {
    tor_addr_from_in6(&addr, &pnl.rdaddr.v6);
  } else {
    tor_fragile_assert();
    return -1;
  }

  tor_addr_to_str(req->address, &addr, sizeof(req->address), 1);
  req->port = ntohs(pnl.rdport);

  return 0;
}
#endif

/** Fetch the original destination address and port from a
 * system-specific interface and put them into a
 * socks_request_t as if they came from a socks request.
 *
 * Return -1 if an error prevents fetching the destination,
 * else return 0.
 */
static int
connection_ap_get_original_destination(entry_connection_t *conn,
                                       socks_request_t *req)
{
#ifdef TRANS_NETFILTER
  return destination_from_socket(conn, req);
#elif defined(TRANS_PF)
  const or_options_t *options = get_options();

  if (options->TransProxyType_parsed == TPT_PF_DIVERT)
    return destination_from_socket(conn, req);

  if (options->TransProxyType_parsed == TPT_DEFAULT ||
      options->TransProxyType_parsed == TPT_IPFW)
    return destination_from_pf(conn, req);

  (void)conn;
  (void)req;
  log_warn(LD_BUG, "Proxy destination determination mechanism %s unknown.",
           options->TransProxyType);
  return -1;
#else
  (void)conn;
  (void)req;
  log_warn(LD_BUG, "Called connection_ap_get_original_destination, but no "
           "transparent proxy method was configured.");
  return -1;
#endif
}

/** connection_edge_process_inbuf() found a conn in state
 * socks_wait. See if conn->inbuf has the right bytes to proceed with
 * the socks handshake.
 *
 * If the handshake is complete, send it to
 * connection_ap_handshake_rewrite_and_attach().
 *
 * Return -1 if an unexpected error with conn occurs (and mark it for
 * close), else return 0.
 */
static int
connection_ap_handshake_process_socks(entry_connection_t *conn)
{
  socks_request_t *socks;
  int sockshere;
  const or_options_t *options = get_options();
  int had_reply = 0;
  connection_t *base_conn = ENTRY_TO_CONN(conn);

  tor_assert(conn);
  tor_assert(base_conn->type == CONN_TYPE_AP);
  tor_assert(base_conn->state == AP_CONN_STATE_SOCKS_WAIT);
  tor_assert(conn->socks_request);
  socks = conn->socks_request;

  log_debug(LD_APP, "entered.");

  sockshere = fetch_from_buf_socks(base_conn->inbuf, socks,
                                   options->TestSocks, options->SafeSocks);

  if (socks->replylen) {
    had_reply = 1;
    connection_write_to_buf((const char*)socks->reply, socks->replylen,
                            base_conn);
    socks->replylen = 0;
    if (sockshere == -1) {
      /* An invalid request just got a reply, no additional
       * one is necessary. */
      socks->has_finished = 1;
    }
  }

  if (sockshere == 0) {
    log_debug(LD_APP, "socks handshake not all here yet.");
    return 0;
  } else if (sockshere == -1) {
    if (!had_reply) {
      log_warn(LD_APP, "Fetching socks handshake failed. Closing.");
      connection_ap_handshake_socks_reply(conn, NULL, 0,
                                          END_STREAM_REASON_SOCKSPROTOCOL);
    }
    connection_mark_unattached_ap(conn,
                              END_STREAM_REASON_SOCKSPROTOCOL |
                              END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
    return -1;
  } /* else socks handshake is done, continue processing */

  if (SOCKS_COMMAND_IS_CONNECT(socks->command))
    control_event_stream_status(conn, STREAM_EVENT_NEW, 0);
  else
    control_event_stream_status(conn, STREAM_EVENT_NEW_RESOLVE, 0);

  return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL);
}
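/* Byte-level sketch of what fetch_from_buf_socks() consumes in
 * connection_ap_handshake_process_socks() above -- a SOCKS5 CONNECT to
 * example.com:443, shown only to illustrate the wire format (RFC 1928):
 *
 *   05 01 00                  (version 5, one auth method: none)
 *   05 01 00 03 0b            (CONNECT, reserved, ATYP=domain, length 11)
 *   65 78 61 6d 70 6c 65 2e 63 6f 6d   ("example.com")
 *   01 bb                     (port 443, network order)
 *
 * The parser builds replies in the same format; socks->replylen above is
 * how those replies get flushed back to the application. */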
/** connection_init_accepted_conn() found a new trans AP conn.
 * Get the original destination and send it to
 * connection_ap_handshake_rewrite_and_attach().
 *
 * Return -1 if an unexpected error with conn (and it should be marked
 * for close), else return 0.
 */
int
connection_ap_process_transparent(entry_connection_t *conn)
{
  socks_request_t *socks;

  tor_assert(conn);
  tor_assert(conn->socks_request);
  socks = conn->socks_request;

  /* pretend that a socks handshake completed so we don't try to
   * send a socks reply down a transparent conn */
  socks->command = SOCKS_COMMAND_CONNECT;
  socks->has_finished = 1;

  log_debug(LD_APP, "entered.");

  if (connection_ap_get_original_destination(conn, socks) < 0) {
    log_warn(LD_APP, "Fetching original destination failed. Closing.");
    connection_mark_unattached_ap(conn,
                                  END_STREAM_REASON_CANT_FETCH_ORIG_DEST);
    return -1;
  }
  /* we have the original destination */

  control_event_stream_status(conn, STREAM_EVENT_NEW, 0);

  return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL);
}

/** connection_edge_process_inbuf() found a conn in state natd_wait. See if
 * conn-\>inbuf has the right bytes to proceed. See FreeBSD's libalias(3)
 * and ProxyEncodeTcpStream() in src/lib/libalias/alias_proxy.c for the
 * encoding form of the original destination.
 *
 * If the original destination is complete, send it to
 * connection_ap_handshake_rewrite_and_attach().
 *
 * Return -1 if an unexpected error with conn (and it should be marked
 * for close), else return 0.
 */
static int
connection_ap_process_natd(entry_connection_t *conn)
{
  char tmp_buf[36], *tbuf, *daddr;
  size_t tlen = 30;
  int err, port_ok;
  socks_request_t *socks;

  tor_assert(conn);
  tor_assert(ENTRY_TO_CONN(conn)->state == AP_CONN_STATE_NATD_WAIT);
  tor_assert(conn->socks_request);
  socks = conn->socks_request;

  log_debug(LD_APP, "entered.");

  /* look for LF-terminated "[DEST ip_addr port]"
   * where ip_addr is a dotted-quad and port is in string form */
  err = connection_fetch_from_buf_line(ENTRY_TO_CONN(conn), tmp_buf, &tlen);
  if (err == 0)
    return 0;
  if (err < 0) {
    log_warn(LD_APP, "NATD handshake failed (DEST too long). Closing");
    connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST);
    return -1;
  }

  if (strcmpstart(tmp_buf, "[DEST ")) {
    log_warn(LD_APP, "NATD handshake was ill-formed; closing. The client "
             "said: %s", escaped(tmp_buf));
    connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST);
    return -1;
  }

  daddr = tbuf = &tmp_buf[0] + 6; /* after end of "[DEST " */
  if (!(tbuf = strchr(tbuf, ' '))) {
    log_warn(LD_APP, "NATD handshake was ill-formed; closing. The client "
             "said: %s", escaped(tmp_buf));
    connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST);
    return -1;
  }
  *tbuf++ = '\0';

  /* pretend that a socks handshake completed so we don't try to
   * send a socks reply down a natd conn */
  strlcpy(socks->address, daddr, sizeof(socks->address));
  socks->port = (uint16_t)
    tor_parse_long(tbuf, 10, 1, 65535, &port_ok, &daddr);
  if (!port_ok) {
    log_warn(LD_APP, "NATD handshake failed; port %s is ill-formed or out "
             "of range.", escaped(tbuf));
    connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST);
    return -1;
  }

  socks->command = SOCKS_COMMAND_CONNECT;
  socks->has_finished = 1;

  control_event_stream_status(conn, STREAM_EVENT_NEW, 0);

  ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CIRCUIT_WAIT;

  return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL);
}
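/* Worked example for the NATD parser above: FreeBSD's natd(8) / libalias
 * prefixes a diverted stream with one LF-terminated line such as
 *
 *   [DEST 192.0.2.10 80]
 *
 * which the code splits into socks->address "192.0.2.10" and socks->port
 * 80 before handing the stream to the usual rewrite-and-attach path. */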
/** Iterate over the two bytes of stream_id until we get one that is not
 * already in use; return it. Return 0 if can't get a unique stream_id.
 */
streamid_t
get_unique_stream_id_by_circ(origin_circuit_t *circ)
{
  edge_connection_t *tmpconn;
  streamid_t test_stream_id;
  uint32_t attempts = 0;

 again:
  test_stream_id = circ->next_stream_id++;
  if (++attempts > 1<<16) {
    /* Make sure we don't loop forever if all stream_id's are used. */
    log_warn(LD_APP, "No unused stream IDs. Failing.");
    return 0;
  }
  if (test_stream_id == 0)
    goto again;
  for (tmpconn = circ->p_streams; tmpconn; tmpconn = tmpconn->next_stream)
    if (tmpconn->stream_id == test_stream_id)
      goto again;
  return test_stream_id;
}

/** Return true iff <b>conn</b> is linked to a circuit and configured to use
 * an exit that supports optimistic data. */
static int
connection_ap_supports_optimistic_data(const entry_connection_t *conn)
{
  const edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);

  /* We can only send optimistic data if we're connected to an open
     general circuit. */
  if (edge_conn->on_circuit == NULL ||
      edge_conn->on_circuit->state != CIRCUIT_STATE_OPEN ||
      (edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_GENERAL &&
       edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_REND_JOINED))
    return 0;

  return conn->may_use_optimistic_data;
}

/** Return a bitmask of BEGIN_FLAG_* flags that we should transmit in the
 * RELAY_BEGIN cell for <b>ap_conn</b>. */
static uint32_t
connection_ap_get_begincell_flags(entry_connection_t *ap_conn)
{
  edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn);
  const node_t *exitnode = NULL;
  const crypt_path_t *cpath_layer = edge_conn->cpath_layer;
  uint32_t flags = 0;

  /* No flags for begindir */
  if (ap_conn->use_begindir)
    return 0;

  /* No flags for hidden services. */
  if (edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_GENERAL)
    return 0;

  /* If only IPv4 is supported, no flags */
  if (ap_conn->entry_cfg.ipv4_traffic && !ap_conn->entry_cfg.ipv6_traffic)
    return 0;

  if (! cpath_layer ||
      ! cpath_layer->extend_info)
    return 0;

  if (!ap_conn->entry_cfg.ipv4_traffic)
    flags |= BEGIN_FLAG_IPV4_NOT_OK;

  exitnode = node_get_by_id(cpath_layer->extend_info->identity_digest);

  if (ap_conn->entry_cfg.ipv6_traffic && exitnode) {
    tor_addr_t a;
    tor_addr_make_null(&a, AF_INET6);
    if (compare_tor_addr_to_node_policy(&a, ap_conn->socks_request->port,
                                        exitnode)
        != ADDR_POLICY_REJECTED) {
      /* Only say "IPv6 OK" if the exit node supports IPv6. Otherwise
       * there's no point. */
      flags |= BEGIN_FLAG_IPV6_OK;
    }
  }

  if (flags == BEGIN_FLAG_IPV6_OK) {
    /* When IPv4 and IPv6 are both allowed, consider whether to say we
     * prefer IPv6.  Otherwise there's no point in declaring a preference */
    if (ap_conn->entry_cfg.prefer_ipv6)
      flags |= BEGIN_FLAG_IPV6_PREFERRED;
  }

  if (flags == BEGIN_FLAG_IPV4_NOT_OK) {
    log_warn(LD_EDGE, "I'm about to ask a node for a connection that I "
             "am telling it to fulfil with neither IPv4 nor IPv6. That's "
             "not going to work. Did you perhaps ask for an IPv6 address "
             "on an IPv4Only port, or vice versa?");
  }

  return flags;
}
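/* Payload sketch for the RELAY_BEGIN cell built by
 * connection_ap_handshake_send_begin() below, for a CONNECT to
 * www.example.com:80 (hypothetical target):
 *
 *   "www.example.com:80\0"   NUL-terminated address:port string
 *   XX XX XX XX              optional 4-byte begincell_flags word in
 *                            network order, appended only when the flags
 *                            are nonzero and there is room in the payload
 */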
/** Write a relay begin cell, using destaddr and destport from ap_conn's
 * socks_request field, and send it down circ.
 *
 * If ap_conn is broken, mark it for close and return -1. Else return 0.
 */
int
connection_ap_handshake_send_begin(entry_connection_t *ap_conn)
{
  char payload[CELL_PAYLOAD_SIZE];
  int payload_len;
  int begin_type;
  const or_options_t *options = get_options();
  origin_circuit_t *circ;
  edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn);
  connection_t *base_conn = TO_CONN(edge_conn);
  tor_assert(edge_conn->on_circuit);
  circ = TO_ORIGIN_CIRCUIT(edge_conn->on_circuit);

  tor_assert(base_conn->type == CONN_TYPE_AP);
  tor_assert(base_conn->state == AP_CONN_STATE_CIRCUIT_WAIT);
  tor_assert(ap_conn->socks_request);
  tor_assert(SOCKS_COMMAND_IS_CONNECT(ap_conn->socks_request->command));

  edge_conn->stream_id = get_unique_stream_id_by_circ(circ);
  if (edge_conn->stream_id == 0) {
    /* XXXX+ Instead of closing this stream, we should make it get
     * retried on another circuit. */
    connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);

    /* Mark this circuit "unusable for new streams". */
    mark_circuit_unusable_for_new_conns(circ);
    return -1;
  }

  /* Set up begin cell flags. */
  edge_conn->begincell_flags = connection_ap_get_begincell_flags(ap_conn);

  tor_snprintf(payload, RELAY_PAYLOAD_SIZE, "%s:%d",
               (circ->base_.purpose == CIRCUIT_PURPOSE_C_GENERAL) ?
                 ap_conn->socks_request->address : "",
               ap_conn->socks_request->port);
  payload_len = (int)strlen(payload)+1;
  if (payload_len <= RELAY_PAYLOAD_SIZE - 4 && edge_conn->begincell_flags) {
    set_uint32(payload + payload_len, htonl(edge_conn->begincell_flags));
    payload_len += 4;
  }

  log_info(LD_APP,
           "Sending relay cell %d on circ %u to begin stream %d.",
           (int)ap_conn->use_begindir,
           (unsigned)circ->base_.n_circ_id,
           edge_conn->stream_id);

  begin_type = ap_conn->use_begindir ?
                 RELAY_COMMAND_BEGIN_DIR : RELAY_COMMAND_BEGIN;

  /* Check that circuits are anonymised, based on their type. */
  if (begin_type == RELAY_COMMAND_BEGIN) {
    /* This connection is a standard OR connection.
     * Make sure its path length is anonymous, or that we're in a
     * non-anonymous mode. */
    assert_circ_anonymity_ok(circ, options);
  } else if (begin_type == RELAY_COMMAND_BEGIN_DIR) {
    /* This connection is a begindir directory connection.
     * Look at the linked directory connection to access the directory
     * purpose.  If a BEGINDIR connection is ever not linked, that's a
     * bug. */
    if (BUG(!base_conn->linked)) {
      return -1;
    }
    connection_t *linked_dir_conn_base = base_conn->linked_conn;
    /* If the linked connection has been unlinked by other code, we can't
     * send a begin cell on it. */
    if (!linked_dir_conn_base) {
      return -1;
    }
    /* Sensitive directory connections must have an anonymous path length.
     * Otherwise, directory connections are typically one-hop.
     * This matches the earlier check for directory connection path
     * anonymity in directory_initiate_command_rend(). */
    if (purpose_needs_anonymity(
                    linked_dir_conn_base->purpose,
                    TO_DIR_CONN(linked_dir_conn_base)->router_purpose,
                    TO_DIR_CONN(linked_dir_conn_base)->requested_resource)) {
      assert_circ_anonymity_ok(circ, options);
    }
  } else {
    /* This code was written for the two connection types BEGIN and
     * BEGIN_DIR */
    tor_assert_unreached();
  }

  if (connection_edge_send_command(edge_conn, begin_type,
                                   begin_type == RELAY_COMMAND_BEGIN ?
                                     payload : NULL,
                                   begin_type == RELAY_COMMAND_BEGIN ?
                                     payload_len : 0) < 0)
    return -1; /* circuit is closed, don't continue */

  edge_conn->package_window = STREAMWINDOW_START;
  edge_conn->deliver_window = STREAMWINDOW_START;
  base_conn->state = AP_CONN_STATE_CONNECT_WAIT;
  log_info(LD_APP, "Address/port sent, ap socket "TOR_SOCKET_T_FORMAT
           ", n_circ_id %u",
           base_conn->s, (unsigned)circ->base_.n_circ_id);
  control_event_stream_status(ap_conn, STREAM_EVENT_SENT_CONNECT, 0);

  /* If there's queued-up data, send it now */
  if ((connection_get_inbuf_len(base_conn) ||
       ap_conn->sending_optimistic_data) &&
      connection_ap_supports_optimistic_data(ap_conn)) {
    log_info(LD_APP, "Sending up to %ld + %ld bytes of queued-up data",
             (long)connection_get_inbuf_len(base_conn),
             ap_conn->sending_optimistic_data ?
               (long)buf_datalen(ap_conn->sending_optimistic_data) : 0);
    if (connection_edge_package_raw_inbuf(edge_conn, 1, NULL) < 0) {
      connection_mark_for_close(base_conn);
    }
  }

  return 0;
}

/** Write a relay resolve cell, using destaddr and destport from ap_conn's
 * socks_request field, and send it down circ.
 *
 * If ap_conn is broken, mark it for close and return -1. Else return 0.
 */
int
connection_ap_handshake_send_resolve(entry_connection_t *ap_conn)
{
  int payload_len, command;
  const char *string_addr;
  char inaddr_buf[REVERSE_LOOKUP_NAME_BUF_LEN];
  origin_circuit_t *circ;
  edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn);
  connection_t *base_conn = TO_CONN(edge_conn);
  tor_assert(edge_conn->on_circuit);
  circ = TO_ORIGIN_CIRCUIT(edge_conn->on_circuit);

  tor_assert(base_conn->type == CONN_TYPE_AP);
  tor_assert(base_conn->state == AP_CONN_STATE_CIRCUIT_WAIT);
  tor_assert(ap_conn->socks_request);
  tor_assert(circ->base_.purpose == CIRCUIT_PURPOSE_C_GENERAL);

  command = ap_conn->socks_request->command;
  tor_assert(SOCKS_COMMAND_IS_RESOLVE(command));

  edge_conn->stream_id = get_unique_stream_id_by_circ(circ);
  if (edge_conn->stream_id == 0) {
    /* XXXX+ Instead of closing this stream, we should make it get
     * retried on another circuit. */
    connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);

    /* Mark this circuit "unusable for new streams". */
    mark_circuit_unusable_for_new_conns(circ);
    return -1;
  }

  if (command == SOCKS_COMMAND_RESOLVE) {
    string_addr = ap_conn->socks_request->address;
    payload_len = (int)strlen(string_addr)+1;
  } else {
    /* command == SOCKS_COMMAND_RESOLVE_PTR */
    const char *a = ap_conn->socks_request->address;
    tor_addr_t addr;
    int r;

    /* We're doing a reverse lookup.  The input could be an IP address, or
     * could be an .in-addr.arpa or .ip6.arpa address */
    r = tor_addr_parse_PTR_name(&addr, a, AF_UNSPEC, 1);
    if (r <= 0) {
      log_warn(LD_APP, "Rejecting ill-formed reverse lookup of %s",
               safe_str_client(a));
      connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);
      return -1;
    }

    r = tor_addr_to_PTR_name(inaddr_buf, sizeof(inaddr_buf), &addr);
    if (r < 0) {
      log_warn(LD_BUG, "Couldn't generate reverse lookup hostname of %s",
               safe_str_client(a));
      connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL);
      return -1;
    }

    string_addr = inaddr_buf;
    payload_len = (int)strlen(inaddr_buf)+1;
    tor_assert(payload_len <= (int)sizeof(inaddr_buf));
  }

  log_debug(LD_APP,
            "Sending relay cell to begin stream %d.", edge_conn->stream_id);

  if (connection_edge_send_command(edge_conn,
                                   RELAY_COMMAND_RESOLVE,
                                   string_addr, payload_len) < 0)
    return -1; /* circuit is closed, don't continue */

  if (!base_conn->address) {
    /* This might be unnecessary. XXXX */
XXXX */ base_conn->address = tor_addr_to_str_dup(&base_conn->addr); } base_conn->state = AP_CONN_STATE_RESOLVE_WAIT; log_info(LD_APP,"Address sent for resolve, ap socket "TOR_SOCKET_T_FORMAT ", n_circ_id %u", base_conn->s, (unsigned)circ->base_.n_circ_id); control_event_stream_status(ap_conn, STREAM_EVENT_SENT_RESOLVE, 0); return 0; } /** Make an AP connection_t linked to the connection_t <b>partner</b>: make a * new linked connection pair, attach one side to <b>partner</b>, * connection_add() the new connection, initialize it to * AP_CONN_STATE_CIRCUIT_WAIT, and mark it as pending circuit attachment. * * Return the newly created end of the linked connection pair, or NULL on * error. */ entry_connection_t * connection_ap_make_link(connection_t *partner, char *address, uint16_t port, const char *digest, int session_group, int isolation_flags, int use_begindir, int want_onehop) { entry_connection_t *conn; connection_t *base_conn; log_info(LD_APP,"Making internal %s tunnel to %s:%d ...", want_onehop ? "direct" : "anonymized", safe_str_client(address), port); conn = entry_connection_new(CONN_TYPE_AP, tor_addr_family(&partner->addr)); base_conn = ENTRY_TO_CONN(conn); base_conn->linked = 1; /* so that we can add it safely below. */ /* populate conn->socks_request */ /* leave version at zero, so the socks_reply is empty */ conn->socks_request->socks_version = 0; conn->socks_request->has_finished = 0; /* waiting for 'connected' */ strlcpy(conn->socks_request->address, address, sizeof(conn->socks_request->address)); conn->socks_request->port = port; conn->socks_request->command = SOCKS_COMMAND_CONNECT; conn->want_onehop = want_onehop; conn->use_begindir = use_begindir; if (use_begindir) { conn->chosen_exit_name = tor_malloc(HEX_DIGEST_LEN+2); conn->chosen_exit_name[0] = '$'; tor_assert(digest); base16_encode(conn->chosen_exit_name+1,HEX_DIGEST_LEN+1, digest, DIGEST_LEN); } /* Populate isolation fields. */ conn->socks_request->listener_type = CONN_TYPE_DIR_LISTENER; conn->original_dest_address = tor_strdup(address); conn->entry_cfg.session_group = session_group; conn->entry_cfg.isolation_flags = isolation_flags; base_conn->address = tor_strdup("(Tor_internal)"); tor_addr_make_unspec(&base_conn->addr); base_conn->port = 0; connection_link_connections(partner, base_conn); if (connection_add(base_conn) < 0) { /* no space, forget it */ connection_free(base_conn); return NULL; } base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT; control_event_stream_status(conn, STREAM_EVENT_NEW, 0); /* attaching to a dirty circuit is fine */ connection_ap_mark_as_pending_circuit(conn); log_info(LD_APP,"... application connection created and linked."); return conn; } /** Notify any interested controller connections about a new hostname resolve * or resolve error. Takes the same arguments as does * connection_ap_handshake_socks_resolved().
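 *
 * As an illustration of the observable effect (hypothetical values, not
 * taken from any real session), a 4-byte IPv4 answer for a lookup of
 * "www.example.com" is surfaced to controllers as an address-mapped
 * event, roughly:
 *
 *   650 ADDRMAP www.example.com 192.0.2.10 "<expiry>"
 *
 * whereas an unrecognized or error answer is reported with the literal
 * placeholder "<error>" and an "error=yes" annotation.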
*/ static void tell_controller_about_resolved_result(entry_connection_t *conn, int answer_type, size_t answer_len, const char *answer, int ttl, time_t expires) { expires = time(NULL) + ttl; if (answer_type == RESOLVED_TYPE_IPV4 && answer_len >= 4) { char *cp = tor_dup_ip(ntohl(get_uint32(answer))); control_event_address_mapped(conn->socks_request->address, cp, expires, NULL, 0); tor_free(cp); } else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) { char *cp = tor_strndup(answer, answer_len); control_event_address_mapped(conn->socks_request->address, cp, expires, NULL, 0); tor_free(cp); } else { control_event_address_mapped(conn->socks_request->address, "<error>", time(NULL)+ttl, "error=yes", 0); } } /** * As connection_ap_handshake_socks_resolved, but take a tor_addr_t to send * as the answer. */ void connection_ap_handshake_socks_resolved_addr(entry_connection_t *conn, const tor_addr_t *answer, int ttl, time_t expires) { if (tor_addr_family(answer) == AF_INET) { uint32_t a = tor_addr_to_ipv4n(answer); /* network order */ connection_ap_handshake_socks_resolved(conn,RESOLVED_TYPE_IPV4,4, (uint8_t*)&a, ttl, expires); } else if (tor_addr_family(answer) == AF_INET6) { const uint8_t *a = tor_addr_to_in6_addr8(answer); connection_ap_handshake_socks_resolved(conn,RESOLVED_TYPE_IPV6,16, a, ttl, expires); } else { log_warn(LD_BUG, "Got called with address of unexpected family %d", tor_addr_family(answer)); connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_ERROR,0,NULL,-1,-1); } } /** Send an answer to an AP connection that has requested a DNS lookup via * SOCKS. The type should be one of RESOLVED_TYPE_(IPV4|IPV6|HOSTNAME) or -1 * for unreachable; the answer should be in the format specified in the socks * extensions document. <b>ttl</b> is the ttl for the answer, or -1 on * certain errors or for values that didn't come via DNS. <b>expires</b> is * a time when the answer expires, or -1 or TIME_MAX if there's a good TTL. **/ /* XXXX the use of the ttl and expires fields is nutty. Let's make this * interface and those that use it less ugly. */ MOCK_IMPL(void, connection_ap_handshake_socks_resolved,(entry_connection_t *conn, int answer_type, size_t answer_len, const uint8_t *answer, int ttl, time_t expires)) { char buf[384]; size_t replylen; if (ttl >= 0) { if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) { tor_addr_t a; tor_addr_from_ipv4n(&a, get_uint32(answer)); if (! tor_addr_is_null(&a)) { client_dns_set_addressmap(conn, conn->socks_request->address, &a, conn->chosen_exit_name, ttl); } } else if (answer_type == RESOLVED_TYPE_IPV6 && answer_len == 16) { tor_addr_t a; tor_addr_from_ipv6_bytes(&a, (char*)answer); if (! tor_addr_is_null(&a)) { client_dns_set_addressmap(conn, conn->socks_request->address, &a, conn->chosen_exit_name, ttl); } } else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) { char *cp = tor_strndup((char*)answer, answer_len); client_dns_set_reverse_addressmap(conn, conn->socks_request->address, cp, conn->chosen_exit_name, ttl); tor_free(cp); } } if (ENTRY_TO_EDGE_CONN(conn)->is_dns_request) { if (conn->dns_server_request) { /* We had a request on our DNS port: answer it. */ dnsserv_resolved(conn, answer_type, answer_len, (char*)answer, ttl); conn->socks_request->has_finished = 1; return; } else { /* This must be a request from the controller. Since answers to those * requests are not cached, they do not generate an ADDRMAP event on * their own. 
*/ tell_controller_about_resolved_result(conn, answer_type, answer_len, (char*)answer, ttl, expires); conn->socks_request->has_finished = 1; return; } /* We shouldn't need to free conn here; it gets marked by the caller. */ } if (conn->socks_request->socks_version == 4) { buf[0] = 0x00; /* version */ if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) { buf[1] = SOCKS4_GRANTED; set_uint16(buf+2, 0); memcpy(buf+4, answer, 4); /* address */ replylen = SOCKS4_NETWORK_LEN; } else { /* "error" */ buf[1] = SOCKS4_REJECT; memset(buf+2, 0, 6); replylen = SOCKS4_NETWORK_LEN; } } else if (conn->socks_request->socks_version == 5) { /* SOCKS5 */ buf[0] = 0x05; /* version */ if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) { buf[1] = SOCKS5_SUCCEEDED; buf[2] = 0; /* reserved */ buf[3] = 0x01; /* IPv4 address type */ memcpy(buf+4, answer, 4); /* address */ set_uint16(buf+8, 0); /* port == 0. */ replylen = 10; } else if (answer_type == RESOLVED_TYPE_IPV6 && answer_len == 16) { buf[1] = SOCKS5_SUCCEEDED; buf[2] = 0; /* reserved */ buf[3] = 0x04; /* IPv6 address type */ memcpy(buf+4, answer, 16); /* address */ set_uint16(buf+20, 0); /* port == 0. */ replylen = 22; } else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) { buf[1] = SOCKS5_SUCCEEDED; buf[2] = 0; /* reserved */ buf[3] = 0x03; /* Domainname address type */ buf[4] = (char)answer_len; memcpy(buf+5, answer, answer_len); /* address */ set_uint16(buf+5+answer_len, 0); /* port == 0. */ replylen = 5+answer_len+2; } else { buf[1] = SOCKS5_HOST_UNREACHABLE; memset(buf+2, 0, 8); replylen = 10; } } else { /* no socks version info; don't send anything back */ return; } connection_ap_handshake_socks_reply(conn, buf, replylen, (answer_type == RESOLVED_TYPE_IPV4 || answer_type == RESOLVED_TYPE_IPV6 || answer_type == RESOLVED_TYPE_HOSTNAME) ? 0 : END_STREAM_REASON_RESOLVEFAILED); } /** Send a socks reply to stream <b>conn</b>, using the appropriate * socks version, etc, and mark <b>conn</b> as completed with SOCKS * handshaking. * * If <b>reply</b> is defined, then write <b>replylen</b> bytes of it to conn * and return, else reply based on <b>endreason</b> (one of * END_STREAM_REASON_*). If <b>reply</b> is undefined, <b>endreason</b> can't * be 0 or REASON_DONE. Send endreason to the controller, if appropriate. */ void connection_ap_handshake_socks_reply(entry_connection_t *conn, char *reply, size_t replylen, int endreason) { char buf[256]; socks5_reply_status_t status = stream_end_reason_to_socks5_response(endreason); tor_assert(conn->socks_request); /* make sure it's an AP stream */ if (!SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command)) { control_event_stream_status(conn, status==SOCKS5_SUCCEEDED ? STREAM_EVENT_SUCCEEDED : STREAM_EVENT_FAILED, endreason); } /* Flag this stream's circuit as having completed a stream successfully * (for path bias) */ if (status == SOCKS5_SUCCEEDED || endreason == END_STREAM_REASON_RESOLVEFAILED || endreason == END_STREAM_REASON_CONNECTREFUSED || endreason == END_STREAM_REASON_CONNRESET || endreason == END_STREAM_REASON_NOROUTE || endreason == END_STREAM_REASON_RESOURCELIMIT) { if (!conn->edge_.on_circuit || !CIRCUIT_IS_ORIGIN(conn->edge_.on_circuit)) { if (endreason != END_STREAM_REASON_RESOLVEFAILED) { log_info(LD_BUG, "No origin circuit for successful SOCKS stream "U64_FORMAT ". 
Reason: %d", U64_PRINTF_ARG(ENTRY_TO_CONN(conn)->global_identifier), endreason); } /* * Else DNS remaps and failed hidden service lookups can send us * here with END_STREAM_REASON_RESOLVEFAILED; ignore it * * Perhaps we could make the test more precise; we can tell hidden * services by conn->edge_.renddata != NULL; anything analogous for * the DNS remap case? */ } else { // XXX: Hrmm. It looks like optimistic data can't go through this // codepath, but someone should probably test it and make sure. // We don't want to mark optimistically opened streams as successful. pathbias_mark_use_success(TO_ORIGIN_CIRCUIT(conn->edge_.on_circuit)); } } if (conn->socks_request->has_finished) { log_warn(LD_BUG, "(Harmless.) duplicate calls to " "connection_ap_handshake_socks_reply."); return; } if (replylen) { /* we already have a reply in mind */ connection_write_to_buf(reply, replylen, ENTRY_TO_CONN(conn)); conn->socks_request->has_finished = 1; return; } if (conn->socks_request->socks_version == 4) { memset(buf,0,SOCKS4_NETWORK_LEN); buf[1] = (status==SOCKS5_SUCCEEDED ? SOCKS4_GRANTED : SOCKS4_REJECT); /* leave version, destport, destip zero */ connection_write_to_buf(buf, SOCKS4_NETWORK_LEN, ENTRY_TO_CONN(conn)); } else if (conn->socks_request->socks_version == 5) { size_t buf_len; memset(buf,0,sizeof(buf)); if (tor_addr_family(&conn->edge_.base_.addr) == AF_INET) { buf[0] = 5; /* version 5 */ buf[1] = (char)status; buf[2] = 0; buf[3] = 1; /* ipv4 addr */ /* 4 bytes for the header, 2 bytes for the port, 4 for the address. */ buf_len = 10; } else { /* AF_INET6. */ buf[0] = 5; /* version 5 */ buf[1] = (char)status; buf[2] = 0; buf[3] = 4; /* ipv6 addr */ /* 4 bytes for the header, 2 bytes for the port, 16 for the address. */ buf_len = 22; } connection_write_to_buf(buf,buf_len,ENTRY_TO_CONN(conn)); } /* If socks_version isn't 4 or 5, don't send anything. * This can happen in the case of AP bridges. */ conn->socks_request->has_finished = 1; return; } /** Read a RELAY_BEGIN or RELAY_BEGINDIR cell from <b>cell</b>, decode it, and * place the result in <b>bcell</b>. On success return 0; on failure return * <0 and set *<b>end_reason_out</b> to the end reason we should send back to * the client. * * Return -1 in the case where we want to send a RELAY_END cell, and < -1 when * we don't. **/ STATIC int begin_cell_parse(const cell_t *cell, begin_cell_t *bcell, uint8_t *end_reason_out) { relay_header_t rh; const uint8_t *body, *nul; memset(bcell, 0, sizeof(*bcell)); *end_reason_out = END_STREAM_REASON_MISC; relay_header_unpack(&rh, cell->payload); if (rh.length > RELAY_PAYLOAD_SIZE) { return -2; /*XXXX why not TORPROTOCOL? */ } bcell->stream_id = rh.stream_id; if (rh.command == RELAY_COMMAND_BEGIN_DIR) { bcell->is_begindir = 1; return 0; } else if (rh.command != RELAY_COMMAND_BEGIN) { log_warn(LD_BUG, "Got an unexpected command %d", (int)rh.command); *end_reason_out = END_STREAM_REASON_INTERNAL; return -1; } body = cell->payload + RELAY_HEADER_SIZE; nul = memchr(body, 0, rh.length); if (! nul) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay begin cell has no \\0. Closing."); *end_reason_out = END_STREAM_REASON_TORPROTOCOL; return -1; } if (tor_addr_port_split(LOG_PROTOCOL_WARN, (char*)(body), &bcell->address,&bcell->port)<0) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Unable to parse addr:port in relay begin cell. Closing."); *end_reason_out = END_STREAM_REASON_TORPROTOCOL; return -1; } if (bcell->port == 0) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Missing port in relay begin cell. 
Closing."); tor_free(bcell->address); *end_reason_out = END_STREAM_REASON_TORPROTOCOL; return -1; } if (body + rh.length >= nul + 4) bcell->flags = ntohl(get_uint32(nul+1)); return 0; } /** A relay 'begin' or 'begin_dir' cell has arrived, and either we are * an exit hop for the circuit, or we are the origin and it is a * rendezvous begin. * * Launch a new exit connection and initialize things appropriately. * * If it's a rendezvous stream, call connection_exit_connect() on * it. * * For general streams, call dns_resolve() on it first, and only call * connection_exit_connect() if the dns answer is already known. * * Note that we don't call connection_add() on the new stream! We wait * for connection_exit_connect() to do that. * * Return -(some circuit end reason) if we want to tear down <b>circ</b>. * Else return 0. */ int connection_exit_begin_conn(cell_t *cell, circuit_t *circ) { edge_connection_t *n_stream; relay_header_t rh; char *address = NULL; uint16_t port = 0; or_circuit_t *or_circ = NULL; origin_circuit_t *origin_circ = NULL; crypt_path_t *layer_hint = NULL; const or_options_t *options = get_options(); begin_cell_t bcell; int rv; uint8_t end_reason=0; assert_circuit_ok(circ); if (!CIRCUIT_IS_ORIGIN(circ)) { or_circ = TO_OR_CIRCUIT(circ); } else { tor_assert(circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED); origin_circ = TO_ORIGIN_CIRCUIT(circ); layer_hint = origin_circ->cpath->prev; } relay_header_unpack(&rh, cell->payload); if (rh.length > RELAY_PAYLOAD_SIZE) return -END_CIRC_REASON_TORPROTOCOL; /* Note: we have to use relay_send_command_from_edge here, not * connection_edge_end or connection_edge_send_command, since those require * that we have a stream connected to a circuit, and we don't connect to a * circuit until we have a pending/successful resolve. */ if (!server_mode(options) && circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay begin cell at non-server. Closing."); relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_EXITPOLICY, NULL); return 0; } rv = begin_cell_parse(cell, &bcell, &end_reason); if (rv < -1) { return -END_CIRC_REASON_TORPROTOCOL; } else if (rv == -1) { tor_free(bcell.address); relay_send_end_cell_from_edge(rh.stream_id, circ, end_reason, layer_hint); return 0; } if (! bcell.is_begindir) { /* Steal reference */ address = bcell.address; port = bcell.port; if (or_circ && or_circ->p_chan) { if (!options->AllowSingleHopExits && (or_circ->is_first_hop || (!connection_or_digest_is_known_relay( or_circ->p_chan->identity_digest) && should_refuse_unknown_exits(options)))) { /* Don't let clients use us as a single-hop proxy, unless the user * has explicitly allowed that in the config. It attracts attackers * and users who'd be better off with, well, single-hop proxies. */ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Attempt by %s to open a stream %s. Closing.", safe_str(channel_get_canonical_remote_descr(or_circ->p_chan)), or_circ->is_first_hop ? "on first hop of circuit" : "from unknown relay"); relay_send_end_cell_from_edge(rh.stream_id, circ, or_circ->is_first_hop ? 
END_STREAM_REASON_TORPROTOCOL : END_STREAM_REASON_MISC, NULL); tor_free(address); return 0; } } } else if (rh.command == RELAY_COMMAND_BEGIN_DIR) { if (!directory_permits_begindir_requests(options) || circ->purpose != CIRCUIT_PURPOSE_OR) { relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_NOTDIRECTORY, layer_hint); return 0; } /* Make sure to get the 'real' address of the previous hop: the * caller might want to know whether the remote IP address has changed, * and we might already have corrected base_.addr[ess] for the relay's * canonical IP address. */ if (or_circ && or_circ->p_chan) address = tor_strdup(channel_get_actual_remote_address(or_circ->p_chan)); else address = tor_strdup("127.0.0.1"); port = 1; /* XXXX This value is never actually used anywhere, and there * isn't "really" a connection here. But we * need to set it to something nonzero. */ } else { log_warn(LD_BUG, "Got an unexpected command %d", (int)rh.command); relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_INTERNAL, layer_hint); return 0; } if (! options->IPv6Exit) { /* I don't care if you prefer IPv6; I can't give you any. */ bcell.flags &= ~BEGIN_FLAG_IPV6_PREFERRED; /* If you don't want IPv4, I can't help. */ if (bcell.flags & BEGIN_FLAG_IPV4_NOT_OK) { tor_free(address); relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_EXITPOLICY, layer_hint); return 0; } } log_debug(LD_EXIT,"Creating new exit connection."); /* The 'AF_INET' here is temporary; we might need to change it later in * connection_exit_connect(). */ n_stream = edge_connection_new(CONN_TYPE_EXIT, AF_INET); /* Remember the tunneled request ID in the new edge connection, so that * we can measure download times. */ n_stream->dirreq_id = circ->dirreq_id; n_stream->base_.purpose = EXIT_PURPOSE_CONNECT; n_stream->begincell_flags = bcell.flags; n_stream->stream_id = rh.stream_id; n_stream->base_.port = port; /* leave n_stream->s at -1, because it's not yet valid */ n_stream->package_window = STREAMWINDOW_START; n_stream->deliver_window = STREAMWINDOW_START; if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED) { tor_assert(origin_circ); log_info(LD_REND,"begin is for rendezvous. configuring stream."); n_stream->base_.address = tor_strdup("(rendezvous)"); n_stream->base_.state = EXIT_CONN_STATE_CONNECTING; n_stream->rend_data = rend_data_dup(origin_circ->rend_data); tor_assert(connection_edge_is_rendezvous_stream(n_stream)); assert_circuit_ok(circ); const int r = rend_service_set_connection_addr_port(n_stream, origin_circ); if (r < 0) { log_info(LD_REND,"Didn't find rendezvous service (port %d)", n_stream->base_.port); /* Send back reason DONE because we want to make hidden service port * scanning harder thus instead of returning that the exit policy * didn't match, which makes it obvious that the port is closed, * return DONE and kill the circuit. That way, a user (malicious or * not) needs one circuit per bad port unless it matches the policy of * the hidden service. */ relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_DONE, layer_hint); connection_free(TO_CONN(n_stream)); tor_free(address); /* Drop the circuit here since it might be someone deliberately * scanning the hidden service ports. Note that this mitigates port * scanning by adding more work on the attacker side to successfully * scan but does not fully solve it. 
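 * (A rough cost model under that assumption: a scanner probing P closed
 * ports on one hidden service has to build on the order of P separate
 * circuits instead of reusing a single circuit for every probe -- more
 * expensive for the attacker, but, as noted, not a complete defense.)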
*/ if (r < -1) return END_CIRC_AT_ORIGIN; else return 0; } assert_circuit_ok(circ); log_debug(LD_REND,"Finished assigning addr/port"); n_stream->cpath_layer = origin_circ->cpath->prev; /* link it */ /* add it into the linked list of p_streams on this circuit */ n_stream->next_stream = origin_circ->p_streams; n_stream->on_circuit = circ; origin_circ->p_streams = n_stream; assert_circuit_ok(circ); origin_circ->rend_data->nr_streams++; connection_exit_connect(n_stream); /* For path bias: This circuit was used successfully */ pathbias_mark_use_success(origin_circ); tor_free(address); return 0; } tor_strlower(address); n_stream->base_.address = address; n_stream->base_.state = EXIT_CONN_STATE_RESOLVEFAILED; /* default to failed, change in dns_resolve if it turns out not to fail */ if (we_are_hibernating()) { relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_HIBERNATING, NULL); connection_free(TO_CONN(n_stream)); return 0; } n_stream->on_circuit = circ; if (rh.command == RELAY_COMMAND_BEGIN_DIR) { tor_addr_t tmp_addr; tor_assert(or_circ); if (or_circ->p_chan && channel_get_addr_if_possible(or_circ->p_chan, &tmp_addr)) { tor_addr_copy(&n_stream->base_.addr, &tmp_addr); } return connection_exit_connect_dir(n_stream); } log_debug(LD_EXIT,"about to start the dns_resolve()."); /* send it off to the gethostbyname farm */ switch (dns_resolve(n_stream)) { case 1: /* resolve worked; now n_stream is attached to circ. */ assert_circuit_ok(circ); log_debug(LD_EXIT,"about to call connection_exit_connect()."); connection_exit_connect(n_stream); return 0; case -1: /* resolve failed */ relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_RESOLVEFAILED, NULL); /* n_stream got freed. don't touch it. */ break; case 0: /* resolve added to pending list */ assert_circuit_ok(circ); break; } return 0; } /** * Called when we receive a RELAY_COMMAND_RESOLVE cell 'cell' along the * circuit <b>circ</b>; * begin resolving the hostname, and (eventually) reply with a RESOLVED cell. */ int connection_exit_begin_resolve(cell_t *cell, or_circuit_t *circ) { edge_connection_t *dummy_conn; relay_header_t rh; assert_circuit_ok(TO_CIRCUIT(circ)); relay_header_unpack(&rh, cell->payload); if (rh.length > RELAY_PAYLOAD_SIZE) return -1; /* This 'dummy_conn' only exists to remember the stream ID * associated with the resolve request; and to make the * implementation of dns.c more uniform. (We really only need to * remember the circuit, the stream ID, and the hostname to be * resolved; but if we didn't store them in a connection like this, * the housekeeping in dns.c would get way more complicated.) */ dummy_conn = edge_connection_new(CONN_TYPE_EXIT, AF_INET); dummy_conn->stream_id = rh.stream_id; dummy_conn->base_.address = tor_strndup( (char*)cell->payload+RELAY_HEADER_SIZE, rh.length); dummy_conn->base_.port = 0; dummy_conn->base_.state = EXIT_CONN_STATE_RESOLVEFAILED; dummy_conn->base_.purpose = EXIT_PURPOSE_RESOLVE; dummy_conn->on_circuit = TO_CIRCUIT(circ); /* send it off to the gethostbyname farm */ switch (dns_resolve(dummy_conn)) { case -1: /* Impossible to resolve; a resolved cell was sent. */ /* Connection freed; don't touch it. */ return 0; case 1: /* The result was cached; a resolved cell was sent. 
*/ if (!dummy_conn->base_.marked_for_close) connection_free(TO_CONN(dummy_conn)); return 0; case 0: /* resolve added to pending list */ assert_circuit_ok(TO_CIRCUIT(circ)); break; } return 0; } /** Helper: Return true and set *<b>why_rejected</b> to an optional clarifying * message iff we do not allow connections to <b>addr</b>:<b>port</b>. */ static int my_exit_policy_rejects(const tor_addr_t *addr, uint16_t port, const char **why_rejected) { if (router_compare_to_my_exit_policy(addr, port)) { *why_rejected = ""; return 1; } else if (tor_addr_family(addr) == AF_INET6 && !get_options()->IPv6Exit) { *why_rejected = " (IPv6 address without IPv6Exit configured)"; return 1; } return 0; } /** Connect to conn's specified addr and port. If it worked, conn * has now been added to the connection_array. * * Send back a connected cell. Include the resolved IP of the destination * address, but <em>only</em> if it's a general exit stream. (Rendezvous * streams must not reveal what IP they connected to.) */ void connection_exit_connect(edge_connection_t *edge_conn) { const tor_addr_t *addr; uint16_t port; connection_t *conn = TO_CONN(edge_conn); int socket_error = 0, result; const char *why_failed_exit_policy = NULL; /* Apply exit policy to non-rendezvous connections. */ if (! connection_edge_is_rendezvous_stream(edge_conn) && my_exit_policy_rejects(&edge_conn->base_.addr, edge_conn->base_.port, &why_failed_exit_policy)) { if (BUG(!why_failed_exit_policy)) why_failed_exit_policy = ""; log_info(LD_EXIT,"%s:%d failed exit policy%s. Closing.", escaped_safe_str_client(conn->address), conn->port, why_failed_exit_policy); connection_edge_end(edge_conn, END_STREAM_REASON_EXITPOLICY); circuit_detach_stream(circuit_get_by_edge_conn(edge_conn), edge_conn); connection_free(conn); return; } #ifdef HAVE_SYS_UN_H if (conn->socket_family != AF_UNIX) { #else { #endif /* defined(HAVE_SYS_UN_H) */ addr = &conn->addr; port = conn->port; if (tor_addr_family(addr) == AF_INET6) conn->socket_family = AF_INET6; log_debug(LD_EXIT, "about to try connecting"); result = connection_connect(conn, conn->address, addr, port, &socket_error); #ifdef HAVE_SYS_UN_H } else { /* * In the AF_UNIX case, we expect to have already had conn->port = 1, * tor_addr_make_unspec(conn->addr) (cf. the way we mark in the incoming * case in connection_handle_listener_read()), and conn->address should * have the socket path to connect to. */ tor_assert(conn->address && strlen(conn->address) > 0); log_debug(LD_EXIT, "about to try connecting"); result = connection_connect_unix(conn, conn->address, &socket_error); #endif /* defined(HAVE_SYS_UN_H) */ } switch (result) { case -1: { int reason = errno_to_stream_end_reason(socket_error); connection_edge_end(edge_conn, reason); circuit_detach_stream(circuit_get_by_edge_conn(edge_conn), edge_conn); connection_free(conn); return; } case 0: conn->state = EXIT_CONN_STATE_CONNECTING; connection_watch_events(conn, READ_EVENT | WRITE_EVENT); /* writable indicates finish; * readable/error indicates broken link in windows-land. */ return; /* case 1: fall through */ } conn->state = EXIT_CONN_STATE_OPEN; if (connection_get_outbuf_len(conn)) { /* in case there are any queued data cells, from e.g. optimistic data */ connection_watch_events(conn, READ_EVENT|WRITE_EVENT); } else { connection_watch_events(conn, READ_EVENT); } /* also, deliver a 'connected' cell back through the circuit. */ if (connection_edge_is_rendezvous_stream(edge_conn)) { /* don't send an address back!
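 * (Contrast with the general-stream branch below: there the CONNECTED
 * payload -- built by connected_cell_format_payload() -- carries the
 * resolved address plus a TTL, e.g. 4 address bytes + 4 TTL bytes for an
 * IPv4 destination. Sending a zero-length payload here is what keeps the
 * rendezvous-side address unrevealed.)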
*/ connection_edge_send_command(edge_conn, RELAY_COMMAND_CONNECTED, NULL, 0); } else { /* normal stream */ uint8_t connected_payload[MAX_CONNECTED_CELL_PAYLOAD_LEN]; int connected_payload_len = connected_cell_format_payload(connected_payload, &conn->addr, edge_conn->address_ttl); if (connected_payload_len < 0) { connection_edge_end(edge_conn, END_STREAM_REASON_INTERNAL); circuit_detach_stream(circuit_get_by_edge_conn(edge_conn), edge_conn); connection_free(conn); return; } connection_edge_send_command(edge_conn, RELAY_COMMAND_CONNECTED, (char*)connected_payload, connected_payload_len); } } /** Given an exit conn that should attach to us as a directory server, open a * bridge connection with a linked connection pair, create a new directory * conn, and join them together. Return 0 on success (or if there was an * error we could send back an end cell for). Return -(some circuit end * reason) if the circuit needs to be torn down. Either connects * <b>exitconn</b>, frees it, or marks it, as appropriate. */ static int connection_exit_connect_dir(edge_connection_t *exitconn) { dir_connection_t *dirconn = NULL; or_circuit_t *circ = TO_OR_CIRCUIT(exitconn->on_circuit); log_info(LD_EXIT, "Opening local connection for anonymized directory exit"); exitconn->base_.state = EXIT_CONN_STATE_OPEN; dirconn = dir_connection_new(tor_addr_family(&exitconn->base_.addr)); tor_addr_copy(&dirconn->base_.addr, &exitconn->base_.addr); dirconn->base_.port = 0; dirconn->base_.address = tor_strdup(exitconn->base_.address); dirconn->base_.type = CONN_TYPE_DIR; dirconn->base_.purpose = DIR_PURPOSE_SERVER; dirconn->base_.state = DIR_CONN_STATE_SERVER_COMMAND_WAIT; /* Note that the new dir conn belongs to the same tunneled request as * the edge conn, so that we can measure download times. */ dirconn->dirreq_id = exitconn->dirreq_id; connection_link_connections(TO_CONN(dirconn), TO_CONN(exitconn)); if (connection_add(TO_CONN(exitconn))<0) { connection_edge_end(exitconn, END_STREAM_REASON_RESOURCELIMIT); connection_free(TO_CONN(exitconn)); connection_free(TO_CONN(dirconn)); return 0; } /* link exitconn to circ, now that we know we can use it. */ exitconn->next_stream = circ->n_streams; circ->n_streams = exitconn; if (connection_add(TO_CONN(dirconn))<0) { connection_edge_end(exitconn, END_STREAM_REASON_RESOURCELIMIT); connection_close_immediate(TO_CONN(exitconn)); connection_mark_for_close(TO_CONN(exitconn)); connection_free(TO_CONN(dirconn)); return 0; } connection_start_reading(TO_CONN(dirconn)); connection_start_reading(TO_CONN(exitconn)); if (connection_edge_send_command(exitconn, RELAY_COMMAND_CONNECTED, NULL, 0) < 0) { connection_mark_for_close(TO_CONN(exitconn)); connection_mark_for_close(TO_CONN(dirconn)); return 0; } return 0; } /** Return 1 if <b>conn</b> is a rendezvous stream, or 0 if * it is a general stream. */ int connection_edge_is_rendezvous_stream(const edge_connection_t *conn) { tor_assert(conn); if (conn->rend_data) return 1; return 0; } /** Return 1 if router <b>exit_node</b> is likely to allow stream <b>conn</b> * to exit from it, or 0 if it probably will not allow it. * (We might be uncertain if conn's destination address has not yet been * resolved.) */ int connection_ap_can_use_exit(const entry_connection_t *conn, const node_t *exit_node) { const or_options_t *options = get_options(); tor_assert(conn); tor_assert(conn->socks_request); tor_assert(exit_node); /* If a particular exit node has been requested for the new connection, * make sure the exit node of the existing circuit matches exactly. 
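 * (For example, with the ".exit" notation -- hypothetical exit name -- a
 * request for "www.example.com.myexit.exit" typically pins
 * chosen_exit_name to "myexit", so a circuit whose last hop is any other
 * relay is rejected here.)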
*/ if (conn->chosen_exit_name) { const node_t *chosen_exit = node_get_by_nickname(conn->chosen_exit_name, 1); if (!chosen_exit || tor_memneq(chosen_exit->identity, exit_node->identity, DIGEST_LEN)) { /* doesn't match */ // log_debug(LD_APP,"Requested node '%s', considering node '%s'. No.", // conn->chosen_exit_name, exit->nickname); return 0; } } if (conn->use_begindir) { /* Internal directory fetches do not count as exiting. */ return 1; } if (conn->socks_request->command == SOCKS_COMMAND_CONNECT) { tor_addr_t addr, *addrp = NULL; addr_policy_result_t r; if (0 == tor_addr_parse(&addr, conn->socks_request->address)) { addrp = &addr; } else if (!conn->entry_cfg.ipv4_traffic && conn->entry_cfg.ipv6_traffic) { tor_addr_make_null(&addr, AF_INET6); addrp = &addr; } else if (conn->entry_cfg.ipv4_traffic && !conn->entry_cfg.ipv6_traffic) { tor_addr_make_null(&addr, AF_INET); addrp = &addr; } r = compare_tor_addr_to_node_policy(addrp, conn->socks_request->port, exit_node); if (r == ADDR_POLICY_REJECTED) return 0; /* We know the address, and the exit policy rejects it. */ if (r == ADDR_POLICY_PROBABLY_REJECTED && !conn->chosen_exit_name) return 0; /* We don't know the addr, but the exit policy rejects most * addresses with this port. Since the user didn't ask for * this node, err on the side of caution. */ } else if (SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command)) { /* Don't send DNS requests to non-exit servers by default. */ if (!conn->chosen_exit_name && node_exit_policy_rejects_all(exit_node)) return 0; } if (routerset_contains_node(options->ExcludeExitNodesUnion_, exit_node)) { /* Not a suitable exit. Refuse it. */ return 0; } return 1; } /** If address is of the form "y.onion" with a well-formed handle y: * Put a NUL after y, lower-case it, and return ONION_HOSTNAME. * * If address is of the form "x.y.onion" with a well-formed handle y: * Drop "x.", put a NUL after y, lower-case it, and return ONION_HOSTNAME. * * If address is of the form "y.onion" with a badly-formed handle y: * Return BAD_HOSTNAME and log a message. * * If address is of the form "y.exit": * Put a NUL after y and return EXIT_HOSTNAME. * * Otherwise: * Return NORMAL_HOSTNAME and change nothing. */ hostname_type_t parse_extended_hostname(char *address) { char *s; char *q; char query[REND_SERVICE_ID_LEN_BASE32+1]; s = strrchr(address,'.'); if (!s) return NORMAL_HOSTNAME; /* no dot, thus normal */ if (!strcmp(s+1,"exit")) { *s = 0; /* NUL-terminate it */ return EXIT_HOSTNAME; /* .exit */ } if (strcmp(s+1,"onion")) return NORMAL_HOSTNAME; /* neither .exit nor .onion, thus normal */ /* so it is .onion */ *s = 0; /* NUL-terminate it */ /* locate a 'sub-domain' component, in order to remove it */ q = strrchr(address, '.'); if (q == address) { goto failed; /* reject sub-domain, as DNS does */ } q = (NULL == q) ? address : q + 1; if (strlcpy(query, q, REND_SERVICE_ID_LEN_BASE32+1) >= REND_SERVICE_ID_LEN_BASE32+1) goto failed; if (q != address) { memmove(address, q, strlen(q) + 1 /* also get \0 */); } if (rend_valid_service_id(query)) { return ONION_HOSTNAME; /* success */ } failed: /* otherwise, restore the string and return BAD_HOSTNAME */ *s = '.'; log_warn(LD_APP, "Invalid onion hostname %s; rejecting", safe_str_client(address)); return BAD_HOSTNAME; } /** Return true iff the (possibly NULL) <b>alen</b>-byte chunk of memory at * <b>a</b> is equal to the (possibly NULL) <b>blen</b>-byte chunk of memory * at <b>b</b>.
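 *
 * A few illustrative cases (hypothetical buffers):
 *
 *   memeq_opt(NULL, 0, NULL, 0)  -> true   (both absent)
 *   memeq_opt("ab", 2, NULL, 0)  -> false  (only one absent)
 *   memeq_opt("ab", 2, "abc", 3) -> false  (length mismatch)
 *   memeq_opt("ab", 2, "ab", 2)  -> true   (timing-safe tor_memeq())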
*/ static int memeq_opt(const char *a, size_t alen, const char *b, size_t blen) { if (a == NULL) { return (b == NULL); } else if (b == NULL) { return 0; } else if (alen != blen) { return 0; } else { return tor_memeq(a, b, alen); } } /** * Return true iff none of the isolation flags and fields in <b>conn</b> * should prevent it from being attached to <b>circ</b>. */ int connection_edge_compatible_with_circuit(const entry_connection_t *conn, const origin_circuit_t *circ) { const uint8_t iso = conn->entry_cfg.isolation_flags; const socks_request_t *sr = conn->socks_request; /* If circ has never been used for an isolated connection, we can * totally use it for this one. */ if (!circ->isolation_values_set) return 1; /* If circ has been used for connections having more than one value * for some field f, it will have the corresponding bit set in * isolation_flags_mixed. If isolation_flags_mixed has any bits * in common with iso, then conn must be isolated from at least * one stream that has been attached to circ. */ if ((iso & circ->isolation_flags_mixed) != 0) { /* For at least one field where conn is isolated, the circuit * already has mixed streams. */ return 0; } if (! conn->original_dest_address) { log_warn(LD_BUG, "Reached connection_edge_compatible_with_circuit without " "having set conn->original_dest_address"); ((entry_connection_t*)conn)->original_dest_address = tor_strdup(conn->socks_request->address); } if ((iso & ISO_STREAM) && (circ->associated_isolated_stream_global_id != ENTRY_TO_CONN(conn)->global_identifier)) return 0; if ((iso & ISO_DESTPORT) && conn->socks_request->port != circ->dest_port) return 0; if ((iso & ISO_DESTADDR) && strcasecmp(conn->original_dest_address, circ->dest_address)) return 0; if ((iso & ISO_SOCKSAUTH) && (! memeq_opt(sr->username, sr->usernamelen, circ->socks_username, circ->socks_username_len) || ! memeq_opt(sr->password, sr->passwordlen, circ->socks_password, circ->socks_password_len))) return 0; if ((iso & ISO_CLIENTPROTO) && (conn->socks_request->listener_type != circ->client_proto_type || conn->socks_request->socks_version != circ->client_proto_socksver)) return 0; if ((iso & ISO_CLIENTADDR) && !tor_addr_eq(&ENTRY_TO_CONN(conn)->addr, &circ->client_addr)) return 0; if ((iso & ISO_SESSIONGRP) && conn->entry_cfg.session_group != circ->session_group) return 0; if ((iso & ISO_NYM_EPOCH) && conn->nym_epoch != circ->nym_epoch) return 0; return 1; } /** * If <b>dry_run</b> is false, update <b>circ</b>'s isolation flags and fields * to reflect having had <b>conn</b> attached to it, and return 0. Otherwise, * if <b>dry_run</b> is true, then make no changes to <b>circ</b>, and return * a bitfield of isolation flags that we would have to set in * isolation_flags_mixed to add <b>conn</b> to <b>circ</b>, or -1 if * <b>circ</b> has had no streams attached to it. */ int connection_edge_update_circuit_isolation(const entry_connection_t *conn, origin_circuit_t *circ, int dry_run) { const socks_request_t *sr = conn->socks_request; if (! 
conn->original_dest_address) { log_warn(LD_BUG, "Reached connection_update_circuit_isolation without " "having set conn->original_dest_address"); ((entry_connection_t*)conn)->original_dest_address = tor_strdup(conn->socks_request->address); } if (!circ->isolation_values_set) { if (dry_run) return -1; circ->associated_isolated_stream_global_id = ENTRY_TO_CONN(conn)->global_identifier; circ->dest_port = conn->socks_request->port; circ->dest_address = tor_strdup(conn->original_dest_address); circ->client_proto_type = conn->socks_request->listener_type; circ->client_proto_socksver = conn->socks_request->socks_version; tor_addr_copy(&circ->client_addr, &ENTRY_TO_CONN(conn)->addr); circ->session_group = conn->entry_cfg.session_group; circ->nym_epoch = conn->nym_epoch; circ->socks_username = sr->username ? tor_memdup(sr->username, sr->usernamelen) : NULL; circ->socks_password = sr->password ? tor_memdup(sr->password, sr->passwordlen) : NULL; circ->socks_username_len = sr->usernamelen; circ->socks_password_len = sr->passwordlen; circ->isolation_values_set = 1; return 0; } else { uint8_t mixed = 0; if (conn->socks_request->port != circ->dest_port) mixed |= ISO_DESTPORT; if (strcasecmp(conn->original_dest_address, circ->dest_address)) mixed |= ISO_DESTADDR; if (!memeq_opt(sr->username, sr->usernamelen, circ->socks_username, circ->socks_username_len) || !memeq_opt(sr->password, sr->passwordlen, circ->socks_password, circ->socks_password_len)) mixed |= ISO_SOCKSAUTH; if ((conn->socks_request->listener_type != circ->client_proto_type || conn->socks_request->socks_version != circ->client_proto_socksver)) mixed |= ISO_CLIENTPROTO; if (!tor_addr_eq(&ENTRY_TO_CONN(conn)->addr, &circ->client_addr)) mixed |= ISO_CLIENTADDR; if (conn->entry_cfg.session_group != circ->session_group) mixed |= ISO_SESSIONGRP; if (conn->nym_epoch != circ->nym_epoch) mixed |= ISO_NYM_EPOCH; if (dry_run) return mixed; if ((mixed & conn->entry_cfg.isolation_flags) != 0) { log_warn(LD_BUG, "Updating a circuit with seemingly incompatible " "isolation flags."); } circ->isolation_flags_mixed |= mixed; return 0; } } /** * Clear the isolation settings on <b>circ</b>. * * This only works on an open circuit that has never had a stream attached to * it, and whose isolation settings are hypothetical. (We set hypothetical * isolation settings on circuits as we're launching them, so that we * know whether they can handle more streams or whether we need to launch * even more circuits. Once the circuit is open, if it turns out that * we no longer have any streams to attach to it, we clear the isolation flags * and data so that other streams can have a chance.) 
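 *
 * A lifecycle sketch of the case this handles: a circuit is launched with
 * hypothetical isolation values copied from a waiting stream; by the time
 * it opens, that stream has been attached elsewhere; we then clear the
 * hypothetical values so the open circuit can serve whatever compatible
 * stream shows up next.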
*/ void circuit_clear_isolation(origin_circuit_t *circ) { if (circ->isolation_any_streams_attached) { log_warn(LD_BUG, "Tried to clear the isolation status of a dirty circuit"); return; } if (TO_CIRCUIT(circ)->state != CIRCUIT_STATE_OPEN) { log_warn(LD_BUG, "Tried to clear the isolation status of a non-open " "circuit"); return; } circ->isolation_values_set = 0; circ->isolation_flags_mixed = 0; circ->associated_isolated_stream_global_id = 0; circ->client_proto_type = 0; circ->client_proto_socksver = 0; circ->dest_port = 0; tor_addr_make_unspec(&circ->client_addr); tor_free(circ->dest_address); circ->session_group = -1; circ->nym_epoch = 0; if (circ->socks_username) { memwipe(circ->socks_username, 0x11, circ->socks_username_len); tor_free(circ->socks_username); } if (circ->socks_password) { memwipe(circ->socks_password, 0x05, circ->socks_password_len); tor_free(circ->socks_password); } circ->socks_username_len = circ->socks_password_len = 0; } /** Free all storage held in module-scoped variables for connection_edge.c */ void connection_edge_free_all(void) { untried_pending_connections = 0; smartlist_free(pending_entry_connections); pending_entry_connections = NULL; }
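/* A minimal sketch (illustrative only, not part of this file's API) of how
 * the two isolation helpers above typically combine in attach logic: first
 * ask, without side effects, whether the stream may share the circuit, then
 * commit the stream's isolation values onto it. The two helper names are
 * real; the wrapper function itself is hypothetical.
 *
 *   static int
 *   try_attach_stream(entry_connection_t *conn, origin_circuit_t *circ)
 *   {
 *     if (!connection_edge_compatible_with_circuit(conn, circ))
 *       return -1;            <-- isolation conflict; try another circuit
 *     connection_edge_update_circuit_isolation(conn, circ, 0);
 *     return 0;
 *   }
 */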
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP N N GGGG % % P P NN N G % % PPPP N N N G GG % % P N NN G G % % P N N GGG % % % % % % Read/Write Portable Network Graphics Image Format % % % % Software Design % % Cristy % % Glenn Randers-Pehrson % % November 1997 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/quantum-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/static.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/transform.h" #include "magick/utility.h" #if defined(MAGICKCORE_PNG_DELEGATE) /* Suppress libpng pedantic warnings that were added in * libpng-1.2.41 and libpng-1.4.0. If you are working on * migration to libpng-1.5, remove these defines and then * fix any code that generates warnings. */ /* #define PNG_DEPRECATED Use of this function is deprecated */ /* #define PNG_USE_RESULT The result of this function must be checked */ /* #define PNG_NORETURN This function does not return */ /* #define PNG_ALLOCATED The result of the function is new memory */ /* #define PNG_DEPSTRUCT Access to this struct member is deprecated */ /* PNG_PTR_NORETURN does not work on some platforms, in libpng-1.5.x */ #define PNG_PTR_NORETURN #include "png.h" #include "zlib.h" /* ImageMagick differences */ #define first_scene scene #if PNG_LIBPNG_VER > 10011 /* Optional declarations. Define or undefine them as you like. */ /* #define PNG_DEBUG -- turning this on breaks VisualC compiling */ /* Features under construction. Define these to work on them. */ #undef MNG_OBJECT_BUFFERS #undef MNG_BASI_SUPPORTED #define MNG_COALESCE_LAYERS /* In 5.4.4, this interfered with MMAP'ed files. 
*/ #define MNG_INSERT_LAYERS /* Troublesome, but seem to work as of 5.4.4 */ #if defined(MAGICKCORE_JPEG_DELEGATE) # define JNG_SUPPORTED /* Not finished as of 5.5.2. See "To do" comments. */ #endif #if !defined(RGBColorMatchExact) #define IsPNGColorEqual(color,target) \ (((color).red == (target).red) && \ ((color).green == (target).green) && \ ((color).blue == (target).blue)) #endif /* Table of recognized sRGB ICC profiles */ struct sRGB_info_struct { png_uint_32 len; png_uint_32 crc; png_byte intent; }; const struct sRGB_info_struct sRGB_info[] = { /* ICC v2 perceptual sRGB_IEC61966-2-1_black_scaled.icc */ { 3048, 0x3b8772b9UL, 0}, /* ICC v2 relative sRGB_IEC61966-2-1_no_black_scaling.icc */ { 3052, 0x427ebb21UL, 1}, /* ICC v4 perceptual sRGB_v4_ICC_preference_displayclass.icc */ {60988, 0x306fd8aeUL, 0}, /* ICC v4 perceptual sRGB_v4_ICC_preference.icc perceptual */ {60960, 0xbbef7812UL, 0}, /* HP? sRGB v2 media-relative sRGB_IEC61966-2-1_noBPC.icc */ { 3024, 0x5d5129ceUL, 1}, /* HP-Microsoft sRGB v2 perceptual */ { 3144, 0x182ea552UL, 0}, /* HP-Microsoft sRGB v2 media-relative */ { 3144, 0xf29e526dUL, 1}, /* Facebook's "2012/01/25 03:41:57", 524, "TINYsRGB.icc" */ { 524, 0xd4938c39UL, 0}, /* "2012/11/28 22:35:21", 3212, "Argyll_sRGB.icm") */ { 3212, 0x034af5a1UL, 0}, /* Not recognized */ { 0, 0x00000000UL, 0}, }; /* Macros for left-bit-replication to ensure that pixels * and PixelPackets all have the same image->depth, and for use * in PNG8 quantization. */ /* LBR01: Replicate top bit */ #define LBR01PacketRed(pixelpacket) \ (pixelpacket).red=(ScaleQuantumToChar((pixelpacket).red) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketGreen(pixelpacket) \ (pixelpacket).green=(ScaleQuantumToChar((pixelpacket).green) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketBlue(pixelpacket) \ (pixelpacket).blue=(ScaleQuantumToChar((pixelpacket).blue) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketOpacity(pixelpacket) \ (pixelpacket).opacity=(ScaleQuantumToChar((pixelpacket).opacity) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketRGB(pixelpacket) \ { \ LBR01PacketRed((pixelpacket)); \ LBR01PacketGreen((pixelpacket)); \ LBR01PacketBlue((pixelpacket)); \ } #define LBR01PacketRGBO(pixelpacket) \ { \ LBR01PacketRGB((pixelpacket)); \ LBR01PacketOpacity((pixelpacket)); \ } #define LBR01PixelRed(pixel) \ (SetPixelRed((pixel), \ ScaleQuantumToChar(GetPixelRed((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelGreen(pixel) \ (SetPixelGreen((pixel), \ ScaleQuantumToChar(GetPixelGreen((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelBlue(pixel) \ (SetPixelBlue((pixel), \ ScaleQuantumToChar(GetPixelBlue((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelOpacity(pixel) \ (SetPixelOpacity((pixel), \ ScaleQuantumToChar(GetPixelOpacity((pixel))) < 0x10 ? 
\ 0 : QuantumRange)); #define LBR01PixelRGB(pixel) \ { \ LBR01PixelRed((pixel)); \ LBR01PixelGreen((pixel)); \ LBR01PixelBlue((pixel)); \ } #define LBR01PixelRGBO(pixel) \ { \ LBR01PixelRGB((pixel)); \ LBR01PixelOpacity((pixel)); \ } /* LBR02: Replicate top 2 bits */ #define LBR02PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xc0; \ (pixelpacket).red=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xc0; \ (pixelpacket).green=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xc0; \ (pixelpacket).blue=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketOpacity(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).opacity) & 0xc0; \ (pixelpacket).opacity=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketRGB(pixelpacket) \ { \ LBR02PacketRed((pixelpacket)); \ LBR02PacketGreen((pixelpacket)); \ LBR02PacketBlue((pixelpacket)); \ } #define LBR02PacketRGBO(pixelpacket) \ { \ LBR02PacketRGB((pixelpacket)); \ LBR02PacketOpacity((pixelpacket)); \ } #define LBR02PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xc0; \ SetPixelRed((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xc0; \ SetPixelGreen((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelBlue(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelBlue((pixel))) & 0xc0; \ SetPixelBlue((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02Opacity(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelOpacity((pixel))) & 0xc0; \ SetPixelOpacity((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelRGB(pixel) \ { \ LBR02PixelRed((pixel)); \ LBR02PixelGreen((pixel)); \ LBR02PixelBlue((pixel)); \ } #define LBR02PixelRGBO(pixel) \ { \ LBR02PixelRGB((pixel)); \ LBR02Opacity((pixel)); \ } /* LBR03: Replicate top 3 bits (only used with opaque pixels during PNG8 quantization) */ #define LBR03PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xe0; \ (pixelpacket).red=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xe0; \ (pixelpacket).green=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xe0; \ (pixelpacket).blue=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketRGB(pixelpacket) \ { \ LBR03PacketRed((pixelpacket)); \ LBR03PacketGreen((pixelpacket)); \ LBR03PacketBlue((pixelpacket)); \ } #define LBR03PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xe0; \ 
SetPixelRed((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xe0; \ SetPixelGreen((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelBlue(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelBlue((pixel))) \ & 0xe0; \ SetPixelBlue((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelRGB(pixel) \ { \ LBR03PixelRed((pixel)); \ LBR03PixelGreen((pixel)); \ LBR03PixelBlue((pixel)); \ } /* LBR04: Replicate top 4 bits */ #define LBR04PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xf0; \ (pixelpacket).red=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xf0; \ (pixelpacket).green=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xf0; \ (pixelpacket).blue=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketOpacity(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).opacity) & 0xf0; \ (pixelpacket).opacity=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketRGB(pixelpacket) \ { \ LBR04PacketRed((pixelpacket)); \ LBR04PacketGreen((pixelpacket)); \ LBR04PacketBlue((pixelpacket)); \ } #define LBR04PacketRGBO(pixelpacket) \ { \ LBR04PacketRGB((pixelpacket)); \ LBR04PacketOpacity((pixelpacket)); \ } #define LBR04PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xf0; \ SetPixelRed((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xf0; \ SetPixelGreen((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelBlue(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelBlue((pixel))) & 0xf0; \ SetPixelBlue((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelOpacity(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelOpacity((pixel))) & 0xf0; \ SetPixelOpacity((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelRGB(pixel) \ { \ LBR04PixelRed((pixel)); \ LBR04PixelGreen((pixel)); \ LBR04PixelBlue((pixel)); \ } #define LBR04PixelRGBO(pixel) \ { \ LBR04PixelRGB((pixel)); \ LBR04PixelOpacity((pixel)); \ } /* Establish thread safety. setjmp/longjmp is claimed to be safe on these platforms: setjmp/longjmp is alleged to be unsafe on these platforms: */ #ifdef PNG_SETJMP_SUPPORTED # ifndef IMPNG_SETJMP_IS_THREAD_SAFE # define IMPNG_SETJMP_NOT_THREAD_SAFE # endif # ifdef IMPNG_SETJMP_NOT_THREAD_SAFE static SemaphoreInfo *ping_semaphore = (SemaphoreInfo *) NULL; # endif #endif /* This is temporary until I set up a malloc'ed object attributes array. Recompile with MNG_MAX_OBJECTS=65536L to avoid this limit but waste more memory. */ #define MNG_MAX_OBJECTS 256 /* If this is not defined, the spec is interpreted strictly. If it is defined, an attempt will be made to recover from some errors, including o global PLTE too short */ #undef MNG_LOOSE /* Don't try to define PNG_MNG_FEATURES_SUPPORTED here. Make sure it's defined in libpng/pngconf.h, version 1.0.9 or later.
It won't work with earlier versions of libpng. From libpng-1.0.3a to libpng-1.0.8, PNG_READ|WRITE_EMPTY_PLTE were used but those have been deprecated in libpng in favor of PNG_MNG_FEATURES_SUPPORTED, so we set them here. PNG_MNG_FEATURES_SUPPORTED is disabled by default in libpng-1.0.9 and will be enabled by default in libpng-1.2.0. */ #ifdef PNG_MNG_FEATURES_SUPPORTED # ifndef PNG_READ_EMPTY_PLTE_SUPPORTED # define PNG_READ_EMPTY_PLTE_SUPPORTED # endif # ifndef PNG_WRITE_EMPTY_PLTE_SUPPORTED # define PNG_WRITE_EMPTY_PLTE_SUPPORTED # endif #endif /* Maximum valid size_t in PNG/MNG chunks is (2^31)-1 This macro is only defined in libpng-1.0.3 and later. Previously it was PNG_MAX_UINT but that was deprecated in libpng-1.2.6 */ #ifndef PNG_UINT_31_MAX #define PNG_UINT_31_MAX (png_uint_32) 0x7fffffffL #endif /* Constant strings for known chunk types. If you need to add a chunk, add a string holding the name here. To make the code more portable, we use ASCII numbers like this, not characters. */ static const png_byte mng_MHDR[5]={ 77, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_BACK[5]={ 66, 65, 67, 75, (png_byte) '\0'}; static const png_byte mng_BASI[5]={ 66, 65, 83, 73, (png_byte) '\0'}; static const png_byte mng_CLIP[5]={ 67, 76, 73, 80, (png_byte) '\0'}; static const png_byte mng_CLON[5]={ 67, 76, 79, 78, (png_byte) '\0'}; static const png_byte mng_DEFI[5]={ 68, 69, 70, 73, (png_byte) '\0'}; static const png_byte mng_DHDR[5]={ 68, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_DISC[5]={ 68, 73, 83, 67, (png_byte) '\0'}; static const png_byte mng_ENDL[5]={ 69, 78, 68, 76, (png_byte) '\0'}; static const png_byte mng_FRAM[5]={ 70, 82, 65, 77, (png_byte) '\0'}; static const png_byte mng_IEND[5]={ 73, 69, 78, 68, (png_byte) '\0'}; static const png_byte mng_IHDR[5]={ 73, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_JHDR[5]={ 74, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_LOOP[5]={ 76, 79, 79, 80, (png_byte) '\0'}; static const png_byte mng_MAGN[5]={ 77, 65, 71, 78, (png_byte) '\0'}; static const png_byte mng_MEND[5]={ 77, 69, 78, 68, (png_byte) '\0'}; static const png_byte mng_MOVE[5]={ 77, 79, 86, 69, (png_byte) '\0'}; static const png_byte mng_PAST[5]={ 80, 65, 83, 84, (png_byte) '\0'}; static const png_byte mng_PLTE[5]={ 80, 76, 84, 69, (png_byte) '\0'}; static const png_byte mng_SAVE[5]={ 83, 65, 86, 69, (png_byte) '\0'}; static const png_byte mng_SEEK[5]={ 83, 69, 69, 75, (png_byte) '\0'}; static const png_byte mng_SHOW[5]={ 83, 72, 79, 87, (png_byte) '\0'}; static const png_byte mng_TERM[5]={ 84, 69, 82, 77, (png_byte) '\0'}; static const png_byte mng_bKGD[5]={ 98, 75, 71, 68, (png_byte) '\0'}; static const png_byte mng_caNv[5]={ 99, 97, 78, 118, (png_byte) '\0'}; static const png_byte mng_cHRM[5]={ 99, 72, 82, 77, (png_byte) '\0'}; static const png_byte mng_eXIf[5]={101, 88, 73, 102, (png_byte) '\0'}; static const png_byte mng_gAMA[5]={103, 65, 77, 65, (png_byte) '\0'}; static const png_byte mng_iCCP[5]={105, 67, 67, 80, (png_byte) '\0'}; static const png_byte mng_nEED[5]={110, 69, 69, 68, (png_byte) '\0'}; static const png_byte mng_pHYg[5]={112, 72, 89, 103, (png_byte) '\0'}; static const png_byte mng_vpAg[5]={118, 112, 65, 103, (png_byte) '\0'}; static const png_byte mng_pHYs[5]={112, 72, 89, 115, (png_byte) '\0'}; static const png_byte mng_sBIT[5]={115, 66, 73, 84, (png_byte) '\0'}; static const png_byte mng_sRGB[5]={115, 82, 71, 66, (png_byte) '\0'}; static const png_byte mng_tRNS[5]={116, 82, 78, 83, (png_byte) '\0'}; #if 
defined(JNG_SUPPORTED) static const png_byte mng_IDAT[5]={ 73, 68, 65, 84, (png_byte) '\0'}; static const png_byte mng_JDAT[5]={ 74, 68, 65, 84, (png_byte) '\0'}; static const png_byte mng_JDAA[5]={ 74, 68, 65, 65, (png_byte) '\0'}; static const png_byte mng_JdAA[5]={ 74, 100, 65, 65, (png_byte) '\0'}; static const png_byte mng_JSEP[5]={ 74, 83, 69, 80, (png_byte) '\0'}; static const png_byte mng_oFFs[5]={111, 70, 70, 115, (png_byte) '\0'}; #endif #if 0 /* Other known chunks that are not yet supported by ImageMagick: */ static const png_byte mng_hIST[5]={104, 73, 83, 84, (png_byte) '\0'}; static const png_byte mng_iTXt[5]={105, 84, 88, 116, (png_byte) '\0'}; static const png_byte mng_sPLT[5]={115, 80, 76, 84, (png_byte) '\0'}; static const png_byte mng_sTER[5]={115, 84, 69, 82, (png_byte) '\0'}; static const png_byte mng_tEXt[5]={116, 69, 88, 116, (png_byte) '\0'}; static const png_byte mng_tIME[5]={116, 73, 77, 69, (png_byte) '\0'}; static const png_byte mng_zTXt[5]={122, 84, 88, 116, (png_byte) '\0'}; #endif typedef struct _MngBox { long left, right, top, bottom; } MngBox; typedef struct _MngPair { volatile long a, b; } MngPair; #ifdef MNG_OBJECT_BUFFERS typedef struct _MngBuffer { size_t height, width; Image *image; png_color plte[256]; int reference_count; unsigned char alpha_sample_depth, compression_method, color_type, concrete, filter_method, frozen, image_type, interlace_method, pixel_sample_depth, plte_length, sample_depth, viewable; } MngBuffer; #endif typedef struct _MngInfo { #ifdef MNG_OBJECT_BUFFERS MngBuffer *ob[MNG_MAX_OBJECTS]; #endif Image * image; RectangleInfo page; int adjoin, #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED bytes_in_read_buffer, found_empty_plte, #endif equal_backgrounds, equal_chrms, equal_gammas, #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) equal_palettes, #endif equal_physs, equal_srgbs, framing_mode, have_global_bkgd, have_global_chrm, have_global_gama, have_global_phys, have_global_sbit, have_global_srgb, have_saved_bkgd_index, have_write_global_chrm, have_write_global_gama, have_write_global_plte, have_write_global_srgb, need_fram, object_id, old_framing_mode, saved_bkgd_index; int new_number_colors; ssize_t image_found, loop_count[256], loop_iteration[256], scenes_found, x_off[MNG_MAX_OBJECTS], y_off[MNG_MAX_OBJECTS]; MngBox clip, frame, image_box, object_clip[MNG_MAX_OBJECTS]; unsigned char /* These flags could be combined into one byte */ exists[MNG_MAX_OBJECTS], frozen[MNG_MAX_OBJECTS], loop_active[256], invisible[MNG_MAX_OBJECTS], viewable[MNG_MAX_OBJECTS]; MagickOffsetType loop_jump[256]; png_colorp global_plte; png_color_8 global_sbit; png_byte #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED read_buffer[8], #endif global_trns[256]; float global_gamma; ChromaticityInfo global_chrm; RenderingIntent global_srgb_intent; unsigned long delay, global_plte_length, global_trns_length, global_x_pixels_per_unit, global_y_pixels_per_unit, mng_width, mng_height, ticks_per_second; MagickBooleanType need_blob; unsigned int IsPalette, global_phys_unit_type, basi_warning, clon_warning, dhdr_warning, jhdr_warning, magn_warning, past_warning, phyg_warning, phys_warning, sbit_warning, show_warning, mng_type, write_mng, write_png_colortype, write_png_depth, write_png_compression_level, write_png_compression_strategy, write_png_compression_filter, write_png8, write_png24, write_png32, write_png48, write_png64; #ifdef MNG_BASI_SUPPORTED unsigned long basi_width, basi_height; unsigned int basi_depth, basi_color_type, basi_compression_method, 
basi_filter_type, basi_interlace_method, basi_red, basi_green, basi_blue, basi_alpha, basi_viewable; #endif png_uint_16 magn_first, magn_last, magn_mb, magn_ml, magn_mr, magn_mt, magn_mx, magn_my, magn_methx, magn_methy; PixelPacket mng_global_bkgd; /* Added at version 6.6.6-7 */ MagickBooleanType ping_exclude_bKGD, ping_exclude_cHRM, ping_exclude_date, ping_exclude_eXIf, ping_exclude_EXIF, ping_exclude_gAMA, ping_exclude_iCCP, /* ping_exclude_iTXt, */ ping_exclude_oFFs, ping_exclude_pHYs, ping_exclude_sRGB, ping_exclude_tEXt, ping_exclude_tRNS, ping_exclude_caNv, ping_exclude_zCCP, /* hex-encoded iCCP */ ping_exclude_zTXt, ping_preserve_colormap, /* Added at version 6.8.5-7 */ ping_preserve_iCCP, /* Added at version 6.8.9-9 */ ping_exclude_tIME; } MngInfo; #endif /* VER */ /* Forward declarations. */ static MagickBooleanType WritePNGImage(const ImageInfo *,Image *); static MagickBooleanType WriteMNGImage(const ImageInfo *,Image *); #if defined(JNG_SUPPORTED) static MagickBooleanType WriteJNGImage(const ImageInfo *,Image *); #endif #if PNG_LIBPNG_VER > 10011 #if (MAGICKCORE_QUANTUM_DEPTH >= 16) static MagickBooleanType LosslessReduceDepthOK(Image *image) { /* Reduce bit depth if it can be reduced losslessly from 16+ to 8. * * This is true if the high byte and the next highest byte of * each sample of the image, the colormap, and the background color * are equal to each other. We check this by seeing if the samples * are unchanged when we scale them down to 8 and back up to Quantum. * * We don't use the method GetImageDepth() because it doesn't check * background and doesn't handle PseudoClass specially. */ #define QuantumToCharToQuantumEqQuantum(quantum) \ ((ScaleCharToQuantum((unsigned char) ScaleQuantumToChar(quantum))) == quantum) MagickBooleanType ok_to_reduce=MagickFalse; if (image->depth >= 16) { const PixelPacket *p; ok_to_reduce= QuantumToCharToQuantumEqQuantum(image->background_color.red) && QuantumToCharToQuantumEqQuantum(image->background_color.green) && QuantumToCharToQuantumEqQuantum(image->background_color.blue) ? MagickTrue : MagickFalse; if (ok_to_reduce != MagickFalse && image->storage_class == PseudoClass) { int indx; for (indx=0; indx < (ssize_t) image->colors; indx++) { ok_to_reduce=( QuantumToCharToQuantumEqQuantum( image->colormap[indx].red) && QuantumToCharToQuantumEqQuantum( image->colormap[indx].green) && QuantumToCharToQuantumEqQuantum( image->colormap[indx].blue)) ? MagickTrue : MagickFalse; if (ok_to_reduce == MagickFalse) break; } } if ((ok_to_reduce != MagickFalse) && (image->storage_class != PseudoClass)) { ssize_t y; register ssize_t x; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) { ok_to_reduce = MagickFalse; break; } for (x=(ssize_t) image->columns-1; x >= 0; x--) { ok_to_reduce= QuantumToCharToQuantumEqQuantum(GetPixelRed(p)) && QuantumToCharToQuantumEqQuantum(GetPixelGreen(p)) && QuantumToCharToQuantumEqQuantum(GetPixelBlue(p)) ? 
MagickTrue : MagickFalse; if (ok_to_reduce == MagickFalse) break; p++; } if (x >= 0) break; } } if (ok_to_reduce != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " OK to reduce PNG bit depth to 8 without loss of info"); } else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Not OK to reduce PNG bit depth to 8 without loss of info"); } } return ok_to_reduce; } #endif /* MAGICKCORE_QUANTUM_DEPTH >= 16 */ static const char* PngColorTypeToString(const unsigned int color_type) { const char *result = "Unknown"; switch (color_type) { case PNG_COLOR_TYPE_GRAY: result = "Gray"; break; case PNG_COLOR_TYPE_GRAY_ALPHA: result = "Gray+Alpha"; break; case PNG_COLOR_TYPE_PALETTE: result = "Palette"; break; case PNG_COLOR_TYPE_RGB: result = "RGB"; break; case PNG_COLOR_TYPE_RGB_ALPHA: result = "RGB+Alpha"; break; } return result; } static int Magick_RenderingIntent_to_PNG_RenderingIntent(const RenderingIntent intent) { switch (intent) { case PerceptualIntent: return 0; case RelativeIntent: return 1; case SaturationIntent: return 2; case AbsoluteIntent: return 3; default: return -1; } } static RenderingIntent Magick_RenderingIntent_from_PNG_RenderingIntent(const int ping_intent) { switch (ping_intent) { case 0: return PerceptualIntent; case 1: return RelativeIntent; case 2: return SaturationIntent; case 3: return AbsoluteIntent; default: return UndefinedIntent; } } static const char * Magick_RenderingIntentString_from_PNG_RenderingIntent(const int ping_intent) { switch (ping_intent) { case 0: return "Perceptual Intent"; case 1: return "Relative Intent"; case 2: return "Saturation Intent"; case 3: return "Absolute Intent"; default: return "Undefined Intent"; } } static const char * Magick_ColorType_from_PNG_ColorType(const int ping_colortype) { switch (ping_colortype) { case 0: return "Grayscale"; case 2: return "Truecolor"; case 3: return "Indexed"; case 4: return "GrayAlpha"; case 6: return "RGBA"; default: return "UndefinedColorType"; } } #endif /* PNG_LIBPNG_VER > 10011 */ #endif /* MAGICKCORE_PNG_DELEGATE */ /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMNG() returns MagickTrue if the image format type, identified by the % magick string, is MNG. % % The format of the IsMNG method is: % % MagickBooleanType IsMNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % % */ static MagickBooleanType IsMNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\212MNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s J N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsJNG() returns MagickTrue if the image format type, identified by the % magick string, is JNG. % % The format of the IsJNG method is: % % MagickBooleanType IsJNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
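%
%  For reference, the eight signature bytes tested below are
%  8b 4a 4e 47 0d 0a 1a 0a ("\213JNG\r\n\032\n"): the PNG signature with
%  its first byte changed from 0x89 to 0x8b and "PNG" replaced by "JNG".
%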
% % */ static MagickBooleanType IsJNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\213JNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPNG() returns MagickTrue if the image format type, identified by the % magick string, is PNG. % % The format of the IsPNG method is: % % MagickBooleanType IsPNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\211PNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } #if defined(MAGICKCORE_PNG_DELEGATE) #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #if (PNG_LIBPNG_VER > 10011) static size_t WriteBlobMSBULong(Image *image,const size_t value) { unsigned char buffer[4]; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; return((size_t) WriteBlob(image,4,buffer)); } static void PNGLong(png_bytep p,png_uint_32 value) { *p++=(png_byte) ((value >> 24) & 0xff); *p++=(png_byte) ((value >> 16) & 0xff); *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGsLong(png_bytep p,png_int_32 value) { *p++=(png_byte) ((value >> 24) & 0xff); *p++=(png_byte) ((value >> 16) & 0xff); *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGShort(png_bytep p,png_uint_16 value) { *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGType(png_bytep p,const png_byte *type) { (void) memcpy(p,type,4*sizeof(png_byte)); } static void LogPNGChunk(MagickBooleanType logging, const png_byte *type, size_t length) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing %c%c%c%c chunk, length: %.20g", type[0],type[1],type[2],type[3],(double) length); } #endif /* PNG_LIBPNG_VER > 10011 */ #if defined(__cplusplus) || defined(c_plusplus) } #endif #if PNG_LIBPNG_VER > 10011 /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPNGImage() reads a Portable Network Graphics (PNG) or % Multiple-image Network Graphics (MNG) image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image or set of images. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the ReadPNGImage method is: % % Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % % To do, more or less in chronological order (as of version 5.5.2, % November 26, 2002 -- glennrp -- see also "To do" under WriteMNGImage): % % Get 16-bit cheap transparency working. 
%
%    (At this point, PNG decoding is supposed to be in full MNG-LC compliance)
%
%    Preserve all unknown and not-yet-handled known chunks found in input
%    PNG file and copy them into output PNG files according to the PNG
%    copying rules.
%
%    (At this point, PNG encoding should be in full MNG compliance)
%
%    Provide options for choice of background to use when the MNG BACK
%    chunk is not present or is not mandatory (i.e., leave transparent,
%    user specified, MNG BACK, PNG bKGD)
%
%    Implement LOOP/ENDL [done, but could do discretionary loops more
%    efficiently by linking in the duplicate frames.].
%
%    Decode and act on the MHDR simplicity profile (offer option to reject
%    files or attempt to process them anyway when the profile isn't LC or VLC).
%
%    Upgrade to full MNG without Delta-PNG.
%
%        o  BACK [done a while ago except for background image ID]
%        o  MOVE [done 15 May 1999]
%        o  CLIP [done 15 May 1999]
%        o  DISC [done 19 May 1999]
%        o  SAVE [partially done 19 May 1999 (marks objects frozen)]
%        o  SEEK [partially done 19 May 1999 (discard function only)]
%        o  SHOW
%        o  PAST
%        o  BASI
%        o  MNG-level tEXt/iTXt/zTXt
%        o  pHYg
%        o  pHYs
%        o  sBIT
%        o  bKGD
%        o  iTXt (wait for libpng implementation).
%
%    Use the scene signature to discover when an identical scene is
%    being reused, and just point to the original image->exception instead
%    of storing another set of pixels.  This is not specific to MNG
%    but could be applied generally.
%
%    Upgrade to full MNG with Delta-PNG.
%
%    JNG tEXt/iTXt/zTXt
%
%    We will not attempt to read files containing the CgBI chunk.
%    They are really Xcode files meant for display on the iPhone.
%    These are not valid PNG files and it is impossible to recover
%    the original PNG from files that have been converted to Xcode-PNG,
%    since irretrievable loss of color data has occurred due to the
%    use of premultiplied alpha.
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  This is the function that does the actual reading of data.  It is the
  same as the one supplied in libpng, except that it receives the
  datastream from the ReadBlob() function instead of standard input.
*/
static void png_get_data(png_structp png_ptr,png_bytep data,png_size_t length)
{
  Image
    *image;

  image=(Image *) png_get_io_ptr(png_ptr);

  if (length != 0)
    {
      png_size_t
        check;

      check=(png_size_t) ReadBlob(image,(size_t) length,data);

      if (check != length)
        {
          char
            msg[MaxTextExtent];

          (void) FormatLocaleString(msg,MaxTextExtent,
            "Expected %.20g bytes; found %.20g bytes",(double) length,
            (double) check);

          png_warning(png_ptr,msg);
          png_error(png_ptr,"Read Exception");
        }
    }
}

#if !defined(PNG_READ_EMPTY_PLTE_SUPPORTED) && \
    !defined(PNG_MNG_FEATURES_SUPPORTED)
/* We use mng_get_data() instead of png_get_data() if we have a libpng
 * older than libpng-1.0.3a, which was the first to allow the empty
 * PLTE, or a newer libpng in which PNG_MNG_FEATURES_SUPPORTED was
 * ifdef'ed out.  Earlier versions would crash if the bKGD chunk was
 * encountered after an empty PLTE, so we have to look ahead for bKGD
 * chunks and remove them from the datastream that is passed to libpng,
 * and store their contents for later use.
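 *
 * An illustrative walk-through (restating the code below, not additional
 * protocol): when a 4-byte length read returns 00 00 00 00, the next four
 * bytes are read ahead and compared with "PLTE"; a match means the stream
 * holds an empty global PLTE that old libpng cannot digest, so
 * found_empty_plte is set.  When a length of 00 00 00 01 is then followed
 * by "bKGD" while found_empty_plte is set, the one-byte bKGD payload is
 * captured into saved_bkgd_index instead of being handed to libpng.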
*/ static void mng_get_data(png_structp png_ptr,png_bytep data,png_size_t length) { MngInfo *mng_info; Image *image; png_size_t check; register ssize_t i; i=0; mng_info=(MngInfo *) png_get_io_ptr(png_ptr); image=(Image *) mng_info->image; while (mng_info->bytes_in_read_buffer && length) { data[i]=mng_info->read_buffer[i]; mng_info->bytes_in_read_buffer--; length--; i++; } if (length != 0) { check=(png_size_t) ReadBlob(image,(size_t) length,(char *) data); if (check != length) png_error(png_ptr,"Read Exception"); if (length == 4) { if ((data[0] == 0) && (data[1] == 0) && (data[2] == 0) && (data[3] == 0)) { check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->read_buffer[4]=0; mng_info->bytes_in_read_buffer=4; if (memcmp(mng_info->read_buffer,mng_PLTE,4) == 0) mng_info->found_empty_plte=MagickTrue; if (memcmp(mng_info->read_buffer,mng_IEND,4) == 0) { mng_info->found_empty_plte=MagickFalse; mng_info->have_saved_bkgd_index=MagickFalse; } } if ((data[0] == 0) && (data[1] == 0) && (data[2] == 0) && (data[3] == 1)) { check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->read_buffer[4]=0; mng_info->bytes_in_read_buffer=4; if (memcmp(mng_info->read_buffer,mng_bKGD,4) == 0) if (mng_info->found_empty_plte) { /* Skip the bKGD data byte and CRC. */ check=(png_size_t) ReadBlob(image,5,(char *) mng_info->read_buffer); check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->saved_bkgd_index=mng_info->read_buffer[0]; mng_info->have_saved_bkgd_index=MagickTrue; mng_info->bytes_in_read_buffer=0; } } } } } #endif static void png_put_data(png_structp png_ptr,png_bytep data,png_size_t length) { Image *image; image=(Image *) png_get_io_ptr(png_ptr); if (length != 0) { png_size_t check; check=(png_size_t) WriteBlob(image,(size_t) length,data); if (check != length) png_error(png_ptr,"WriteBlob Failed"); } } static void png_flush_data(png_structp png_ptr) { (void) png_ptr; } #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED static int PalettesAreEqual(Image *a,Image *b) { ssize_t i; if ((a == (Image *) NULL) || (b == (Image *) NULL)) return((int) MagickFalse); if (a->storage_class != PseudoClass || b->storage_class != PseudoClass) return((int) MagickFalse); if (a->colors != b->colors) return((int) MagickFalse); for (i=0; i < (ssize_t) a->colors; i++) { if ((a->colormap[i].red != b->colormap[i].red) || (a->colormap[i].green != b->colormap[i].green) || (a->colormap[i].blue != b->colormap[i].blue)) return((int) MagickFalse); } return((int) MagickTrue); } #endif static void MngInfoDiscardObject(MngInfo *mng_info,int i) { if (i && (i < MNG_MAX_OBJECTS) && (mng_info != (MngInfo *) NULL) && mng_info->exists[i] && !mng_info->frozen[i]) { #ifdef MNG_OBJECT_BUFFERS if (mng_info->ob[i] != (MngBuffer *) NULL) { if (mng_info->ob[i]->reference_count > 0) mng_info->ob[i]->reference_count--; if (mng_info->ob[i]->reference_count == 0) { if (mng_info->ob[i]->image != (Image *) NULL) mng_info->ob[i]->image=DestroyImage(mng_info->ob[i]->image); mng_info->ob[i]=DestroyString(mng_info->ob[i]); } } mng_info->ob[i]=(MngBuffer *) NULL; #endif mng_info->exists[i]=MagickFalse; mng_info->invisible[i]=MagickFalse; mng_info->viewable[i]=MagickFalse; mng_info->frozen[i]=MagickFalse; mng_info->x_off[i]=0; mng_info->y_off[i]=0; mng_info->object_clip[i].left=0; mng_info->object_clip[i].right=(ssize_t) PNG_UINT_31_MAX; mng_info->object_clip[i].top=0; mng_info->object_clip[i].bottom=(ssize_t) PNG_UINT_31_MAX; } } static MngInfo 
*MngInfoFreeStruct(MngInfo *mng_info) { register ssize_t i; if (mng_info == (MngInfo *) NULL) return((MngInfo *) NULL); for (i=1; i < MNG_MAX_OBJECTS; i++) MngInfoDiscardObject(mng_info,i); mng_info->global_plte=(png_colorp) RelinquishMagickMemory(mng_info->global_plte); return((MngInfo *) RelinquishMagickMemory(mng_info)); } static long mng_get_long(unsigned char *p) { return ((long) (((png_uint_32) p[0] << 24) | ((png_uint_32) p[1] << 16) | ((png_uint_32) p[2] << 8) | (png_uint_32) p[3])); } static MngBox mng_minimum_box(MngBox box1,MngBox box2) { MngBox box; box=box1; if (box.left < box2.left) box.left=box2.left; if (box.top < box2.top) box.top=box2.top; if (box.right > box2.right) box.right=box2.right; if (box.bottom > box2.bottom) box.bottom=box2.bottom; return box; } static MngBox mng_read_box(MngBox previous_box,char delta_type,unsigned char *p) { MngBox box; /* Read clipping boundaries from DEFI, CLIP, FRAM, or PAST chunk. */ box.left=mng_get_long(p); box.right=mng_get_long(&p[4]); box.top=mng_get_long(&p[8]); box.bottom=mng_get_long(&p[12]); if (delta_type != 0) { box.left+=previous_box.left; box.right+=previous_box.right; box.top+=previous_box.top; box.bottom+=previous_box.bottom; } return(box); } static MngPair mng_read_pair(MngPair previous_pair,int delta_type, unsigned char *p) { MngPair pair; /* Read two ssize_t's from CLON, MOVE or PAST chunk */ pair.a=mng_get_long(p); pair.b=mng_get_long(&p[4]); if (delta_type != 0) { pair.a+=previous_pair.a; pair.b+=previous_pair.b; } return(pair); } typedef struct _PNGErrorInfo { Image *image; ExceptionInfo *exception; } PNGErrorInfo; static void MagickPNGErrorHandler(png_struct *ping,png_const_charp message) { Image *image; image=(Image *) png_get_error_ptr(ping); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " libpng-%s error: %s", PNG_LIBPNG_VER_STRING,message); (void) ThrowMagickException(&image->exception,GetMagickModule(),CoderError, message,"`%s'",image->filename); #if (PNG_LIBPNG_VER < 10500) /* A warning about deprecated use of jmpbuf here is unavoidable if you * are building with libpng-1.4.x and can be ignored. */ longjmp(ping->jmpbuf,1); #else png_longjmp(ping,1); #endif } static void MagickPNGWarningHandler(png_struct *ping,png_const_charp message) { Image *image; if (LocaleCompare(message, "Missing PLTE before tRNS") == 0) png_error(ping, message); image=(Image *) png_get_error_ptr(ping); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " libpng-%s warning: %s", PNG_LIBPNG_VER_STRING,message); (void) ThrowMagickException(&image->exception,GetMagickModule(),CoderWarning, message,"`%s'",image->filename); } #ifdef PNG_USER_MEM_SUPPORTED #if PNG_LIBPNG_VER >= 10400 static png_voidp Magick_png_malloc(png_structp png_ptr,png_alloc_size_t size) #else static png_voidp Magick_png_malloc(png_structp png_ptr,png_size_t size) #endif { (void) png_ptr; return((png_voidp) AcquireMagickMemory((size_t) size)); } /* Free a pointer. It is removed from the list at the same time. 
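 * (In this implementation there is no separate list to update; the
 * pointer is simply handed back to the Magick allocator via
 * RelinquishMagickMemory.)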
*/
static png_free_ptr Magick_png_free(png_structp png_ptr,png_voidp ptr)
{
  (void) png_ptr;
  ptr=RelinquishMagickMemory(ptr);
  return((png_free_ptr) NULL);
}
#endif

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static int
Magick_png_read_raw_profile(png_struct *ping,Image *image,
   const ImageInfo *image_info, png_textp text,int ii)
{
  register ssize_t
    i;

  register unsigned char
    *dp;

  register png_charp
    sp;

  size_t
    extent,
    length,
    nibbles;

  StringInfo
    *profile;

  const unsigned char
    unhex[103]={0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,1,
                2,3,4,5,6,7,8,9,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,10,11,12,
                13,14,15};

  sp=text[ii].text+1;
  extent=text[ii].text_length;

  /* look for newline (test extent before dereferencing sp so that the
     scan can never run past the end of the text buffer, and so that
     extent cannot wrap around below zero) */
  while ((extent != 0) && (*sp != '\n'))
  {
    sp++;
    extent--;
  }

  /* look for length */
  while ((extent != 0) &&
         ((*sp == '\0') || (*sp == ' ') || (*sp == '\n')))
  {
    sp++;
    extent--;
  }

  if (extent == 0)
  {
    png_warning(ping,"missing profile length");
    return(MagickFalse);
  }

  length=StringToLong(sp);

  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
       "      length: %lu",(unsigned long) length);

  while ((extent != 0) && (*sp != ' ') && (*sp != '\n'))
  {
    sp++;
    extent--;
  }

  if (extent == 0)
  {
    png_warning(ping,"invalid profile length");
    return(MagickFalse);
  }

  /* allocate space */
  if (length == 0)
  {
    png_warning(ping,"invalid profile length");
    return(MagickFalse);
  }

  profile=BlobToStringInfo((const void *) NULL,length);

  if (profile == (StringInfo *) NULL)
  {
    png_warning(ping, "unable to copy profile");
    return(MagickFalse);
  }

  /* copy profile, skipping white space and column 1 "=" signs */
  dp=GetStringInfoDatum(profile);
  nibbles=length*2;

  for (i=0; i < (ssize_t) nibbles; i++)
  {
    while (*sp < '0' || (*sp > '9' && *sp < 'a') || *sp > 'f')
    {
      if (*sp == '\0')
      {
        png_warning(ping, "ran out of profile data");
        /* destroy the partially filled profile so it is not leaked */
        profile=DestroyStringInfo(profile);
        return(MagickFalse);
      }

      sp++;
    }

    if (i%2 == 0)
      *dp=(unsigned char) (16*unhex[(int) *sp++]);

    else
      (*dp++)+=unhex[(int) *sp++];
  }

  /* We have already read the "Raw profile type " prefix of the key */
  (void) SetImageProfile(image,&text[ii].key[17],profile);
  profile=DestroyStringInfo(profile);

  if (image_info->verbose)
    (void) printf(" Found a generic profile, type %s\n",&text[ii].key[17]);

  return MagickTrue;
}

#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
static int read_user_chunk_callback(png_struct *ping, png_unknown_chunkp
   chunk)
{
  Image
    *image;

  /* The unknown chunk structure contains the chunk data:
     png_byte name[5];
     png_byte *data;
     png_size_t size;

     Note that libpng has already taken care of the CRC handling.
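 *
 * Chunk names are matched byte-by-byte against ASCII codes; for example,
 * the eXIf test below checks name[0..3] against {101,88,73,102} ("eXIf"),
 * and also accepts 120 ('x') in the second position for the early "exIf"
 * spelling.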
*/

  LogMagickEvent(CoderEvent,GetMagickModule(),
     " read_user_chunk: found %c%c%c%c chunk",
       chunk->name[0],chunk->name[1],chunk->name[2],chunk->name[3]);

  if (chunk->name[0]  == 101 &&
      (chunk->name[1] ==  88 || chunk->name[1] == 120 ) &&
       chunk->name[2] ==  73 &&
       chunk->name[3] == 102)
    {
      /* process eXIf or exIf chunk */

      PNGErrorInfo
        *error_info;

      StringInfo
        *profile;

      unsigned char
        *p;

      png_byte
        *s;

      size_t
        i;

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " recognized eXIf chunk");

      image=(Image *) png_get_user_chunk_ptr(ping);

      error_info=(PNGErrorInfo *) png_get_error_ptr(ping);

      profile=BlobToStringInfo((const void *) NULL,chunk->size+6);

      if (profile == (StringInfo *) NULL)
        {
          (void) ThrowMagickException(error_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(-1);
        }

      p=GetStringInfoDatum(profile);

      /* Initialize profile with "Exif\0\0" */
      *p++ ='E';
      *p++ ='x';
      *p++ ='i';
      *p++ ='f';
      *p++ ='\0';
      *p++ ='\0';

      s=chunk->data;
      i=0;

      if (chunk->size > 6)
        {
          /* Skip first 6 bytes if "Exif\0\0" is already present by accident */
          if (s[0] == 'E' && s[1] == 'x'  && s[2] == 'i' &&
              s[3] == 'f' && s[4] == '\0' && s[5] == '\0')
            {
              s+=6;
              i=6;
              SetStringInfoLength(profile,chunk->size);
              p=GetStringInfoDatum(profile);
              /* Step over the "Exif\0\0" header written above (preserved
                 by SetStringInfoLength); without this the payload copy
                 below would start at offset 0 and overwrite the header. */
              p+=6;
            }
        }

      /* copy chunk->data to profile */
      for (; i<chunk->size; i++)
        *p++ = *s++;

      (void) SetImageProfile(image,"exif",profile);

      return(1);
    }

  /* vpAg (deprecated, replaced by caNv) */

  if (chunk->name[0] == 118 &&
      chunk->name[1] == 112 &&
      chunk->name[2] ==  65 &&
      chunk->name[3] == 103)
    { /* recognized vpAg */

      if (chunk->size != 9)
        return(-1); /* Error return */

      if (chunk->data[8] != 0)
        return(0);  /* ImageMagick requires pixel units */

      image=(Image *) png_get_user_chunk_ptr(ping);

      image->page.width=(size_t) mng_get_long(chunk->data);
      image->page.height=(size_t) mng_get_long(&chunk->data[4]);

      return(1);
    }

  /* caNv */

  if (chunk->name[0] ==  99 &&
      chunk->name[1] ==  97 &&
      chunk->name[2] ==  78 &&
      chunk->name[3] == 118)
    { /* recognized caNv */

      if (chunk->size != 16)
        return(-1); /* Error return */

      image=(Image *) png_get_user_chunk_ptr(ping);

      image->page.width=(size_t) mng_get_long(chunk->data);
      image->page.height=(size_t) mng_get_long(&chunk->data[4]);
      /* The caNv offsets are signed, so cast through ssize_t, not size_t */
      image->page.x=(ssize_t) mng_get_long(&chunk->data[8]);
      image->page.y=(ssize_t) mng_get_long(&chunk->data[12]);

      /* Return one of the following: */
      /*    return(-n);  chunk had an error */
      /*    return(0);  did not recognize */
      /*    return(n);  success */

      return(1);
    }

  return(0); /* Did not recognize */
}
#endif

#if defined(PNG_tIME_SUPPORTED)
static void
read_tIME_chunk(Image *image,png_struct *ping,png_info *info)
{
  png_timep
    time;

  if (png_get_tIME(ping,info,&time))
    {
      char
        timestamp[21];

      FormatLocaleString(timestamp,21,"%04d-%02d-%02dT%02d:%02d:%02dZ",
        time->year,time->month,time->day,time->hour,time->minute,
        time->second);
      SetImageProperty(image,"png:tIME",timestamp);
    }
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d O n e P N G I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadOnePNGImage() reads a Portable Network Graphics (PNG) image file
%  (minus the 8-byte signature) and returns it.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
% % The format of the ReadOnePNGImage method is: % % Image *ReadOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o mng_info: Specifies a pointer to a MngInfo structure. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { /* Read one PNG image */ /* To do: Read the tEXt/Creation Time chunk into the date:create property */ Image *image; char im_vers[32], libpng_runv[32], libpng_vers[32], zlib_runv[32], zlib_vers[32]; int intent, /* "PNG Rendering intent", which is ICC intent + 1 */ num_raw_profiles, num_text, num_text_total, num_passes, number_colors, pass, ping_bit_depth, ping_color_type, ping_file_depth, ping_interlace_method, ping_compression_method, ping_filter_method, ping_num_trans, unit_type; double file_gamma; LongPixelPacket transparent_color; MagickBooleanType logging, ping_found_cHRM, ping_found_gAMA, ping_found_iCCP, ping_found_sRGB, ping_found_sRGB_cHRM, ping_preserve_iCCP, status; MemoryInfo *volatile pixel_info; png_bytep ping_trans_alpha; png_color_16p ping_background, ping_trans_color; png_info *end_info, *ping_info; png_struct *ping; png_textp text; png_uint_32 ping_height, ping_width, x_resolution, y_resolution; ssize_t ping_rowbytes, y; register unsigned char *p; register IndexPacket *indexes; register ssize_t i, x; register PixelPacket *q; size_t length, row_offset; Quantum *volatile quantum_scanline; QuantumInfo *volatile quantum_info; ssize_t j; unsigned char *ping_pixels; #ifdef PNG_UNKNOWN_CHUNKS_SUPPORTED png_byte unused_chunks[]= { 104, 73, 83, 84, (png_byte) '\0', /* hIST */ 105, 84, 88, 116, (png_byte) '\0', /* iTXt */ 112, 67, 65, 76, (png_byte) '\0', /* pCAL */ 115, 67, 65, 76, (png_byte) '\0', /* sCAL */ 115, 80, 76, 84, (png_byte) '\0', /* sPLT */ #if !defined(PNG_tIME_SUPPORTED) 116, 73, 77, 69, (png_byte) '\0', /* tIME */ #endif #ifdef PNG_APNG_SUPPORTED /* libpng was built with APNG patch; */ /* ignore the APNG chunks */ 97, 99, 84, 76, (png_byte) '\0', /* acTL */ 102, 99, 84, 76, (png_byte) '\0', /* fcTL */ 102, 100, 65, 84, (png_byte) '\0', /* fdAT */ #endif }; #endif /* Define these outside of the following "if logging()" block so they will * show in debuggers. 
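 * (The *_vers strings record the versions this coder was compiled against;
 * the *_runv strings record the versions actually loaded at run time,
 * which can differ when a shared libpng or zlib has been upgraded.)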
*/ *im_vers='\0'; (void) ConcatenateMagickString(im_vers, MagickLibVersionText,32); (void) ConcatenateMagickString(im_vers, MagickLibAddendum,32); *libpng_vers='\0'; (void) ConcatenateMagickString(libpng_vers, PNG_LIBPNG_VER_STRING,32); *libpng_runv='\0'; (void) ConcatenateMagickString(libpng_runv, png_get_libpng_ver(NULL),32); *zlib_vers='\0'; (void) ConcatenateMagickString(zlib_vers, ZLIB_VERSION,32); *zlib_runv='\0'; (void) ConcatenateMagickString(zlib_runv, zlib_version,32); logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOnePNGImage()\n" " IM version = %s\n" " Libpng version = %s", im_vers, libpng_vers); if (logging != MagickFalse) { if (LocaleCompare(libpng_vers,libpng_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " running with %s", libpng_runv); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Zlib version = %s", zlib_vers); if (LocaleCompare(zlib_vers,zlib_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " running with %s", zlib_runv); } } #if (PNG_LIBPNG_VER < 10200) if (image_info->verbose) printf("Your PNG library (libpng-%s) is rather old.\n", PNG_LIBPNG_VER_STRING); #endif #if (PNG_LIBPNG_VER >= 10400) # ifndef PNG_TRANSFORM_GRAY_TO_RGB /* Added at libpng-1.4.0beta67 */ if (image_info->verbose) { printf("Your PNG library (libpng-%s) is an old beta version.\n", PNG_LIBPNG_VER_STRING); printf("Please update it.\n"); } # endif #endif image=mng_info->image; if (logging != MagickFalse) { (void)LogMagickEvent(CoderEvent,GetMagickModule(), " Before reading:\n" " image->matte=%d\n" " image->rendering_intent=%d\n" " image->colorspace=%d\n" " image->gamma=%f", (int) image->matte, (int) image->rendering_intent, (int) image->colorspace, image->gamma); } intent=Magick_RenderingIntent_to_PNG_RenderingIntent(image->rendering_intent); /* Set to an out-of-range color unless tRNS chunk is present */ transparent_color.red=65537; transparent_color.green=65537; transparent_color.blue=65537; transparent_color.opacity=65537; number_colors=0; num_text = 0; num_text_total = 0; num_raw_profiles = 0; ping_found_cHRM = MagickFalse; ping_found_gAMA = MagickFalse; ping_found_iCCP = MagickFalse; ping_found_sRGB = MagickFalse; ping_found_sRGB_cHRM = MagickFalse; ping_preserve_iCCP = MagickFalse; /* Allocate the PNG structures */ #ifdef PNG_USER_MEM_SUPPORTED ping=png_create_read_struct_2(PNG_LIBPNG_VER_STRING, image, MagickPNGErrorHandler,MagickPNGWarningHandler, NULL, (png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free); #else ping=png_create_read_struct(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler); #endif if (ping == (png_struct *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); ping_info=png_create_info_struct(ping); if (ping_info == (png_info *) NULL) { png_destroy_read_struct(&ping,(png_info **) NULL,(png_info **) NULL); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } end_info=png_create_info_struct(ping); if (end_info == (png_info *) NULL) { png_destroy_read_struct(&ping,&ping_info,(png_info **) NULL); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixel_info=(MemoryInfo *) NULL; quantum_scanline = (Quantum *) NULL; quantum_info = (QuantumInfo *) NULL; if (setjmp(png_jmpbuf(ping))) { /* PNG image is corrupt. 
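 * Control reaches this block via longjmp() when libpng calls png_error()
 * (see MagickPNGErrorHandler); everything allocated before the error must
 * be released here, since the normal cleanup path will not be reached.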
*/ png_destroy_read_struct(&ping,&ping_info,&end_info); if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); quantum_scanline=(Quantum *) RelinquishMagickMemory(quantum_scanline); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage() with error."); if (image != (Image *) NULL) { InheritException(exception,&image->exception); image=DestroyImageList(image); } return(image); } /* { For navigation to end of SETJMP-protected block. Within this * block, use png_error() instead of Throwing an Exception, to ensure * that libpng is able to clean up, and that the semaphore is unlocked. */ #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE LockSemaphoreInfo(ping_semaphore); #endif #ifdef PNG_BENIGN_ERRORS_SUPPORTED /* Allow benign errors */ png_set_benign_errors(ping, 1); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED /* Reject images with too many rows or columns */ png_set_user_limits(ping, (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(WidthResource)), (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(HeightResource))); #endif /* PNG_SET_USER_LIMITS_SUPPORTED */ /* Prepare PNG for reading. */ mng_info->image_found++; png_set_sig_bytes(ping,8); if (LocaleCompare(image_info->magick,"MNG") == 0) { #if defined(PNG_MNG_FEATURES_SUPPORTED) (void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES); png_set_read_fn(ping,image,png_get_data); #else #if defined(PNG_READ_EMPTY_PLTE_SUPPORTED) png_permit_empty_plte(ping,MagickTrue); png_set_read_fn(ping,image,png_get_data); #else mng_info->image=image; mng_info->bytes_in_read_buffer=0; mng_info->found_empty_plte=MagickFalse; mng_info->have_saved_bkgd_index=MagickFalse; png_set_read_fn(ping,mng_info,mng_get_data); #endif #endif } else png_set_read_fn(ping,image,png_get_data); { const char *value; value=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",value) == MagickFalse) { value=GetImageOption(image_info,"png:preserve-iCCP"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-iCCP"); if (value != NULL) ping_preserve_iCCP=MagickTrue; #if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED) /* Don't let libpng check for ICC/sRGB profile because we're going * to do that anyway. This feature was added at libpng-1.6.12. * If logging, go ahead and check and issue a warning as appropriate. 
*/ if (logging == MagickFalse) png_set_option(ping, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON); #endif } #if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) else { /* Ignore the iCCP chunk */ png_set_keep_unknown_chunks(ping, 1, (png_bytep)mng_iCCP, 1); } #endif } #if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) /* Ignore unused chunks and all unknown chunks except for caNv and vpAg */ # if PNG_LIBPNG_VER < 10700 /* Avoid libpng16 warning */ png_set_keep_unknown_chunks(ping, 2, (png_bytep)NULL, 0); # else png_set_keep_unknown_chunks(ping, 1, (png_bytep)NULL, 0); # endif png_set_keep_unknown_chunks(ping, 2, (png_bytep)mng_caNv, 1); png_set_keep_unknown_chunks(ping, 2, (png_bytep)mng_vpAg, 1); png_set_keep_unknown_chunks(ping, 1, unused_chunks, (int)sizeof(unused_chunks)/5); /* Callback for other unknown chunks */ png_set_read_user_chunk_fn(ping, image, read_user_chunk_callback); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED #if (PNG_LIBPNG_VER >= 10400) /* Limit the size of the chunk storage cache used for sPLT, text, * and unknown chunks. */ png_set_chunk_cache_max(ping, 32767); #endif #endif #ifdef PNG_READ_CHECK_FOR_INVALID_INDEX_SUPPORTED /* Disable new libpng-1.5.10 feature */ png_set_check_for_invalid_index (ping, 0); #endif #if (PNG_LIBPNG_VER < 10400) # if defined(PNG_USE_PNGGCCRD) && defined(PNG_ASSEMBLER_CODE_SUPPORTED) && \ (PNG_LIBPNG_VER >= 10200) && (PNG_LIBPNG_VER < 10220) && defined(__i386__) /* Disable thread-unsafe features of pnggccrd */ if (png_access_version_number() >= 10200) { png_uint_32 mmx_disable_mask=0; png_uint_32 asm_flags; mmx_disable_mask |= ( PNG_ASM_FLAG_MMX_READ_COMBINE_ROW \ | PNG_ASM_FLAG_MMX_READ_FILTER_SUB \ | PNG_ASM_FLAG_MMX_READ_FILTER_AVG \ | PNG_ASM_FLAG_MMX_READ_FILTER_PAETH ); asm_flags=png_get_asm_flags(ping); png_set_asm_flags(ping, asm_flags & ~mmx_disable_mask); } # endif #endif png_read_info(ping,ping_info); /* Read and check IHDR chunk data */ png_get_IHDR(ping,ping_info,&ping_width,&ping_height, &ping_bit_depth,&ping_color_type, &ping_interlace_method,&ping_compression_method, &ping_filter_method); ping_file_depth = ping_bit_depth; /* Swap bytes if requested */ if (ping_file_depth == 16) { const char *value; value=GetImageOption(image_info,"png:swap-bytes"); if (value == NULL) value=GetImageArtifact(image,"png:swap-bytes"); if (value != NULL) png_set_swap(ping); } /* Save bit-depth and color-type in case we later want to write a PNG00 */ { char msg[MaxTextExtent]; (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_color_type); (void) SetImageProperty(image,"png:IHDR.color-type-orig",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_bit_depth); (void) SetImageProperty(image,"png:IHDR.bit-depth-orig",msg); } (void) png_get_tRNS(ping, ping_info, &ping_trans_alpha, &ping_num_trans, &ping_trans_color); (void) png_get_bKGD(ping, ping_info, &ping_background); if (ping_bit_depth < 8) { png_set_packing(ping); ping_bit_depth = 8; } image->depth=ping_bit_depth; image->depth=GetImageQuantumDepth(image,MagickFalse); image->interlace=ping_interlace_method != 0 ? 
PNGInterlace : NoInterlace; if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)) { image->rendering_intent=UndefinedIntent; intent=Magick_RenderingIntent_to_PNG_RenderingIntent(UndefinedIntent); (void) memset(&image->chromaticity,0, sizeof(image->chromaticity)); } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG width: %.20g, height: %.20g\n" " PNG color_type: %d, bit_depth: %d\n" " PNG compression_method: %d\n" " PNG interlace_method: %d, filter_method: %d", (double) ping_width, (double) ping_height, ping_color_type, ping_bit_depth, ping_compression_method, ping_interlace_method,ping_filter_method); } if (png_get_valid(ping,ping_info, PNG_INFO_iCCP)) { ping_found_iCCP=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG iCCP chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_gAMA)) { ping_found_gAMA=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG gAMA chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { ping_found_cHRM=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG cHRM chunk."); } if (ping_found_iCCP != MagickTrue && png_get_valid(ping,ping_info, PNG_INFO_sRGB)) { ping_found_sRGB=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG sRGB chunk."); } #ifdef PNG_READ_iCCP_SUPPORTED if (ping_found_iCCP !=MagickTrue && ping_found_sRGB != MagickTrue && png_get_valid(ping,ping_info, PNG_INFO_iCCP)) { ping_found_iCCP=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG iCCP chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_iCCP)) { int compression; #if (PNG_LIBPNG_VER < 10500) png_charp info; #else png_bytep info; #endif png_charp name; png_uint_32 profile_length; (void) png_get_iCCP(ping,ping_info,&name,(int *) &compression,&info, &profile_length); if (profile_length != 0) { StringInfo *profile; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG iCCP chunk."); profile=BlobToStringInfo(info,profile_length); if (profile == (StringInfo *) NULL) { png_warning(ping, "ICC profile is NULL"); profile=DestroyStringInfo(profile); } else { if (ping_preserve_iCCP == MagickFalse) { int icheck, got_crc=0; png_uint_32 length, profile_crc=0; unsigned char *data; length=(png_uint_32) GetStringInfoLength(profile); for (icheck=0; sRGB_info[icheck].len > 0; icheck++) { if (length == sRGB_info[icheck].len) { if (got_crc == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile (potentially sRGB)", (unsigned long) length); data=GetStringInfoDatum(profile); profile_crc=crc32(0,data,length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " with crc=%8x",(unsigned int) profile_crc); got_crc++; } if (profile_crc == sRGB_info[icheck].crc) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " It is sRGB with rendering intent = %s", Magick_RenderingIntentString_from_PNG_RenderingIntent( sRGB_info[icheck].intent)); if (image->rendering_intent==UndefinedIntent) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent( sRGB_info[icheck].intent); } break; } } } if (sRGB_info[icheck].len == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile not recognized as sRGB", (unsigned long) length); (void) 
SetImageProfile(image,"icc",profile); } } else /* Preserve-iCCP */ { (void) SetImageProfile(image,"icc",profile); } profile=DestroyStringInfo(profile); } } } #endif #if defined(PNG_READ_sRGB_SUPPORTED) { if (ping_found_iCCP==MagickFalse && png_get_valid(ping,ping_info, PNG_INFO_sRGB)) { if (png_get_sRGB(ping,ping_info,&intent)) { if (image->rendering_intent == UndefinedIntent) image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent (intent); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG sRGB chunk: rendering_intent: %d",intent); } } else if (mng_info->have_global_srgb) { if (image->rendering_intent == UndefinedIntent) image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent (mng_info->global_srgb_intent); } } #endif { if (!png_get_gAMA(ping,ping_info,&file_gamma)) if (mng_info->have_global_gama) png_set_gAMA(ping,ping_info,mng_info->global_gamma); if (png_get_gAMA(ping,ping_info,&file_gamma)) { image->gamma=(float) file_gamma; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG gAMA chunk: gamma: %f",file_gamma); } } if (!png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { if (mng_info->have_global_chrm != MagickFalse) { (void) png_set_cHRM(ping,ping_info, mng_info->global_chrm.white_point.x, mng_info->global_chrm.white_point.y, mng_info->global_chrm.red_primary.x, mng_info->global_chrm.red_primary.y, mng_info->global_chrm.green_primary.x, mng_info->global_chrm.green_primary.y, mng_info->global_chrm.blue_primary.x, mng_info->global_chrm.blue_primary.y); } } if (png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { (void) png_get_cHRM(ping,ping_info, &image->chromaticity.white_point.x, &image->chromaticity.white_point.y, &image->chromaticity.red_primary.x, &image->chromaticity.red_primary.y, &image->chromaticity.green_primary.x, &image->chromaticity.green_primary.y, &image->chromaticity.blue_primary.x, &image->chromaticity.blue_primary.y); ping_found_cHRM=MagickTrue; if (image->chromaticity.red_primary.x>0.6399f && image->chromaticity.red_primary.x<0.6401f && image->chromaticity.red_primary.y>0.3299f && image->chromaticity.red_primary.y<0.3301f && image->chromaticity.green_primary.x>0.2999f && image->chromaticity.green_primary.x<0.3001f && image->chromaticity.green_primary.y>0.5999f && image->chromaticity.green_primary.y<0.6001f && image->chromaticity.blue_primary.x>0.1499f && image->chromaticity.blue_primary.x<0.1501f && image->chromaticity.blue_primary.y>0.0599f && image->chromaticity.blue_primary.y<0.0601f && image->chromaticity.white_point.x>0.3126f && image->chromaticity.white_point.x<0.3128f && image->chromaticity.white_point.y>0.3289f && image->chromaticity.white_point.y<0.3291f) ping_found_sRGB_cHRM=MagickTrue; } if (image->rendering_intent != UndefinedIntent) { if (ping_found_sRGB != MagickTrue && (ping_found_gAMA != MagickTrue || (image->gamma > .45 && image->gamma < .46)) && (ping_found_cHRM != MagickTrue || ping_found_sRGB_cHRM != MagickFalse) && ping_found_iCCP != MagickTrue) { png_set_sRGB(ping,ping_info, Magick_RenderingIntent_to_PNG_RenderingIntent (image->rendering_intent)); file_gamma=1.000f/2.200f; ping_found_sRGB=MagickTrue; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting sRGB as if in input"); } } #if defined(PNG_oFFs_SUPPORTED) if (png_get_valid(ping,ping_info,PNG_INFO_oFFs)) { image->page.x=(ssize_t) png_get_x_offset_pixels(ping, ping_info); image->page.y=(ssize_t) png_get_y_offset_pixels(ping, ping_info); if (logging != MagickFalse) if 
(image->page.x || image->page.y) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG oFFs chunk: x: %.20g, y: %.20g.",(double) image->page.x,(double) image->page.y); } #endif #if defined(PNG_pHYs_SUPPORTED) if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { if (mng_info->have_global_phys) { png_set_pHYs(ping,ping_info, mng_info->global_x_pixels_per_unit, mng_info->global_y_pixels_per_unit, mng_info->global_phys_unit_type); } } x_resolution=0; y_resolution=0; unit_type=0; if (png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { /* Set image resolution. */ (void) png_get_pHYs(ping,ping_info,&x_resolution,&y_resolution, &unit_type); image->x_resolution=(double) x_resolution; image->y_resolution=(double) y_resolution; if (unit_type == PNG_RESOLUTION_METER) { image->units=PixelsPerCentimeterResolution; image->x_resolution=(double) x_resolution/100.0; image->y_resolution=(double) y_resolution/100.0; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.", (double) x_resolution,(double) y_resolution,unit_type); } #endif if (png_get_valid(ping,ping_info,PNG_INFO_PLTE)) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); if ((number_colors == 0) && ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)) { if (mng_info->global_plte_length) { png_set_PLTE(ping,ping_info,mng_info->global_plte, (int) mng_info->global_plte_length); if (!png_get_valid(ping,ping_info,PNG_INFO_tRNS)) if (mng_info->global_trns_length) { if (mng_info->global_trns_length > mng_info->global_plte_length) { png_warning(ping, "global tRNS has more entries than global PLTE"); } else { png_set_tRNS(ping,ping_info,mng_info->global_trns, (int) mng_info->global_trns_length,NULL); } } #ifdef PNG_READ_bKGD_SUPPORTED if ( #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED mng_info->have_saved_bkgd_index || #endif png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { png_color_16 background; #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED if (mng_info->have_saved_bkgd_index) background.index=mng_info->saved_bkgd_index; #endif if (png_get_valid(ping, ping_info, PNG_INFO_bKGD)) background.index=ping_background->index; background.red=(png_uint_16) mng_info->global_plte[background.index].red; background.green=(png_uint_16) mng_info->global_plte[background.index].green; background.blue=(png_uint_16) mng_info->global_plte[background.index].blue; background.gray=(png_uint_16) mng_info->global_plte[background.index].green; png_set_bKGD(ping,ping_info,&background); } #endif } else png_error(ping,"No global PLTE in file"); } } #ifdef PNG_READ_bKGD_SUPPORTED if (mng_info->have_global_bkgd && (!png_get_valid(ping,ping_info,PNG_INFO_bKGD))) image->background_color=mng_info->mng_global_bkgd; if (png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { unsigned int bkgd_scale; /* Set image background color. * Scale background components to 16-bit, then scale * to quantum depth */ bkgd_scale = 1; if (ping_file_depth == 1) bkgd_scale = 255; else if (ping_file_depth == 2) bkgd_scale = 85; else if (ping_file_depth == 4) bkgd_scale = 17; if (ping_file_depth <= 8) bkgd_scale *= 257; ping_background->red *= bkgd_scale; ping_background->green *= bkgd_scale; ping_background->blue *= bkgd_scale; if (logging != MagickFalse) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG bKGD chunk, raw ping_background=(%d,%d,%d).\n" " bkgd_scale=%d. 
ping_background=(%d,%d,%d).", ping_background->red,ping_background->green, ping_background->blue, bkgd_scale,ping_background->red, ping_background->green,ping_background->blue); } image->background_color.red= ScaleShortToQuantum(ping_background->red); image->background_color.green= ScaleShortToQuantum(ping_background->green); image->background_color.blue= ScaleShortToQuantum(ping_background->blue); image->background_color.opacity=OpaqueOpacity; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->background_color=(%.20g,%.20g,%.20g).", (double) image->background_color.red, (double) image->background_color.green, (double) image->background_color.blue); } #endif /* PNG_READ_bKGD_SUPPORTED */ if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) { /* Image has a tRNS chunk. */ int max_sample; size_t one = 1; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG tRNS chunk."); max_sample = (int) ((one << ping_file_depth) - 1); if ((ping_color_type == PNG_COLOR_TYPE_GRAY && (int)ping_trans_color->gray > max_sample) || (ping_color_type == PNG_COLOR_TYPE_RGB && ((int)ping_trans_color->red > max_sample || (int)ping_trans_color->green > max_sample || (int)ping_trans_color->blue > max_sample))) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Ignoring PNG tRNS chunk with out-of-range sample."); png_free_data(ping, ping_info, PNG_FREE_TRNS, 0); png_set_invalid(ping,ping_info,PNG_INFO_tRNS); image->matte=MagickFalse; } else { int scale_to_short; scale_to_short = 65535L/((1UL << ping_file_depth)-1); /* Scale transparent_color to short */ transparent_color.red= scale_to_short*ping_trans_color->red; transparent_color.green= scale_to_short*ping_trans_color->green; transparent_color.blue= scale_to_short*ping_trans_color->blue; transparent_color.opacity= scale_to_short*ping_trans_color->gray; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Raw tRNS graylevel = %d, scaled graylevel = %d.", ping_trans_color->gray,transparent_color.opacity); } transparent_color.red=transparent_color.opacity; transparent_color.green=transparent_color.opacity; transparent_color.blue=transparent_color.opacity; } } } #if defined(PNG_READ_sBIT_SUPPORTED) if (mng_info->have_global_sbit) { if (!png_get_valid(ping,ping_info,PNG_INFO_sBIT)) png_set_sBIT(ping,ping_info,&mng_info->global_sbit); } #endif num_passes=png_set_interlace_handling(ping); png_read_update_info(ping,ping_info); ping_rowbytes=png_get_rowbytes(ping,ping_info); /* Initialize image structure. */ mng_info->image_box.left=0; mng_info->image_box.right=(ssize_t) ping_width; mng_info->image_box.top=0; mng_info->image_box.bottom=(ssize_t) ping_height; if (mng_info->mng_type == 0) { mng_info->mng_width=ping_width; mng_info->mng_height=ping_height; mng_info->frame=mng_info->image_box; mng_info->clip=mng_info->image_box; } else { image->page.y=mng_info->y_off[mng_info->object_id]; } image->compression=ZipCompression; image->columns=ping_width; image->rows=ping_height; if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)) { double image_gamma = image->gamma; (void)LogMagickEvent(CoderEvent,GetMagickModule(), " image->gamma=%f",(float) image_gamma); if (image_gamma > 0.75) { /* Set image->rendering_intent to Undefined, * image->colorspace to GRAY, and reset image->chromaticity. 
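 * (In this branch the file gamma is greater than 0.75, i.e., roughly
 * linear rather than the ~0.4545 of sRGB, so a linear grayscale
 * colorspace is selected below.)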
*/
          image->intensity = Rec709LuminancePixelIntensityMethod;
          SetImageColorspace(image,LinearGRAYColorspace);
        }
      else
        {
          RenderingIntent
            save_rendering_intent = image->rendering_intent;

          ChromaticityInfo
            save_chromaticity = image->chromaticity;

          SetImageColorspace(image,GRAYColorspace);
          image->rendering_intent = save_rendering_intent;
          image->chromaticity = save_chromaticity;
        }

      image->gamma = image_gamma;
    }
  else
    {
      double
        image_gamma = image->gamma;

      (void)LogMagickEvent(CoderEvent,GetMagickModule(),
         "    image->gamma=%f",(float) image_gamma);

      if (image_gamma > 0.75)
        {
          /* The file gamma is approximately linear (> 0.75), so use the
           * linear RGB colorspace and Rec709 luminance for intensity.
           */
          image->intensity = Rec709LuminancePixelIntensityMethod;
          SetImageColorspace(image,RGBColorspace);
        }
      else
        {
          RenderingIntent
            save_rendering_intent = image->rendering_intent;

          ChromaticityInfo
            save_chromaticity = image->chromaticity;

          SetImageColorspace(image,sRGBColorspace);
          image->rendering_intent = save_rendering_intent;
          image->chromaticity = save_chromaticity;
        }

      image->gamma = image_gamma;
    }

  (void)LogMagickEvent(CoderEvent,GetMagickModule(),
       "    image->colorspace=%d",(int) image->colorspace);

  if (((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) ||
      ((int) ping_bit_depth < 16 &&
      (int) ping_color_type == PNG_COLOR_TYPE_GRAY))
    {
      size_t
        one;

      image->storage_class=PseudoClass;

      one=1;
      image->colors=one << ping_file_depth;

#if (MAGICKCORE_QUANTUM_DEPTH == 8)
      if (image->colors > 256)
        image->colors=256;
#else
      if (image->colors > 65536L)
        image->colors=65536L;
#endif

      if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)
        {
          png_colorp
            palette;

          (void) png_get_PLTE(ping,ping_info,&palette,&number_colors);
          image->colors=(size_t) number_colors;

          if (logging != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              "    Reading PNG PLTE chunk: number_colors: %d.",number_colors);
        }
    }

  if (image->storage_class == PseudoClass)
    {
      /*
        Initialize image colormap.
*/ if (AcquireImageColormap(image,image->colors) == MagickFalse) png_error(ping,"Memory allocation failed"); if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); for (i=0; i < (ssize_t) number_colors; i++) { image->colormap[i].red=ScaleCharToQuantum(palette[i].red); image->colormap[i].green=ScaleCharToQuantum(palette[i].green); image->colormap[i].blue=ScaleCharToQuantum(palette[i].blue); } for ( ; i < (ssize_t) image->colors; i++) { image->colormap[i].red=0; image->colormap[i].green=0; image->colormap[i].blue=0; } } } /* Set some properties for reporting by "identify" */ { char msg[MaxTextExtent]; /* encode ping_width, ping_height, ping_file_depth, ping_color_type, ping_interlace_method in value */ (void) FormatLocaleString(msg,MaxTextExtent, "%d, %d",(int) ping_width, (int) ping_height); (void) SetImageProperty(image,"png:IHDR.width,height",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_file_depth); (void) SetImageProperty(image,"png:IHDR.bit_depth",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d (%s)", (int) ping_color_type, Magick_ColorType_from_PNG_ColorType((int)ping_color_type)); (void) SetImageProperty(image,"png:IHDR.color_type",msg); if (ping_interlace_method == 0) { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Not interlaced)", (int) ping_interlace_method); } else if (ping_interlace_method == 1) { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Adam7 method)", (int) ping_interlace_method); } else { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Unknown method)", (int) ping_interlace_method); } (void) SetImageProperty(image,"png:IHDR.interlace_method",msg); if (number_colors != 0) { (void) FormatLocaleString(msg,MaxTextExtent,"%d", (int) number_colors); (void) SetImageProperty(image,"png:PLTE.number_colors",msg); } } #if defined(PNG_tIME_SUPPORTED) read_tIME_chunk(image,ping,ping_info); #endif /* Read image scanlines. */ if (image->delay != 0) mng_info->scenes_found++; if ((mng_info->mng_type == 0 && (image->ping != MagickFalse)) || ( (image_info->number_scenes != 0) && (mng_info->scenes_found > (ssize_t) (image_info->first_scene+image_info->number_scenes)))) { /* This happens later in non-ping decodes */ if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) image->storage_class=DirectClass; image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? 
MagickTrue : MagickFalse; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping PNG image data for scene %.20g",(double) mng_info->scenes_found-1); png_destroy_read_struct(&ping,&ping_info,&end_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage()."); return(image); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { png_destroy_read_struct(&ping,&ping_info,&end_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif InheritException(exception,&image->exception); return(DestroyImageList(image)); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG IDAT chunk(s)"); if (num_passes > 1) pixel_info=AcquireVirtualMemory(image->rows,ping_rowbytes* sizeof(*ping_pixels)); else pixel_info=AcquireVirtualMemory(ping_rowbytes,sizeof(*ping_pixels)); if (pixel_info == (MemoryInfo *) NULL) png_error(ping,"Memory allocation failed"); ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Converting PNG pixels to pixel packets"); /* Convert PNG pixels to pixel packets. */ { MagickBooleanType found_transparent_pixel; found_transparent_pixel=MagickFalse; if (image->storage_class == DirectClass) { quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) png_error(ping,"Failed to allocate quantum_info"); (void) SetQuantumEndian(image,quantum_info,MSBEndian); for (pass=0; pass < num_passes; pass++) { /* Convert image to DirectClass pixel packets. */ image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? MagickTrue : MagickFalse; for (y=0; y < (ssize_t) image->rows; y++) { if (num_passes > 1) row_offset=ping_rowbytes*y; else row_offset=0; png_read_row(ping,ping_pixels+row_offset,NULL); if (pass < num_passes-1) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; else { if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayAlphaQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RGBAQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, IndexQuantum,ping_pixels+row_offset,exception); else /* ping_color_type == PNG_COLOR_TYPE_RGB */ (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RGBQuantum,ping_pixels+row_offset,exception); } if (found_transparent_pixel == MagickFalse) { /* Is there a transparent pixel in the row? 
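                    For alpha color types, look for any non-opaque alpha
                    sample; for opaque RGB and gray types, compare each
                    pixel against the tRNS transparent color.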
*/ if (y== 0 && logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Looking for cheap transparent pixel"); for (x=(ssize_t) image->columns-1; x >= 0; x--) { if ((ping_color_type == PNG_COLOR_TYPE_RGBA || ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) && (GetPixelOpacity(q) != OpaqueOpacity)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ...got one."); found_transparent_pixel = MagickTrue; break; } if ((ping_color_type == PNG_COLOR_TYPE_RGB || ping_color_type == PNG_COLOR_TYPE_GRAY) && (ScaleQuantumToShort(GetPixelRed(q)) == transparent_color.red && ScaleQuantumToShort(GetPixelGreen(q)) == transparent_color.green && ScaleQuantumToShort(GetPixelBlue(q)) == transparent_color.blue)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ...got one."); found_transparent_pixel = MagickTrue; break; } q++; } } if (num_passes == 1) { status=SetImageProgress(image,LoadImageTag, (MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (y < (long) image->rows) break; if (num_passes != 1) { status=SetImageProgress(image,LoadImageTag,pass,num_passes); if (status == MagickFalse) break; } } quantum_info=DestroyQuantumInfo(quantum_info); } else /* image->storage_class != DirectClass */ for (pass=0; pass < num_passes; pass++) { register Quantum *r; /* Convert grayscale image to PseudoClass pixel packets. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Converting grayscale pixels to pixel packets"); image->matte=ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA ? MagickTrue : MagickFalse; quantum_scanline=(Quantum *) AcquireQuantumMemory(image->columns, (image->matte ? 2 : 1)*sizeof(*quantum_scanline)); if (quantum_scanline == (Quantum *) NULL) png_error(ping,"Memory allocation failed"); for (y=0; y < (ssize_t) image->rows; y++) { Quantum alpha; if (num_passes > 1) row_offset=ping_rowbytes*y; else row_offset=0; png_read_row(ping,ping_pixels+row_offset,NULL); if (pass < num_passes-1) continue; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); p=ping_pixels+row_offset; r=quantum_scanline; switch (ping_bit_depth) { case 8: { if (ping_color_type == 4) for (x=(ssize_t) image->columns-1; x >= 0; x--) { *r++=*p++; /* In image.h, OpaqueOpacity is 0 * TransparentOpacity is QuantumRange * In a PNG datastream, Opaque is QuantumRange * and Transparent is 0. 
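                 * SetPixelAlpha() reconciles the two conventions: it
                 * stores QuantumRange minus the PNG alpha sample as the
                 * pixel's opacity.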
*/ alpha=ScaleCharToQuantum((unsigned char)*p++); SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; q++; } else for (x=(ssize_t) image->columns-1; x >= 0; x--) *r++=*p++; break; } case 16: { for (x=(ssize_t) image->columns-1; x >= 0; x--) { #if (MAGICKCORE_QUANTUM_DEPTH >= 16) unsigned long quantum; if (image->colors > 256) quantum=(((unsigned int) *p++) << 8); else quantum=0; quantum|=(*p++); *r=ScaleShortToQuantum(quantum); r++; if (ping_color_type == 4) { if (image->colors > 256) quantum=(((unsigned int) *p++) << 8); else quantum=0; quantum|=(*p++); alpha=ScaleShortToQuantum(quantum); SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; q++; } #else /* MAGICKCORE_QUANTUM_DEPTH == 8 */ *r++=(*p++); p++; /* strip low byte */ if (ping_color_type == 4) { alpha=*p++; SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; p++; q++; } #endif } break; } default: break; } /* Transfer image scanline. */ r=quantum_scanline; for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,*r++); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (num_passes == 1) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } quantum_scanline=(Quantum *) RelinquishMagickMemory(quantum_scanline); if (y < (long) image->rows) break; if (num_passes != 1) { status=SetImageProgress(image,LoadImageTag,pass,num_passes); if (status == MagickFalse) break; } } image->matte=found_transparent_pixel; if (logging != MagickFalse) { if (found_transparent_pixel != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found transparent pixel"); else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No transparent pixel was found"); ping_color_type&=0x03; } } } if (image->storage_class == PseudoClass) { MagickBooleanType matte; matte=image->matte; image->matte=MagickFalse; (void) SyncImage(image); image->matte=matte; } png_read_end(ping,end_info); if (image_info->number_scenes != 0 && mng_info->scenes_found-1 < (ssize_t) image_info->first_scene && image->delay != 0) { png_destroy_read_struct(&ping,&ping_info,&end_info); pixel_info=RelinquishVirtualMemory(pixel_info); image->colors=2; (void) SetImageBackgroundColor(image); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage() early."); return(image); } if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) { ClassType storage_class; /* Image has a transparent background. */ storage_class=image->storage_class; image->matte=MagickTrue; /* Balfour fix from imagemagick discourse server, 5 Feb 2010 */ if (storage_class == PseudoClass) { if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { for (x=0; x < ping_num_trans; x++) { image->colormap[x].opacity = ScaleCharToQuantum((unsigned char)(255-ping_trans_alpha[x])); } } else if (ping_color_type == PNG_COLOR_TYPE_GRAY) { for (x=0; x < (int) image->colors; x++) { if (ScaleQuantumToShort(image->colormap[x].red) == transparent_color.opacity) { image->colormap[x].opacity = (Quantum) TransparentOpacity; } } } (void) SyncImage(image); } #if 1 /* Should have already been done above, but glennrp problem P10 * needs this. 
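   * The loop below re-derives each pixel's opacity by comparing it
   * against the tRNS transparent color.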
*/ else { for (y=0; y < (ssize_t) image->rows; y++) { image->storage_class=storage_class; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); /* Caution: on a Q8 build, this does not distinguish between * 16-bit colors that differ only in the low byte */ for (x=(ssize_t) image->columns-1; x >= 0; x--) { if (ScaleQuantumToShort(GetPixelRed(q)) == transparent_color.red && ScaleQuantumToShort(GetPixelGreen(q)) == transparent_color.green && ScaleQuantumToShort(GetPixelBlue(q)) == transparent_color.blue) { SetPixelOpacity(q,TransparentOpacity); } else { SetPixelOpacity(q,OpaqueOpacity); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #endif image->storage_class=DirectClass; } for (j = 0; j < 2; j++) { if (j == 0) status = png_get_text(ping,ping_info,&text,&num_text) != 0 ? MagickTrue : MagickFalse; else status = png_get_text(ping,end_info,&text,&num_text) != 0 ? MagickTrue : MagickFalse; if (status != MagickFalse) for (i=0; i < (ssize_t) num_text; i++) { /* Check for a profile */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG text chunk"); if (strlen(text[i].key) > 16 && memcmp(text[i].key, "Raw profile type ",17) == 0) { const char *value; value=GetImageOption(image_info,"profile:skip"); if (IsOptionMember(text[i].key+17,value) == MagickFalse) { (void) Magick_png_read_raw_profile(ping,image,image_info,text, (int) i); num_raw_profiles++; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Read raw profile %s",text[i].key+17); } else { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping raw profile %s",text[i].key+17); } } else { char *value; length=text[i].text_length; value=(char *) AcquireQuantumMemory(length+MaxTextExtent, sizeof(*value)); if (value == (char *) NULL) png_error(ping,"Memory allocation failed"); *value='\0'; (void) ConcatenateMagickString(value,text[i].text,length+2); /* Don't save "density" or "units" property if we have a pHYs * chunk */ if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs) || (LocaleCompare(text[i].key,"density") != 0 && LocaleCompare(text[i].key,"units") != 0)) (void) SetImageProperty(image,text[i].key,value); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " length: %lu\n" " Keyword: %s", (unsigned long) length, text[i].key); } value=DestroyString(value); } } num_text_total += num_text; } #ifdef MNG_OBJECT_BUFFERS /* Store the object if necessary. */ if (object_id && !mng_info->frozen[object_id]) { if (mng_info->ob[object_id] == (MngBuffer *) NULL) { /* create a new object buffer. 
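             (so that later chunks in the MNG datastream can refer to
             this decoded image by its object id)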
*/ mng_info->ob[object_id]=(MngBuffer *) AcquireMagickMemory(sizeof(MngBuffer)); if (mng_info->ob[object_id] != (MngBuffer *) NULL) { mng_info->ob[object_id]->image=(Image *) NULL; mng_info->ob[object_id]->reference_count=1; } } if ((mng_info->ob[object_id] == (MngBuffer *) NULL) || mng_info->ob[object_id]->frozen) { if (mng_info->ob[object_id] == (MngBuffer *) NULL) png_error(ping,"Memory allocation failed"); if (mng_info->ob[object_id]->frozen) png_error(ping,"Cannot overwrite frozen MNG object buffer"); } else { if (mng_info->ob[object_id]->image != (Image *) NULL) mng_info->ob[object_id]->image=DestroyImage (mng_info->ob[object_id]->image); mng_info->ob[object_id]->image=CloneImage(image,0,0,MagickTrue, &image->exception); if (mng_info->ob[object_id]->image != (Image *) NULL) mng_info->ob[object_id]->image->file=(FILE *) NULL; else png_error(ping, "Cloning image for object buffer failed"); if (ping_width > 250000L || ping_height > 250000L) png_error(ping,"PNG Image dimensions are too large."); mng_info->ob[object_id]->width=ping_width; mng_info->ob[object_id]->height=ping_height; mng_info->ob[object_id]->color_type=ping_color_type; mng_info->ob[object_id]->sample_depth=ping_bit_depth; mng_info->ob[object_id]->interlace_method=ping_interlace_method; mng_info->ob[object_id]->compression_method= ping_compression_method; mng_info->ob[object_id]->filter_method=ping_filter_method; if (png_get_valid(ping,ping_info,PNG_INFO_PLTE)) { png_colorp plte; /* Copy the PLTE to the object buffer. */ png_get_PLTE(ping,ping_info,&plte,&number_colors); mng_info->ob[object_id]->plte_length=number_colors; for (i=0; i < number_colors; i++) { mng_info->ob[object_id]->plte[i]=plte[i]; } } else mng_info->ob[object_id]->plte_length=0; } } #endif /* Set image->matte to MagickTrue if the input colortype supports * alpha or if a valid tRNS chunk is present, no matter whether there * is actual transparency present. */ image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? MagickTrue : MagickFalse; #if 0 /* I'm not sure what's wrong here but it does not work. 
*/ if (image->matte != MagickFalse) { if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) (void) SetImageType(image,GrayscaleMatteType); else if (ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) SetImageType(image,PaletteMatteType); else (void) SetImageType(image,TrueColorMatteType); } else { if (ping_color_type == PNG_COLOR_TYPE_GRAY) (void) SetImageType(image,GrayscaleType); else if (ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) SetImageType(image,PaletteType); else (void) SetImageType(image,TrueColorType); } #endif /* Set more properties for identify to retrieve */ { char msg[MaxTextExtent]; if (num_text_total != 0) { /* libpng doesn't tell us whether they were tEXt, zTXt, or iTXt */ (void) FormatLocaleString(msg,MaxTextExtent, "%d tEXt/zTXt/iTXt chunks were found", num_text_total); (void) SetImageProperty(image,"png:text",msg); } if (num_raw_profiles != 0) { (void) FormatLocaleString(msg,MaxTextExtent, "%d were found", num_raw_profiles); (void) SetImageProperty(image,"png:text-encoded profiles",msg); } /* cHRM chunk: */ if (ping_found_cHRM != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found (see Chromaticity, above)"); (void) SetImageProperty(image,"png:cHRM",msg); } /* bKGD chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found (see Background color, above)"); (void) SetImageProperty(image,"png:bKGD",msg); } (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found"); /* iCCP chunk: */ if (ping_found_iCCP != MagickFalse) (void) SetImageProperty(image,"png:iCCP",msg); if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) (void) SetImageProperty(image,"png:tRNS",msg); #if defined(PNG_sRGB_SUPPORTED) /* sRGB chunk: */ if (ping_found_sRGB != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent, "intent=%d (%s)", (int) intent, Magick_RenderingIntentString_from_PNG_RenderingIntent(intent)); (void) SetImageProperty(image,"png:sRGB",msg); } #endif /* gAMA chunk: */ if (ping_found_gAMA != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent, "gamma=%.8g (See Gamma, above)", file_gamma); (void) SetImageProperty(image,"png:gAMA",msg); } #if defined(PNG_pHYs_SUPPORTED) /* pHYs chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { (void) FormatLocaleString(msg,MaxTextExtent, "x_res=%.10g, y_res=%.10g, units=%d", (double) x_resolution,(double) y_resolution, unit_type); (void) SetImageProperty(image,"png:pHYs",msg); } #endif #if defined(PNG_oFFs_SUPPORTED) /* oFFs chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_oFFs)) { (void) FormatLocaleString(msg,MaxTextExtent,"x_off=%.20g, y_off=%.20g", (double) image->page.x,(double) image->page.y); (void) SetImageProperty(image,"png:oFFs",msg); } #endif #if defined(PNG_tIME_SUPPORTED) read_tIME_chunk(image,ping,end_info); #endif /* caNv chunk: */ if ((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows) || (image->page.x != 0 || image->page.y != 0)) { (void) FormatLocaleString(msg,MaxTextExtent, "width=%.20g, height=%.20g, x_offset=%.20g, y_offset=%.20g", (double) image->page.width,(double) image->page.height, (double) image->page.x,(double) image->page.y); (void) SetImageProperty(image,"png:caNv",msg); } } /* Relinquish resources. 
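    (the libpng read structures and the scanline buffer)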
  */
  png_destroy_read_struct(&ping,&ping_info,&end_info);

  pixel_info=RelinquishVirtualMemory(pixel_info);

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  exit ReadOnePNGImage()");

#ifdef IMPNG_SETJMP_NOT_THREAD_SAFE
  UnlockSemaphoreInfo(ping_semaphore);
#endif

  /* }  for navigation to beginning of SETJMP-protected block; revert to
   *    throwing an Exception when an error occurs.
   */

  return(image);

/* end of reading one PNG image */
}

static Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    logging,
    status;

  MngInfo
    *mng_info;

  char
    magic_number[MaxTextExtent];

  ssize_t
    count;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);

  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);

  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadPNGImage()");
  image=AcquireImage(image_info);
  mng_info=(MngInfo *) NULL;
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);

  if (status == MagickFalse)
    ThrowReaderException(FileOpenError,"UnableToOpenFile");

  /*
    Verify PNG signature.
  */
  count=ReadBlob(image,8,(unsigned char *) magic_number);

  if (count < 8 || memcmp(magic_number,"\211PNG\r\n\032\n",8) != 0)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  /*
    Verify that the file size is large enough to contain a PNG datastream.
  */
  if (GetBlobSize(image) < 61)
    ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");

  /*
    Allocate a MngInfo structure.
  */
  mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo));

  if (mng_info == (MngInfo *) NULL)
    ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");

  /*
    Initialize members of the MngInfo structure.
  */
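  /* A single zero-fill covers every member; the back-pointer to the
   * image is then set explicitly. */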
*/ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOnePNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadPNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if ((image->columns == 0) || (image->rows == 0)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadPNGImage() with error."); ThrowReaderException(CorruptImageError,"CorruptImage"); } if ((IssRGBColorspace(image->colorspace) != MagickFalse) && ((image->gamma < .45) || (image->gamma > .46)) && !(image->chromaticity.red_primary.x>0.6399f && image->chromaticity.red_primary.x<0.6401f && image->chromaticity.red_primary.y>0.3299f && image->chromaticity.red_primary.y<0.3301f && image->chromaticity.green_primary.x>0.2999f && image->chromaticity.green_primary.x<0.3001f && image->chromaticity.green_primary.y>0.5999f && image->chromaticity.green_primary.y<0.6001f && image->chromaticity.blue_primary.x>0.1499f && image->chromaticity.blue_primary.x<0.1501f && image->chromaticity.blue_primary.y>0.0599f && image->chromaticity.blue_primary.y<0.0601f && image->chromaticity.white_point.x>0.3126f && image->chromaticity.white_point.x<0.3128f && image->chromaticity.white_point.y>0.3289f && image->chromaticity.white_point.y<0.3291f)) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "SetImageColorspace to RGBColorspace"); SetImageColorspace(image,RGBColorspace); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " page.w: %.20g, page.h: %.20g,page.x: %.20g, page.y: %.20g.", (double) image->page.width,(double) image->page.height, (double) image->page.x,(double) image->page.y); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadPNGImage()"); return(image); } #if defined(JNG_SUPPORTED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d O n e J N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadOneJNGImage() reads a JPEG Network Graphics (JNG) image file % (minus the 8-byte signature) and returns it. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % JNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the ReadOneJNGImage method is: % % Image *ReadOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o mng_info: Specifies a pointer to a MngInfo structure. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
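%
%  A JNG datastream wraps a JPEG-encoded color image and an optional
%  PNG- or JPEG-encoded alpha channel; ReadOneJNGImage() copies each into
%  a temporary blob and recombines them after decoding.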
% */ static void DestroyJNG(unsigned char *chunk,Image **color_image, ImageInfo **color_image_info,Image **alpha_image,ImageInfo **alpha_image_info) { (void) RelinquishMagickMemory(chunk); if (color_image_info && *color_image_info) { DestroyImageInfo(*color_image_info); *color_image_info = (ImageInfo *)NULL; } if (alpha_image_info && *alpha_image_info) { DestroyImageInfo(*alpha_image_info); *alpha_image_info = (ImageInfo *)NULL; } if (color_image && *color_image) { DestroyImage(*color_image); *color_image = (Image *)NULL; } if (alpha_image && *alpha_image) { DestroyImage(*alpha_image); *alpha_image = (Image *)NULL; } } static Image *ReadOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { Image *alpha_image, *color_image, *image, *jng_image; ImageInfo *alpha_image_info, *color_image_info; MagickBooleanType logging; int unique_filenames; ssize_t y; MagickBooleanType status; png_uint_32 jng_height, jng_width; png_byte jng_color_type, jng_image_sample_depth, jng_image_compression_method, jng_image_interlace_method, jng_alpha_sample_depth, jng_alpha_compression_method, jng_alpha_filter_method, jng_alpha_interlace_method; register const PixelPacket *s; register ssize_t i, x; register PixelPacket *q; register unsigned char *p; unsigned int read_JSEP, reading_idat; size_t length; jng_alpha_compression_method=0; jng_alpha_sample_depth=8; jng_color_type=0; jng_height=0; jng_width=0; alpha_image=(Image *) NULL; color_image=(Image *) NULL; alpha_image_info=(ImageInfo *) NULL; color_image_info=(ImageInfo *) NULL; unique_filenames=0; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOneJNGImage()"); image=mng_info->image; if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " AcquireNextImage()"); AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; /* Signature bytes have already been read. */ read_JSEP=MagickFalse; reading_idat=MagickFalse; for (;;) { char type[MaxTextExtent]; unsigned char *chunk; unsigned int count; /* Read a new JNG chunk. 
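       (4-byte MSB length, 4-byte type, payload, and a 4-byte CRC;
       lengths above PNG_UINT_31_MAX are rejected as corrupt)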
*/ status=SetImageProgress(image,LoadImagesTag,TellBlob(image), 2*GetBlobSize(image)); if (status == MagickFalse) break; type[0]='\0'; (void) ConcatenateMagickString(type,"errr",MaxTextExtent); length=(size_t) ReadBlobMSBLong(image); count=(unsigned int) ReadBlob(image,4,(unsigned char *) type); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading JNG chunk type %c%c%c%c, length: %.20g", type[0],type[1],type[2],type[3],(double) length); if (length > PNG_UINT_31_MAX || count == 0) { DestroyJNG(NULL,&color_image,&color_image_info, &alpha_image,&alpha_image_info); ThrowReaderException(CorruptImageError,"CorruptImage"); } p=NULL; chunk=(unsigned char *) NULL; if (length != 0) { if (length > GetBlobSize(image)) { DestroyJNG(NULL,&color_image,&color_image_info, &alpha_image,&alpha_image_info); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } chunk=(unsigned char *) AcquireQuantumMemory(length,sizeof(*chunk)); if (chunk == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) length; i++) { int c; c=ReadBlobByte(image); if (c == EOF) break; chunk[i]=(unsigned char) c; } for ( ; i < (ssize_t) length; i++) chunk[i]='\0'; p=chunk; } (void) ReadBlobMSBLong(image); /* read crc word */ if (memcmp(type,mng_JHDR,4) == 0) { if (length == 16) { jng_width=(png_uint_32)mng_get_long(p); jng_height=(png_uint_32)mng_get_long(&p[4]); if ((jng_width == 0) || (jng_height == 0)) { DestroyJNG(chunk,&color_image,&color_image_info, &alpha_image,&alpha_image_info); ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); } jng_color_type=p[8]; jng_image_sample_depth=p[9]; jng_image_compression_method=p[10]; jng_image_interlace_method=p[11]; image->interlace=jng_image_interlace_method != 0 ? 
PNGInterlace : NoInterlace; jng_alpha_sample_depth=p[12]; jng_alpha_compression_method=p[13]; jng_alpha_filter_method=p[14]; jng_alpha_interlace_method=p[15]; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " jng_width: %16lu, jng_height: %16lu\n" " jng_color_type: %16d, jng_image_sample_depth: %3d\n" " jng_image_compression_method:%3d", (unsigned long) jng_width, (unsigned long) jng_height, jng_color_type, jng_image_sample_depth, jng_image_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " jng_image_interlace_method: %3d" " jng_alpha_sample_depth: %3d", jng_image_interlace_method, jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " jng_alpha_compression_method:%3d\n" " jng_alpha_filter_method: %3d\n" " jng_alpha_interlace_method: %3d", jng_alpha_compression_method, jng_alpha_filter_method, jng_alpha_interlace_method); } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); if (jng_width > 65535 || jng_height > 65535 || (long) jng_width > GetMagickResourceLimit(WidthResource) || (long) jng_height > GetMagickResourceLimit(HeightResource)) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG width or height too large: (%lu x %lu)", (long) jng_width, (long) jng_height); DestroyJNG(chunk,&color_image,&color_image_info, &alpha_image,&alpha_image_info); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } continue; } if ((reading_idat == MagickFalse) && (read_JSEP == MagickFalse) && ((memcmp(type,mng_JDAT,4) == 0) || (memcmp(type,mng_JdAA,4) == 0) || (memcmp(type,mng_IDAT,4) == 0) || (memcmp(type,mng_JDAA,4) == 0))) { /* o create color_image o open color_blob, attached to color_image o if (color type has alpha) open alpha_blob, attached to alpha_image */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating color_blob."); color_image_info=(ImageInfo *)AcquireMagickMemory(sizeof(ImageInfo)); if (color_image_info == (ImageInfo *) NULL) { DestroyJNG(chunk,&color_image,&color_image_info, &alpha_image,&alpha_image_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } GetImageInfo(color_image_info); color_image=AcquireImage(color_image_info); if (color_image == (Image *) NULL) { DestroyJNG(chunk,&color_image,&color_image_info, &alpha_image,&alpha_image_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } (void) AcquireUniqueFilename(color_image->filename); unique_filenames++; status=OpenBlob(color_image_info,color_image,WriteBinaryBlobMode, exception); if (status == MagickFalse) { DestroyJNG(chunk,&color_image,&color_image_info, &alpha_image,&alpha_image_info); return(DestroyImageList(image)); } if ((image_info->ping == MagickFalse) && (jng_color_type >= 12)) { alpha_image_info=(ImageInfo *) AcquireMagickMemory(sizeof(ImageInfo)); if (alpha_image_info == (ImageInfo *) NULL) { DestroyJNG(chunk,&color_image,&color_image_info, &alpha_image,&alpha_image_info); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } GetImageInfo(alpha_image_info); alpha_image=AcquireImage(alpha_image_info); if (alpha_image == (Image *) NULL) { DestroyJNG(chunk,&color_image,&color_image_info, &alpha_image,&alpha_image_info); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating alpha_blob."); (void) AcquireUniqueFilename(alpha_image->filename); unique_filenames++; 
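          /* Open the temporary alpha blob.  When the alpha channel is
           * PNG-encoded (jng_alpha_compression_method == 0), a minimal
           * grayscale IHDR is synthesized below so that the IDAT chunks
           * copied later form a decodable PNG datastream.
           */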
status=OpenBlob(alpha_image_info,alpha_image,WriteBinaryBlobMode, exception); if (status == MagickFalse) { alpha_image=DestroyImage(alpha_image); alpha_image_info=DestroyImageInfo(alpha_image_info); color_image=DestroyImage(color_image); return(DestroyImageList(image)); } if (jng_alpha_compression_method == 0) { unsigned char data[18]; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing IHDR chunk to alpha_blob."); (void) WriteBlob(alpha_image,8,(const unsigned char *) "\211PNG\r\n\032\n"); (void) WriteBlobMSBULong(alpha_image,13L); PNGType(data,mng_IHDR); LogPNGChunk(logging,mng_IHDR,13L); PNGLong(data+4,jng_width); PNGLong(data+8,jng_height); data[12]=jng_alpha_sample_depth; data[13]=0; /* color_type gray */ data[14]=0; /* compression method 0 */ data[15]=0; /* filter_method 0 */ data[16]=0; /* interlace_method 0 */ (void) WriteBlob(alpha_image,17,data); (void) WriteBlobMSBULong(alpha_image,crc32(0,data,17)); } } reading_idat=MagickTrue; } if (memcmp(type,mng_JDAT,4) == 0) { /* Copy chunk to color_image->blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying JDAT chunk data to color_blob."); if ((length != 0) && (color_image != (Image *) NULL)) { (void) WriteBlob(color_image,length,chunk); chunk=(unsigned char *) RelinquishMagickMemory(chunk); } continue; } if (memcmp(type,mng_IDAT,4) == 0) { png_byte data[5]; /* Copy IDAT header and chunk data to alpha_image->blob */ if (alpha_image != NULL && image_info->ping == MagickFalse) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying IDAT chunk data to alpha_blob."); (void) WriteBlobMSBULong(alpha_image,(size_t) length); PNGType(data,mng_IDAT); LogPNGChunk(logging,mng_IDAT,length); (void) WriteBlob(alpha_image,4,data); (void) WriteBlob(alpha_image,length,chunk); (void) WriteBlobMSBULong(alpha_image, crc32(crc32(0,data,4),chunk,(uInt) length)); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if ((memcmp(type,mng_JDAA,4) == 0) || (memcmp(type,mng_JdAA,4) == 0)) { /* Copy chunk data to alpha_image->blob */ if (alpha_image != NULL && image_info->ping == MagickFalse) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying JDAA chunk data to alpha_blob."); (void) WriteBlob(alpha_image,length,chunk); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_JSEP,4) == 0) { read_JSEP=MagickTrue; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_bKGD,4) == 0) { if (length == 2) { image->background_color.red=ScaleCharToQuantum(p[1]); image->background_color.green=image->background_color.red; image->background_color.blue=image->background_color.red; } if (length == 6) { image->background_color.red=ScaleCharToQuantum(p[1]); image->background_color.green=ScaleCharToQuantum(p[3]); image->background_color.blue=ScaleCharToQuantum(p[5]); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_gAMA,4) == 0) { if (length == 4) image->gamma=((float) mng_get_long(p))*0.00001; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_cHRM,4) == 0) { if (length == 32) { image->chromaticity.white_point.x=0.00001*mng_get_long(p); image->chromaticity.white_point.y=0.00001*mng_get_long(&p[4]); image->chromaticity.red_primary.x=0.00001*mng_get_long(&p[8]); image->chromaticity.red_primary.y=0.00001*mng_get_long(&p[12]); 
image->chromaticity.green_primary.x=0.00001*mng_get_long(&p[16]); image->chromaticity.green_primary.y=0.00001*mng_get_long(&p[20]); image->chromaticity.blue_primary.x=0.00001*mng_get_long(&p[24]); image->chromaticity.blue_primary.y=0.00001*mng_get_long(&p[28]); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_sRGB,4) == 0) { if (length == 1) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent(p[0]); image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_oFFs,4) == 0) { if (length > 8) { image->page.x=(ssize_t) mng_get_long(p); image->page.y=(ssize_t) mng_get_long(&p[4]); if ((int) p[8] != 0) { image->page.x/=10000; image->page.y/=10000; } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_pHYs,4) == 0) { if (length > 8) { image->x_resolution=(double) mng_get_long(p); image->y_resolution=(double) mng_get_long(&p[4]); if ((int) p[8] == PNG_RESOLUTION_METER) { image->units=PixelsPerCentimeterResolution; image->x_resolution=image->x_resolution/100.0f; image->y_resolution=image->y_resolution/100.0f; } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #if 0 if (memcmp(type,mng_iCCP,4) == 0) { /* To do: */ chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); if (memcmp(type,mng_IEND,4)) continue; break; } /* IEND found */ /* Finish up reading image data: o read main image from color_blob. o close color_blob. o if (color_type has alpha) if alpha_encoding is PNG read secondary image from alpha_blob via ReadPNG if alpha_encoding is JPEG read secondary image from alpha_blob via ReadJPEG o close alpha_blob. o copy intensity of secondary image into opacity samples of main image. o destroy the secondary image. 
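     The temporary blobs were created with AcquireUniqueFilename(), so
     each must also be relinquished; unique_filenames tracks the
     outstanding count.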
*/ if (color_image_info == (ImageInfo *) NULL) { assert(color_image == (Image *) NULL); assert(alpha_image == (Image *) NULL); return(DestroyImageList(image)); } if (color_image == (Image *) NULL) { assert(alpha_image == (Image *) NULL); return(DestroyImageList(image)); } (void) SeekBlob(color_image,0,SEEK_SET); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading jng_image from color_blob."); assert(color_image_info != (ImageInfo *) NULL); (void) FormatLocaleString(color_image_info->filename,MaxTextExtent,"%s", color_image->filename); color_image_info->ping=MagickFalse; /* To do: avoid this */ jng_image=ReadImage(color_image_info,exception); (void) RelinquishUniqueFileResource(color_image->filename); unique_filenames--; color_image=DestroyImage(color_image); color_image_info=DestroyImageInfo(color_image_info); if (jng_image == (Image *) NULL) { DestroyJNG(NULL,NULL,NULL,&alpha_image,&alpha_image_info); return(DestroyImageList(image)); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying jng_image pixels to main image."); image->columns=jng_width; image->rows=jng_height; length=image->columns*sizeof(PixelPacket); status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { jng_image=DestroyImageList(jng_image); DestroyJNG(NULL,&color_image,&color_image_info,&alpha_image, &alpha_image_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((image->columns != jng_image->columns) || (image->rows != jng_image->rows)) { jng_image=DestroyImageList(jng_image); DestroyJNG(NULL,&color_image,&color_image_info,&alpha_image, &alpha_image_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } for (y=0; y < (ssize_t) image->rows; y++) { s=GetVirtualPixels(jng_image,0,y,image->columns,1,&image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if ((s == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; (void) memcpy(q,s,length); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } jng_image=DestroyImage(jng_image); if ((image_info->ping == MagickFalse) && (jng_color_type >= 12)) { if (jng_alpha_compression_method == 0) { png_byte data[5]; (void) WriteBlobMSBULong(alpha_image,0x00000000L); PNGType(data,mng_IEND); LogPNGChunk(logging,mng_IEND,0L); (void) WriteBlob(alpha_image,4,data); (void) WriteBlobMSBULong(alpha_image,crc32(0,data,4)); } (void) SeekBlob(alpha_image,0,SEEK_SET); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading opacity from alpha_blob."); (void) FormatLocaleString(alpha_image_info->filename,MaxTextExtent, "%s",alpha_image->filename); jng_image=ReadImage(alpha_image_info,exception); if (jng_image != (Image *) NULL) for (y=0; y < (ssize_t) image->rows; y++) { s=GetVirtualPixels(jng_image,0,y,image->columns,1,&image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if ((s == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; if (image->matte != MagickFalse) for (x=(ssize_t) image->columns; x != 0; x--,q++,s++) SetPixelOpacity(q,QuantumRange-GetPixelRed(s)); else for (x=(ssize_t) image->columns; x != 0; x--,q++,s++) { SetPixelAlpha(q,GetPixelRed(s)); if (GetPixelOpacity(q) != OpaqueOpacity) image->matte=MagickTrue; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } (void) RelinquishUniqueFileResource(alpha_image->filename); unique_filenames--; 
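      /* The opacity data has been merged into the main image; discard
       * the temporary alpha image and its image-info.
       */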
      alpha_image=DestroyImage(alpha_image);
      alpha_image_info=DestroyImageInfo(alpha_image_info);

      if (jng_image != (Image *) NULL)
        jng_image=DestroyImage(jng_image);
    }

  /* The JNG image has been read; record its dimensions and page geometry. */

  if (mng_info->mng_type == 0)
    {
      mng_info->mng_width=jng_width;
      mng_info->mng_height=jng_height;
    }

  if (image->page.width == 0 && image->page.height == 0)
    {
      image->page.width=jng_width;
      image->page.height=jng_height;
    }

  if (image->page.x == 0 && image->page.y == 0)
    {
      image->page.x=mng_info->x_off[mng_info->object_id];
      image->page.y=mng_info->y_off[mng_info->object_id];
    }
  else
    {
      image->page.y=mng_info->y_off[mng_info->object_id];
    }

  mng_info->image_found++;
  status=SetImageProgress(image,LoadImagesTag,2*TellBlob(image),
    2*GetBlobSize(image));

  if (status == MagickFalse)
    return(DestroyImageList(image));

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  exit ReadOneJNGImage(); unique_filenames=%d",unique_filenames);

  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d J N G I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadJNGImage() reads a JPEG Network Graphics (JNG) image file
%  (including the 8-byte signature) and returns it.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  JNG support written by Glenn Randers-Pehrson, glennrp@image...
%
%  The format of the ReadJNGImage method is:
%
%      Image *ReadJNGImage(const ImageInfo *image_info, ExceptionInfo
%         *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static Image *ReadJNGImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    logging,
    status;

  MngInfo
    *mng_info;

  char
    magic_number[MaxTextExtent];

  size_t
    count;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);

  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);

  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadJNGImage()");
  image=AcquireImage(image_info);
  mng_info=(MngInfo *) NULL;
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);

  if (status == MagickFalse)
    return((Image *) NULL);

  if (LocaleCompare(image_info->magick,"JNG") != 0)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  /*
    Verify JNG signature.
  */
  count=(size_t) ReadBlob(image,8,(unsigned char *) magic_number);

  if (count < 8 || memcmp(magic_number,"\213JNG\r\n\032\n",8) != 0)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  /*
    Verify that the file size is large enough to contain a JNG datastream.
  */
  if (GetBlobSize(image) < 147)
    ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");

  /*
    Allocate a MngInfo structure.
  */
  mng_info=(MngInfo *) AcquireMagickMemory(sizeof(*mng_info));

  if (mng_info == (MngInfo *) NULL)
    ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");

  /*
    Initialize members of the MngInfo structure.
*/ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOneJNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if (image->columns == 0 || image->rows == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); ThrowReaderException(CorruptImageError,"CorruptImage"); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadJNGImage()"); return(image); } #endif static Image *ReadOneMNGImage(MngInfo* mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { char page_geometry[MaxTextExtent]; Image *image; MagickBooleanType logging; volatile int first_mng_object, object_id, term_chunk_found, skip_to_iend; volatile ssize_t image_count=0; MagickBooleanType status; MagickOffsetType offset; MngBox default_fb, fb, previous_fb; #if defined(MNG_INSERT_LAYERS) PixelPacket mng_background_color; #endif register unsigned char *p; register ssize_t i; size_t count; ssize_t loop_level; volatile short skipping_loop; #if defined(MNG_INSERT_LAYERS) unsigned int mandatory_back=0; #endif volatile unsigned int #ifdef MNG_OBJECT_BUFFERS mng_background_object=0, #endif mng_type=0; /* 0: PNG or JNG; 1: MNG; 2: MNG-LC; 3: MNG-VLC */ size_t default_frame_timeout, frame_timeout, #if defined(MNG_INSERT_LAYERS) image_height, image_width, #endif length; /* These delays are all measured in image ticks_per_second, * not in MNG ticks_per_second */ volatile size_t default_frame_delay, final_delay, final_image_delay, frame_delay, #if defined(MNG_INSERT_LAYERS) insert_layers, #endif mng_iterations=1, simplicity=0, subframe_height=0, subframe_width=0; previous_fb.top=0; previous_fb.bottom=0; previous_fb.left=0; previous_fb.right=0; default_fb.top=0; default_fb.bottom=0; default_fb.left=0; default_fb.right=0; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOneMNGImage()"); image=mng_info->image; if (LocaleCompare(image_info->magick,"MNG") == 0) { char magic_number[MaxTextExtent]; /* Verify MNG signature. */ count=(size_t) ReadBlob(image,8,(unsigned char *) magic_number); if (memcmp(magic_number,"\212MNG\r\n\032\n",8) != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize some nonzero members of the MngInfo structure. */ for (i=0; i < MNG_MAX_OBJECTS; i++) { mng_info->object_clip[i].right=(ssize_t) PNG_UINT_31_MAX; mng_info->object_clip[i].bottom=(ssize_t) PNG_UINT_31_MAX; } mng_info->exists[0]=MagickTrue; } skipping_loop=(-1); first_mng_object=MagickTrue; mng_type=0; #if defined(MNG_INSERT_LAYERS) insert_layers=MagickFalse; /* should be False when converting or mogrifying */ #endif default_frame_delay=0; default_frame_timeout=0; frame_delay=0; final_delay=1; mng_info->ticks_per_second=1UL*image->ticks_per_second; object_id=0; skip_to_iend=MagickFalse; term_chunk_found=MagickFalse; mng_info->framing_mode=1; #if defined(MNG_INSERT_LAYERS) mandatory_back=MagickFalse; #endif #if defined(MNG_INSERT_LAYERS) mng_background_color=image->background_color; #endif default_fb=mng_info->frame; previous_fb=mng_info->frame; do { char type[MaxTextExtent]; if (LocaleCompare(image_info->magick,"MNG") == 0) { unsigned char *chunk; /* Read a new chunk. 
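       MNG chunks share the PNG chunk layout; the loop below dispatches
       on the 4-byte chunk type.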
*/ type[0]='\0'; (void) ConcatenateMagickString(type,"errr",MaxTextExtent); length=(size_t) ReadBlobMSBLong(image); count=(size_t) ReadBlob(image,4,(unsigned char *) type); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading MNG chunk type %c%c%c%c, length: %.20g", type[0],type[1],type[2],type[3],(double) length); if (length > PNG_UINT_31_MAX) { status=MagickFalse; break; } if (count == 0) ThrowReaderException(CorruptImageError,"CorruptImage"); p=NULL; chunk=(unsigned char *) NULL; if (length != 0) { if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); chunk=(unsigned char *) AcquireQuantumMemory(length,sizeof(*chunk)); if (chunk == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) length; i++) { int c; c=ReadBlobByte(image); if (c == EOF) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } chunk[i]=(unsigned char) c; } p=chunk; } (void) ReadBlobMSBLong(image); /* read crc word */ #if !defined(JNG_SUPPORTED) if (memcmp(type,mng_JHDR,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->jhdr_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"JNGCompressNotSupported","`%s'",image->filename); mng_info->jhdr_warning++; } #endif if (memcmp(type,mng_DHDR,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->dhdr_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"DeltaPNGNotSupported","`%s'",image->filename); mng_info->dhdr_warning++; } if (memcmp(type,mng_MEND,4) == 0) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); break; } if (skip_to_iend) { if (memcmp(type,mng_IEND,4) == 0) skip_to_iend=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skip to IEND."); continue; } if (memcmp(type,mng_MHDR,4) == 0) { if (length != 28) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError,"CorruptImage"); } mng_info->mng_width=(unsigned long)mng_get_long(p); mng_info->mng_height=(unsigned long)mng_get_long(&p[4]); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " MNG width: %.20g",(double) mng_info->mng_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " MNG height: %.20g",(double) mng_info->mng_height); } p+=8; mng_info->ticks_per_second=(size_t) mng_get_long(p); if (mng_info->ticks_per_second == 0) default_frame_delay=0; else default_frame_delay=1UL*image->ticks_per_second/ mng_info->ticks_per_second; frame_delay=default_frame_delay; simplicity=0; /* Skip nominal layer count, frame count, and play time */ p+=16; simplicity=(size_t) mng_get_long(p); mng_type=1; /* Full MNG */ if ((simplicity != 0) && ((simplicity | 11) == 11)) mng_type=2; /* LC */ if ((simplicity != 0) && ((simplicity | 9) == 9)) mng_type=3; /* VLC */ #if defined(MNG_INSERT_LAYERS) if (mng_type != 3) insert_layers=MagickTrue; #endif if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. 
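           (a new frame is appended whenever pixels have already been
           decoded into the current image)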
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); mng_info->image=image; } if ((mng_info->mng_width > 65535L) || (mng_info->mng_height > 65535L)) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit"); } (void) FormatLocaleString(page_geometry,MaxTextExtent, "%.20gx%.20g+0+0",(double) mng_info->mng_width,(double) mng_info->mng_height); mng_info->frame.left=0; mng_info->frame.right=(ssize_t) mng_info->mng_width; mng_info->frame.top=0; mng_info->frame.bottom=(ssize_t) mng_info->mng_height; mng_info->clip=default_fb=previous_fb=mng_info->frame; for (i=0; i < MNG_MAX_OBJECTS; i++) mng_info->object_clip[i]=mng_info->frame; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_TERM,4) == 0) { int repeat=0; if (length != 0) repeat=p[0]; if (repeat == 3 && length > 8) { final_delay=(png_uint_32) mng_get_long(&p[2]); mng_iterations=(png_uint_32) mng_get_long(&p[6]); if (mng_iterations == PNG_UINT_31_MAX) mng_iterations=0; image->iterations=mng_iterations; term_chunk_found=MagickTrue; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " repeat=%d, final_delay=%.20g, iterations=%.20g", repeat,(double) final_delay, (double) image->iterations); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_DEFI,4) == 0) { if (mng_type == 3) { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"DEFI chunk found in MNG-VLC datastream","`%s'", image->filename); chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (length < 2) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError,"CorruptImage"); } object_id=((unsigned int) p[0] << 8) | (unsigned int) p[1]; if (mng_type == 2 && object_id != 0) (void) ThrowMagickException(&image->exception, GetMagickModule(), CoderError,"Nonzero object_id in MNG-LC datastream", "`%s'", image->filename); if (object_id > MNG_MAX_OBJECTS) { /* Instead of using a warning we should allocate a larger MngInfo structure and continue. */ (void) ThrowMagickException(&image->exception, GetMagickModule(), CoderError, "object id too large","`%s'",image->filename); object_id=MNG_MAX_OBJECTS; } if (mng_info->exists[object_id]) if (mng_info->frozen[object_id]) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "DEFI cannot redefine a frozen MNG object","`%s'", image->filename); continue; } mng_info->exists[object_id]=MagickTrue; if (length > 2) mng_info->invisible[object_id]=p[2]; /* Extract object offset info. */ if (length > 11) { mng_info->x_off[object_id]=(ssize_t) mng_get_long(&p[4]); mng_info->y_off[object_id]=(ssize_t) mng_get_long(&p[8]); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " x_off[%d]: %.20g, y_off[%d]: %.20g", object_id,(double) mng_info->x_off[object_id], object_id,(double) mng_info->y_off[object_id]); } } /* Extract object clipping info. 
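         (a 16-byte boundary box, present only when the DEFI payload
         is at least 28 bytes long)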
*/ if (length > 27) mng_info->object_clip[object_id]= mng_read_box(mng_info->frame,0, &p[12]); chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_bKGD,4) == 0) { mng_info->have_global_bkgd=MagickFalse; if (length > 5) { mng_info->mng_global_bkgd.red= ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1])); mng_info->mng_global_bkgd.green= ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3])); mng_info->mng_global_bkgd.blue= ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5])); mng_info->have_global_bkgd=MagickTrue; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_BACK,4) == 0) { #if defined(MNG_INSERT_LAYERS) if (length > 6) mandatory_back=p[6]; else mandatory_back=0; if (mandatory_back && length > 5) { mng_background_color.red= ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1])); mng_background_color.green= ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3])); mng_background_color.blue= ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5])); mng_background_color.opacity=OpaqueOpacity; } #ifdef MNG_OBJECT_BUFFERS if (length > 8) mng_background_object=(p[7] << 8) | p[8]; #endif #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_PLTE,4) == 0) { /* Read global PLTE. */ if (length && (length < 769)) { if (mng_info->global_plte == (png_colorp) NULL) mng_info->global_plte=(png_colorp) AcquireQuantumMemory(256, sizeof(*mng_info->global_plte)); if (mng_info->global_plte == (png_colorp) NULL) { mng_info->global_plte_length=0; chunk=(unsigned char *) RelinquishMagickMemory(chunk); mng_info=MngInfoFreeStruct(mng_info); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (length/3); i++) { mng_info->global_plte[i].red=p[3*i]; mng_info->global_plte[i].green=p[3*i+1]; mng_info->global_plte[i].blue=p[3*i+2]; } mng_info->global_plte_length=(unsigned int) (length/3); } #ifdef MNG_LOOSE for ( ; i < 256; i++) { mng_info->global_plte[i].red=i; mng_info->global_plte[i].green=i; mng_info->global_plte[i].blue=i; } if (length != 0) mng_info->global_plte_length=256; #endif else mng_info->global_plte_length=0; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_tRNS,4) == 0) { /* read global tRNS */ if (length > 0 && length < 257) for (i=0; i < (ssize_t) length; i++) mng_info->global_trns[i]=p[i]; #ifdef MNG_LOOSE for ( ; i < 256; i++) mng_info->global_trns[i]=255; #endif mng_info->global_trns_length=(unsigned int) length; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_gAMA,4) == 0) { if (length == 4) { ssize_t igamma; igamma=mng_get_long(p); mng_info->global_gamma=((float) igamma)*0.00001; mng_info->have_global_gama=MagickTrue; } else mng_info->have_global_gama=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_cHRM,4) == 0) { /* Read global cHRM */ if (length == 32) { mng_info->global_chrm.white_point.x=0.00001*mng_get_long(p); mng_info->global_chrm.white_point.y=0.00001*mng_get_long(&p[4]); mng_info->global_chrm.red_primary.x=0.00001*mng_get_long(&p[8]); mng_info->global_chrm.red_primary.y=0.00001* mng_get_long(&p[12]); mng_info->global_chrm.green_primary.x=0.00001* mng_get_long(&p[16]); mng_info->global_chrm.green_primary.y=0.00001* mng_get_long(&p[20]); mng_info->global_chrm.blue_primary.x=0.00001* mng_get_long(&p[24]); mng_info->global_chrm.blue_primary.y=0.00001* mng_get_long(&p[28]); 
mng_info->have_global_chrm=MagickTrue; } else mng_info->have_global_chrm=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_sRGB,4) == 0) { /* Read global sRGB. */ if (length != 0) { mng_info->global_srgb_intent= Magick_RenderingIntent_from_PNG_RenderingIntent(p[0]); mng_info->have_global_srgb=MagickTrue; } else mng_info->have_global_srgb=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_iCCP,4) == 0) { /* To do: */ /* Read global iCCP. */ chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_FRAM,4) == 0) { if (mng_type == 3) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"FRAM chunk found in MNG-VLC datastream","`%s'", image->filename); if ((mng_info->framing_mode == 2) || (mng_info->framing_mode == 4)) image->delay=frame_delay; frame_delay=default_frame_delay; frame_timeout=default_frame_timeout; fb=default_fb; if (length > 0) if (p[0]) mng_info->framing_mode=p[0]; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_mode=%d",mng_info->framing_mode); if (length > 6) { /* Note the delay and frame clipping boundaries. */ p++; /* framing mode */ while (((p-chunk) < (long) length) && *p) p++; /* frame name */ p++; /* frame name terminator */ if ((p-chunk) < (ssize_t) (length-4)) { int change_delay, change_timeout, change_clipping; change_delay=(*p++); change_timeout=(*p++); change_clipping=(*p++); p++; /* change_sync */ if (change_delay && ((p-chunk) < (ssize_t) (length-4))) { frame_delay=1UL*image->ticks_per_second* mng_get_long(p); if (mng_info->ticks_per_second != 0) frame_delay/=mng_info->ticks_per_second; else frame_delay=PNG_UINT_31_MAX; if (change_delay == 2) default_frame_delay=frame_delay; p+=4; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_delay=%.20g",(double) frame_delay); } if (change_timeout && ((p-chunk) < (ssize_t) (length-4))) { frame_timeout=1UL*image->ticks_per_second* mng_get_long(p); if (mng_info->ticks_per_second != 0) frame_timeout/=mng_info->ticks_per_second; else frame_timeout=PNG_UINT_31_MAX; if (change_timeout == 2) default_frame_timeout=frame_timeout; p+=4; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_timeout=%.20g",(double) frame_timeout); } if (change_clipping && ((p-chunk) < (ssize_t) (length-16))) { fb=mng_read_box(previous_fb,(char) p[0],&p[1]); p+=16; previous_fb=fb; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Frame_clip: L=%.20g R=%.20g T=%.20g B=%.20g", (double) fb.left,(double) fb.right,(double) fb.top, (double) fb.bottom); if (change_clipping == 2) default_fb=fb; } } } mng_info->clip=fb; mng_info->clip=mng_minimum_box(fb,mng_info->frame); subframe_width=(size_t) (mng_info->clip.right -mng_info->clip.left); subframe_height=(size_t) (mng_info->clip.bottom -mng_info->clip.top); /* Insert a background layer behind the frame if framing_mode is 4. */ #if defined(MNG_INSERT_LAYERS) if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " subframe_width=%.20g, subframe_height=%.20g",(double) subframe_width,(double) subframe_height); if (insert_layers && (mng_info->framing_mode == 4) && (subframe_width) && (subframe_height)) { /* Allocate next image structure. 
*/ if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; image->columns=subframe_width; image->rows=subframe_height; image->page.width=subframe_width; image->page.height=subframe_height; image->page.x=mng_info->clip.left; image->page.y=mng_info->clip.top; image->background_color=mng_background_color; image->matte=MagickFalse; image->delay=0; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Insert backgd layer, L=%.20g, R=%.20g T=%.20g, B=%.20g", (double) mng_info->clip.left,(double) mng_info->clip.right, (double) mng_info->clip.top,(double) mng_info->clip.bottom); } #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_CLIP,4) == 0) { unsigned int first_object, last_object; /* Read CLIP. */ if (length > 3) { first_object=(p[0] << 8) | p[1]; last_object=(p[2] << 8) | p[3]; p+=4; for (i=(int) first_object; i <= (int) last_object; i++) { if ((i < 0) || (i >= MNG_MAX_OBJECTS)) continue; if (mng_info->exists[i] && !mng_info->frozen[i]) { MngBox box; box=mng_info->object_clip[i]; if ((p-chunk) < (ssize_t) (length-17)) mng_info->object_clip[i]= mng_read_box(box,(char) p[0],&p[1]); } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_SAVE,4) == 0) { for (i=1; i < MNG_MAX_OBJECTS; i++) if (mng_info->exists[i]) { mng_info->frozen[i]=MagickTrue; #ifdef MNG_OBJECT_BUFFERS if (mng_info->ob[i] != (MngBuffer *) NULL) mng_info->ob[i]->frozen=MagickTrue; #endif } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if ((memcmp(type,mng_DISC,4) == 0) || (memcmp(type,mng_SEEK,4) == 0)) { /* Read DISC or SEEK. */ if ((length == 0) || !memcmp(type,mng_SEEK,4)) { for (i=1; i < MNG_MAX_OBJECTS; i++) MngInfoDiscardObject(mng_info,i); } else { register ssize_t j; for (j=1; j < (ssize_t) length; j+=2) { i=p[j-1] << 8 | p[j]; MngInfoDiscardObject(mng_info,i); } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_MOVE,4) == 0) { size_t first_object, last_object; /* read MOVE */ if (length > 3) { first_object=(p[0] << 8) | p[1]; last_object=(p[2] << 8) | p[3]; p+=4; for (i=(ssize_t) first_object; i <= (ssize_t) last_object; i++) { if ((i < 0) || (i >= MNG_MAX_OBJECTS)) continue; if (mng_info->exists[i] && !mng_info->frozen[i] && (p-chunk) < (ssize_t) (length-8)) { MngPair new_pair; MngPair old_pair; old_pair.a=mng_info->x_off[i]; old_pair.b=mng_info->y_off[i]; new_pair=mng_read_pair(old_pair,(int) p[0],&p[1]); mng_info->x_off[i]=new_pair.a; mng_info->y_off[i]=new_pair.b; } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_LOOP,4) == 0) { ssize_t loop_iters=1; if (length > 4) { loop_level=chunk[0]; mng_info->loop_active[loop_level]=1; /* mark loop active */ /* Record starting point. 
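(TellBlob() captures the current blob offset in mng_info->loop_jump so that a matching ENDL chunk can SeekBlob() back here for each remaining iteration.)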
*/ loop_iters=mng_get_long(&chunk[1]); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " LOOP level %.20g has %.20g iterations ", (double) loop_level, (double) loop_iters); if (loop_iters <= 0) skipping_loop=loop_level; else { if (loop_iters > GetMagickResourceLimit(ListLengthResource)) loop_iters=GetMagickResourceLimit(ListLengthResource); if (loop_iters >= 2147483647L) loop_iters=2147483647L; mng_info->loop_jump[loop_level]=TellBlob(image); mng_info->loop_count[loop_level]=loop_iters; } mng_info->loop_iteration[loop_level]=0; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_ENDL,4) == 0) { if (length > 0) { loop_level=chunk[0]; if (skipping_loop > 0) { if (skipping_loop == loop_level) { /* Found end of zero-iteration loop. */ skipping_loop=(-1); mng_info->loop_active[loop_level]=0; } } else { if (mng_info->loop_active[loop_level] == 1) { mng_info->loop_count[loop_level]--; mng_info->loop_iteration[loop_level]++; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ENDL: LOOP level %.20g has %.20g remaining iters ", (double) loop_level,(double) mng_info->loop_count[loop_level]); if (mng_info->loop_count[loop_level] != 0) { offset=SeekBlob(image, mng_info->loop_jump[loop_level], SEEK_SET); if (offset < 0) { chunk=(unsigned char *) RelinquishMagickMemory( chunk); ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } } else { short last_level; /* Finished loop. */ mng_info->loop_active[loop_level]=0; last_level=(-1); for (i=0; i < loop_level; i++) if (mng_info->loop_active[i] == 1) last_level=(short) i; loop_level=last_level; } } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_CLON,4) == 0) { if (mng_info->clon_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CLON is not implemented yet","`%s'", image->filename); mng_info->clon_warning++; } if (memcmp(type,mng_MAGN,4) == 0) { png_uint_16 magn_first, magn_last, magn_mb, magn_ml, magn_mr, magn_mt, magn_mx, magn_my, magn_methx, magn_methy; if (length > 1) magn_first=(p[0] << 8) | p[1]; else magn_first=0; if (length > 3) magn_last=(p[2] << 8) | p[3]; else magn_last=magn_first; #ifndef MNG_OBJECT_BUFFERS if (magn_first || magn_last) if (mng_info->magn_warning == 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "MAGN is not implemented yet for nonzero objects", "`%s'",image->filename); mng_info->magn_warning++; } #endif if (length > 4) magn_methx=p[4]; else magn_methx=0; if (length > 6) magn_mx=(p[5] << 8) | p[6]; else magn_mx=1; if (magn_mx == 0) magn_mx=1; if (length > 8) magn_my=(p[7] << 8) | p[8]; else magn_my=magn_mx; if (magn_my == 0) magn_my=1; if (length > 10) magn_ml=(p[9] << 8) | p[10]; else magn_ml=magn_mx; if (magn_ml == 0) magn_ml=1; if (length > 12) magn_mr=(p[11] << 8) | p[12]; else magn_mr=magn_mx; if (magn_mr == 0) magn_mr=1; if (length > 14) magn_mt=(p[13] << 8) | p[14]; else magn_mt=magn_my; if (magn_mt == 0) magn_mt=1; if (length > 16) magn_mb=(p[15] << 8) | p[16]; else magn_mb=magn_my; if (magn_mb == 0) magn_mb=1; if (length > 17) magn_methy=p[17]; else magn_methy=magn_methx; if (magn_methx > 5 || magn_methy > 5) if (mng_info->magn_warning == 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "Unknown MAGN method in MNG datastream","`%s'", image->filename); mng_info->magn_warning++; } #ifdef MNG_OBJECT_BUFFERS /* Magnify existing objects in the range magn_first to 
magn_last */ #endif if (magn_first == 0 || magn_last == 0) { /* Save the magnification factors for object 0 */ mng_info->magn_mb=magn_mb; mng_info->magn_ml=magn_ml; mng_info->magn_mr=magn_mr; mng_info->magn_mt=magn_mt; mng_info->magn_mx=magn_mx; mng_info->magn_my=magn_my; mng_info->magn_methx=magn_methx; mng_info->magn_methy=magn_methy; } } if (memcmp(type,mng_PAST,4) == 0) { if (mng_info->past_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"PAST is not implemented yet","`%s'", image->filename); mng_info->past_warning++; } if (memcmp(type,mng_SHOW,4) == 0) { if (mng_info->show_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"SHOW is not implemented yet","`%s'", image->filename); mng_info->show_warning++; } if (memcmp(type,mng_sBIT,4) == 0) { if (length < 4) mng_info->have_global_sbit=MagickFalse; else { mng_info->global_sbit.gray=p[0]; mng_info->global_sbit.red=p[0]; mng_info->global_sbit.green=p[1]; mng_info->global_sbit.blue=p[2]; mng_info->global_sbit.alpha=p[3]; mng_info->have_global_sbit=MagickTrue; } } if (memcmp(type,mng_pHYs,4) == 0) { if (length > 8) { mng_info->global_x_pixels_per_unit= (size_t) mng_get_long(p); mng_info->global_y_pixels_per_unit= (size_t) mng_get_long(&p[4]); mng_info->global_phys_unit_type=p[8]; mng_info->have_global_phys=MagickTrue; } else mng_info->have_global_phys=MagickFalse; } if (memcmp(type,mng_pHYg,4) == 0) { if (mng_info->phyg_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"pHYg is not implemented.","`%s'",image->filename); mng_info->phyg_warning++; } if (memcmp(type,mng_BASI,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->basi_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"BASI is not implemented yet","`%s'", image->filename); mng_info->basi_warning++; #ifdef MNG_BASI_SUPPORTED if (length > 11) { basi_width=(unsigned long) mng_get_long(p); basi_height=(unsigned long) mng_get_long(&p[4]); basi_color_type=p[8]; basi_compression_method=p[9]; basi_filter_type=p[10]; basi_interlace_method=p[11]; } if (length > 13) basi_red=((png_uint_32) p[12] << 8) | (png_uint_32) p[13]; else basi_red=0; if (length > 15) basi_green=((png_uint_32) p[14] << 8) | (png_uint_32) p[15]; else basi_green=0; if (length > 17) basi_blue=((png_uint_32) p[16] << 8) | (png_uint_32) p[17]; else basi_blue=0; if (length > 19) basi_alpha=((png_uint_32) p[18] << 8) | (png_uint_32) p[19]; else { if (basi_sample_depth == 16) basi_alpha=65535L; else basi_alpha=255; } if (length > 20) basi_viewable=p[20]; else basi_viewable=0; #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_IHDR,4) #if defined(JNG_SUPPORTED) && memcmp(type,mng_JHDR,4) #endif ) { /* Not an IHDR or JHDR chunk */ chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } /* Process IHDR */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Processing %c%c%c%c chunk",type[0],type[1],type[2],type[3]); mng_info->exists[object_id]=MagickTrue; mng_info->viewable[object_id]=MagickTrue; if (mng_info->invisible[object_id]) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping invisible object"); skip_to_iend=MagickTrue; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #if defined(MNG_INSERT_LAYERS) if (length < 8) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); }
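/* The first eight bytes of an IHDR or JHDR payload are the big-endian 32-bit width and height; mng_get_long() assembles each, equivalent to this sketch: width = ((size_t) p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; and likewise for height from p[4..7]. */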
image_width=(size_t) mng_get_long(p); image_height=(size_t) mng_get_long(&p[4]); #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); /* Insert a transparent background layer behind the entire animation if it is not full screen. */ #if defined(MNG_INSERT_LAYERS) if (insert_layers && mng_type && first_mng_object) { if ((mng_info->clip.left > 0) || (mng_info->clip.top > 0) || (image_width < mng_info->mng_width) || (mng_info->clip.right < (ssize_t) mng_info->mng_width) || (image_height < mng_info->mng_height) || (mng_info->clip.bottom < (ssize_t) mng_info->mng_height)) { if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; /* Make a background rectangle. */ image->delay=0; image->columns=mng_info->mng_width; image->rows=mng_info->mng_height; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=0; image->page.y=0; image->background_color=mng_background_color; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Inserted transparent background layer, W=%.20g, H=%.20g", (double) mng_info->mng_width,(double) mng_info->mng_height); } } /* Insert a background layer behind the upcoming image if framing_mode is 3, and we haven't already inserted one. */ if (insert_layers && (mng_info->framing_mode == 3) && (subframe_width) && (subframe_height) && (simplicity == 0 || (simplicity & 0x08))) { if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; image->delay=0; image->columns=subframe_width; image->rows=subframe_height; image->page.width=subframe_width; image->page.height=subframe_height; image->page.x=mng_info->clip.left; image->page.y=mng_info->clip.top; image->background_color=mng_background_color; image->matte=MagickFalse; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Insert background layer, L=%.20g, R=%.20g T=%.20g, B=%.20g", (double) mng_info->clip.left,(double) mng_info->clip.right, (double) mng_info->clip.top,(double) mng_info->clip.bottom); } #endif /* MNG_INSERT_LAYERS */ first_mng_object=MagickFalse; if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. 
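(One list entry is allocated per visible embedded object; the PNG or JNG datastream is decoded into it after seeking back to its header below.)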
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; if (term_chunk_found) { image->start_loop=MagickTrue; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; if (mng_info->framing_mode == 1 || mng_info->framing_mode == 3) { image->delay=frame_delay; frame_delay=default_frame_delay; } else image->delay=0; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=mng_info->x_off[object_id]; image->page.y=mng_info->y_off[object_id]; image->iterations=mng_iterations; /* Seek back to the beginning of the IHDR or JHDR chunk's length field. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Seeking back to beginning of %c%c%c%c chunk",type[0],type[1], type[2],type[3]); offset=SeekBlob(image,-((ssize_t) length+12),SEEK_CUR); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } mng_info->image=image; mng_info->mng_type=mng_type; mng_info->object_id=object_id; if (memcmp(type,mng_IHDR,4) == 0) image=ReadOnePNGImage(mng_info,image_info,exception); #if defined(JNG_SUPPORTED) else image=ReadOneJNGImage(mng_info,image_info,exception); #endif if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadOneMNGImage() with error"); return((Image *) NULL); } if (image->columns == 0 || image->rows == 0) { (void) CloseBlob(image); return(DestroyImageList(image)); } mng_info->image=image; if (mng_type) { MngBox crop_box; if (mng_info->magn_methx || mng_info->magn_methy) { png_uint_32 magnified_height, magnified_width; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Processing MNG MAGN chunk"); if (mng_info->magn_methx == 1) { magnified_width=mng_info->magn_ml; if (image->columns > 1) magnified_width += mng_info->magn_mr; if (image->columns > 2) magnified_width += (png_uint_32) ((image->columns-2)*(mng_info->magn_mx)); } else { magnified_width=(png_uint_32) image->columns; if (image->columns > 1) magnified_width += mng_info->magn_ml-1; if (image->columns > 2) magnified_width += mng_info->magn_mr-1; if (image->columns > 3) magnified_width += (png_uint_32) ((image->columns-3)*(mng_info->magn_mx-1)); } if (mng_info->magn_methy == 1) { magnified_height=mng_info->magn_mt; if (image->rows > 1) magnified_height += mng_info->magn_mb; if (image->rows > 2) magnified_height += (png_uint_32) ((image->rows-2)*(mng_info->magn_my)); } else { magnified_height=(png_uint_32) image->rows; if (image->rows > 1) magnified_height += mng_info->magn_mt-1; if (image->rows > 2) magnified_height += mng_info->magn_mb-1; if (image->rows > 3) magnified_height += (png_uint_32) ((image->rows-3)*(mng_info->magn_my-1)); } if (magnified_height > image->rows || magnified_width > image->columns) { Image *large_image; int yy; ssize_t m, y; register ssize_t x; register PixelPacket *n, *q; PixelPacket *next, *prev; png_uint_16 magn_methx, magn_methy; /* Allocate next image structure.
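(This entry receives the MAGN-magnified pixels; the unmagnified original is deleted from the list once its rows and columns have been expanded.)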
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocate magnified image"); AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); large_image=SyncNextImageInList(image); large_image->columns=magnified_width; large_image->rows=magnified_height; magn_methx=mng_info->magn_methx; magn_methy=mng_info->magn_methy; #if (MAGICKCORE_QUANTUM_DEPTH > 16) #define QM unsigned short if (magn_methx != 1 || magn_methy != 1) { /* Scale pixels to unsigned shorts to prevent overflow of intermediate values of interpolations */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { SetPixelRed(q,ScaleQuantumToShort( GetPixelRed(q))); SetPixelGreen(q,ScaleQuantumToShort( GetPixelGreen(q))); SetPixelBlue(q,ScaleQuantumToShort( GetPixelBlue(q))); SetPixelOpacity(q,ScaleQuantumToShort( GetPixelOpacity(q))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #else #define QM Quantum #endif if (image->matte != MagickFalse) (void) SetImageBackgroundColor(large_image); else { large_image->background_color.opacity=OpaqueOpacity; (void) SetImageBackgroundColor(large_image); if (magn_methx == 4) magn_methx=2; if (magn_methx == 5) magn_methx=3; if (magn_methy == 4) magn_methy=2; if (magn_methy == 5) magn_methy=3; } /* magnify the rows into the right side of the large image */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Magnify the rows to %.20g",(double) large_image->rows); m=(ssize_t) mng_info->magn_mt; yy=0; length=(size_t) image->columns; next=(PixelPacket *) AcquireQuantumMemory(length,sizeof(*next)); prev=(PixelPacket *) AcquireQuantumMemory(length,sizeof(*prev)); if ((prev == (PixelPacket *) NULL) || (next == (PixelPacket *) NULL)) { if (prev != (PixelPacket *) NULL) prev=(PixelPacket *) RelinquishMagickMemory(prev); if (next != (PixelPacket *) NULL) next=(PixelPacket *) RelinquishMagickMemory(next); image=DestroyImageList(image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } n=GetAuthenticPixels(image,0,0,image->columns,1,exception); (void) memcpy(next,n,length*sizeof(*next)); /* copy the full row of PixelPackets, not just 'length' bytes */ for (y=0; y < (ssize_t) image->rows; y++) { if (y == 0) m=(ssize_t) mng_info->magn_mt; else if (magn_methy > 1 && y == (ssize_t) image->rows-2) m=(ssize_t) mng_info->magn_mb; else if (magn_methy <= 1 && y == (ssize_t) image->rows-1) m=(ssize_t) mng_info->magn_mb; else if (magn_methy > 1 && y == (ssize_t) image->rows-1) m=1; else m=(ssize_t) mng_info->magn_my; n=prev; prev=next; next=n; if (y < (ssize_t) image->rows-1) { n=GetAuthenticPixels(image,0,y+1,image->columns,1, exception); (void) memcpy(next,n,length*sizeof(*next)); } for (i=0; i < m; i++, yy++) { register PixelPacket *pixels; assert(yy < (ssize_t) large_image->rows); pixels=prev; n=next; q=GetAuthenticPixels(large_image,0,yy,large_image->columns, 1,exception); if (q == (PixelPacket *) NULL) break; q+=(large_image->columns-image->columns); for (x=(ssize_t) image->columns-1; x >= 0; x--) { /* To do: get color as function of indexes[x] */ /* if (image->storage_class == PseudoClass) { } */ if (magn_methy <= 1) { /* replicate previous */ SetPixelRGBO(q,(pixels)); } else if (magn_methy == 2 || magn_methy == 4) { if (i == 0) { SetPixelRGBO(q,(pixels)); } else { /* Interpolate; +m outside the difference rounds the quotient, matching the column pass below */ SetPixelRed(q, ((QM) (((ssize_t) (2*i*(GetPixelRed(n) -GetPixelRed(pixels))+m)/ ((ssize_t) (m*2))
+GetPixelRed(pixels))))); SetPixelGreen(q, ((QM) (((ssize_t) (2*i*(GetPixelGreen(n) -GetPixelGreen(pixels))+m)/ ((ssize_t) (m*2)) +GetPixelGreen(pixels))))); SetPixelBlue(q, ((QM) (((ssize_t) (2*i*(GetPixelBlue(n) -GetPixelBlue(pixels))+m)/ ((ssize_t) (m*2)) +GetPixelBlue(pixels))))); if (image->matte != MagickFalse) SetPixelOpacity(q, ((QM) (((ssize_t) (2*i*(GetPixelOpacity(n) -GetPixelOpacity(pixels))+m) /((ssize_t) (m*2))+ GetPixelOpacity(pixels))))); } if (magn_methy == 4) { /* Replicate nearest */ if (i <= ((m+1) << 1)) SetPixelOpacity(q, (*pixels).opacity+0); else SetPixelOpacity(q, (*n).opacity+0); } } else /* if (magn_methy == 3 || magn_methy == 5) */ { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelRGBO(q,(pixels)); } else { SetPixelRGBO(q,(n)); } if (magn_methy == 5) { SetPixelOpacity(q, (QM) (((ssize_t) (2*i* (GetPixelOpacity(n) -GetPixelOpacity(pixels)) +m))/((ssize_t) (m*2)) +GetPixelOpacity(pixels))); } } n++; q++; pixels++; } /* x */ if (SyncAuthenticPixels(large_image,exception) == MagickFalse) break; } /* i */ } /* y */ prev=(PixelPacket *) RelinquishMagickMemory(prev); next=(PixelPacket *) RelinquishMagickMemory(next); length=image->columns; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Delete original image"); DeleteImageFromList(&image); image=large_image; mng_info->image=image; /* magnify the columns */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Magnify the columns to %.20g",(double) image->columns); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *pixels; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; pixels=q+(image->columns-length); n=pixels+1; for (x=(ssize_t) (image->columns-length); x < (ssize_t) image->columns; x++) { /* To do: Rewrite using Get/Set***PixelComponent() */ if (x == (ssize_t) (image->columns-length)) m=(ssize_t) mng_info->magn_ml; else if (magn_methx > 1 && x == (ssize_t) image->columns-2) m=(ssize_t) mng_info->magn_mr; else if (magn_methx <= 1 && x == (ssize_t) image->columns-1) m=(ssize_t) mng_info->magn_mr; else if (magn_methx > 1 && x == (ssize_t) image->columns-1) m=1; else m=(ssize_t) mng_info->magn_mx; for (i=0; i < m; i++) { if (magn_methx <= 1) { /* replicate previous */ SetPixelRGBO(q,(pixels)); } else if (magn_methx == 2 || magn_methx == 4) { if (i == 0) { SetPixelRGBO(q,(pixels)); } /* To do: Rewrite using Get/Set***PixelComponent() */ else { /* Interpolate */ SetPixelRed(q, (QM) ((2*i*( GetPixelRed(n) -GetPixelRed(pixels))+m) /((ssize_t) (m*2))+ GetPixelRed(pixels))); SetPixelGreen(q, (QM) ((2*i*( GetPixelGreen(n) -GetPixelGreen(pixels))+m) /((ssize_t) (m*2))+ GetPixelGreen(pixels))); SetPixelBlue(q, (QM) ((2*i*( GetPixelBlue(n) -GetPixelBlue(pixels))+m) /((ssize_t) (m*2))+ GetPixelBlue(pixels))); if (image->matte != MagickFalse) SetPixelOpacity(q, (QM) ((2*i*( GetPixelOpacity(n) -GetPixelOpacity(pixels))+m) /((ssize_t) (m*2))+ GetPixelOpacity(pixels))); } if (magn_methx == 4) { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelOpacity(q, GetPixelOpacity(pixels)+0); } else { SetPixelOpacity(q, GetPixelOpacity(n)+0); } } } else /* if (magn_methx == 3 || magn_methx == 5) */ { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelRGBO(q,(pixels)); } else { SetPixelRGBO(q,(n)); } if (magn_methx == 5) { /* Interpolate */ SetPixelOpacity(q, (QM) ((2*i*( GetPixelOpacity(n) -GetPixelOpacity(pixels))+m)/ ((ssize_t) (m*2)) +GetPixelOpacity(pixels))); } } q++; } n++; } if
(SyncAuthenticPixels(image,exception) == MagickFalse) break; } #if (MAGICKCORE_QUANTUM_DEPTH > 16) if (magn_methx != 1 || magn_methy != 1) { /* Rescale pixels to Quantum */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { SetPixelRed(q,ScaleShortToQuantum( GetPixelRed(q))); SetPixelGreen(q,ScaleShortToQuantum( GetPixelGreen(q))); SetPixelBlue(q,ScaleShortToQuantum( GetPixelBlue(q))); SetPixelOpacity(q,ScaleShortToQuantum( GetPixelOpacity(q))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished MAGN processing"); } } /* Crop_box is with respect to the upper left corner of the MNG. */ crop_box.left=mng_info->image_box.left+mng_info->x_off[object_id]; crop_box.right=mng_info->image_box.right+mng_info->x_off[object_id]; crop_box.top=mng_info->image_box.top+mng_info->y_off[object_id]; crop_box.bottom=mng_info->image_box.bottom+mng_info->y_off[object_id]; crop_box=mng_minimum_box(crop_box,mng_info->clip); crop_box=mng_minimum_box(crop_box,mng_info->frame); crop_box=mng_minimum_box(crop_box,mng_info->object_clip[object_id]); if ((crop_box.left != (mng_info->image_box.left +mng_info->x_off[object_id])) || (crop_box.right != (mng_info->image_box.right +mng_info->x_off[object_id])) || (crop_box.top != (mng_info->image_box.top +mng_info->y_off[object_id])) || (crop_box.bottom != (mng_info->image_box.bottom +mng_info->y_off[object_id]))) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Crop the PNG image"); if ((crop_box.left < crop_box.right) && (crop_box.top < crop_box.bottom)) { Image *im; RectangleInfo crop_info; /* Crop_info is with respect to the upper left corner of the image. */ crop_info.x=(crop_box.left-mng_info->x_off[object_id]); crop_info.y=(crop_box.top-mng_info->y_off[object_id]); crop_info.width=(size_t) (crop_box.right-crop_box.left); crop_info.height=(size_t) (crop_box.bottom-crop_box.top); image->page.width=image->columns; image->page.height=image->rows; image->page.x=0; image->page.y=0; im=CropImage(image,&crop_info,exception); if (im != (Image *) NULL) { image->columns=im->columns; image->rows=im->rows; im=DestroyImage(im); image->page.width=image->columns; image->page.height=image->rows; image->page.x=crop_box.left; image->page.y=crop_box.top; } } else { /* No pixels in crop area. The MNG spec still requires a layer, though, so make a single transparent pixel in the top left corner. */ image->columns=1; image->rows=1; image->colors=2; (void) SetImageBackgroundColor(image); image->page.width=1; image->page.height=1; image->page.x=0; image->page.y=0; } } #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED image=mng_info->image; #endif } #if (MAGICKCORE_QUANTUM_DEPTH > 16) /* PNG does not handle depths greater than 16 so reduce it even * if lossy, and promote any depths > 8 to 16. 
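* LosslessReduceDepthOK() just below then drops the image back to 8 bits when no low-order information would be lost.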
*/ if (image->depth > 16) image->depth=16; #endif #if (MAGICKCORE_QUANTUM_DEPTH > 8) if (image->depth > 8) { /* To do: fill low byte properly */ image->depth=16; } if (LosslessReduceDepthOK(image) != MagickFalse) image->depth = 8; #endif GetImageException(image,exception); if (image_info->number_scenes != 0) { if (mng_info->scenes_found > (ssize_t) (image_info->first_scene+image_info->number_scenes)) break; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished reading image datastream."); } while (LocaleCompare(image_info->magick,"MNG") == 0); (void) CloseBlob(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished reading all image datastreams."); #if defined(MNG_INSERT_LAYERS) if (insert_layers && !mng_info->image_found && (mng_info->mng_width) && (mng_info->mng_height)) { /* Insert a background layer if nothing else was found. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No images found. Inserting a background layer."); if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocation failed, returning NULL."); return(DestroyImageList(image)); } image=SyncNextImageInList(image); } image->columns=mng_info->mng_width; image->rows=mng_info->mng_height; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=0; image->page.y=0; image->background_color=mng_background_color; image->matte=MagickFalse; if (image_info->ping == MagickFalse) (void) SetImageBackgroundColor(image); mng_info->image_found++; } #endif image->iterations=mng_iterations; if (mng_iterations == 1) image->start_loop=MagickTrue; while (GetPreviousImageInList(image) != (Image *) NULL) { image_count++; if (image_count > 10*mng_info->image_found) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," No beginning"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"Linked list is corrupted, beginning of list not found", "`%s'",image_info->filename); return(DestroyImageList(image)); } image=GetPreviousImageInList(image); if (GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," Corrupt list"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"Linked list is corrupted; next_image is NULL","`%s'", image_info->filename); } } if (mng_info->ticks_per_second && mng_info->image_found > 1 && GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " First image null"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"image->next for first image is NULL but shouldn't be.", "`%s'",image_info->filename); } if (mng_info->image_found == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No visible images found."); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"No visible images in file","`%s'",image_info->filename); return(DestroyImageList(image)); } if (mng_info->ticks_per_second) final_delay=1UL*MagickMax(image->ticks_per_second,1L)* final_delay/mng_info->ticks_per_second; else image->start_loop=MagickTrue; /* Find final nonzero image delay */ 
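/* For example, a final_delay of 50 MNG ticks at mng_info->ticks_per_second == 50 and image->ticks_per_second == 100 was rescaled to 100 above; the scan below then raises it to the last nonzero per-frame delay when that is longer. */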
final_image_delay=0; while (GetNextImageInList(image) != (Image *) NULL) { if (image->delay) final_image_delay=image->delay; image=GetNextImageInList(image); } if (final_delay < final_image_delay) final_delay=final_image_delay; image->delay=final_delay; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->delay=%.20g, final_delay=%.20g",(double) image->delay, (double) final_delay); if (logging != MagickFalse) { int scene; scene=0; image=GetFirstImageInList(image); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Before coalesce:"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " scene 0 delay=%.20g",(double) image->delay); while (GetNextImageInList(image) != (Image *) NULL) { image=GetNextImageInList(image); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " scene %.20g delay=%.20g",(double) scene++,(double) image->delay); } } image=GetFirstImageInList(image); #ifdef MNG_COALESCE_LAYERS if (insert_layers) { Image *next_image, *next; size_t scene; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," Coalesce Images"); scene=image->scene; next_image=CoalesceImages(image,&image->exception); if (next_image == (Image *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); image=DestroyImageList(image); image=next_image; for (next=image; next != (Image *) NULL; next=next_image) { next->page.width=mng_info->mng_width; next->page.height=mng_info->mng_height; next->page.x=0; next->page.y=0; next->scene=scene++; next_image=GetNextImageInList(next); if (next_image == (Image *) NULL) break; if (next->delay == 0) { scene--; next_image->previous=GetPreviousImageInList(next); if (GetPreviousImageInList(next) == (Image *) NULL) image=next_image; else next->previous->next=next_image; next=DestroyImage(next); } } } #endif while (GetNextImageInList(image) != (Image *) NULL) image=GetNextImageInList(image); image->dispose=BackgroundDispose; if (logging != MagickFalse) { int scene; scene=0; image=GetFirstImageInList(image); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " After coalesce:"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " scene 0 delay=%.20g dispose=%.20g",(double) image->delay, (double) image->dispose); while (GetNextImageInList(image) != (Image *) NULL) { image=GetNextImageInList(image); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " scene %.20g delay=%.20g dispose=%.20g",(double) scene++, (double) image->delay,(double) image->dispose); } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOneMNGImage();"); return(image); } static Image *ReadMNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType logging, status; MngInfo *mng_info; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadMNGImage()"); image=AcquireImage(image_info); mng_info=(MngInfo *) NULL; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Allocate a MngInfo structure.
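It is zero-initialized with memset() below and released through MngInfoFreeStruct() once ReadOneMNGImage() returns.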
*/ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOneMNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadMNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadMNGImage()"); return(GetFirstImageInList(image)); } #else /* PNG_LIBPNG_VER > 10011 */ static Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { printf("Your PNG library is too old: You have libpng-%s\n", PNG_LIBPNG_VER_STRING); (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "PNG library is too old","`%s'",image_info->filename); return(Image *) NULL; } static Image *ReadMNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { return(ReadPNGImage(image_info,exception)); } #endif /* PNG_LIBPNG_VER > 10011 */ #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPNGImage() adds properties for the PNG image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPNGImage method is: % % size_t RegisterPNGImage(void) % */ ModuleExport size_t RegisterPNGImage(void) { char version[MaxTextExtent]; MagickInfo *entry; static const char *PNGNote= { "See http://www.libpng.org/ for details about the PNG format." }, *JNGNote= { "See http://www.libpng.org/pub/mng/ for details about the JNG\n" "format." }, *MNGNote= { "See http://www.libpng.org/pub/mng/ for details about the MNG\n" "format." 
}; *version='\0'; #if defined(PNG_LIBPNG_VER_STRING) (void) ConcatenateMagickString(version,"libpng ",MaxTextExtent); (void) ConcatenateMagickString(version,PNG_LIBPNG_VER_STRING,MaxTextExtent); if (LocaleCompare(PNG_LIBPNG_VER_STRING,png_get_header_ver(NULL)) != 0) { (void) ConcatenateMagickString(version,",",MaxTextExtent); (void) ConcatenateMagickString(version,png_get_libpng_ver(NULL), MaxTextExtent); } #endif entry=SetMagickInfo("MNG"); entry->seekable_stream=MagickTrue; #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadMNGImage; entry->encoder=(EncodeImageHandler *) WriteMNGImage; #endif entry->magick=(IsImageFormatHandler *) IsMNG; entry->description=ConstantString("Multiple-image Network Graphics"); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("video/x-mng"); entry->module=ConstantString("PNG"); entry->note=ConstantString(MNGNote); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("Portable Network Graphics"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); if (*version != '\0') entry->version=ConstantString(version); entry->note=ConstantString(PNGNote); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG8"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString( "8-bit indexed with optional binary transparency"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG24"); *version='\0'; #if defined(ZLIB_VERSION) (void) ConcatenateMagickString(version,"zlib ",MaxTextExtent); (void) ConcatenateMagickString(version,ZLIB_VERSION,MaxTextExtent); if (LocaleCompare(ZLIB_VERSION,zlib_version) != 0) { (void) ConcatenateMagickString(version,",",MaxTextExtent); (void) ConcatenateMagickString(version,zlib_version,MaxTextExtent); } #endif if (*version != '\0') entry->version=ConstantString(version); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or binary transparent 24-bit RGB"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG32"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or transparent 32-bit RGBA"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG48"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler 
*) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or binary transparent 48-bit RGB"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG64"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or transparent 64-bit RGBA"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG00"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString( "PNG inheriting bit-depth, color-type from original if possible"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JNG"); #if defined(JNG_SUPPORTED) #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJNGImage; entry->encoder=(EncodeImageHandler *) WriteJNGImage; #endif #endif entry->magick=(IsImageFormatHandler *) IsJNG; entry->seekable_stream=MagickTrue; entry->adjoin=MagickFalse; entry->description=ConstantString("JPEG Network Graphics"); entry->mime_type=ConstantString("image/x-jng"); entry->module=ConstantString("PNG"); entry->note=ConstantString(JNGNote); (void) RegisterMagickInfo(entry); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE ping_semaphore=AllocateSemaphoreInfo(); #endif return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPNGImage() removes format registrations made by the % PNG module from the list of supported formats. % % The format of the UnregisterPNGImage method is: % % UnregisterPNGImage(void) % */ ModuleExport void UnregisterPNGImage(void) { (void) UnregisterMagickInfo("MNG"); (void) UnregisterMagickInfo("PNG"); (void) UnregisterMagickInfo("PNG8"); (void) UnregisterMagickInfo("PNG24"); (void) UnregisterMagickInfo("PNG32"); (void) UnregisterMagickInfo("PNG48"); (void) UnregisterMagickInfo("PNG64"); (void) UnregisterMagickInfo("PNG00"); (void) UnregisterMagickInfo("JNG"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE if (ping_semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&ping_semaphore); #endif } #if defined(MAGICKCORE_PNG_DELEGATE) #if PNG_LIBPNG_VER > 10011 /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e M N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteMNGImage() writes an image in the Portable Network Graphics % Group's "Multiple-image Network Graphics" encoded image format. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... 
% % The format of the WriteMNGImage method is: % % MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % % To do (as of version 5.5.2, November 26, 2002 -- glennrp -- see also % "To do" under ReadPNGImage): % % Preserve all unknown and not-yet-handled known chunks found in input % PNG file and copy them into output PNG files according to the PNG % copying rules. % % Write the iCCP chunk at MNG level when (icc profile length > 0) % % Improve selection of color type (use indexed-colour or indexed-colour % with tRNS when 256 or fewer unique RGBA values are present). % % Figure out what to do with "dispose=<restore-to-previous>" (dispose == 3) % This will be complicated if we limit ourselves to generating MNG-LC % files. For now we ignore disposal method 3 and simply overlay the next % image on it. % % Check for identical PLTE's or PLTE/tRNS combinations and use a % global MNG PLTE or PLTE/tRNS combination when appropriate. % [mostly done 15 June 1999 but still need to take care of tRNS] % % Check for identical sRGB and replace with a global sRGB (and remove % gAMA/cHRM if sRGB is found; check for identical gAMA/cHRM and % replace with global gAMA/cHRM (or with sRGB if appropriate; replace % local gAMA/cHRM with local sRGB if appropriate). % % Check for identical sBIT chunks and write global ones. % % Provide option to skip writing the signature tEXt chunks. % % Use signatures to detect identical objects and reuse the first % instance of such objects instead of writing duplicate objects. % % Use a smaller-than-32k value of compression window size when % appropriate. % % Encode JNG datastreams. Mostly done as of 5.5.2; need to write % ancillary text chunks and save profiles. % % Provide an option to force LC files (to ensure exact framing rate) % instead of VLC. % % Provide an option to force VLC files instead of LC, even when offsets % are present. This will involve expanding the embedded images with a % transparent region at the top and/or left. 
*/ static void Magick_png_write_raw_profile(const ImageInfo *image_info,png_struct *ping, png_info *ping_info, unsigned char *profile_type, unsigned char *profile_description, unsigned char *profile_data, png_uint_32 length) { png_textp text; register ssize_t i; unsigned char *sp; png_charp dp; png_uint_32 allocated_length, description_length; unsigned char hex[16]={'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; if (LocaleNCompare((char *) profile_type+1, "ng-chunk-",9) == 0) return; if (image_info->verbose) { (void) printf("writing raw profile: type=%s, length=%.20g\n", (char *) profile_type, (double) length); } #if PNG_LIBPNG_VER >= 10400 text=(png_textp) png_malloc(ping,(png_alloc_size_t) sizeof(png_text)); #else text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text)); #endif description_length=(png_uint_32) strlen((const char *) profile_description); allocated_length=(png_uint_32) (length*2 + (length >> 5) + 20 + description_length); #if PNG_LIBPNG_VER >= 10400 text[0].text=(png_charp) png_malloc(ping, (png_alloc_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_alloc_size_t) 80); #else text[0].text=(png_charp) png_malloc(ping, (png_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_size_t) 80); #endif text[0].key[0]='\0'; (void) ConcatenateMagickString(text[0].key, "Raw profile type ",MaxTextExtent); (void) ConcatenateMagickString(text[0].key,(const char *) profile_type,62); sp=profile_data; dp=text[0].text; *dp++='\n'; (void) CopyMagickString(dp,(const char *) profile_description, allocated_length); dp+=description_length; *dp++='\n'; (void) FormatLocaleString(dp,allocated_length- (png_size_t) (dp-text[0].text),"%8lu ",(unsigned long) length); dp+=8; for (i=0; i < (ssize_t) length; i++) { if (i%36 == 0) *dp++='\n'; *(dp++)=(char) hex[((*sp >> 4) & 0x0f)]; *(dp++)=(char) hex[((*sp++ ) & 0x0f)]; } *dp++='\n'; *dp='\0'; text[0].text_length=(png_size_t) (dp-text[0].text); text[0].compression=image_info->compression == NoCompression || (image_info->compression == UndefinedCompression && text[0].text_length < 128) ? 
-1 : 0; if (text[0].text_length <= allocated_length) png_set_text(ping,ping_info,text,1); png_free(ping,text[0].text); png_free(ping,text[0].key); png_free(ping,text); } #if defined(PNG_tIME_SUPPORTED) static void write_tIME_chunk(Image *image,png_struct *ping,png_info *info, const char *date) { const char *timestamp; int ret; int day, hour, minute, month, second, year; int addhours=0, addminutes=0; png_time ptime; if (date == (const char *) NULL) timestamp=GetImageProperty(image,"date:modify"); else timestamp=date; if (timestamp == (const char *) NULL) return; /* no date available; passing NULL to sscanf() below would crash */ LogMagickEvent(CoderEvent,GetMagickModule(), " Writing tIME chunk: timestamp property is %30s\n",timestamp); ret=sscanf(timestamp,"%d-%d-%dT%d:%d:%d",&year,&month,&day,&hour, &minute, &second); addhours=0; addminutes=0; ret=sscanf(timestamp,"%d-%d-%dT%d:%d:%d%d:%d",&year,&month,&day,&hour, &minute, &second, &addhours, &addminutes); LogMagickEvent(CoderEvent,GetMagickModule(), " Date format specified for png:tIME=%s" ,timestamp); LogMagickEvent(CoderEvent,GetMagickModule(), " ret=%d,y=%d, m=%d, d=%d, h=%d, m=%d, s=%d, ah=%d, as=%d", ret,year,month,day,hour,minute,second,addhours,addminutes); if (ret < 6) { LogMagickEvent(CoderEvent,GetMagickModule(), " Invalid date, ret=%d",ret); (void) ThrowMagickException(&image->exception,GetMagickModule(),CoderError, "Invalid date format specified for png:tIME","`%s' (ret=%d)", image->filename,ret); return; } if (addhours < 0) { addhours+=24; addminutes=-addminutes; day--; } hour+=addhours; minute+=addminutes; if (day == 0) { month--; day=31; if(month == 2) day=28; else { if(month == 4 || month == 6 || month == 9 || month == 11) day=30; else day=31; } } if (month == 0) { month++; year--; } if (minute > 59) { hour++; minute-=60; } if (hour > 23) { day ++; hour -=24; } if (hour < 0) { day --; hour +=24; } /* To do: fix this for leap years */ if (day > 31 || (month == 2 && day > 28) || ((month == 4 || month == 6 || month == 9 || month == 11) && day > 30)) { month++; day = 1; } if (month > 12) { year++; month=1; } ptime.year = year; ptime.month = month; ptime.day = day; ptime.hour = hour; ptime.minute = minute; ptime.second = second; LogMagickEvent(CoderEvent,GetMagickModule(), " png_set_tIME: y=%d, m=%d, d=%d, h=%d, m=%d, s=%d, ah=%d, am=%d", ptime.year, ptime.month, ptime.day, ptime.hour, ptime.minute, ptime.second, addhours, addminutes); png_set_tIME(ping,info,&ptime); } #endif /* Write one PNG image */ static MagickBooleanType WriteOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info,Image *image) { char s[2]; char im_vers[32], libpng_runv[32], libpng_vers[32], zlib_runv[32], zlib_vers[32]; const char *name, *property, *value; const StringInfo *profile; int num_passes, pass, ping_wrote_caNv; png_byte ping_trans_alpha[256]; png_color palette[257]; png_color_16 ping_background, ping_trans_color; png_info *ping_info; png_struct *ping; png_uint_32 ping_height, ping_width; ssize_t y; MagickBooleanType image_matte, logging, matte, ping_have_blob, ping_have_cheap_transparency, ping_have_color, ping_have_non_bw, ping_have_PLTE, ping_have_bKGD, ping_have_eXIf, ping_have_iCCP, ping_have_pHYs, ping_have_sRGB, ping_have_tRNS, ping_exclude_bKGD, ping_exclude_cHRM, ping_exclude_date, /* ping_exclude_EXIF, */ ping_exclude_eXIf, ping_exclude_gAMA, ping_exclude_iCCP, /* ping_exclude_iTXt, */ ping_exclude_oFFs, ping_exclude_pHYs, ping_exclude_sRGB, ping_exclude_tEXt, ping_exclude_tIME, /* ping_exclude_tRNS, */ ping_exclude_caNv, ping_exclude_zCCP, /* hex-encoded iCCP */ ping_exclude_zTXt, ping_preserve_colormap, ping_preserve_iCCP,
ping_need_colortype_warning, status, tried_332, tried_333, tried_444; MemoryInfo *volatile pixel_info; QuantumInfo *quantum_info; register ssize_t i, x; unsigned char *ping_pixels; volatile int image_colors, ping_bit_depth, ping_color_type, ping_interlace_method, ping_compression_method, ping_filter_method, ping_num_trans; volatile size_t image_depth, old_bit_depth; size_t quality, rowbytes, save_image_depth; int j, number_colors, number_opaque, number_semitransparent, number_transparent, ping_pHYs_unit_type; png_uint_32 ping_pHYs_x_resolution, ping_pHYs_y_resolution; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter WriteOnePNGImage()"); /* Define these outside of the following "if logging()" block so they will * show in debuggers. */ *im_vers='\0'; (void) ConcatenateMagickString(im_vers, MagickLibVersionText,MaxTextExtent); (void) ConcatenateMagickString(im_vers, MagickLibAddendum,MaxTextExtent); *libpng_vers='\0'; (void) ConcatenateMagickString(libpng_vers, PNG_LIBPNG_VER_STRING,32); *libpng_runv='\0'; (void) ConcatenateMagickString(libpng_runv, png_get_libpng_ver(NULL),32); *zlib_vers='\0'; (void) ConcatenateMagickString(zlib_vers, ZLIB_VERSION,32); *zlib_runv='\0'; (void) ConcatenateMagickString(zlib_runv, zlib_version,32); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " IM version = %s", im_vers); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Libpng version = %s", libpng_vers); if (LocaleCompare(libpng_vers,libpng_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " running with %s", libpng_runv); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Zlib version = %s", zlib_vers); if (LocaleCompare(zlib_vers,zlib_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " running with %s", zlib_runv); } } /* Initialize some stuff */ ping_bit_depth=0, ping_color_type=0, ping_interlace_method=0, ping_compression_method=0, ping_filter_method=0, ping_num_trans = 0; ping_background.red = 0; ping_background.green = 0; ping_background.blue = 0; ping_background.gray = 0; ping_background.index = 0; ping_trans_color.red=0; ping_trans_color.green=0; ping_trans_color.blue=0; ping_trans_color.gray=0; ping_pHYs_unit_type = 0; ping_pHYs_x_resolution = 0; ping_pHYs_y_resolution = 0; ping_have_blob=MagickFalse; ping_have_cheap_transparency=MagickFalse; ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; ping_have_PLTE=MagickFalse; ping_have_bKGD=MagickFalse; ping_have_eXIf=MagickTrue; ping_have_iCCP=MagickFalse; ping_have_pHYs=MagickFalse; ping_have_sRGB=MagickFalse; ping_have_tRNS=MagickFalse; ping_exclude_bKGD=mng_info->ping_exclude_bKGD; ping_exclude_caNv=mng_info->ping_exclude_caNv; ping_exclude_cHRM=mng_info->ping_exclude_cHRM; ping_exclude_date=mng_info->ping_exclude_date; /* ping_exclude_EXIF=mng_info->ping_exclude_EXIF; */ ping_exclude_eXIf=mng_info->ping_exclude_eXIf; ping_exclude_gAMA=mng_info->ping_exclude_gAMA; ping_exclude_iCCP=mng_info->ping_exclude_iCCP; /* ping_exclude_iTXt=mng_info->ping_exclude_iTXt; */ ping_exclude_oFFs=mng_info->ping_exclude_oFFs; ping_exclude_pHYs=mng_info->ping_exclude_pHYs; ping_exclude_sRGB=mng_info->ping_exclude_sRGB; ping_exclude_tEXt=mng_info->ping_exclude_tEXt; ping_exclude_tIME=mng_info->ping_exclude_tIME; /* ping_exclude_tRNS=mng_info->ping_exclude_tRNS; */ ping_exclude_zCCP=mng_info->ping_exclude_zCCP; /* hex-encoded iCCP in zTXt */ ping_exclude_zTXt=mng_info->ping_exclude_zTXt; ping_preserve_colormap = mng_info->ping_preserve_colormap; ping_preserve_iCCP = 
mng_info->ping_preserve_iCCP; ping_need_colortype_warning = MagickFalse; property=(const char *) NULL; /* Recognize the ICC sRGB profile and convert it to the sRGB chunk, * i.e., eliminate the ICC profile and set image->rendering_intent. * Note that this will not involve any changes to the actual pixels * but merely passes information to applications that read the resulting * PNG image. * * To do: recognize other variants of the sRGB profile, using the CRC to * verify all recognized variants including the 7 already known. * * Work around libpng16+ rejecting some "known invalid sRGB profiles". * * Use something other than image->rendering_intent to record the fact * that the sRGB profile was found. * * Record the ICC version (currently v2 or v4) of the incoming sRGB ICC * profile. Record the Blackpoint Compensation, if any. */ if (ping_exclude_sRGB == MagickFalse && ping_preserve_iCCP == MagickFalse) { char *name; const StringInfo *profile; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0)) { int icheck, got_crc=0; png_uint_32 length, profile_crc=0; unsigned char *data; length=(png_uint_32) GetStringInfoLength(profile); for (icheck=0; sRGB_info[icheck].len > 0; icheck++) { if (length == sRGB_info[icheck].len) { if (got_crc == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile (potentially sRGB)", (unsigned long) length); data=GetStringInfoDatum(profile); profile_crc=crc32(0,data,length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " with crc=%8x",(unsigned int) profile_crc); got_crc++; } if (profile_crc == sRGB_info[icheck].crc) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " It is sRGB with rendering intent = %s", Magick_RenderingIntentString_from_PNG_RenderingIntent( sRGB_info[icheck].intent)); if (image->rendering_intent==UndefinedIntent) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent( sRGB_info[icheck].intent); } ping_exclude_iCCP = MagickTrue; ping_exclude_zCCP = MagickTrue; ping_have_sRGB = MagickTrue; break; } } } if (sRGB_info[icheck].len == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile not recognized as sRGB", (unsigned long) length); } } name=GetNextImageProfile(image); } } number_opaque = 0; number_semitransparent = 0; number_transparent = 0; if (logging != MagickFalse) { if (image->storage_class == UndefinedClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=UndefinedClass"); if (image->storage_class == DirectClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=DirectClass"); if (image->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=PseudoClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_info->magick= %s",image_info->magick); (void) LogMagickEvent(CoderEvent,GetMagickModule(), image->taint ? 
" image->taint=MagickTrue": " image->taint=MagickFalse"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->gamma=%g", image->gamma); } if (image->storage_class == PseudoClass && (mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (mng_info->write_png_colortype != 1 && mng_info->write_png_colortype != 5))) { (void) SyncImage(image); image->storage_class = DirectClass; } if (ping_preserve_colormap == MagickFalse) { if (image->storage_class != PseudoClass && image->colormap != NULL) { /* Free the bogus colormap; it can cause trouble later */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Freeing bogus colormap"); (void) RelinquishMagickMemory(image->colormap); image->colormap=NULL; } } if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace); /* Sometimes we get PseudoClass images whose RGB values don't match the colors in the colormap. This code syncs the RGB values. */ if (image->depth <= 8 && image->taint && image->storage_class == PseudoClass) (void) SyncImage(image); #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (image->depth > 8) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reducing PNG bit depth to 8 since this is a Q8 build."); image->depth=8; } #endif /* Respect the -depth option */ if (image->depth < 4) { register PixelPacket *r; ExceptionInfo *exception; exception=(&image->exception); if (image->depth > 2) { /* Scale to 4-bit */ LBR04PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR04PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR04PacketRGBO(image->colormap[i]); } } } else if (image->depth > 1) { /* Scale to 2-bit */ LBR02PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR02PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR02PacketRGBO(image->colormap[i]); } } } else { /* Scale to 1-bit */ LBR01PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR01PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR01PacketRGBO(image->colormap[i]); } } } } /* To do: set to next higher multiple of 8 */ if (image->depth < 8) image->depth=8; #if (MAGICKCORE_QUANTUM_DEPTH > 16) /* PNG does not handle depths greater than 16 so reduce it even * if lossy */ if (image->depth > 8) image->depth=16; #endif #if (MAGICKCORE_QUANTUM_DEPTH > 8) if (image->depth > 8) { /* To do: fill low byte properly */ image->depth=16; } if (image->depth == 16 && mng_info->write_png_depth != 16) if (mng_info->write_png8 || 
LosslessReduceDepthOK(image) != MagickFalse) image->depth = 8; #endif image_colors = (int) image->colors; if (mng_info->write_png_colortype && (mng_info->write_png_colortype > 4 || (mng_info->write_png_depth >= 8 && mng_info->write_png_colortype < 4 && image->matte == MagickFalse))) { /* Avoid the expensive BUILD_PALETTE operation if we're sure that we * are not going to need the result. */ number_opaque = (int) image->colors; if (mng_info->write_png_colortype == 1 || mng_info->write_png_colortype == 5) ping_have_color=MagickFalse; else ping_have_color=MagickTrue; ping_have_non_bw=MagickFalse; if (image->matte != MagickFalse) { number_transparent = 2; number_semitransparent = 1; } else { number_transparent = 0; number_semitransparent = 0; } } if (mng_info->write_png_colortype < 7) { /* BUILD_PALETTE * * Normally we run this just once, but in the case of writing PNG8 * we reduce the transparency to binary and run again, then if there * are still too many colors we reduce to a simple 4-4-4-1, then 3-3-3-1 * RGBA palette and run again, and then to a simple 3-3-2-1 RGBA * palette. Then (To do) we take care of a final reduction that is only * needed if there are still 256 colors present and one of them has both * transparent and opaque instances. */ tried_332 = MagickFalse; tried_333 = MagickFalse; tried_444 = MagickFalse; for (j=0; j<6; j++) { /* * Sometimes we get DirectClass images that have 256 colors or fewer. * This code will build a colormap. * * Also, sometimes we get PseudoClass images with an out-of-date * colormap. This code will replace the colormap with a new one. * Sometimes we get PseudoClass images that have more than 256 colors. * This code will delete the colormap and change the image to * DirectClass. * * If image->matte is MagickFalse, we ignore the opacity channel * even though it sometimes contains left-over non-opaque values. * * Also we gather some information (number of opaque, transparent, * and semitransparent pixels, and whether the image has any non-gray * pixels or only black-and-white pixels) that we might need later. * * Even if the user wants to force GrayAlpha or RGBA (colortype 4 or 6) * we need to check for bogus non-opaque values, at least. 
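 *
 * In outline (a sketch, not a literal excerpt of the loop below): every
 * pixel is classified as opaque, fully transparent, or semitransparent,
 * and up to 259 distinct colors of each class are collected by first-fit
 * linear search:
 *
 *   for each pixel q:
 *     if opaque(q):            add_unique(opaque, q)
 *     else if transparent(q):  add_unique(transparent, q)
 *     else:                    add_unique(semitransparent, q)
 *
 * The 259-entry cap keeps the scan bounded; once the three classes
 * together exceed 256 we only need the fact "more than 256 colors",
 * not the colors themselves.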
      */

      ExceptionInfo
        *exception;

      int
        n;

      PixelPacket
        opaque[260],
        semitransparent[260],
        transparent[260];

      register IndexPacket
        *indexes;

      register const PixelPacket
        *s,
        *q;

      register PixelPacket
        *r;

      if (logging != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " Enter BUILD_PALETTE:");

      if (logging != MagickFalse)
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " image->columns=%.20g",(double) image->columns);
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " image->rows=%.20g",(double) image->rows);
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " image->matte=%.20g",(double) image->matte);
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " image->depth=%.20g",(double) image->depth);

          if (image->storage_class == PseudoClass && image->colormap != NULL)
            {
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Original colormap:");
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " i (red,green,blue,opacity)");

              /* Log at most the first 256 entries; the colormap holds only
                 image->colors entries, so clamp the loop bound to avoid
                 reading past the end of the allocation. */
              for (i=0; i < (ssize_t) MagickMin(image->colors,256); i++)
                {
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " %d (%d,%d,%d,%d)",
                    (int) i,
                    (int) image->colormap[i].red,
                    (int) image->colormap[i].green,
                    (int) image->colormap[i].blue,
                    (int) image->colormap[i].opacity);
                }

              /* When there are more than 256 entries, also log the last 10
                 (guarding the subtraction so it cannot wrap). */
              if (image->colors > 256)
                for (i=(ssize_t) image->colors - 10;
                     i < (ssize_t) image->colors; i++)
                  {
                    if (i > 255)
                      {
                        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                          " %d (%d,%d,%d,%d)",
                          (int) i,
                          (int) image->colormap[i].red,
                          (int) image->colormap[i].green,
                          (int) image->colormap[i].blue,
                          (int) image->colormap[i].opacity);
                      }
                  }
            }

          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " image->colors=%d",(int) image->colors);

          if (image->colors == 0)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " (zero means unknown)");

          if (ping_preserve_colormap == MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " Regenerate the colormap");
        }

      exception=(&image->exception);

      image_colors=0;
      number_opaque = 0;
      number_semitransparent = 0;
      number_transparent = 0;

      for (y=0; y < (ssize_t) image->rows; y++)
        {
          q=GetAuthenticPixels(image,0,y,image->columns,1,exception);

          if (q == (PixelPacket *) NULL)
            break;

          for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (image->matte == MagickFalse ||
                  GetPixelOpacity(q) == OpaqueOpacity)
                {
                  if (number_opaque < 259)
                    {
                      if (number_opaque == 0)
                        {
                          GetPixelRGB(q, opaque);
                          opaque[0].opacity=OpaqueOpacity;
                          number_opaque=1;
                        }

                      for (i=0; i< (ssize_t) number_opaque; i++)
                        {
                          if (IsColorEqual(q, opaque+i))
                            break;
                        }

                      if (i == (ssize_t) number_opaque && number_opaque < 259)
                        {
                          number_opaque++;
                          GetPixelRGB(q, opaque+i);
                          opaque[i].opacity=OpaqueOpacity;
                        }
                    }
                }
              else if (q->opacity == TransparentOpacity)
                {
                  if (number_transparent < 259)
                    {
                      if (number_transparent == 0)
                        {
                          GetPixelRGBO(q, transparent);
                          ping_trans_color.red=(unsigned short)
                            GetPixelRed(q);
                          ping_trans_color.green=(unsigned short)
                            GetPixelGreen(q);
                          ping_trans_color.blue=(unsigned short)
                            GetPixelBlue(q);
                          ping_trans_color.gray=(unsigned short)
                            GetPixelRed(q);
                          number_transparent = 1;
                        }

                      for (i=0; i< (ssize_t) number_transparent; i++)
                        {
                          if (IsColorEqual(q, transparent+i))
                            break;
                        }

                      if (i == (ssize_t) number_transparent &&
                          number_transparent < 259)
                        {
                          number_transparent++;
                          GetPixelRGBO(q, transparent+i);
                        }
                    }
                }
              else
                {
                  if (number_semitransparent < 259)
                    {
                      if (number_semitransparent == 0)
                        {
                          GetPixelRGBO(q, semitransparent);
                          number_semitransparent = 1;
                        }

                      for (i=0; i< (ssize_t) number_semitransparent; i++)
                        {
                          if (IsColorEqual(q, semitransparent+i) &&
                              GetPixelOpacity(q) == semitransparent[i].opacity)
                            break;
                        }

                      if (i == (ssize_t) number_semitransparent &&
                          number_semitransparent < 259)
                        {
number_semitransparent++; GetPixelRGBO(q, semitransparent+i); } } } q++; } } if (mng_info->write_png8 == MagickFalse && ping_exclude_bKGD == MagickFalse) { /* Add the background color to the palette, if it * isn't already there. */ if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Check colormap for background (%d,%d,%d)", (int) image->background_color.red, (int) image->background_color.green, (int) image->background_color.blue); } for (i=0; i<number_opaque; i++) { if (opaque[i].red == image->background_color.red && opaque[i].green == image->background_color.green && opaque[i].blue == image->background_color.blue) break; } if (number_opaque < 259 && i == number_opaque) { opaque[i] = image->background_color; ping_background.index = i; number_opaque++; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d",(int) i); } } else if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No room in the colormap to add background color"); } image_colors=number_opaque+number_transparent+number_semitransparent; if (logging != MagickFalse) { if (image_colors > 256) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has more than 256 colors"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has %d colors",image_colors); } if (ping_preserve_colormap != MagickFalse) break; if (mng_info->write_png_colortype != 7) /* We won't need this info */ { ping_have_color=MagickFalse; ping_have_non_bw=MagickFalse; if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "incompatible colorspace"); ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; } if(image_colors > 256) { for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; s=q; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelRed(s) != GetPixelGreen(s) || GetPixelRed(s) != GetPixelBlue(s)) { ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; break; } s++; } if (ping_have_color != MagickFalse) break; /* Worst case is black-and-white; we are looking at every * pixel twice. */ if (ping_have_non_bw == MagickFalse) { s=q; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelRed(s) != 0 && GetPixelRed(s) != QuantumRange) { ping_have_non_bw=MagickTrue; break; } s++; } } } } } if (image_colors < 257) { PixelPacket colormap[260]; /* * Initialize image colormap. 
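 *
 * The new map is laid out as [transparent | semitransparent | opaque].
 * Placing the non-opaque entries first matters because a palette tRNS
 * chunk stores one alpha byte per leading palette entry and may stop
 * as soon as the remaining entries are all opaque.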
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Sort the new colormap"); /* Sort palette, transparent first */; n = 0; for (i=0; i<number_transparent; i++) colormap[n++] = transparent[i]; for (i=0; i<number_semitransparent; i++) colormap[n++] = semitransparent[i]; for (i=0; i<number_opaque; i++) colormap[n++] = opaque[i]; ping_background.index += (number_transparent + number_semitransparent); /* image_colors < 257; search the colormap instead of the pixels * to get ping_have_color and ping_have_non_bw */ for (i=0; i<n; i++) { if (ping_have_color == MagickFalse) { if (colormap[i].red != colormap[i].green || colormap[i].red != colormap[i].blue) { ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; break; } } if (ping_have_non_bw == MagickFalse) { if (colormap[i].red != 0 && colormap[i].red != QuantumRange) ping_have_non_bw=MagickTrue; } } if ((mng_info->ping_exclude_tRNS == MagickFalse || (number_transparent == 0 && number_semitransparent == 0)) && (((mng_info->write_png_colortype-1) == PNG_COLOR_TYPE_PALETTE) || (mng_info->write_png_colortype == 0))) { if (logging != MagickFalse) { if (n != (ssize_t) image_colors) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_colors (%d) and n (%d) don't match", image_colors, n); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " AcquireImageColormap"); } image->colors = image_colors; if (AcquireImageColormap(image,image_colors) == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } for (i=0; i< (ssize_t) image_colors; i++) image->colormap[i] = colormap[i]; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d (%d)", (int) image->colors, image_colors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Update the pixel indexes"); } /* Sync the pixel indices with the new colormap */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i< (ssize_t) image_colors; i++) { if ((image->matte == MagickFalse || image->colormap[i].opacity == GetPixelOpacity(q)) && image->colormap[i].red == GetPixelRed(q) && image->colormap[i].green == GetPixelGreen(q) && image->colormap[i].blue == GetPixelBlue(q)) { SetPixelIndex(indexes+x,i); break; } } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d", (int) image->colors); if (image->colormap != NULL) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " i (red,green,blue,opacity)"); for (i=0; i < (ssize_t) image->colors; i++) { if (i < 300 || i >= (ssize_t) image->colors - 10) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } } } if (number_transparent < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_transparent = %d", number_transparent); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_transparent > 256"); if (number_opaque < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_opaque = %d", number_opaque); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_opaque > 256"); if 
(number_semitransparent < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_semitransparent = %d", number_semitransparent); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_semitransparent > 256"); if (ping_have_non_bw == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " All pixels and the background are black or white"); else if (ping_have_color == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " All pixels and the background are gray"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " At least one pixel or the background is non-gray"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Exit BUILD_PALETTE:"); } if (mng_info->write_png8 == MagickFalse) break; /* Make any reductions necessary for the PNG8 format */ if (image_colors <= 256 && image_colors != 0 && image->colormap != NULL && number_semitransparent == 0 && number_transparent <= 1) break; /* PNG8 can't have semitransparent colors so we threshold the * opacity to 0 or OpaqueOpacity, and PNG8 can only have one * transparent color so if more than one is transparent we merge * them into image->background_color. */ if (number_semitransparent != 0 || number_transparent > 1) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Thresholding the alpha channel to binary"); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) > TransparentOpacity/2) { SetPixelOpacity(r,TransparentOpacity); SetPixelRgb(r,&image->background_color); } else SetPixelOpacity(r,OpaqueOpacity); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image_colors != 0 && image_colors <= 256 && image->colormap != NULL) for (i=0; i<image_colors; i++) image->colormap[i].opacity = (image->colormap[i].opacity > TransparentOpacity/2 ? TransparentOpacity : OpaqueOpacity); } continue; } /* PNG8 can't have more than 256 colors so we quantize the pixels and * background color to the 4-4-4-1, 3-3-3-1 or 3-3-2-1 palette. If the * image is mostly gray, the 4-4-4-1 palette is likely to end up with 256 * colors or less. 
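 *
 * A sketch of one reduction step, assuming the LBR macros used below
 * replicate the kept high bits into the discarded low bits (shown here
 * for the 4-4-4 case on an 8-bit sample):
 *
 *   c = (c & 0xf0) | ((c & 0xf0) >> 4);
 *
 * so the quantized values still span the full 0..255 range.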
*/ if (tried_444 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 4-4-4"); tried_444 = MagickTrue; LBR04PacketRGB(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 4-4-4"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR04PixelRGB(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 4-4-4"); for (i=0; i<image_colors; i++) { LBR04PacketRGB(image->colormap[i]); } } continue; } if (tried_333 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 3-3-3"); tried_333 = MagickTrue; LBR03PacketRGB(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 3-3-3-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR03PixelRGB(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 3-3-3-1"); for (i=0; i<image_colors; i++) { LBR03PacketRGB(image->colormap[i]); } } continue; } if (tried_332 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 3-3-2"); tried_332 = MagickTrue; /* Red and green were already done so we only quantize the blue * channel */ LBR02PacketBlue(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 3-3-2-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR02PixelBlue(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 3-3-2-1"); for (i=0; i<image_colors; i++) { LBR02PacketBlue(image->colormap[i]); } } continue; } if (image_colors == 0 || image_colors > 256) { /* Take care of special case with 256 opaque colors + 1 transparent * color. We don't need to quantize to 2-3-2-1; we only need to * eliminate one color, so we'll merge the two darkest red * colors (0x49, 0, 0) -> (0x24, 0, 0). 
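 *
 * (After 3-3-2 quantization each red sample is a 3-bit value replicated
 * across the byte -- 0x00, 0x24, 0x49, 0x6d, ... -- so mapping 0x49 to
 * 0x24 collapses the two darkest non-black reds into one palette slot,
 * freeing exactly one entry.)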
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Merging two dark red background colors to 3-3-2-1"); if (ScaleQuantumToChar(image->background_color.red) == 0x49 && ScaleQuantumToChar(image->background_color.green) == 0x00 && ScaleQuantumToChar(image->background_color.blue) == 0x00) { image->background_color.red=ScaleCharToQuantum(0x24); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Merging two dark red pixel colors to 3-3-2-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (ScaleQuantumToChar(GetPixelRed(r)) == 0x49 && ScaleQuantumToChar(GetPixelGreen(r)) == 0x00 && ScaleQuantumToChar(GetPixelBlue(r)) == 0x00 && GetPixelOpacity(r) == OpaqueOpacity) { SetPixelRed(r,ScaleCharToQuantum(0x24)); } r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else { for (i=0; i<image_colors; i++) { if (ScaleQuantumToChar(image->colormap[i].red) == 0x49 && ScaleQuantumToChar(image->colormap[i].green) == 0x00 && ScaleQuantumToChar(image->colormap[i].blue) == 0x00) { image->colormap[i].red=ScaleCharToQuantum(0x24); } } } } } } /* END OF BUILD_PALETTE */ /* If we are excluding the tRNS chunk and there is transparency, * then we must write a Gray-Alpha (color-type 4) or RGBA (color-type 6) * PNG. */ if (mng_info->ping_exclude_tRNS != MagickFalse && (number_transparent != 0 || number_semitransparent != 0)) { unsigned int colortype=mng_info->write_png_colortype; if (ping_have_color == MagickFalse) mng_info->write_png_colortype = 5; else mng_info->write_png_colortype = 7; if (colortype != 0 && mng_info->write_png_colortype != colortype) ping_need_colortype_warning=MagickTrue; } /* See if cheap transparency is possible. It is only possible * when there is a single transparent color, no semitransparent * color, and no opaque color that has the same RGB components * as the transparent color. We only need this information if * we are writing a PNG with colortype 0 or 2, and we have not * excluded the tRNS chunk. */ if (number_transparent == 1 && mng_info->write_png_colortype < 4) { ping_have_cheap_transparency = MagickTrue; if (number_semitransparent != 0) ping_have_cheap_transparency = MagickFalse; else if (image_colors == 0 || image_colors > 256 || image->colormap == NULL) { ExceptionInfo *exception; register const PixelPacket *q; exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { q=GetVirtualPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity != TransparentOpacity && (unsigned short) GetPixelRed(q) == ping_trans_color.red && (unsigned short) GetPixelGreen(q) == ping_trans_color.green && (unsigned short) GetPixelBlue(q) == ping_trans_color.blue) { ping_have_cheap_transparency = MagickFalse; break; } q++; } if (ping_have_cheap_transparency == MagickFalse) break; } } else { /* Assuming that image->colormap[0] is the one transparent color * and that all others are opaque. 
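 *
 * (Why the RGB comparison below: with color type 0 or 2 the tRNS chunk
 * names a single RGB value and every pixel matching it decodes as
 * transparent, so an opaque pixel that shares the transparent color's
 * RGB would be knocked out as well; in that case cheap transparency
 * must be rejected.)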
*/ if (image_colors > 1) for (i=1; i<image_colors; i++) if (image->colormap[i].red == image->colormap[0].red && image->colormap[i].green == image->colormap[0].green && image->colormap[i].blue == image->colormap[0].blue) { ping_have_cheap_transparency = MagickFalse; break; } } if (logging != MagickFalse) { if (ping_have_cheap_transparency == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Cheap transparency is not possible."); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Cheap transparency is possible."); } } else ping_have_cheap_transparency = MagickFalse; image_depth=image->depth; quantum_info = (QuantumInfo *) NULL; number_colors=0; image_colors=(int) image->colors; image_matte=image->matte; if (mng_info->write_png_colortype < 5) mng_info->IsPalette=image->storage_class == PseudoClass && image_colors <= 256 && image->colormap != NULL; else mng_info->IsPalette = MagickFalse; if ((mng_info->write_png_colortype == 4 || mng_info->write_png8) && (image->colors == 0 || image->colormap == NULL)) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "Cannot write PNG8 or color-type 3; colormap is NULL", "`%s'",image->filename); return(MagickFalse); } /* Allocate the PNG structures */ #ifdef PNG_USER_MEM_SUPPORTED ping=png_create_write_struct_2(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler,(void *) NULL, (png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free); #else ping=png_create_write_struct(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler); #endif if (ping == (png_struct *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); ping_info=png_create_info_struct(ping); if (ping_info == (png_info *) NULL) { png_destroy_write_struct(&ping,(png_info **) NULL); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } png_set_write_fn(ping,image,png_put_data,png_flush_data); pixel_info=(MemoryInfo *) NULL; if (setjmp(png_jmpbuf(ping))) { /* PNG write failed. */ #ifdef PNG_DEBUG if (image_info->verbose) (void) printf("PNG write has failed.\n"); #endif png_destroy_write_struct(&ping,&ping_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); return(MagickFalse); } /* { For navigation to end of SETJMP-protected block. Within this * block, use png_error() instead of Throwing an Exception, to ensure * that libpng is able to clean up, and that the semaphore is unlocked. */ #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE LockSemaphoreInfo(ping_semaphore); #endif #ifdef PNG_BENIGN_ERRORS_SUPPORTED /* Allow benign errors */ png_set_benign_errors(ping, 1); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED /* Reject images with too many rows or columns */ png_set_user_limits(ping, (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(WidthResource)), (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(HeightResource))); #endif /* PNG_SET_USER_LIMITS_SUPPORTED */ /* Prepare PNG for writing. 
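 *
 * (The depth adjustment a few lines below rounds a requested depth up
 * to the nearest depth PNG can store -- 1, 2, 4, 8, or 16 bits per
 * sample: 3 becomes 4, 5..7 become 8, and anything above 8 becomes 16.)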
*/ #if defined(PNG_MNG_FEATURES_SUPPORTED) if (mng_info->write_mng) { (void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES); # ifdef PNG_WRITE_CHECK_FOR_INVALID_INDEX_SUPPORTED /* Disable new libpng-1.5.10 feature when writing a MNG because * zero-length PLTE is OK */ png_set_check_for_invalid_index (ping, 0); # endif } #else # ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED if (mng_info->write_mng) png_permit_empty_plte(ping,MagickTrue); # endif #endif x=0; ping_width=(png_uint_32) image->columns; ping_height=(png_uint_32) image->rows; if (mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32) image_depth=8; if (mng_info->write_png48 || mng_info->write_png64) image_depth=16; if (mng_info->write_png_depth != 0) image_depth=mng_info->write_png_depth; /* Adjust requested depth to next higher valid depth if necessary */ if (image_depth > 8) image_depth=16; if ((image_depth > 4) && (image_depth < 8)) image_depth=8; if (image_depth == 3) image_depth=4; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " width=%.20g",(double) ping_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " height=%.20g",(double) ping_height); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_matte=%.20g",(double) image->matte); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth=%.20g",(double) image->depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative ping_bit_depth=%.20g",(double) image_depth); } save_image_depth=image_depth; ping_bit_depth=(png_byte) save_image_depth; #if defined(PNG_pHYs_SUPPORTED) if (ping_exclude_pHYs == MagickFalse) { if ((image->x_resolution != 0) && (image->y_resolution != 0) && (!mng_info->write_mng || !mng_info->equal_physs)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up pHYs chunk"); if (image->units == PixelsPerInchResolution) { ping_pHYs_unit_type=PNG_RESOLUTION_METER; ping_pHYs_x_resolution= (png_uint_32) ((100.0*image->x_resolution+0.5)/2.54); ping_pHYs_y_resolution= (png_uint_32) ((100.0*image->y_resolution+0.5)/2.54); } else if (image->units == PixelsPerCentimeterResolution) { ping_pHYs_unit_type=PNG_RESOLUTION_METER; ping_pHYs_x_resolution=(png_uint_32) (100.0*image->x_resolution+0.5); ping_pHYs_y_resolution=(png_uint_32) (100.0*image->y_resolution+0.5); } else { ping_pHYs_unit_type=PNG_RESOLUTION_UNKNOWN; ping_pHYs_x_resolution=(png_uint_32) image->x_resolution; ping_pHYs_y_resolution=(png_uint_32) image->y_resolution; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Set up PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.", (double) ping_pHYs_x_resolution,(double) ping_pHYs_y_resolution, (int) ping_pHYs_unit_type); ping_have_pHYs = MagickTrue; } } #endif if (ping_exclude_bKGD == MagickFalse) { if ((!mng_info->adjoin || !mng_info->equal_backgrounds)) { unsigned int mask; mask=0xffff; if (ping_bit_depth == 8) mask=0x00ff; if (ping_bit_depth == 4) mask=0x000f; if (ping_bit_depth == 2) mask=0x0003; if (ping_bit_depth == 1) mask=0x0001; ping_background.red=(png_uint_16) (ScaleQuantumToShort(image->background_color.red) & mask); ping_background.green=(png_uint_16) (ScaleQuantumToShort(image->background_color.green) & mask); ping_background.blue=(png_uint_16) (ScaleQuantumToShort(image->background_color.blue) & mask); ping_background.gray=(png_uint_16) ping_background.green; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk (1)"); (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d", (int) ping_background.index); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_bit_depth=%d",ping_bit_depth); } ping_have_bKGD = MagickTrue; } /* Select the color type. */ matte=image_matte; old_bit_depth=0; if (mng_info->IsPalette && mng_info->write_png8) { /* To do: make this a function cause it's used twice, except for reducing the sample depth from 8. */ number_colors=image_colors; ping_have_tRNS=MagickFalse; /* Set image palette. */ ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up PLTE chunk with %d colors (%d)", number_colors, image_colors); for (i=0; i < (ssize_t) number_colors; i++) { palette[i].red=ScaleQuantumToChar(image->colormap[i].red); palette[i].green=ScaleQuantumToChar(image->colormap[i].green); palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), #if MAGICKCORE_QUANTUM_DEPTH == 8 " %3ld (%3d,%3d,%3d)", #else " %5ld (%5d,%5d,%5d)", #endif (long) i,palette[i].red,palette[i].green,palette[i].blue); } ping_have_PLTE=MagickTrue; image_depth=ping_bit_depth; ping_num_trans=0; if (matte != MagickFalse) { /* Identify which colormap entry is transparent. */ assert(number_colors <= 256); assert(image->colormap != NULL); for (i=0; i < (ssize_t) number_transparent; i++) ping_trans_alpha[i]=0; ping_num_trans=(unsigned short) (number_transparent + number_semitransparent); if (ping_num_trans == 0) ping_have_tRNS=MagickFalse; else ping_have_tRNS=MagickTrue; } if (ping_exclude_bKGD == MagickFalse) { /* * Identify which colormap entry is the background color. */ for (i=0; i < (ssize_t) MagickMax(1L*number_colors-1L,1L); i++) if (IsPNGColorEqual(ping_background,image->colormap[i])) break; ping_background.index=(png_byte) i; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d", (int) ping_background.index); } } } /* end of write_png8 */ else if (mng_info->write_png_colortype == 1) { image_matte=MagickFalse; ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY; } else if (mng_info->write_png24 || mng_info->write_png48 || mng_info->write_png_colortype == 3) { image_matte=MagickFalse; ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; } else if (mng_info->write_png32 || mng_info->write_png64 || mng_info->write_png_colortype == 7) { image_matte=MagickTrue; ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA; } else /* mng_info->write_pngNN not specified */ { image_depth=ping_bit_depth; if (mng_info->write_png_colortype != 0) { ping_color_type=(png_byte) mng_info->write_png_colortype-1; if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA || ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) image_matte=MagickTrue; else image_matte=MagickFalse; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG colortype %d was specified:",(int) ping_color_type); } else /* write_png_colortype not specified */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Selecting PNG colortype:"); ping_color_type=(png_byte) ((matte != MagickFalse)? 
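/* initial guess from the matte flag; image_info->type and the
             grayscale/bilevel analysis below may still override it */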
PNG_COLOR_TYPE_RGB_ALPHA:PNG_COLOR_TYPE_RGB); if (image_info->type == TrueColorType) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; image_matte=MagickFalse; } if (image_info->type == TrueColorMatteType) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA; image_matte=MagickTrue; } if (image_info->type == PaletteType || image_info->type == PaletteMatteType) ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (mng_info->write_png_colortype == 0 && image_info->type == UndefinedType) { if (ping_have_color == MagickFalse) { if (image_matte == MagickFalse) { ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY; image_matte=MagickFalse; } else { ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY_ALPHA; image_matte=MagickTrue; } } else { if (image_matte == MagickFalse) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; image_matte=MagickFalse; } else { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGBA; image_matte=MagickTrue; } } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Selected PNG colortype=%d",ping_color_type); if (ping_bit_depth < 8) { if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA || ping_color_type == PNG_COLOR_TYPE_RGB || ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) ping_bit_depth=8; } old_bit_depth=ping_bit_depth; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (image->matte == MagickFalse && ping_have_non_bw == MagickFalse) ping_bit_depth=1; } if (ping_color_type == PNG_COLOR_TYPE_PALETTE) { size_t one = 1; ping_bit_depth=1; if (image->colors == 0) { /* DO SOMETHING */ png_error(ping,"image has 0 colors"); } while ((int) (one << ping_bit_depth) < (ssize_t) image_colors) ping_bit_depth <<= 1; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %.20g",(double) image_colors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative PNG bit depth: %d",ping_bit_depth); } if (ping_bit_depth < (int) mng_info->write_png_depth) ping_bit_depth = mng_info->write_png_depth; } image_depth=ping_bit_depth; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative PNG color type: %s (%.20g)", PngColorTypeToString(ping_color_type), (double) ping_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_info->type: %.20g",(double) image_info->type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_depth: %.20g",(double) image_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth: %.20g",(double) image->depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_bit_depth: %.20g",(double) ping_bit_depth); } if (matte != MagickFalse) { if (mng_info->IsPalette) { if (mng_info->write_png_colortype == 0) { ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA; if (ping_have_color != MagickFalse) ping_color_type=PNG_COLOR_TYPE_RGBA; } /* * Determine if there is any transparent color. */ if (number_transparent + number_semitransparent == 0) { /* No transparent pixels are present. Change 4 or 6 to 0 or 2. 
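 *
 * (PNG color types keep alpha in bit 2: 4 = gray+alpha, 6 = RGB+alpha.
 * Masking with 0x03 clears that bit, turning 4 into 0 (gray) and 6
 * into 2 (RGB) while leaving the palette/color bits alone.)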
*/ image_matte=MagickFalse; if (mng_info->write_png_colortype == 0) ping_color_type&=0x03; } else { unsigned int mask; mask=0xffff; if (ping_bit_depth == 8) mask=0x00ff; if (ping_bit_depth == 4) mask=0x000f; if (ping_bit_depth == 2) mask=0x0003; if (ping_bit_depth == 1) mask=0x0001; ping_trans_color.red=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].red) & mask); ping_trans_color.green=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].green) & mask); ping_trans_color.blue=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].blue) & mask); ping_trans_color.gray=(png_uint_16) (ScaleQuantumToShort(ClampToQuantum(GetPixelLuma(image, image->colormap))) & mask); ping_trans_color.index=(png_byte) 0; ping_have_tRNS=MagickTrue; } if (ping_have_tRNS != MagickFalse) { /* * Determine if there is one and only one transparent color * and if so if it is fully transparent. */ if (ping_have_cheap_transparency == MagickFalse) ping_have_tRNS=MagickFalse; } if (ping_have_tRNS != MagickFalse) { if (mng_info->write_png_colortype == 0) ping_color_type &= 0x03; /* changes 4 or 6 to 0 or 2 */ if (image_depth == 8) { ping_trans_color.red&=0xff; ping_trans_color.green&=0xff; ping_trans_color.blue&=0xff; ping_trans_color.gray&=0xff; } } } else { if (image_depth == 8) { ping_trans_color.red&=0xff; ping_trans_color.green&=0xff; ping_trans_color.blue&=0xff; ping_trans_color.gray&=0xff; } } } matte=image_matte; if (ping_have_tRNS != MagickFalse) image_matte=MagickFalse; if ((mng_info->IsPalette) && mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE && ping_have_color == MagickFalse && (image_matte == MagickFalse || image_depth >= 8)) { size_t one=1; if (image_matte != MagickFalse) ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA; else if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_GRAY_ALPHA) { ping_color_type=PNG_COLOR_TYPE_GRAY; if (save_image_depth == 16 && image_depth == 8) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color (0)"); } ping_trans_color.gray*=0x0101; } } if (image_depth > MAGICKCORE_QUANTUM_DEPTH) image_depth=MAGICKCORE_QUANTUM_DEPTH; if ((image_colors == 0) || ((ssize_t) (image_colors-1) > (ssize_t) MaxColormapSize)) image_colors=(int) (one << image_depth); if (image_depth > 8) ping_bit_depth=16; else { ping_bit_depth=8; if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { if(!mng_info->write_png_depth) { ping_bit_depth=1; while ((int) (one << ping_bit_depth) < (ssize_t) image_colors) ping_bit_depth <<= 1; } } else if (ping_color_type == PNG_COLOR_TYPE_GRAY && image_colors < 17 && mng_info->IsPalette) { /* Check if grayscale is reducible */ int depth_4_ok=MagickTrue, depth_2_ok=MagickTrue, depth_1_ok=MagickTrue; for (i=0; i < (ssize_t) image_colors; i++) { unsigned char intensity; intensity=ScaleQuantumToChar(image->colormap[i].red); if ((intensity & 0x0f) != ((intensity & 0xf0) >> 4)) depth_4_ok=depth_2_ok=depth_1_ok=MagickFalse; else if ((intensity & 0x03) != ((intensity & 0x0c) >> 2)) depth_2_ok=depth_1_ok=MagickFalse; else if ((intensity & 0x01) != ((intensity & 0x02) >> 1)) depth_1_ok=MagickFalse; } if (depth_1_ok && mng_info->write_png_depth <= 1) ping_bit_depth=1; else if (depth_2_ok && mng_info->write_png_depth <= 2) ping_bit_depth=2; else if (depth_4_ok && mng_info->write_png_depth <= 4) ping_bit_depth=4; } } image_depth=ping_bit_depth; } else if (mng_info->IsPalette) { number_colors=image_colors; if (image_depth <= 8) { /* Set image palette. 
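 *
 * (PLTE entries are always 8 bits per channel, so the Quantum samples
 * are scaled down with ScaleQuantumToChar; the bit depth picked below
 * is simply the smallest of 1, 2, 4, or 8 that can index every entry.)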
*/ ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (!(mng_info->have_write_global_plte && matte == MagickFalse)) { for (i=0; i < (ssize_t) number_colors; i++) { palette[i].red=ScaleQuantumToChar(image->colormap[i].red); palette[i].green=ScaleQuantumToChar(image->colormap[i].green); palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up PLTE chunk with %d colors", number_colors); ping_have_PLTE=MagickTrue; } /* color_type is PNG_COLOR_TYPE_PALETTE */ if (mng_info->write_png_depth == 0) { size_t one; ping_bit_depth=1; one=1; while ((one << ping_bit_depth) < (size_t) number_colors) ping_bit_depth <<= 1; } ping_num_trans=0; if (matte != MagickFalse) { /* * Set up trans_colors array. */ assert(number_colors <= 256); ping_num_trans=(unsigned short) (number_transparent + number_semitransparent); if (ping_num_trans == 0) ping_have_tRNS=MagickFalse; else { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color (1)"); } ping_have_tRNS=MagickTrue; for (i=0; i < ping_num_trans; i++) { ping_trans_alpha[i]= (png_byte) (255- ScaleQuantumToChar(image->colormap[i].opacity)); } } } } } else { if (image_depth < 8) image_depth=8; if ((save_image_depth == 16) && (image_depth == 8)) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color from (%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } ping_trans_color.red*=0x0101; ping_trans_color.green*=0x0101; ping_trans_color.blue*=0x0101; ping_trans_color.gray*=0x0101; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " to (%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } } } if (ping_bit_depth < (ssize_t) mng_info->write_png_depth) ping_bit_depth = (ssize_t) mng_info->write_png_depth; /* Adjust background and transparency samples in sub-8-bit grayscale files. */ if (ping_bit_depth < 8 && ping_color_type == PNG_COLOR_TYPE_GRAY) { png_uint_16 maxval; size_t one=1; maxval=(png_uint_16) ((one << ping_bit_depth)-1); if (ping_exclude_bKGD == MagickFalse) { ping_background.gray=(png_uint_16) ((maxval/65535.)*(ScaleQuantumToShort((Quantum) GetPixelLuma(image,&image->background_color)))+.5); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk (2)"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_background.index is %d", (int) ping_background.index); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_background.gray is %d", (int) ping_background.gray); } ping_have_bKGD = MagickTrue; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color.gray from %d", (int)ping_trans_color.gray); ping_trans_color.gray=(png_uint_16) ((maxval/255.)*( ping_trans_color.gray)+.5); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " to %d", (int)ping_trans_color.gray); } if (ping_exclude_bKGD == MagickFalse) { if (mng_info->IsPalette && (int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { /* Identify which colormap entry is the background color. 
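 *
 * (A linear scan with IsPNGColorEqual; if no entry matches, the code
 * below drops the bKGD chunk instead of emitting a bogus index.)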
*/ number_colors=image_colors; for (i=0; i < (ssize_t) MagickMax(1L*number_colors,1L); i++) if (IsPNGColorEqual(image->background_color,image->colormap[i])) break; ping_background.index=(png_byte) i; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk with index=%d",(int) i); } if (i < (ssize_t) number_colors) { ping_have_bKGD = MagickTrue; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background =(%d,%d,%d)", (int) ping_background.red, (int) ping_background.green, (int) ping_background.blue); } } else /* Can't happen */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No room in PLTE to add bKGD color"); ping_have_bKGD = MagickFalse; } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG color type: %s (%d)", PngColorTypeToString(ping_color_type), ping_color_type); /* Initialize compression level and filtering. */ if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up deflate compression"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression buffer size: 32768"); } png_set_compression_buffer_size(ping,32768L); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression mem level: 9"); png_set_compression_mem_level(ping, 9); /* Untangle the "-quality" setting: Undefined is 0; the default is used. Default is 75 10's digit: 0 or omitted: Use Z_HUFFMAN_ONLY strategy with the zlib default compression level 1-9: the zlib compression level 1's digit: 0-4: the PNG filter method 5: libpng adaptive filtering if compression level > 5 libpng filter type "none" if compression level <= 5 or if image is grayscale or palette 6: libpng adaptive filtering 7: "LOCO" filtering (intrapixel differing) if writing a MNG, otherwise "none". Did not work in IM-6.7.0-9 and earlier because of a missing "else". 8: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), adaptive filtering. Unused prior to IM-6.7.0-10, was same as 6 9: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), no PNG filters Unused prior to IM-6.7.0-10, was same as 6 Note that using the -quality option, not all combinations of PNG filter type, zlib compression level, and zlib compression strategy are possible. This is addressed by using "-define png:compression-strategy", etc., which takes precedence over -quality. */ quality=image_info->quality == UndefinedCompressionQuality ? 
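/* e.g. the default 75 decodes per the table above as zlib
                level 7 (75/10) and filter method 5, which selects adaptive
                filtering because the level is above 5 */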
75UL : image_info->quality; if (quality <= 9) { if (mng_info->write_png_compression_strategy == 0) mng_info->write_png_compression_strategy = Z_HUFFMAN_ONLY+1; } else if (mng_info->write_png_compression_level == 0) { int level; level=(int) MagickMin((ssize_t) quality/10,9); mng_info->write_png_compression_level = level+1; } if (mng_info->write_png_compression_strategy == 0) { if ((quality %10) == 8 || (quality %10) == 9) #ifdef Z_RLE /* Z_RLE was added to zlib-1.2.0 */ mng_info->write_png_compression_strategy=Z_RLE+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif } if (mng_info->write_png_compression_filter == 0) mng_info->write_png_compression_filter=((int) quality % 10) + 1; if (logging != MagickFalse) { if (mng_info->write_png_compression_level) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression level: %d", (int) mng_info->write_png_compression_level-1); if (mng_info->write_png_compression_strategy) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression strategy: %d", (int) mng_info->write_png_compression_strategy-1); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up filtering"); if (mng_info->write_png_compression_filter == 6) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: ADAPTIVE"); else if (mng_info->write_png_compression_filter == 0 || mng_info->write_png_compression_filter == 1) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: NONE"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: %d", (int) mng_info->write_png_compression_filter-1); } if (mng_info->write_png_compression_level != 0) png_set_compression_level(ping,mng_info->write_png_compression_level-1); if (mng_info->write_png_compression_filter == 6) { if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) || (quality < 50)) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); else png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS); } else if (mng_info->write_png_compression_filter == 7 || mng_info->write_png_compression_filter == 10) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS); else if (mng_info->write_png_compression_filter == 8) { #if defined(PNG_MNG_FEATURES_SUPPORTED) && defined(PNG_INTRAPIXEL_DIFFERENCING) if (mng_info->write_mng) { if (((int) ping_color_type == PNG_COLOR_TYPE_RGB) || ((int) ping_color_type == PNG_COLOR_TYPE_RGBA)) ping_filter_method=PNG_INTRAPIXEL_DIFFERENCING; } #endif png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); } else if (mng_info->write_png_compression_filter == 9) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); else if (mng_info->write_png_compression_filter != 0) png_set_filter(ping,PNG_FILTER_TYPE_BASE, mng_info->write_png_compression_filter-1); if (mng_info->write_png_compression_strategy != 0) png_set_compression_strategy(ping, mng_info->write_png_compression_strategy-1); ping_interlace_method=image_info->interlace != NoInterlace; if (mng_info->write_mng) png_set_sig_bytes(ping,8); /* Bail out if cannot meet defined png:bit-depth or png:color-type */ if (mng_info->write_png_colortype != 0) { if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY) if (ping_have_color != MagickFalse) { ping_color_type = PNG_COLOR_TYPE_RGB; if (ping_bit_depth < 8) ping_bit_depth=8; } if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY_ALPHA) if (ping_have_color != MagickFalse) ping_color_type = PNG_COLOR_TYPE_RGB_ALPHA; } if (ping_need_colortype_warning 
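/* bail-out check: warn when the computed bit depth or color type
         cannot honor the user's png:bit-depth / png:color-type defines */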
!= MagickFalse || ((mng_info->write_png_depth && (int) mng_info->write_png_depth != ping_bit_depth) || (mng_info->write_png_colortype && ((int) mng_info->write_png_colortype-1 != ping_color_type && mng_info->write_png_colortype != 7 && !(mng_info->write_png_colortype == 5 && ping_color_type == 0))))) { if (logging != MagickFalse) { if (ping_need_colortype_warning != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image has transparency but tRNS chunk was excluded"); } if (mng_info->write_png_depth) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:bit-depth=%u, Computed depth=%u", mng_info->write_png_depth, ping_bit_depth); } if (mng_info->write_png_colortype) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:color-type=%u, Computed color type=%u", mng_info->write_png_colortype-1, ping_color_type); } } png_warning(ping, "Cannot write image with defined png:bit-depth or png:color-type."); } if (image_matte != MagickFalse && image->matte == MagickFalse) { /* Add an opaque matte channel */ image->matte = MagickTrue; (void) SetImageOpacity(image,0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Added an opaque matte channel"); } if (number_transparent != 0 || number_semitransparent != 0) { if (ping_color_type < 4) { ping_have_tRNS=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting ping_have_tRNS=MagickTrue."); } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG header chunks"); png_set_IHDR(ping,ping_info,ping_width,ping_height, ping_bit_depth,ping_color_type, ping_interlace_method,ping_compression_method, ping_filter_method); if (ping_color_type == 3 && ping_have_PLTE != MagickFalse) { if (mng_info->have_write_global_plte && matte == MagickFalse) { png_set_PLTE(ping,ping_info,NULL,0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up empty PLTE chunk"); } else png_set_PLTE(ping,ping_info,palette,number_colors); if (logging != MagickFalse) { for (i=0; i< (ssize_t) number_colors; i++) { if (i < ping_num_trans) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PLTE[%d] = (%d,%d,%d), tRNS[%d] = (%d)", (int) i, (int) palette[i].red, (int) palette[i].green, (int) palette[i].blue, (int) i, (int) ping_trans_alpha[i]); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PLTE[%d] = (%d,%d,%d)", (int) i, (int) palette[i].red, (int) palette[i].green, (int) palette[i].blue); } } } /* Only write the iCCP chunk if we are not writing the sRGB chunk. 
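 *
 * (The PNG specification recommends that sRGB and iCCP not appear in
 * the same datastream, since they would hand the decoder two competing
 * color-space descriptions.)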
*/ if (ping_exclude_sRGB != MagickFalse || (!png_get_valid(ping,ping_info,PNG_INFO_sRGB))) { if ((ping_exclude_tEXt == MagickFalse || ping_exclude_zTXt == MagickFalse) && (ping_exclude_iCCP == MagickFalse || ping_exclude_zCCP == MagickFalse)) { ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { #ifdef PNG_WRITE_iCCP_SUPPORTED if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0)) { if (ping_exclude_iCCP == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up iCCP chunk"); png_set_iCCP(ping,ping_info,(const png_charp) name,0, #if (PNG_LIBPNG_VER < 10500) (png_charp) GetStringInfoDatum(profile), #else (const png_byte *) GetStringInfoDatum(profile), #endif (png_uint_32) GetStringInfoLength(profile)); ping_have_iCCP = MagickTrue; } } else #endif { if (LocaleCompare(name,"exif") == 0) { /* Do not write hex-encoded ICC chunk; we will write it later as an eXIf chunk */ name=GetNextImageProfile(image); continue; } if (ping_exclude_zCCP == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up zTXT chunk with uuencoded ICC"); Magick_png_write_raw_profile(image_info,ping,ping_info, (unsigned char *) name,(unsigned char *) name, GetStringInfoDatum(profile), (png_uint_32) GetStringInfoLength(profile)); ping_have_iCCP = MagickTrue; } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up text chunk with %s profile",name); name=GetNextImageProfile(image); } } } #if defined(PNG_WRITE_sRGB_SUPPORTED) if ((mng_info->have_write_global_srgb == 0) && ping_have_iCCP != MagickTrue && (ping_have_sRGB != MagickFalse || png_get_valid(ping,ping_info,PNG_INFO_sRGB))) { if (ping_exclude_sRGB == MagickFalse) { /* Note image rendering intent. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up sRGB chunk"); (void) png_set_sRGB(ping,ping_info,( Magick_RenderingIntent_to_PNG_RenderingIntent( image->rendering_intent))); ping_have_sRGB = MagickTrue; } } if ((!mng_info->write_mng) || (!png_get_valid(ping,ping_info,PNG_INFO_sRGB))) #endif { if (ping_exclude_gAMA == MagickFalse && ping_have_iCCP == MagickFalse && ping_have_sRGB == MagickFalse && (ping_exclude_sRGB == MagickFalse || (image->gamma < .45 || image->gamma > .46))) { if ((mng_info->have_write_global_gama == 0) && (image->gamma != 0.0)) { /* Note image gamma. To do: check for cHRM+gAMA == sRGB, and write sRGB instead. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up gAMA chunk"); png_set_gAMA(ping,ping_info,image->gamma); } } if (ping_exclude_cHRM == MagickFalse && ping_have_sRGB == MagickFalse) { if ((mng_info->have_write_global_chrm == 0) && (image->chromaticity.red_primary.x != 0.0)) { /* Note image chromaticity. Note: if cHRM+gAMA == sRGB write sRGB instead. 
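 *
 * (For reference, the values such a cHRM+gAMA == sRGB check would
 * compare against: gamma 1/2.2 ~= 0.45455, white point (0.3127,0.3290),
 * and primaries R(0.64,0.33), G(0.30,0.60), B(0.15,0.06).)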
*/ PrimaryInfo bp, gp, rp, wp; wp=image->chromaticity.white_point; rp=image->chromaticity.red_primary; gp=image->chromaticity.green_primary; bp=image->chromaticity.blue_primary; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up cHRM chunk"); png_set_cHRM(ping,ping_info,wp.x,wp.y,rp.x,rp.y,gp.x,gp.y, bp.x,bp.y); } } } if (ping_exclude_bKGD == MagickFalse) { if (ping_have_bKGD != MagickFalse) { png_set_bKGD(ping,ping_info,&ping_background); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background color = (%d,%d,%d)", (int) ping_background.red, (int) ping_background.green, (int) ping_background.blue); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " index = %d, gray=%d", (int) ping_background.index, (int) ping_background.gray); } } } if (ping_exclude_pHYs == MagickFalse) { if (ping_have_pHYs != MagickFalse) { png_set_pHYs(ping,ping_info, ping_pHYs_x_resolution, ping_pHYs_y_resolution, ping_pHYs_unit_type); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up pHYs chunk"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " x_resolution=%lu", (unsigned long) ping_pHYs_x_resolution); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " y_resolution=%lu", (unsigned long) ping_pHYs_y_resolution); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " unit_type=%lu", (unsigned long) ping_pHYs_unit_type); } } } #if defined(PNG_tIME_SUPPORTED) if (ping_exclude_tIME == MagickFalse) { const char *timestamp; if (image->taint == MagickFalse) { timestamp=GetImageOption(image_info,"png:tIME"); if (timestamp == (const char *) NULL) timestamp=GetImageProperty(image,"png:tIME"); } else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reset tIME in tainted image"); timestamp=GetImageProperty(image,"date:modify"); } if (timestamp != (const char *) NULL) write_tIME_chunk(image,ping,ping_info,timestamp); } #endif if (mng_info->need_blob != MagickFalse) { if (OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception) == MagickFalse) png_error(ping,"WriteBlob Failed"); ping_have_blob=MagickTrue; (void) ping_have_blob; } png_write_info_before_PLTE(ping, ping_info); if (ping_have_tRNS != MagickFalse && ping_color_type < 4) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Calling png_set_tRNS with num_trans=%d",ping_num_trans); } if (ping_color_type == 3) (void) png_set_tRNS(ping, ping_info, ping_trans_alpha, ping_num_trans, NULL); else { (void) png_set_tRNS(ping, ping_info, NULL, 0, &ping_trans_color); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tRNS color =(%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } } } png_write_info(ping,ping_info); ping_wrote_caNv = MagickFalse; /* write caNv chunk */ if (ping_exclude_caNv == MagickFalse) { if ((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows) || image->page.x != 0 || image->page.y != 0) { unsigned char chunk[20]; (void) WriteBlobMSBULong(image,16L); /* data length=8 */ PNGType(chunk,mng_caNv); LogPNGChunk(logging,mng_caNv,16L); PNGLong(chunk+4,(png_uint_32) image->page.width); PNGLong(chunk+8,(png_uint_32) image->page.height); PNGsLong(chunk+12,(png_int_32) image->page.x); PNGsLong(chunk+16,(png_int_32) image->page.y); (void) 
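/* chunk[] holds the 4-byte type plus the 16 data bytes; per the PNG
      chunk layout the CRC written next covers exactly those 20 bytes and
      not the length field */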
WriteBlob(image,20,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,20)); ping_wrote_caNv = MagickTrue; } } #if defined(PNG_oFFs_SUPPORTED) if (ping_exclude_oFFs == MagickFalse && ping_wrote_caNv == MagickFalse) { if (image->page.x || image->page.y) { png_set_oFFs(ping,ping_info,(png_int_32) image->page.x, (png_int_32) image->page.y, 0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up oFFs chunk with x=%d, y=%d, units=0", (int) image->page.x, (int) image->page.y); } } #endif #if (PNG_LIBPNG_VER == 10206) /* avoid libpng-1.2.6 bug by setting PNG_HAVE_IDAT flag */ #define PNG_HAVE_IDAT 0x04 ping->mode |= PNG_HAVE_IDAT; #undef PNG_HAVE_IDAT #endif png_set_packing(ping); /* Allocate memory. */ rowbytes=image->columns; if (image_depth > 8) rowbytes*=2; switch (ping_color_type) { case PNG_COLOR_TYPE_RGB: rowbytes*=3; break; case PNG_COLOR_TYPE_GRAY_ALPHA: rowbytes*=2; break; case PNG_COLOR_TYPE_RGBA: rowbytes*=4; break; default: break; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG image data"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocating %.20g bytes of memory for pixels",(double) rowbytes); } pixel_info=AcquireVirtualMemory(rowbytes,sizeof(*ping_pixels)); if (pixel_info == (MemoryInfo *) NULL) png_error(ping,"Allocation of memory for pixels failed"); ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); /* Initialize image scanlines. */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) png_error(ping,"Memory allocation for quantum_info failed"); quantum_info->format=UndefinedQuantumFormat; SetQuantumDepth(image,quantum_info,image_depth); (void) SetQuantumEndian(image,quantum_info,MSBEndian); num_passes=png_set_interlace_handling(ping); if ((!mng_info->write_png8 && !mng_info->write_png24 && !mng_info->write_png48 && !mng_info->write_png64 && !mng_info->write_png32) && (mng_info->IsPalette || (image_info->type == BilevelType)) && image_matte == MagickFalse && ping_have_non_bw == MagickFalse) { /* Palette, Bilevel, or Opaque Monochrome */ register const PixelPacket *p; SetQuantumDepth(image,quantum_info,8); for (pass=0; pass < num_passes; pass++) { /* Convert PseudoClass image to a PNG monochrome image. */ for (y=0; y < (ssize_t) image->rows; y++) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (0)"); p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; if (mng_info->IsPalette) { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_PALETTE && mng_info->write_png_depth && mng_info->write_png_depth != old_bit_depth) { /* Undo pixel scaling */ for (i=0; i < (ssize_t) image->columns; i++) *(ping_pixels+i)=(unsigned char) (*(ping_pixels+i) >> (8-old_bit_depth)); } } else { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); } if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE) for (i=0; i < (ssize_t) image->columns; i++) *(ping_pixels+i)=(unsigned char) ((*(ping_pixels+i) > 127) ? 
255 : 0); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (1)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } else /* Not Palette, Bilevel, or Opaque Monochrome */ { if ((!mng_info->write_png8 && !mng_info->write_png24 && !mng_info->write_png48 && !mng_info->write_png64 && !mng_info->write_png32) && (image_matte != MagickFalse || (ping_bit_depth >= MAGICKCORE_QUANTUM_DEPTH)) && (mng_info->IsPalette) && ping_have_color == MagickFalse) { register const PixelPacket *p; for (pass=0; pass < num_passes; pass++) { for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (mng_info->IsPalette) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY PNG pixels (2)"); } else /* PNG_COLOR_TYPE_GRAY_ALPHA */ { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (2)"); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels,&image->exception); } if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (2)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } else { register const PixelPacket *p; for (pass=0; pass < num_passes; pass++) { if ((image_depth > 8) || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (!mng_info->write_png8 && !mng_info->IsPalette)) { for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (image->storage_class == DirectClass) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); } else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels, &image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (3)"); } else if (image_matte != MagickFalse) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RGBAQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RGBQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (3)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } else /* not 
((image_depth > 8) || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (!mng_info->write_png8 && !mng_info->IsPalette)) */ { if ((ping_color_type != PNG_COLOR_TYPE_GRAY) && (ping_color_type != PNG_COLOR_TYPE_GRAY_ALPHA)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pass %d, Image Is not GRAY or GRAY_ALPHA",pass); SetQuantumDepth(image,quantum_info,8); image_depth=8; } for (y=0; y < (ssize_t) image->rows; y++) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pass %d, Image Is RGB, 16-bit GRAY, or GRAY_ALPHA",pass); p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { SetQuantumDepth(image,quantum_info,image->depth); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); } else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (4)"); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels, &image->exception); } else { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,IndexQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y <= 2) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of non-gray pixels (4)"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_pixels[0]=%d,ping_pixels[1]=%d", (int)ping_pixels[0],(int)ping_pixels[1]); } } png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } } } if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Wrote PNG image data"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Width: %.20g",(double) ping_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Height: %.20g",(double) ping_height); if (mng_info->write_png_depth) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:bit-depth: %d",mng_info->write_png_depth); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG bit-depth written: %d",ping_bit_depth); if (mng_info->write_png_colortype) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:color-type: %d",mng_info->write_png_colortype-1); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG color-type written: %d",ping_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG Interlace method: %d",ping_interlace_method); } /* Generate text chunks after IDAT. 
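     Each remaining image property becomes one PNG text chunk.  The chunk
     type is chosen below: tEXt if zTXt is excluded, zTXt if tEXt is
     excluded; otherwise tEXt is used when compression is NoCompression,
     or when compression is undefined and the text is shorter than 128
     bytes, and zTXt is used for everything else.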
*/ if (ping_exclude_tEXt == MagickFalse || ping_exclude_zTXt == MagickFalse) { ResetImagePropertyIterator(image); property=GetNextImageProperty(image); while (property != (const char *) NULL) { png_textp text; value=GetImageProperty(image,property); /* Don't write any "png:" or "jpeg:" properties; those are just for * "identify" or for passing through to another JPEG */ if ((LocaleNCompare(property,"png:",4) != 0 && LocaleNCompare(property,"jpeg:",5) != 0) && /* Suppress density and units if we wrote a pHYs chunk */ (ping_exclude_pHYs != MagickFalse || LocaleCompare(property,"density") != 0 || LocaleCompare(property,"units") != 0) && /* Suppress the IM-generated Date:create and Date:modify */ (ping_exclude_date == MagickFalse || LocaleNCompare(property, "Date:",5) != 0)) { if (value != (const char *) NULL) { #if PNG_LIBPNG_VER >= 10400 text=(png_textp) png_malloc(ping, (png_alloc_size_t) sizeof(png_text)); #else text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text)); #endif text[0].key=(char *) property; text[0].text=(char *) value; text[0].text_length=strlen(value); if (ping_exclude_tEXt != MagickFalse) text[0].compression=PNG_TEXT_COMPRESSION_zTXt; else if (ping_exclude_zTXt != MagickFalse) text[0].compression=PNG_TEXT_COMPRESSION_NONE; else { text[0].compression=image_info->compression == NoCompression || (image_info->compression == UndefinedCompression && text[0].text_length < 128) ? PNG_TEXT_COMPRESSION_NONE : PNG_TEXT_COMPRESSION_zTXt ; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up text chunk"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " keyword: '%s'",text[0].key); } png_set_text(ping,ping_info,text,1); png_free(ping,text); } } property=GetNextImageProperty(image); } } /* write eXIf profile */ if (ping_have_eXIf != MagickFalse && ping_exclude_eXIf == MagickFalse) { char *name; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { if (LocaleCompare(name,"exif") == 0) { const StringInfo *profile; profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { png_uint_32 length; unsigned char chunk[4], *data; StringInfo *ping_profile; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Have eXIf profile"); ping_profile=CloneStringInfo(profile); data=GetStringInfoDatum(ping_profile), length=(png_uint_32) GetStringInfoLength(ping_profile); PNGType(chunk,mng_eXIf); if (length < 7) { ping_profile=DestroyStringInfo(ping_profile); break; /* otherwise crashes */ } if (*data == 'E' && *(data+1) == 'x' && *(data+2) == 'i' && *(data+3) == 'f' && *(data+4) == '\0' && *(data+5) == '\0') { /* skip the "Exif\0\0" JFIF Exif Header ID */ length -= 6; data += 6; } LogPNGChunk(logging,chunk,length); (void) WriteBlobMSBULong(image,length); (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,data); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4), data, (uInt) length)); ping_profile=DestroyStringInfo(ping_profile); break; } } name=GetNextImageProfile(image); } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG end info"); png_write_end(ping,ping_info); if (mng_info->need_fram && (int) image->dispose == BackgroundDispose) { if (mng_info->page.x || mng_info->page.y || (ping_width != mng_info->page.width) || (ping_height != mng_info->page.height)) { unsigned char chunk[32]; /* Write FRAM 4 with clipping boundaries followed by FRAM 1. 
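       The FRAM chunk written here carries 27 data bytes: the framing
       mode (4), an empty frame name, four change flags, a 4-byte frame
       delay of zero, the clipping-boundary delta type, and four 4-byte
       clipping boundaries (left, right, top, bottom).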
*/ (void) WriteBlobMSBULong(image,27L); /* data length=27 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,27L); chunk[4]=4; chunk[5]=0; /* frame name separator (no name) */ chunk[6]=1; /* flag for changing delay, for next frame only */ chunk[7]=0; /* flag for changing frame timeout */ chunk[8]=1; /* flag for changing frame clipping for next frame */ chunk[9]=0; /* flag for changing frame sync_id */ PNGLong(chunk+10,(png_uint_32) (0L)); /* temporary 0 delay */ chunk[14]=0; /* clipping boundaries delta type */ PNGLong(chunk+15,(png_uint_32) (mng_info->page.x)); /* left cb */ PNGLong(chunk+19, (png_uint_32) (mng_info->page.x + ping_width)); PNGLong(chunk+23,(png_uint_32) (mng_info->page.y)); /* top cb */ PNGLong(chunk+27, (png_uint_32) (mng_info->page.y + ping_height)); (void) WriteBlob(image,31,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,31)); mng_info->old_framing_mode=4; mng_info->framing_mode=1; } else mng_info->framing_mode=3; } if (mng_info->write_mng && !mng_info->need_fram && ((int) image->dispose == 3)) png_error(ping, "Cannot convert GIF with disposal method 3 to MNG-LC"); /* Free PNG resources. */ png_destroy_write_struct(&ping,&ping_info); pixel_info=RelinquishVirtualMemory(pixel_info); /* Store bit depth actually written */ s[0]=(char) ping_bit_depth; s[1]='\0'; (void) SetImageProperty(image,"png:bit-depth-written",s); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteOnePNGImage()"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif /* } for navigation to beginning of SETJMP-protected block. Revert to * Throwing an Exception when an error occurs. */ return(MagickTrue); /* End write one PNG image */ } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePNGImage() writes a Portable Network Graphics (PNG) or % Multiple-image Network Graphics (MNG) image file. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WritePNGImage method is: % % MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % % Returns MagickTrue on success, MagickFalse on failure. % % Communicating with the PNG encoder: % % While the datastream written is always in PNG format and normally would % be given the "png" file extension, this method also writes the following % pseudo-formats which are subsets of png: % % o PNG8: An 8-bit indexed PNG datastream is written. If the image has % a depth greater than 8, the depth is reduced. If transparency % is present, the tRNS chunk must only have values 0 and 255 % (i.e., transparency is binary: fully opaque or fully % transparent). If other values are present they will be % 50%-thresholded to binary transparency. If more than 256 % colors are present, they will be quantized to the 4-4-4-1, % 3-3-3-1, or 3-3-2-1 palette. The underlying RGB color % of any resulting fully-transparent pixels is changed to % the image's background color. % % If you want better quantization or dithering of the colors % or alpha than that, you need to do it before calling the % PNG encoder. The pixels contain 8-bit indices even if % they could be represented with 1, 2, or 4 bits. 
Grayscale % images will be written as indexed PNG files even though the % PNG grayscale type might be slightly more efficient. Please % note that writing to the PNG8 format may result in loss % of color and alpha data. % % o PNG24: An 8-bit per sample RGB PNG datastream is written. The tRNS % chunk can be present to convey binary transparency by naming % one of the colors as transparent. The only loss incurred % is reduction of sample depth to 8. If the image has more % than one transparent color, has semitransparent pixels, or % has an opaque pixel with the same RGB components as the % transparent color, an image is not written. % % o PNG32: An 8-bit per sample RGBA PNG is written. Partial % transparency is permitted, i.e., the alpha sample for % each pixel can have any value from 0 to 255. The alpha % channel is present even if the image is fully opaque. % The only loss in data is the reduction of the sample depth % to 8. % % o PNG48: A 16-bit per sample RGB PNG datastream is written. The tRNS % chunk can be present to convey binary transparency by naming % one of the colors as transparent. If the image has more % than one transparent color, has semitransparent pixels, or % has an opaque pixel with the same RGB components as the % transparent color, an image is not written. % % o PNG64: A 16-bit per sample RGBA PNG is written. Partial % transparency is permitted, i.e., the alpha sample for % each pixel can have any value from 0 to 65535. The alpha % channel is present even if the image is fully opaque. % % o PNG00: A PNG that inherits its colortype and bit-depth from the input % image, if the input was a PNG, is written. If these values % cannot be found, or if the pixels have been changed in a way % that makes this impossible, then "PNG00" falls back to the % regular "PNG" format. % % o -define: For more precise control of the PNG output, you can use the % Image options "png:bit-depth" and "png:color-type". These % can be set from the commandline with "-define" and also % from the application programming interfaces. The options % are case-independent and are converted to lowercase before % being passed to this encoder. % % png:color-type can be 0, 2, 3, 4, or 6. % % When png:color-type is 0 (Grayscale), png:bit-depth can % be 1, 2, 4, 8, or 16. % % When png:color-type is 2 (RGB), png:bit-depth can % be 8 or 16. % % When png:color-type is 3 (Indexed), png:bit-depth can % be 1, 2, 4, or 8. This refers to the number of bits % used to store the index. The color samples always have % bit-depth 8 in indexed PNG files. % % When png:color-type is 4 (Gray-Matte) or 6 (RGB-Matte), % png:bit-depth can be 8 or 16. % % If the image cannot be written without loss with the % requested bit-depth and color-type, a PNG file will not % be written, a warning will be issued, and the encoder will % return MagickFalse. % % Since image encoders should not be responsible for the "heavy lifting", % the user should make sure that ImageMagick has already reduced the % image depth and number of colors and limit transparency to binary % transparency prior to attempting to write the image with depth, color, % or transparency limitations. % % To do: Enforce the previous paragraph. % % Note that another definition, "png:bit-depth-written" exists, but it % is not intended for external use. It is only used internally by the % PNG encoder to inform the JNG encoder of the depth of the alpha channel. % % As of version 6.6.6 the following optimizations are always done: % % o 32-bit depth is reduced to 16. 
% o 16-bit depth is reduced to 8 if all pixels contain samples whose % high byte and low byte are identical. % o Palette is sorted to remove unused entries and to put a % transparent color first, if BUILD_PNG_PALETTE is defined. % o Opaque matte channel is removed when writing an indexed PNG. % o Grayscale images are reduced to 1, 2, or 4 bit depth if % this can be done without loss and a larger bit depth N was not % requested via the "-define png:bit-depth=N" option. % o If matte channel is present but only one transparent color is % present, RGB+tRNS is written instead of RGBA % o Opaque matte channel is removed (or added, if color-type 4 or 6 % was requested when converting an opaque image). % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% */ static MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) { MagickBooleanType excluding, logging, status; MngInfo *mng_info; const char *value; int source; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WritePNGImage()"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; mng_info->equal_backgrounds=MagickTrue; /* See if user has requested a specific PNG subformat */ mng_info->write_png8=LocaleCompare(image_info->magick,"PNG8") == 0; mng_info->write_png24=LocaleCompare(image_info->magick,"PNG24") == 0; mng_info->write_png32=LocaleCompare(image_info->magick,"PNG32") == 0; mng_info->write_png48=LocaleCompare(image_info->magick,"PNG48") == 0; mng_info->write_png64=LocaleCompare(image_info->magick,"PNG64") == 0; value=GetImageOption(image_info,"png:format"); if (value != (char *) NULL || LocaleCompare(image_info->magick,"PNG00") == 0) { mng_info->write_png8 = MagickFalse; mng_info->write_png24 = MagickFalse; mng_info->write_png32 = MagickFalse; mng_info->write_png48 = MagickFalse; mng_info->write_png64 = MagickFalse; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Format=%s",value); if (LocaleCompare(value,"png8") == 0) mng_info->write_png8 = MagickTrue; else if (LocaleCompare(value,"png24") == 0) mng_info->write_png24 = MagickTrue; else if (LocaleCompare(value,"png32") == 0) mng_info->write_png32 = MagickTrue; else if (LocaleCompare(value,"png48") == 0) mng_info->write_png48 = MagickTrue; else if (LocaleCompare(value,"png64") == 0) mng_info->write_png64 = MagickTrue; else if ((LocaleCompare(value,"png00") == 0) || LocaleCompare(image_info->magick,"PNG00") == 0) { /* Retrieve png:IHDR.bit-depth-orig and png:IHDR.color-type-orig */ value=GetImageProperty(image,"png:IHDR.bit-depth-orig"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png00 inherited bit depth=%s",value); if (value != (char *) NULL) { if (LocaleCompare(value,"1") == 0) mng_info->write_png_depth = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_depth = 2; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_depth = 4; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_depth = 8; else if (LocaleCompare(value,"16") == 0) mng_info->write_png_depth = 
16; } value=GetImageProperty(image,"png:IHDR.color-type-orig"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png00 inherited color type=%s",value); if (value != (char *) NULL) { if (LocaleCompare(value,"0") == 0) mng_info->write_png_colortype = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_colortype = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_colortype = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_colortype = 5; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_colortype = 7; } } } if (mng_info->write_png8) { mng_info->write_png_colortype = /* 3 */ 4; mng_info->write_png_depth = 8; image->depth = 8; } if (mng_info->write_png24) { mng_info->write_png_colortype = /* 2 */ 3; mng_info->write_png_depth = 8; image->depth = 8; if (image->matte != MagickFalse) (void) SetImageType(image,TrueColorMatteType); else (void) SetImageType(image,TrueColorType); (void) SyncImage(image); } if (mng_info->write_png32) { mng_info->write_png_colortype = /* 6 */ 7; mng_info->write_png_depth = 8; image->depth = 8; image->matte = MagickTrue; (void) SetImageType(image,TrueColorMatteType); (void) SyncImage(image); } if (mng_info->write_png48) { mng_info->write_png_colortype = /* 2 */ 3; mng_info->write_png_depth = 16; image->depth = 16; if (image->matte != MagickFalse) (void) SetImageType(image,TrueColorMatteType); else (void) SetImageType(image,TrueColorType); (void) SyncImage(image); } if (mng_info->write_png64) { mng_info->write_png_colortype = /* 6 */ 7; mng_info->write_png_depth = 16; image->depth = 16; image->matte = MagickTrue; (void) SetImageType(image,TrueColorMatteType); (void) SyncImage(image); } value=GetImageOption(image_info,"png:bit-depth"); if (value != (char *) NULL) { if (LocaleCompare(value,"1") == 0) mng_info->write_png_depth = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_depth = 2; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_depth = 4; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_depth = 8; else if (LocaleCompare(value,"16") == 0) mng_info->write_png_depth = 16; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:bit-depth", "=%s",value); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:bit-depth=%d was defined.\n",mng_info->write_png_depth); } value=GetImageOption(image_info,"png:color-type"); if (value != (char *) NULL) { /* We must store colortype+1 because 0 is a valid colortype */ if (LocaleCompare(value,"0") == 0) mng_info->write_png_colortype = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_colortype = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_colortype = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_colortype = 5; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_colortype = 7; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:color-type", "=%s",value); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:color-type=%d was defined.\n",mng_info->write_png_colortype-1); } /* Check for chunks to be excluded: * * The default is to not exclude any known chunks except for any * listed in the "unused_chunks" array, above. * * Chunks can be listed for exclusion via a "png:exclude-chunk" * define (in the image properties or in the image artifacts) * or via a mng_info member. 
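   * (For example, "-define png:exclude-chunk=bkgd,time" drops the bKGD
   * and tIME chunks from the output.)
   *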
For convenience, in addition * to or instead of a comma-separated list of chunks, the * "exclude-chunk" string can be simply "all" or "none". * * The exclude-chunk define takes priority over the mng_info. * * A "png:include-chunk" define takes priority over both the * mng_info and the "png:exclude-chunk" define. Like the * "exclude-chunk" string, it can define "all" or "none" as * well as a comma-separated list. Chunks that are unknown to * ImageMagick are always excluded, regardless of their "copy-safe" * status according to the PNG specification, and even if they * appear in the "include-chunk" list. Such defines appearing among * the image options take priority over those found among the image * artifacts. * * Finally, all chunks listed in the "unused_chunks" array are * automatically excluded, regardless of the other instructions * or lack thereof. * * if you exclude sRGB but not gAMA (recommended), then sRGB chunk * will not be written and the gAMA chunk will only be written if it * is not between .45 and .46, or approximately (1.0/2.2). * * If you exclude tRNS and the image has transparency, the colortype * is forced to be 4 or 6 (GRAY_ALPHA or RGB_ALPHA). * * The -strip option causes StripImage() to set the png:include-chunk * artifact to "none,trns,gama". */ mng_info->ping_exclude_bKGD=MagickFalse; mng_info->ping_exclude_caNv=MagickFalse; mng_info->ping_exclude_cHRM=MagickFalse; mng_info->ping_exclude_date=MagickFalse; mng_info->ping_exclude_eXIf=MagickFalse; mng_info->ping_exclude_EXIF=MagickFalse; /* hex-encoded EXIF in zTXt */ mng_info->ping_exclude_gAMA=MagickFalse; mng_info->ping_exclude_iCCP=MagickFalse; /* mng_info->ping_exclude_iTXt=MagickFalse; */ mng_info->ping_exclude_oFFs=MagickFalse; mng_info->ping_exclude_pHYs=MagickFalse; mng_info->ping_exclude_sRGB=MagickFalse; mng_info->ping_exclude_tEXt=MagickFalse; mng_info->ping_exclude_tIME=MagickFalse; mng_info->ping_exclude_tRNS=MagickFalse; mng_info->ping_exclude_zCCP=MagickFalse; /* hex-encoded iCCP in zTXt */ mng_info->ping_exclude_zTXt=MagickFalse; mng_info->ping_preserve_colormap=MagickFalse; value=GetImageOption(image_info,"png:preserve-colormap"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-colormap"); if (value != NULL) mng_info->ping_preserve_colormap=MagickTrue; mng_info->ping_preserve_iCCP=MagickFalse; value=GetImageOption(image_info,"png:preserve-iCCP"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-iCCP"); if (value != NULL) mng_info->ping_preserve_iCCP=MagickTrue; /* These compression-level, compression-strategy, and compression-filter * defines take precedence over values from the -quality option. */ value=GetImageOption(image_info,"png:compression-level"); if (value == NULL) value=GetImageArtifact(image,"png:compression-level"); if (value != NULL) { /* To do: use a "LocaleInteger:()" function here. */ /* We have to add 1 to everything because 0 is a valid input, * and we want to use 0 (the default) to mean undefined. 
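   * For example, "-define png:compression-level=0" is stored below as 1,
   * while a stored value of 0 still means that no level was requested.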
*/ if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_level = 1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_level = 2; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_level = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_compression_level = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_compression_level = 5; else if (LocaleCompare(value,"5") == 0) mng_info->write_png_compression_level = 6; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_compression_level = 7; else if (LocaleCompare(value,"7") == 0) mng_info->write_png_compression_level = 8; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_compression_level = 9; else if (LocaleCompare(value,"9") == 0) mng_info->write_png_compression_level = 10; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-level", "=%s",value); } value=GetImageOption(image_info,"png:compression-strategy"); if (value == NULL) value=GetImageArtifact(image,"png:compression-strategy"); if (value != NULL) { if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_strategy = Z_FILTERED+1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_strategy = Z_HUFFMAN_ONLY+1; else if (LocaleCompare(value,"3") == 0) #ifdef Z_RLE /* Z_RLE was added to zlib-1.2.0 */ mng_info->write_png_compression_strategy = Z_RLE+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif else if (LocaleCompare(value,"4") == 0) #ifdef Z_FIXED /* Z_FIXED was added to zlib-1.2.2.2 */ mng_info->write_png_compression_strategy = Z_FIXED+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-strategy", "=%s",value); } value=GetImageOption(image_info,"png:compression-filter"); if (value == NULL) value=GetImageArtifact(image,"png:compression-filter"); if (value != NULL) { /* To do: combinations of filters allowed by libpng * masks 0x08 through 0xf8 * * Implement this as a comma-separated list of 0,1,2,3,4,5 * where 5 is a special case meaning PNG_ALL_FILTERS. 
*/ if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_filter = 1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_filter = 2; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_filter = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_compression_filter = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_compression_filter = 5; else if (LocaleCompare(value,"5") == 0) mng_info->write_png_compression_filter = 6; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-filter", "=%s",value); } for (source=0; source<8; source++) { value = NULL; if (source == 0) value=GetImageOption(image_info,"png:exclude-chunks"); if (source == 1) value=GetImageArtifact(image,"png:exclude-chunks"); if (source == 2) value=GetImageOption(image_info,"png:exclude-chunk"); if (source == 3) value=GetImageArtifact(image,"png:exclude-chunk"); if (source == 4) value=GetImageOption(image_info,"png:include-chunks"); if (source == 5) value=GetImageArtifact(image,"png:include-chunks"); if (source == 6) value=GetImageOption(image_info,"png:include-chunk"); if (source == 7) value=GetImageArtifact(image,"png:include-chunk"); if (value == NULL) continue; if (source < 4) excluding = MagickTrue; else excluding = MagickFalse; if (logging != MagickFalse) { if (source == 0 || source == 2) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:exclude-chunk=%s found in image options.\n", value); else if (source == 1 || source == 3) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:exclude-chunk=%s found in image artifacts.\n", value); else if (source == 4 || source == 6) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:include-chunk=%s found in image options.\n", value); else /* if (source == 5 || source == 7) */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:include-chunk=%s found in image artifacts.\n", value); } if (IsOptionMember("all",value) != MagickFalse) { mng_info->ping_exclude_bKGD=excluding; mng_info->ping_exclude_caNv=excluding; mng_info->ping_exclude_cHRM=excluding; mng_info->ping_exclude_date=excluding; mng_info->ping_exclude_EXIF=excluding; mng_info->ping_exclude_eXIf=excluding; mng_info->ping_exclude_gAMA=excluding; mng_info->ping_exclude_iCCP=excluding; /* mng_info->ping_exclude_iTXt=excluding; */ mng_info->ping_exclude_oFFs=excluding; mng_info->ping_exclude_pHYs=excluding; mng_info->ping_exclude_sRGB=excluding; mng_info->ping_exclude_tIME=excluding; mng_info->ping_exclude_tEXt=excluding; mng_info->ping_exclude_tRNS=excluding; mng_info->ping_exclude_zCCP=excluding; mng_info->ping_exclude_zTXt=excluding; } if (IsOptionMember("none",value) != MagickFalse) { mng_info->ping_exclude_bKGD=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_caNv=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_cHRM=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_date=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_eXIf=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_EXIF=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_gAMA=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_iCCP=excluding != MagickFalse ? MagickFalse : MagickTrue; /* mng_info->ping_exclude_iTXt=!excluding; */ mng_info->ping_exclude_oFFs=excluding != MagickFalse ? 
MagickFalse : MagickTrue; mng_info->ping_exclude_pHYs=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_sRGB=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tEXt=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tIME=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tRNS=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_zCCP=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_zTXt=excluding != MagickFalse ? MagickFalse : MagickTrue; } if (IsOptionMember("bkgd",value) != MagickFalse) mng_info->ping_exclude_bKGD=excluding; if (IsOptionMember("caNv",value) != MagickFalse) mng_info->ping_exclude_caNv=excluding; if (IsOptionMember("chrm",value) != MagickFalse) mng_info->ping_exclude_cHRM=excluding; if (IsOptionMember("date",value) != MagickFalse) mng_info->ping_exclude_date=excluding; if (IsOptionMember("exif",value) != MagickFalse) { mng_info->ping_exclude_EXIF=excluding; mng_info->ping_exclude_eXIf=excluding; } if (IsOptionMember("gama",value) != MagickFalse) mng_info->ping_exclude_gAMA=excluding; if (IsOptionMember("iccp",value) != MagickFalse) mng_info->ping_exclude_iCCP=excluding; #if 0 if (IsOptionMember("itxt",value) != MagickFalse) mng_info->ping_exclude_iTXt=excluding; #endif if (IsOptionMember("offs",value) != MagickFalse) mng_info->ping_exclude_oFFs=excluding; if (IsOptionMember("phys",value) != MagickFalse) mng_info->ping_exclude_pHYs=excluding; if (IsOptionMember("srgb",value) != MagickFalse) mng_info->ping_exclude_sRGB=excluding; if (IsOptionMember("text",value) != MagickFalse) mng_info->ping_exclude_tEXt=excluding; if (IsOptionMember("time",value) != MagickFalse) mng_info->ping_exclude_tIME=excluding; if (IsOptionMember("trns",value) != MagickFalse) mng_info->ping_exclude_tRNS=excluding; if (IsOptionMember("zccp",value) != MagickFalse) mng_info->ping_exclude_zCCP=excluding; if (IsOptionMember("ztxt",value) != MagickFalse) mng_info->ping_exclude_zTXt=excluding; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Chunks to be excluded from the output png:"); if (mng_info->ping_exclude_bKGD != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " bKGD"); if (mng_info->ping_exclude_caNv != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " caNv"); if (mng_info->ping_exclude_cHRM != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " cHRM"); if (mng_info->ping_exclude_date != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " date"); if (mng_info->ping_exclude_EXIF != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " EXIF"); if (mng_info->ping_exclude_eXIf != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " eXIf"); if (mng_info->ping_exclude_gAMA != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " gAMA"); if (mng_info->ping_exclude_iCCP != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " iCCP"); #if 0 if (mng_info->ping_exclude_iTXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " iTXt"); #endif if (mng_info->ping_exclude_oFFs != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " oFFs"); if (mng_info->ping_exclude_pHYs != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pHYs"); if (mng_info->ping_exclude_sRGB != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " sRGB"); if 
(mng_info->ping_exclude_tEXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tEXt"); if (mng_info->ping_exclude_tIME != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tIME"); if (mng_info->ping_exclude_tRNS != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tRNS"); if (mng_info->ping_exclude_zCCP != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " zCCP"); if (mng_info->ping_exclude_zTXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " zTXt"); } mng_info->need_blob = MagickTrue; status=WriteOnePNGImage(mng_info,image_info,image); (void) CloseBlob(image); mng_info=MngInfoFreeStruct(mng_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit WritePNGImage()"); return(status); } #if defined(JNG_SUPPORTED) /* Write one JNG image */ static MagickBooleanType WriteOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info,Image *image) { Image *jpeg_image; ImageInfo *jpeg_image_info; int unique_filenames; MagickBooleanType logging, status; size_t length; unsigned char *blob, chunk[80], *p; unsigned int jng_alpha_compression_method, jng_alpha_sample_depth, jng_color_type, transparent; size_t jng_alpha_quality, jng_quality; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter WriteOneJNGImage()"); blob=(unsigned char *) NULL; jpeg_image=(Image *) NULL; jpeg_image_info=(ImageInfo *) NULL; length=0; unique_filenames=0; status=MagickTrue; transparent=image_info->type==GrayscaleMatteType || image_info->type==TrueColorMatteType || image->matte != MagickFalse; jng_alpha_sample_depth = 0; jng_quality=image_info->quality == 0UL ? 75UL : image_info->quality%1000; jng_alpha_compression_method=image->compression==JPEGCompression? 8 : 0; jng_alpha_quality=image_info->quality == 0UL ? 
75UL : image_info->quality; if (jng_alpha_quality >= 1000) jng_alpha_quality /= 1000; if (transparent != 0) { jng_color_type=14; /* Create JPEG blob, image, and image_info */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image_info for opacity."); jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info); if (jpeg_image_info == (ImageInfo *) NULL) { jpeg_image_info=DestroyImageInfo(jpeg_image_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image."); jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image == (Image *) NULL) { jpeg_image_info=DestroyImageInfo(jpeg_image_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); status=SeparateImageChannel(jpeg_image,OpacityChannel); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); status=NegateImage(jpeg_image,MagickFalse); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); jpeg_image->matte=MagickFalse; jpeg_image_info->type=GrayscaleType; jpeg_image->quality=jng_alpha_quality; jpeg_image_info->type=GrayscaleType; (void) SetImageType(jpeg_image,GrayscaleType); (void) AcquireUniqueFilename(jpeg_image->filename); unique_filenames++; (void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent, "%s",jpeg_image->filename); } else { jng_alpha_compression_method=0; jng_color_type=10; jng_alpha_sample_depth=0; } /* To do: check bit depth of PNG alpha channel */ /* Check if image is grayscale. */ if (image_info->type != TrueColorMatteType && image_info->type != TrueColorType && SetImageGray(image,&image->exception)) jng_color_type-=2; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Quality = %d",(int) jng_quality); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Color Type = %d",jng_color_type); if (transparent != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Compression = %d",jng_alpha_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Depth = %d",jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Quality = %d",(int) jng_alpha_quality); } } if (transparent != 0) { if (jng_alpha_compression_method==0) { const char *value; /* Encode opacity as a grayscale PNG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating PNG blob for alpha."); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); length=0; (void) CopyMagickString(jpeg_image_info->magick,"PNG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"PNG",MaxTextExtent); jpeg_image_info->interlace=NoInterlace; /* Exclude all ancillary chunks */ (void) SetImageArtifact(jpeg_image,"png:exclude-chunks","all"); blob=ImageToBlob(jpeg_image_info,jpeg_image,&length, &image->exception); /* Retrieve sample depth used */ value=GetImageProperty(jpeg_image,"png:bit-depth-written"); if (value != (char *) NULL) jng_alpha_sample_depth= (unsigned int) value[0]; } else { /* Encode opacity as a grayscale JPEG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating JPEG blob for alpha."); 
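          /* jng_alpha_compression_method is 8 here, so the opacity
             channel becomes a grayscale JPEG that is later emitted in a
             JDAA chunk; the method-0 branch above produced a grayscale
             PNG whose IDAT chunks are copied into the JNG instead. */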
status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); jpeg_image_info->interlace=NoInterlace; blob=ImageToBlob(jpeg_image_info,jpeg_image,&length, &image->exception); jng_alpha_sample_depth=8; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Successfully read jpeg_image into a blob, length=%.20g.", (double) length); } /* Destroy JPEG image and image_info */ jpeg_image=DestroyImage(jpeg_image); (void) RelinquishUniqueFileResource(jpeg_image_info->filename); unique_filenames--; jpeg_image_info=DestroyImageInfo(jpeg_image_info); } /* Write JHDR chunk */ (void) WriteBlobMSBULong(image,16L); /* chunk data length=16 */ PNGType(chunk,mng_JHDR); LogPNGChunk(logging,mng_JHDR,16L); PNGLong(chunk+4,(png_uint_32) image->columns); PNGLong(chunk+8,(png_uint_32) image->rows); chunk[12]=jng_color_type; chunk[13]=8; /* sample depth */ chunk[14]=8; /*jng_image_compression_method */ chunk[15]=(unsigned char) (image_info->interlace == NoInterlace ? 0 : 8); chunk[16]=jng_alpha_sample_depth; chunk[17]=jng_alpha_compression_method; chunk[18]=0; /*jng_alpha_filter_method */ chunk[19]=0; /*jng_alpha_interlace_method */ (void) WriteBlob(image,20,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,20)); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG width:%15lu",(unsigned long) image->columns); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG height:%14lu",(unsigned long) image->rows); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG color type:%10d",jng_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG sample depth:%8d",8); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG compression:%9d",8); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG interlace:%11d",0); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha depth:%9d",jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha compression:%3d",jng_alpha_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha filter:%8d",0); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha interlace:%5d",0); } /* Write leading ancillary chunks */ if (transparent != 0) { /* Write JNG bKGD chunk */ unsigned char blue, green, red; ssize_t num_bytes; if (jng_color_type == 8 || jng_color_type == 12) num_bytes=6L; else num_bytes=10L; (void) WriteBlobMSBULong(image,(size_t) (num_bytes-4L)); PNGType(chunk,mng_bKGD); LogPNGChunk(logging,mng_bKGD,(size_t) (num_bytes-4L)); red=ScaleQuantumToChar(image->background_color.red); green=ScaleQuantumToChar(image->background_color.green); blue=ScaleQuantumToChar(image->background_color.blue); *(chunk+4)=0; *(chunk+5)=red; *(chunk+6)=0; *(chunk+7)=green; *(chunk+8)=0; *(chunk+9)=blue; (void) WriteBlob(image,(size_t) num_bytes,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) num_bytes)); } if ((image->colorspace == sRGBColorspace || image->rendering_intent)) { /* Write JNG sRGB chunk */ (void) WriteBlobMSBULong(image,1L); PNGType(chunk,mng_sRGB); LogPNGChunk(logging,mng_sRGB,1L); if (image->rendering_intent != UndefinedIntent) chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (image->rendering_intent)); else chunk[4]=(unsigned char) 
Magick_RenderingIntent_to_PNG_RenderingIntent( (PerceptualIntent)); (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); } else { if (image->gamma != 0.0) { /* Write JNG gAMA chunk */ (void) WriteBlobMSBULong(image,4L); PNGType(chunk,mng_gAMA); LogPNGChunk(logging,mng_gAMA,4L); PNGLong(chunk+4,(png_uint_32) (100000*image->gamma+0.5)); (void) WriteBlob(image,8,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,8)); } if ((mng_info->equal_chrms == MagickFalse) && (image->chromaticity.red_primary.x != 0.0)) { PrimaryInfo primary; /* Write JNG cHRM chunk */ (void) WriteBlobMSBULong(image,32L); PNGType(chunk,mng_cHRM); LogPNGChunk(logging,mng_cHRM,32L); primary=image->chromaticity.white_point; PNGLong(chunk+4,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+8,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.red_primary; PNGLong(chunk+12,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+16,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.green_primary; PNGLong(chunk+20,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+24,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.blue_primary; PNGLong(chunk+28,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+32,(png_uint_32) (100000*primary.y+0.5)); (void) WriteBlob(image,36,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,36)); } } if (image->x_resolution && image->y_resolution && !mng_info->equal_physs) { /* Write JNG pHYs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_pHYs); LogPNGChunk(logging,mng_pHYs,9L); if (image->units == PixelsPerInchResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0/2.54+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0/2.54+0.5)); chunk[12]=1; } else { if (image->units == PixelsPerCentimeterResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0+0.5)); chunk[12]=1; } else { PNGLong(chunk+4,(png_uint_32) (image->x_resolution+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution+0.5)); chunk[12]=0; } } (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } if (mng_info->write_mng == 0 && (image->page.x || image->page.y)) { /* Write JNG oFFs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_oFFs); LogPNGChunk(logging,mng_oFFs,9L); PNGsLong(chunk+4,(ssize_t) (image->page.x)); PNGsLong(chunk+8,(ssize_t) (image->page.y)); chunk[12]=0; (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } if (transparent != 0) { if (jng_alpha_compression_method==0) { register ssize_t i; size_t len; /* Write IDAT chunk header */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write IDAT chunks from blob, length=%.20g.",(double) length); /* Copy IDAT chunks */ len=0; p=blob+8; for (i=8; i<(ssize_t) length; i+=len+12) { len=(((unsigned int) *(p ) & 0xff) << 24) + (((unsigned int) *(p + 1) & 0xff) << 16) + (((unsigned int) *(p + 2) & 0xff) << 8) + (((unsigned int) *(p + 3) & 0xff) ) ; p+=4; if (*(p)==73 && *(p+1)==68 && *(p+2)==65 && *(p+3)==84) /* IDAT */ { /* Found an IDAT chunk. 
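                   Each PNG chunk consists of a 4-byte big-endian length,
                   a 4-byte type, 'len' data bytes, and a 4-byte CRC over
                   the type and data; the type and data are copied below
                   and the CRC is recomputed.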
*/ (void) WriteBlobMSBULong(image,len); LogPNGChunk(logging,mng_IDAT,len); (void) WriteBlob(image,len+4,p); (void) WriteBlobMSBULong(image,crc32(0,p,(uInt) len+4)); } else { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping %c%c%c%c chunk, length=%.20g.", *(p),*(p+1),*(p+2),*(p+3),(double) len); } p+=(8+len); } } else if (length != 0) { /* Write JDAA chunk header */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write JDAA chunk, length=%.20g.",(double) length); (void) WriteBlobMSBULong(image,(size_t) length); PNGType(chunk,mng_JDAA); LogPNGChunk(logging,mng_JDAA,length); /* Write JDAT chunk(s) data */ (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,blob); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob, (uInt) length)); } blob=(unsigned char *) RelinquishMagickMemory(blob); } /* Encode image as a JPEG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image_info."); jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info); if (jpeg_image_info == (ImageInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image."); jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image == (Image *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); (void) AcquireUniqueFilename(jpeg_image->filename); unique_filenames++; (void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent,"%s", jpeg_image->filename); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Created jpeg_image, %.20g x %.20g.",(double) jpeg_image->columns, (double) jpeg_image->rows); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (jng_color_type == 8 || jng_color_type == 12) jpeg_image_info->type=GrayscaleType; jpeg_image_info->quality=jng_quality; jpeg_image->quality=jng_quality; (void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating blob."); blob=ImageToBlob(jpeg_image_info,jpeg_image,&length,&image->exception); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Successfully read jpeg_image into a blob, length=%.20g.", (double) length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write JDAT chunk, length=%.20g.",(double) length); } /* Write JDAT chunk(s) */ (void) WriteBlobMSBULong(image,(size_t) length); PNGType(chunk,mng_JDAT); LogPNGChunk(logging,mng_JDAT,length); (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,blob); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob,(uInt) length)); jpeg_image=DestroyImage(jpeg_image); (void) RelinquishUniqueFileResource(jpeg_image_info->filename); unique_filenames--; jpeg_image_info=DestroyImageInfo(jpeg_image_info); blob=(unsigned char *) RelinquishMagickMemory(blob); /* Write IEND chunk */ (void) WriteBlobMSBULong(image,0L); PNGType(chunk,mng_IEND); LogPNGChunk(logging,mng_IEND,0); (void) WriteBlob(image,4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,4)); if (logging != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteOneJNGImage(); unique_filenames=%d",unique_filenames); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e J N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteJNGImage() writes a JPEG Network Graphics (JNG) image file. % % JNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WriteJNGImage method is: % % MagickBooleanType WriteJNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% */ static MagickBooleanType WriteJNGImage(const ImageInfo *image_info,Image *image) { MagickBooleanType logging, status; MngInfo *mng_info; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WriteJNGImage()"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); if ((image->columns > 65535UL) || (image->rows > 65535UL)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; (void) WriteBlob(image,8,(const unsigned char *) "\213JNG\r\n\032\n"); status=WriteOneJNGImage(mng_info,image_info,image); mng_info=MngInfoFreeStruct(mng_info); (void) CloseBlob(image); (void) CatchImageException(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteJNGImage()"); return(status); } #endif static MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) { const char *option; Image *next_image; MagickBooleanType status; volatile MagickBooleanType logging; MngInfo *mng_info; int image_count, need_iterations, need_matte; volatile int #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) need_local_plte, #endif all_images_are_gray, need_defi, use_global_plte; register ssize_t i; unsigned char chunk[800]; volatile unsigned int write_jng, write_mng; volatile size_t scene; size_t final_delay=0, initial_delay, imageListLength; #if (PNG_LIBPNG_VER < 10200) if (image_info->verbose) printf("Your PNG library (libpng-%s) is rather old.\n", PNG_LIBPNG_VER_STRING); #endif /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WriteMNGImage()"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); /* Allocate a MngInfo structure. 
*/ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) memset(mng_info,0,sizeof(MngInfo)); mng_info->image=image; write_mng=LocaleCompare(image_info->magick,"MNG") == 0; /* * See if user has requested a specific PNG subformat to be used * for all of the PNGs in the MNG being written, e.g., * * convert *.png png8:animation.mng * * To do: check -define png:bit_depth and png:color_type as well, * or perhaps use mng:bit_depth and mng:color_type instead for * global settings. */ mng_info->write_png8=LocaleCompare(image_info->magick,"PNG8") == 0; mng_info->write_png24=LocaleCompare(image_info->magick,"PNG24") == 0; mng_info->write_png32=LocaleCompare(image_info->magick,"PNG32") == 0; write_jng=MagickFalse; if (image_info->compression == JPEGCompression) write_jng=MagickTrue; mng_info->adjoin=image_info->adjoin && (GetNextImageInList(image) != (Image *) NULL) && write_mng; if (logging != MagickFalse) { /* Log some info about the input */ Image *p; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Checking input image(s)\n" " Image_info depth: %.20g, Type: %d", (double) image_info->depth, image_info->type); scene=0; for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scene: %.20g\n, Image depth: %.20g", (double) scene++, (double) p->depth); if (p->matte) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Matte: True"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Matte: False"); if (p->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Storage class: PseudoClass"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Storage class: DirectClass"); if (p->colors) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %.20g",(double) p->colors); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: unspecified"); if (mng_info->adjoin == MagickFalse) break; } } use_global_plte=MagickFalse; all_images_are_gray=MagickFalse; #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED need_local_plte=MagickTrue; #endif need_defi=MagickFalse; need_matte=MagickFalse; mng_info->framing_mode=1; mng_info->old_framing_mode=1; if (write_mng) if (image_info->page != (char *) NULL) { /* Determine image bounding box. */ SetGeometry(image,&mng_info->page); (void) ParseMetaGeometry(image_info->page,&mng_info->page.x, &mng_info->page.y,&mng_info->page.width,&mng_info->page.height); } if (write_mng) { unsigned int need_geom; unsigned short red, green, blue; mng_info->page=image->page; need_geom=MagickTrue; if (mng_info->page.width || mng_info->page.height) need_geom=MagickFalse; /* Check all the scenes. 
*/ initial_delay=image->delay; need_iterations=MagickFalse; mng_info->equal_chrms=image->chromaticity.red_primary.x != 0.0; mng_info->equal_physs=MagickTrue, mng_info->equal_gammas=MagickTrue; mng_info->equal_srgbs=MagickTrue; mng_info->equal_backgrounds=MagickTrue; image_count=0; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) all_images_are_gray=MagickTrue; mng_info->equal_palettes=MagickFalse; need_local_plte=MagickFalse; #endif for (next_image=image; next_image != (Image *) NULL; ) { if (need_geom) { if ((next_image->columns+next_image->page.x) > mng_info->page.width) mng_info->page.width=next_image->columns+next_image->page.x; if ((next_image->rows+next_image->page.y) > mng_info->page.height) mng_info->page.height=next_image->rows+next_image->page.y; } if (next_image->page.x || next_image->page.y) need_defi=MagickTrue; if (next_image->matte) need_matte=MagickTrue; if ((int) next_image->dispose >= BackgroundDispose) if (next_image->matte || next_image->page.x || next_image->page.y || ((next_image->columns < mng_info->page.width) && (next_image->rows < mng_info->page.height))) mng_info->need_fram=MagickTrue; if (next_image->iterations) need_iterations=MagickTrue; final_delay=next_image->delay; if (final_delay != initial_delay || final_delay > 1UL* next_image->ticks_per_second) mng_info->need_fram=1; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) /* check for global palette possibility. */ if (image->matte != MagickFalse) need_local_plte=MagickTrue; if (need_local_plte == 0) { if (SetImageGray(image,&image->exception) == MagickFalse) all_images_are_gray=MagickFalse; mng_info->equal_palettes=PalettesAreEqual(image,next_image); if (use_global_plte == 0) use_global_plte=mng_info->equal_palettes; need_local_plte=!mng_info->equal_palettes; } #endif if (GetNextImageInList(next_image) != (Image *) NULL) { if (next_image->background_color.red != next_image->next->background_color.red || next_image->background_color.green != next_image->next->background_color.green || next_image->background_color.blue != next_image->next->background_color.blue) mng_info->equal_backgrounds=MagickFalse; if (next_image->gamma != next_image->next->gamma) mng_info->equal_gammas=MagickFalse; if (next_image->rendering_intent != next_image->next->rendering_intent) mng_info->equal_srgbs=MagickFalse; if ((next_image->units != next_image->next->units) || (next_image->x_resolution != next_image->next->x_resolution) || (next_image->y_resolution != next_image->next->y_resolution)) mng_info->equal_physs=MagickFalse; if (mng_info->equal_chrms) { if (next_image->chromaticity.red_primary.x != next_image->next->chromaticity.red_primary.x || next_image->chromaticity.red_primary.y != next_image->next->chromaticity.red_primary.y || next_image->chromaticity.green_primary.x != next_image->next->chromaticity.green_primary.x || next_image->chromaticity.green_primary.y != next_image->next->chromaticity.green_primary.y || next_image->chromaticity.blue_primary.x != next_image->next->chromaticity.blue_primary.x || next_image->chromaticity.blue_primary.y != next_image->next->chromaticity.blue_primary.y || next_image->chromaticity.white_point.x != next_image->next->chromaticity.white_point.x || next_image->chromaticity.white_point.y != next_image->next->chromaticity.white_point.y) mng_info->equal_chrms=MagickFalse; } } image_count++; next_image=GetNextImageInList(next_image); } if (image_count < 2) { mng_info->equal_backgrounds=MagickFalse; 
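/* A single-image MNG has no second frame to compare against, so (continuing below) every cross-frame "equal_*" optimization is switched off and per-image (local) chunks are used instead of global ones. */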
mng_info->equal_chrms=MagickFalse; mng_info->equal_gammas=MagickFalse; mng_info->equal_srgbs=MagickFalse; mng_info->equal_physs=MagickFalse; use_global_plte=MagickFalse; #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED need_local_plte=MagickTrue; #endif need_iterations=MagickFalse; } if (mng_info->need_fram == MagickFalse) { /* Only certain framing rates 100/n are exactly representable without the FRAM chunk but we'll allow some slop in VLC files */ if (final_delay == 0) { if (need_iterations != MagickFalse) { /* It's probably a GIF with loop; don't run it *too* fast. */ if (mng_info->adjoin) { final_delay=10; (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "input has zero delay between all frames; assuming", " 10 cs `%s'",""); } } else mng_info->ticks_per_second=0; } if (final_delay != 0) mng_info->ticks_per_second=(png_uint_32) (image->ticks_per_second/final_delay); if (final_delay > 50) mng_info->ticks_per_second=2; if (final_delay > 75) mng_info->ticks_per_second=1; if (final_delay > 125) mng_info->need_fram=MagickTrue; if (need_defi && final_delay > 2 && (final_delay != 4) && (final_delay != 5) && (final_delay != 10) && (final_delay != 20) && (final_delay != 25) && (final_delay != 50) && (final_delay != (size_t) image->ticks_per_second)) mng_info->need_fram=MagickTrue; /* make it exact; cannot be VLC */ } if (mng_info->need_fram != MagickFalse) mng_info->ticks_per_second=1UL*image->ticks_per_second; /* If pseudocolor, we should also check to see if all the palettes are identical and write a global PLTE if they are. ../glennrp Feb 99. */ /* Write the MNG version 1.0 signature and MHDR chunk. */ (void) WriteBlob(image,8,(const unsigned char *) "\212MNG\r\n\032\n"); (void) WriteBlobMSBULong(image,28L); /* chunk data length=28 */ PNGType(chunk,mng_MHDR); LogPNGChunk(logging,mng_MHDR,28L); PNGLong(chunk+4,(png_uint_32) mng_info->page.width); PNGLong(chunk+8,(png_uint_32) mng_info->page.height); PNGLong(chunk+12,mng_info->ticks_per_second); PNGLong(chunk+16,0L); /* layer count=unknown */ PNGLong(chunk+20,0L); /* frame count=unknown */ PNGLong(chunk+24,0L); /* play time=unknown */ if (write_jng) { if (need_matte) { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,27L); /* simplicity=LC+JNG */ else PNGLong(chunk+28,25L); /* simplicity=VLC+JNG */ } else { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,19L); /* simplicity=LC+JNG, no transparency */ else PNGLong(chunk+28,17L); /* simplicity=VLC+JNG, no transparency */ } } else { if (need_matte) { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,11L); /* simplicity=LC */ else PNGLong(chunk+28,9L); /* simplicity=VLC */ } else { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,3L); /* simplicity=LC, no transparency */ else PNGLong(chunk+28,1L); /* simplicity=VLC, no transparency */ } } (void) WriteBlob(image,32,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,32)); option=GetImageOption(image_info,"mng:need-cacheoff"); if (option != (const char *) NULL) { size_t length; /* Write "nEED CACHEOFF" to turn playback caching off for streaming MNG. 
*/ PNGType(chunk,mng_nEED); length=CopyMagickString((char *) chunk+4,"CACHEOFF",20); (void) WriteBlobMSBULong(image,(size_t) length); LogPNGChunk(logging,mng_nEED,(size_t) length); length+=4; (void) WriteBlob(image,length,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) length)); } if ((GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) != (Image *) NULL) && (image->iterations != 1)) { /* Write MNG TERM chunk */ (void) WriteBlobMSBULong(image,10L); /* data length=10 */ PNGType(chunk,mng_TERM); LogPNGChunk(logging,mng_TERM,10L); chunk[4]=3; /* repeat animation */ chunk[5]=0; /* show last frame when done */ PNGLong(chunk+6,(png_uint_32) (mng_info->ticks_per_second* final_delay/MagickMax(image->ticks_per_second,1))); if (image->iterations == 0) PNGLong(chunk+10,PNG_UINT_31_MAX); else PNGLong(chunk+10,(png_uint_32) image->iterations); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " TERM delay: %.20g",(double) (mng_info->ticks_per_second* final_delay/MagickMax(image->ticks_per_second,1))); if (image->iterations == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " TERM iterations: %.20g",(double) PNG_UINT_31_MAX); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image iterations: %.20g",(double) image->iterations); } (void) WriteBlob(image,14,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,14)); } /* To do: check for cHRM+gAMA == sRGB, and write sRGB instead. */ if ((image->colorspace == sRGBColorspace || image->rendering_intent) && mng_info->equal_srgbs) { /* Write MNG sRGB chunk */ (void) WriteBlobMSBULong(image,1L); PNGType(chunk,mng_sRGB); LogPNGChunk(logging,mng_sRGB,1L); if (image->rendering_intent != UndefinedIntent) chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (image->rendering_intent)); else chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (PerceptualIntent)); (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); mng_info->have_write_global_srgb=MagickTrue; } else { if (image->gamma && mng_info->equal_gammas) { /* Write MNG gAMA chunk */ (void) WriteBlobMSBULong(image,4L); PNGType(chunk,mng_gAMA); LogPNGChunk(logging,mng_gAMA,4L); PNGLong(chunk+4,(png_uint_32) (100000*image->gamma+0.5)); (void) WriteBlob(image,8,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,8)); mng_info->have_write_global_gama=MagickTrue; } if (mng_info->equal_chrms) { PrimaryInfo primary; /* Write MNG cHRM chunk */ (void) WriteBlobMSBULong(image,32L); PNGType(chunk,mng_cHRM); LogPNGChunk(logging,mng_cHRM,32L); primary=image->chromaticity.white_point; PNGLong(chunk+4,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+8,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.red_primary; PNGLong(chunk+12,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+16,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.green_primary; PNGLong(chunk+20,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+24,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.blue_primary; PNGLong(chunk+28,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+32,(png_uint_32) (100000*primary.y+0.5)); (void) WriteBlob(image,36,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,36)); mng_info->have_write_global_chrm=MagickTrue; } } if (image->x_resolution && image->y_resolution && mng_info->equal_physs) { /* Write MNG pHYs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_pHYs); LogPNGChunk(logging,mng_pHYs,9L); if 
(image->units == PixelsPerInchResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0/2.54+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0/2.54+0.5)); chunk[12]=1; } else { if (image->units == PixelsPerCentimeterResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0+0.5)); chunk[12]=1; } else { PNGLong(chunk+4,(png_uint_32) (image->x_resolution+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution+0.5)); chunk[12]=0; } } (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } /* Write MNG BACK chunk and global bKGD chunk, if the image is transparent or does not cover the entire frame. */ if (write_mng && (image->matte || image->page.x > 0 || image->page.y > 0 || (image->page.width && (image->page.width+image->page.x < mng_info->page.width)) || (image->page.height && (image->page.height+image->page.y < mng_info->page.height)))) { (void) WriteBlobMSBULong(image,6L); PNGType(chunk,mng_BACK); LogPNGChunk(logging,mng_BACK,6L); red=ScaleQuantumToShort(image->background_color.red); green=ScaleQuantumToShort(image->background_color.green); blue=ScaleQuantumToShort(image->background_color.blue); PNGShort(chunk+4,red); PNGShort(chunk+6,green); PNGShort(chunk+8,blue); (void) WriteBlob(image,10,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,10)); if (mng_info->equal_backgrounds) { (void) WriteBlobMSBULong(image,6L); PNGType(chunk,mng_bKGD); LogPNGChunk(logging,mng_bKGD,6L); (void) WriteBlob(image,10,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,10)); } } #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED if ((need_local_plte == MagickFalse) && (image->storage_class == PseudoClass) && (all_images_are_gray == MagickFalse)) { size_t data_length; /* Write MNG PLTE chunk */ data_length=3*image->colors; (void) WriteBlobMSBULong(image,data_length); PNGType(chunk,mng_PLTE); LogPNGChunk(logging,mng_PLTE,data_length); for (i=0; i < (ssize_t) image->colors; i++) { chunk[4+i*3]=ScaleQuantumToChar(image->colormap[i].red) & 0xff; chunk[5+i*3]=ScaleQuantumToChar(image->colormap[i].green) & 0xff; chunk[6+i*3]=ScaleQuantumToChar(image->colormap[i].blue) & 0xff; } (void) WriteBlob(image,data_length+4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) (data_length+4))); mng_info->have_write_global_plte=MagickTrue; } #endif } scene=0; mng_info->delay=0; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) mng_info->equal_palettes=MagickFalse; #endif imageListLength=GetImageListLength(image); do { if (mng_info->adjoin) { #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) /* If we aren't using a global palette for the entire MNG, check to see if we can use one for two or more consecutive images. 
*/ if (need_local_plte && use_global_plte && !all_images_are_gray) { if (mng_info->IsPalette) { /* When equal_palettes is true, this image has the same palette as the previous PseudoClass image */ mng_info->have_write_global_plte=mng_info->equal_palettes; mng_info->equal_palettes=PalettesAreEqual(image,image->next); if (mng_info->equal_palettes && !mng_info->have_write_global_plte) { /* Write MNG PLTE chunk */ size_t data_length; data_length=3*image->colors; (void) WriteBlobMSBULong(image,data_length); PNGType(chunk,mng_PLTE); LogPNGChunk(logging,mng_PLTE,data_length); for (i=0; i < (ssize_t) image->colors; i++) { chunk[4+i*3]=ScaleQuantumToChar(image->colormap[i].red); chunk[5+i*3]=ScaleQuantumToChar(image->colormap[i].green); chunk[6+i*3]=ScaleQuantumToChar(image->colormap[i].blue); } (void) WriteBlob(image,data_length+4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk, (uInt) (data_length+4))); mng_info->have_write_global_plte=MagickTrue; } } else mng_info->have_write_global_plte=MagickFalse; } #endif if (need_defi) { ssize_t previous_x, previous_y; if (scene != 0) { previous_x=mng_info->page.x; previous_y=mng_info->page.y; } else { previous_x=0; previous_y=0; } mng_info->page=image->page; if ((mng_info->page.x != previous_x) || (mng_info->page.y != previous_y)) { (void) WriteBlobMSBULong(image,12L); /* data length=12 */ PNGType(chunk,mng_DEFI); LogPNGChunk(logging,mng_DEFI,12L); chunk[4]=0; /* object 0 MSB */ chunk[5]=0; /* object 0 LSB */ chunk[6]=0; /* visible */ chunk[7]=0; /* abstract */ PNGLong(chunk+8,(png_uint_32) mng_info->page.x); PNGLong(chunk+12,(png_uint_32) mng_info->page.y); (void) WriteBlob(image,16,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,16)); } } } mng_info->write_mng=write_mng; if ((int) image->dispose >= 3) mng_info->framing_mode=3; if (mng_info->need_fram && mng_info->adjoin && ((image->delay != mng_info->delay) || (mng_info->framing_mode != mng_info->old_framing_mode))) { if (image->delay == mng_info->delay) { /* Write a MNG FRAM chunk with the new framing mode. */ (void) WriteBlobMSBULong(image,1L); /* data length=1 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,1L); chunk[4]=(unsigned char) mng_info->framing_mode; (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); } else { /* Write a MNG FRAM chunk with the delay. */ (void) WriteBlobMSBULong(image,10L); /* data length=10 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,10L); chunk[4]=(unsigned char) mng_info->framing_mode; chunk[5]=0; /* frame name separator (no name) */ chunk[6]=2; /* flag for changing default delay */ chunk[7]=0; /* flag for changing frame timeout */ chunk[8]=0; /* flag for changing frame clipping */ chunk[9]=0; /* flag for changing frame sync_id */ PNGLong(chunk+10,(png_uint_32) ((mng_info->ticks_per_second* image->delay)/MagickMax(image->ticks_per_second,1))); (void) WriteBlob(image,14,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,14)); mng_info->delay=(png_uint_32) image->delay; } mng_info->old_framing_mode=mng_info->framing_mode; } #if defined(JNG_SUPPORTED) if (image_info->compression == JPEGCompression) { ImageInfo *write_info; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing JNG object."); /* To do: specify the desired alpha compression method. 
*/ write_info=CloneImageInfo(image_info); write_info->compression=UndefinedCompression; status=WriteOneJNGImage(mng_info,write_info,image); write_info=DestroyImageInfo(write_info); } else #endif { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG object."); mng_info->need_blob = MagickFalse; mng_info->ping_preserve_colormap = MagickFalse; /* We don't want any ancillary chunks written */ mng_info->ping_exclude_bKGD=MagickTrue; mng_info->ping_exclude_caNv=MagickTrue; mng_info->ping_exclude_cHRM=MagickTrue; mng_info->ping_exclude_date=MagickTrue; mng_info->ping_exclude_EXIF=MagickTrue; mng_info->ping_exclude_eXIf=MagickTrue; mng_info->ping_exclude_gAMA=MagickTrue; mng_info->ping_exclude_iCCP=MagickTrue; /* mng_info->ping_exclude_iTXt=MagickTrue; */ mng_info->ping_exclude_oFFs=MagickTrue; mng_info->ping_exclude_pHYs=MagickTrue; mng_info->ping_exclude_sRGB=MagickTrue; mng_info->ping_exclude_tEXt=MagickTrue; mng_info->ping_exclude_tRNS=MagickTrue; mng_info->ping_exclude_zCCP=MagickTrue; mng_info->ping_exclude_zTXt=MagickTrue; status=WriteOnePNGImage(mng_info,image_info,image); } if (status == MagickFalse) { mng_info=MngInfoFreeStruct(mng_info); (void) CloseBlob(image); return(MagickFalse); } (void) CatchImageException(image); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength); if (status == MagickFalse) break; } while (mng_info->adjoin); if (write_mng) { while (GetPreviousImageInList(image) != (Image *) NULL) image=GetPreviousImageInList(image); /* Write the MEND chunk. */ (void) WriteBlobMSBULong(image,0x00000000L); PNGType(chunk,mng_MEND); LogPNGChunk(logging,mng_MEND,0L); (void) WriteBlob(image,4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,4)); } /* Relinquish resources. */ (void) CloseBlob(image); mng_info=MngInfoFreeStruct(mng_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit WriteMNGImage()"); return(MagickTrue); } #else /* PNG_LIBPNG_VER > 10011 */ static MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) { (void) image; printf("Your PNG library is too old: You have libpng-%s\n", PNG_LIBPNG_VER_STRING); ThrowBinaryException(CoderError,"PNG library is too old", image_info->filename); } static MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) { return(WritePNGImage(image_info,image)); } #endif /* PNG_LIBPNG_VER > 10011 */ #endif
./CrossVul/dataset_final_sorted/CWE-617/c/good_370_0
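Aside on the record above: the JNG/MNG coder frames every chunk the same way — a 4-byte big-endian data length, the 4-byte chunk type, the payload, then a CRC-32 computed over the type and payload only (never over the length field). As a minimal sketch of that framing, assuming zlib for crc32() and plain stdio in place of ImageMagick's WriteBlob()/WriteBlobMSBULong() blob layer, the hypothetical helpers put_msb32() and write_mng_chunk() below reproduce the byte sequence the coder emits for MHDR, TERM, FRAM, MEND and the rest; they are illustrative only and not part of the dataset record.

#include <stdio.h>
#include <zlib.h>

/* Write a 32-bit value in big-endian (network) order, as
   WriteBlobMSBULong() does in the coder above. */
static void put_msb32(FILE *fp,unsigned long value)
{
  unsigned char
    b[4];

  b[0]=(unsigned char) ((value >> 24) & 0xff);
  b[1]=(unsigned char) ((value >> 16) & 0xff);
  b[2]=(unsigned char) ((value >> 8) & 0xff);
  b[3]=(unsigned char) (value & 0xff);
  (void) fwrite(b,1,4,fp);
}

/* Hypothetical helper: emit one PNG/MNG-style chunk.  The CRC-32 covers
   the 4-byte type plus the payload, never the length field. */
static void write_mng_chunk(FILE *fp,const char type[4],
  const unsigned char *data,size_t length)
{
  uLong
    crc;

  put_msb32(fp,(unsigned long) length);      /* data length */
  (void) fwrite(type,1,4,fp);                /* chunk type */
  if (length != 0)
    (void) fwrite(data,1,length,fp);         /* payload */
  crc=crc32(0L,(const Bytef *) type,4);      /* CRC over type... */
  if (length != 0)
    crc=crc32(crc,(const Bytef *) data,(uInt) length);  /* ...and payload */
  put_msb32(fp,(unsigned long) crc);
}

For example, write_mng_chunk(fp,"MEND",NULL,0) emits the same 12 bytes the coder produces with WriteBlobMSBULong(image,0x00000000L), PNGType()/WriteBlob(), and crc32(0,chunk,4); likewise the 28-byte MHDR above is written as length 28, type, 28 data bytes, and crc32(0,chunk,32) over the 32 type+data bytes.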
crossvul-cpp_data_bad_2523_0
/* * kvm eventfd support - use eventfd objects to signal various KVM events * * Copyright 2009 Novell. All Rights Reserved. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Author: * Gregory Haskins <ghaskins@novell.com> * * This file is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/kvm_irqfd.h> #include <linux/workqueue.h> #include <linux/syscalls.h> #include <linux/wait.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/list.h> #include <linux/eventfd.h> #include <linux/kernel.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/seqlock.h> #include <linux/irqbypass.h> #include <trace/events/kvm.h> #include <kvm/iodev.h> #ifdef CONFIG_HAVE_KVM_IRQFD static struct workqueue_struct *irqfd_cleanup_wq; static void irqfd_inject(struct work_struct *work) { struct kvm_kernel_irqfd *irqfd = container_of(work, struct kvm_kernel_irqfd, inject); struct kvm *kvm = irqfd->kvm; if (!irqfd->resampler) { kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, false); kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, false); } else kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, irqfd->gsi, 1, false); } /* * Since resampler irqfds share an IRQ source ID, we de-assert once * then notify all of the resampler irqfds using this GSI. We can't * do multiple de-asserts or we risk racing with incoming re-asserts. */ static void irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian) { struct kvm_kernel_irqfd_resampler *resampler; struct kvm *kvm; struct kvm_kernel_irqfd *irqfd; int idx; resampler = container_of(kian, struct kvm_kernel_irqfd_resampler, notifier); kvm = resampler->kvm; kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, resampler->notifier.gsi, 0, false); idx = srcu_read_lock(&kvm->irq_srcu); list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link) eventfd_signal(irqfd->resamplefd, 1); srcu_read_unlock(&kvm->irq_srcu, idx); } static void irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd) { struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler; struct kvm *kvm = resampler->kvm; mutex_lock(&kvm->irqfds.resampler_lock); list_del_rcu(&irqfd->resampler_link); synchronize_srcu(&kvm->irq_srcu); if (list_empty(&resampler->list)) { list_del(&resampler->link); kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier); kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, resampler->notifier.gsi, 0, false); kfree(resampler); } mutex_unlock(&kvm->irqfds.resampler_lock); } /* * Race-free decouple logic (ordering is critical) */ static void irqfd_shutdown(struct work_struct *work) { struct kvm_kernel_irqfd *irqfd = container_of(work, struct kvm_kernel_irqfd, shutdown); u64 cnt; /* * Synchronize with the wait-queue and unhook ourselves to prevent * further events. 
*/ eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt); /* * We know no new events will be scheduled at this point, so block * until all previously outstanding events have completed */ flush_work(&irqfd->inject); if (irqfd->resampler) { irqfd_resampler_shutdown(irqfd); eventfd_ctx_put(irqfd->resamplefd); } /* * It is now safe to release the object's resources */ #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS irq_bypass_unregister_consumer(&irqfd->consumer); #endif eventfd_ctx_put(irqfd->eventfd); kfree(irqfd); } /* assumes kvm->irqfds.lock is held */ static bool irqfd_is_active(struct kvm_kernel_irqfd *irqfd) { return list_empty(&irqfd->list) ? false : true; } /* * Mark the irqfd as inactive and schedule it for removal * * assumes kvm->irqfds.lock is held */ static void irqfd_deactivate(struct kvm_kernel_irqfd *irqfd) { BUG_ON(!irqfd_is_active(irqfd)); list_del_init(&irqfd->list); queue_work(irqfd_cleanup_wq, &irqfd->shutdown); } int __attribute__((weak)) kvm_arch_set_irq_inatomic( struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm, int irq_source_id, int level, bool line_status) { return -EWOULDBLOCK; } /* * Called with wqh->lock held and interrupts disabled */ static int irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { struct kvm_kernel_irqfd *irqfd = container_of(wait, struct kvm_kernel_irqfd, wait); unsigned long flags = (unsigned long)key; struct kvm_kernel_irq_routing_entry irq; struct kvm *kvm = irqfd->kvm; unsigned seq; int idx; if (flags & POLLIN) { idx = srcu_read_lock(&kvm->irq_srcu); do { seq = read_seqcount_begin(&irqfd->irq_entry_sc); irq = irqfd->irq_entry; } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq)); /* An event has been signaled, inject an interrupt */ if (kvm_arch_set_irq_inatomic(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false) == -EWOULDBLOCK) schedule_work(&irqfd->inject); srcu_read_unlock(&kvm->irq_srcu, idx); } if (flags & POLLHUP) { /* The eventfd is closing, detach from KVM */ unsigned long flags; spin_lock_irqsave(&kvm->irqfds.lock, flags); /* * We must check if someone deactivated the irqfd before * we could acquire the irqfds.lock since the item is * deactivated from the KVM side before it is unhooked from * the wait-queue. If it is already deactivated, we can * simply return knowing the other side will cleanup for us. 
* We cannot race against the irqfd going away since the * other side is required to acquire wqh->lock, which we hold */ if (irqfd_is_active(irqfd)) irqfd_deactivate(irqfd); spin_unlock_irqrestore(&kvm->irqfds.lock, flags); } return 0; } static void irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, poll_table *pt) { struct kvm_kernel_irqfd *irqfd = container_of(pt, struct kvm_kernel_irqfd, pt); add_wait_queue(wqh, &irqfd->wait); } /* Must be called under irqfds.lock */ static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd) { struct kvm_kernel_irq_routing_entry *e; struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS]; int n_entries; n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi); write_seqcount_begin(&irqfd->irq_entry_sc); e = entries; if (n_entries == 1) irqfd->irq_entry = *e; else irqfd->irq_entry.type = 0; write_seqcount_end(&irqfd->irq_entry_sc); } #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS void __attribute__((weak)) kvm_arch_irq_bypass_stop( struct irq_bypass_consumer *cons) { } void __attribute__((weak)) kvm_arch_irq_bypass_start( struct irq_bypass_consumer *cons) { } int __attribute__((weak)) kvm_arch_update_irqfd_routing( struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set) { return 0; } #endif static int kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) { struct kvm_kernel_irqfd *irqfd, *tmp; struct fd f; struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL; int ret; unsigned int events; int idx; if (!kvm_arch_intc_initialized(kvm)) return -EAGAIN; irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); if (!irqfd) return -ENOMEM; irqfd->kvm = kvm; irqfd->gsi = args->gsi; INIT_LIST_HEAD(&irqfd->list); INIT_WORK(&irqfd->inject, irqfd_inject); INIT_WORK(&irqfd->shutdown, irqfd_shutdown); seqcount_init(&irqfd->irq_entry_sc); f = fdget(args->fd); if (!f.file) { ret = -EBADF; goto out; } eventfd = eventfd_ctx_fileget(f.file); if (IS_ERR(eventfd)) { ret = PTR_ERR(eventfd); goto fail; } irqfd->eventfd = eventfd; if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) { struct kvm_kernel_irqfd_resampler *resampler; resamplefd = eventfd_ctx_fdget(args->resamplefd); if (IS_ERR(resamplefd)) { ret = PTR_ERR(resamplefd); goto fail; } irqfd->resamplefd = resamplefd; INIT_LIST_HEAD(&irqfd->resampler_link); mutex_lock(&kvm->irqfds.resampler_lock); list_for_each_entry(resampler, &kvm->irqfds.resampler_list, link) { if (resampler->notifier.gsi == irqfd->gsi) { irqfd->resampler = resampler; break; } } if (!irqfd->resampler) { resampler = kzalloc(sizeof(*resampler), GFP_KERNEL); if (!resampler) { ret = -ENOMEM; mutex_unlock(&kvm->irqfds.resampler_lock); goto fail; } resampler->kvm = kvm; INIT_LIST_HEAD(&resampler->list); resampler->notifier.gsi = irqfd->gsi; resampler->notifier.irq_acked = irqfd_resampler_ack; INIT_LIST_HEAD(&resampler->link); list_add(&resampler->link, &kvm->irqfds.resampler_list); kvm_register_irq_ack_notifier(kvm, &resampler->notifier); irqfd->resampler = resampler; } list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list); synchronize_srcu(&kvm->irq_srcu); mutex_unlock(&kvm->irqfds.resampler_lock); } /* * Install our own custom wake-up handling so we are notified via * a callback whenever someone signals the underlying eventfd */ init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup); init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc); spin_lock_irq(&kvm->irqfds.lock); ret = 0; list_for_each_entry(tmp, &kvm->irqfds.items, list) { if (irqfd->eventfd != tmp->eventfd) continue; /* This fd is used for another irq already. 
*/ ret = -EBUSY; spin_unlock_irq(&kvm->irqfds.lock); goto fail; } idx = srcu_read_lock(&kvm->irq_srcu); irqfd_update(kvm, irqfd); srcu_read_unlock(&kvm->irq_srcu, idx); list_add_tail(&irqfd->list, &kvm->irqfds.items); spin_unlock_irq(&kvm->irqfds.lock); /* * Check if there was an event already pending on the eventfd * before we registered, and trigger it as if we didn't miss it. */ events = f.file->f_op->poll(f.file, &irqfd->pt); if (events & POLLIN) schedule_work(&irqfd->inject); /* * do not drop the file until the irqfd is fully initialized, otherwise * we might race against the POLLHUP */ fdput(f); #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS if (kvm_arch_has_irq_bypass()) { irqfd->consumer.token = (void *)irqfd->eventfd; irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer; irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer; irqfd->consumer.stop = kvm_arch_irq_bypass_stop; irqfd->consumer.start = kvm_arch_irq_bypass_start; ret = irq_bypass_register_consumer(&irqfd->consumer); if (ret) pr_info("irq bypass consumer (token %p) registration fails: %d\n", irqfd->consumer.token, ret); } #endif return 0; fail: if (irqfd->resampler) irqfd_resampler_shutdown(irqfd); if (resamplefd && !IS_ERR(resamplefd)) eventfd_ctx_put(resamplefd); if (eventfd && !IS_ERR(eventfd)) eventfd_ctx_put(eventfd); fdput(f); out: kfree(irqfd); return ret; } bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) { struct kvm_irq_ack_notifier *kian; int gsi, idx; idx = srcu_read_lock(&kvm->irq_srcu); gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); if (gsi != -1) hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, link) if (kian->gsi == gsi) { srcu_read_unlock(&kvm->irq_srcu, idx); return true; } srcu_read_unlock(&kvm->irq_srcu, idx); return false; } EXPORT_SYMBOL_GPL(kvm_irq_has_notifier); void kvm_notify_acked_gsi(struct kvm *kvm, int gsi) { struct kvm_irq_ack_notifier *kian; hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, link) if (kian->gsi == gsi) kian->irq_acked(kian); } void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) { int gsi, idx; trace_kvm_ack_irq(irqchip, pin); idx = srcu_read_lock(&kvm->irq_srcu); gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); if (gsi != -1) kvm_notify_acked_gsi(kvm, gsi); srcu_read_unlock(&kvm->irq_srcu, idx); } void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian) { mutex_lock(&kvm->irq_lock); hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); mutex_unlock(&kvm->irq_lock); kvm_arch_post_irq_ack_notifier_list_update(kvm); } void kvm_unregister_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian) { mutex_lock(&kvm->irq_lock); hlist_del_init_rcu(&kian->link); mutex_unlock(&kvm->irq_lock); synchronize_srcu(&kvm->irq_srcu); kvm_arch_post_irq_ack_notifier_list_update(kvm); } #endif void kvm_eventfd_init(struct kvm *kvm) { #ifdef CONFIG_HAVE_KVM_IRQFD spin_lock_init(&kvm->irqfds.lock); INIT_LIST_HEAD(&kvm->irqfds.items); INIT_LIST_HEAD(&kvm->irqfds.resampler_list); mutex_init(&kvm->irqfds.resampler_lock); #endif INIT_LIST_HEAD(&kvm->ioeventfds); } #ifdef CONFIG_HAVE_KVM_IRQFD /* * shutdown any irqfd's that match fd+gsi */ static int kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args) { struct kvm_kernel_irqfd *irqfd, *tmp; struct eventfd_ctx *eventfd; eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { if (irqfd->eventfd == 
eventfd && irqfd->gsi == args->gsi) { /* * This clearing of irq_entry.type is needed for when * another thread calls kvm_irq_routing_update before * we flush workqueue below (we synchronize with * kvm_irq_routing_update using irqfds.lock). */ write_seqcount_begin(&irqfd->irq_entry_sc); irqfd->irq_entry.type = 0; write_seqcount_end(&irqfd->irq_entry_sc); irqfd_deactivate(irqfd); } } spin_unlock_irq(&kvm->irqfds.lock); eventfd_ctx_put(eventfd); /* * Block until we know all outstanding shutdown jobs have completed * so that we guarantee there will not be any more interrupts on this * gsi once this deassign function returns. */ flush_workqueue(irqfd_cleanup_wq); return 0; } int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) { if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE)) return -EINVAL; if (args->flags & KVM_IRQFD_FLAG_DEASSIGN) return kvm_irqfd_deassign(kvm, args); return kvm_irqfd_assign(kvm, args); } /* * This function is called as the kvm VM fd is being released. Shutdown all * irqfds that still remain open */ void kvm_irqfd_release(struct kvm *kvm) { struct kvm_kernel_irqfd *irqfd, *tmp; spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) irqfd_deactivate(irqfd); spin_unlock_irq(&kvm->irqfds.lock); /* * Block until we know all outstanding shutdown jobs have completed * since we do not take a kvm* reference. */ flush_workqueue(irqfd_cleanup_wq); } /* * Take note of a change in irq routing. * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards. */ void kvm_irq_routing_update(struct kvm *kvm) { struct kvm_kernel_irqfd *irqfd; spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry(irqfd, &kvm->irqfds.items, list) { irqfd_update(kvm, irqfd); #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS if (irqfd->producer) { int ret = kvm_arch_update_irqfd_routing( irqfd->kvm, irqfd->producer->irq, irqfd->gsi, 1); WARN_ON(ret); } #endif } spin_unlock_irq(&kvm->irqfds.lock); } /* * create a host-wide workqueue for issuing deferred shutdown requests * aggregated from all vm* instances. We need our own isolated * queue to ease flushing work items when a VM exits. */ int kvm_irqfd_init(void) { irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0); if (!irqfd_cleanup_wq) return -ENOMEM; return 0; } void kvm_irqfd_exit(void) { destroy_workqueue(irqfd_cleanup_wq); } #endif /* * -------------------------------------------------------------------- * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal. * * userspace can register a PIO/MMIO address with an eventfd for receiving * notification when the memory has been touched. 
* -------------------------------------------------------------------- */ struct _ioeventfd { struct list_head list; u64 addr; int length; struct eventfd_ctx *eventfd; u64 datamatch; struct kvm_io_device dev; u8 bus_idx; bool wildcard; }; static inline struct _ioeventfd * to_ioeventfd(struct kvm_io_device *dev) { return container_of(dev, struct _ioeventfd, dev); } static void ioeventfd_release(struct _ioeventfd *p) { eventfd_ctx_put(p->eventfd); list_del(&p->list); kfree(p); } static bool ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val) { u64 _val; if (addr != p->addr) /* address must be precise for a hit */ return false; if (!p->length) /* length = 0 means only look at the address, so always a hit */ return true; if (len != p->length) /* address-range must be precise for a hit */ return false; if (p->wildcard) /* all else equal, wildcard is always a hit */ return true; /* otherwise, we have to actually compare the data */ BUG_ON(!IS_ALIGNED((unsigned long)val, len)); switch (len) { case 1: _val = *(u8 *)val; break; case 2: _val = *(u16 *)val; break; case 4: _val = *(u32 *)val; break; case 8: _val = *(u64 *)val; break; default: return false; } return _val == p->datamatch ? true : false; } /* MMIO/PIO writes trigger an event if the addr/val match */ static int ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, const void *val) { struct _ioeventfd *p = to_ioeventfd(this); if (!ioeventfd_in_range(p, addr, len, val)) return -EOPNOTSUPP; eventfd_signal(p->eventfd, 1); return 0; } /* * This function is called as KVM is completely shutting down. We do not * need to worry about locking just nuke anything we have as quickly as possible */ static void ioeventfd_destructor(struct kvm_io_device *this) { struct _ioeventfd *p = to_ioeventfd(this); ioeventfd_release(p); } static const struct kvm_io_device_ops ioeventfd_ops = { .write = ioeventfd_write, .destructor = ioeventfd_destructor, }; /* assumes kvm->slots_lock held */ static bool ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p) { struct _ioeventfd *_p; list_for_each_entry(_p, &kvm->ioeventfds, list) if (_p->bus_idx == p->bus_idx && _p->addr == p->addr && (!_p->length || !p->length || (_p->length == p->length && (_p->wildcard || p->wildcard || _p->datamatch == p->datamatch)))) return true; return false; } static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags) { if (flags & KVM_IOEVENTFD_FLAG_PIO) return KVM_PIO_BUS; if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY) return KVM_VIRTIO_CCW_NOTIFY_BUS; return KVM_MMIO_BUS; } static int kvm_assign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_ioeventfd *args) { struct eventfd_ctx *eventfd; struct _ioeventfd *p; int ret; eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { ret = -ENOMEM; goto fail; } INIT_LIST_HEAD(&p->list); p->addr = args->addr; p->bus_idx = bus_idx; p->length = args->len; p->eventfd = eventfd; /* The datamatch feature is optional, otherwise this is a wildcard */ if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH) p->datamatch = args->datamatch; else p->wildcard = true; mutex_lock(&kvm->slots_lock); /* Verify that there isn't a match already */ if (ioeventfd_check_collision(kvm, p)) { ret = -EEXIST; goto unlock_fail; } kvm_iodevice_init(&p->dev, &ioeventfd_ops); ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length, &p->dev); if (ret < 0) goto unlock_fail; kvm_get_bus(kvm, bus_idx)->ioeventfd_count++; 
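/* Registration is now visible on the bus; also track it on the VM-wide ioeventfds list (still under slots_lock) so kvm_deassign_ioeventfd_idx() can find and release it later. */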
list_add_tail(&p->list, &kvm->ioeventfds); mutex_unlock(&kvm->slots_lock); return 0; unlock_fail: mutex_unlock(&kvm->slots_lock); fail: kfree(p); eventfd_ctx_put(eventfd); return ret; } static int kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_ioeventfd *args) { struct _ioeventfd *p, *tmp; struct eventfd_ctx *eventfd; struct kvm_io_bus *bus; int ret = -ENOENT; eventfd = eventfd_ctx_fdget(args->fd); if (IS_ERR(eventfd)) return PTR_ERR(eventfd); mutex_lock(&kvm->slots_lock); list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH); if (p->bus_idx != bus_idx || p->eventfd != eventfd || p->addr != args->addr || p->length != args->len || p->wildcard != wildcard) continue; if (!p->wildcard && p->datamatch != args->datamatch) continue; kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); bus = kvm_get_bus(kvm, bus_idx); if (bus) bus->ioeventfd_count--; ioeventfd_release(p); ret = 0; break; } mutex_unlock(&kvm->slots_lock); eventfd_ctx_put(eventfd); return ret; } static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags); int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); if (!args->len && bus_idx == KVM_MMIO_BUS) kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); return ret; } static int kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { enum kvm_bus bus_idx; int ret; bus_idx = ioeventfd_bus_from_flags(args->flags); /* must be natural-word sized, or 0 to ignore length */ switch (args->len) { case 0: case 1: case 2: case 4: case 8: break; default: return -EINVAL; } /* check for range overflow */ if (args->addr + args->len < args->addr) return -EINVAL; /* check for extra flags that we don't understand */ if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK) return -EINVAL; /* ioeventfd with no length can't be combined with DATAMATCH */ if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)) return -EINVAL; ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args); if (ret) goto fail; /* When length is ignored, MMIO is also put on a separate bus, for * faster lookups. */ if (!args->len && bus_idx == KVM_MMIO_BUS) { ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); if (ret < 0) goto fast_fail; } return 0; fast_fail: kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); fail: return ret; } int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN) return kvm_deassign_ioeventfd(kvm, args); return kvm_assign_ioeventfd(kvm, args); }
./CrossVul/dataset_final_sorted/CWE-617/c/bad_2523_0
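Aside on the record above: the ioeventfd path decides whether a guest PIO/MMIO write should signal the registered eventfd with a small pure predicate, ioeventfd_in_range(). The user-space sketch below re-implements just that matching rule so it can be unit-tested; struct ioeventfd_match and match_ioeventfd() are hypothetical names, a memcpy() stands in for the kernel's aligned casts (native byte order, so a little-endian host is assumed), and the BUG_ON() alignment check and bus bookkeeping are omitted. It illustrates the three-tier rule: the address must match exactly, a zero-length registration is address-only and always hits, and a non-wildcard registration additionally compares the written value against datamatch.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct ioeventfd_match {
	uint64_t addr;      /* registered guest-physical address */
	int      length;    /* 0 = address-only registration */
	bool     wildcard;  /* true = any written value triggers */
	uint64_t datamatch; /* compared when !wildcard */
};

static bool match_ioeventfd(const struct ioeventfd_match *p,
			    uint64_t addr, int len, const void *val)
{
	uint64_t v = 0;

	if (addr != p->addr)      /* address must be precise for a hit */
		return false;
	if (p->length == 0)       /* length 0: look at the address only */
		return true;
	if (len != p->length)     /* address-range must be precise */
		return false;
	if (p->wildcard)          /* all else equal, wildcard always hits */
		return true;
	switch (len) {            /* otherwise compare the written data */
	case 1: case 2: case 4: case 8:
		memcpy(&v, val, (size_t) len);  /* little-endian host assumed */
		break;
	default:
		return false;
	}
	return v == p->datamatch;
}

int main(void)
{
	struct ioeventfd_match m = { 0x1000, 4, false, 0x42 };
	uint32_t hit = 0x42, miss = 0x43;

	assert(match_ioeventfd(&m, 0x1000, 4, &hit));
	assert(!match_ioeventfd(&m, 0x1000, 4, &miss));  /* wrong value */
	assert(!match_ioeventfd(&m, 0x1004, 4, &hit));   /* wrong address */
	return 0;
}

The kernel version additionally BUG_ON()s a misaligned val and relies on ioeventfd_check_collision() at registration time to reject overlapping wildcard/datamatch entries, as seen in the file above.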
crossvul-cpp_data_bad_2524_0
/* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include "irq.h" #include "mmu.h" #include "cpuid.h" #include "lapic.h" #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include <linux/mod_devicetable.h> #include <linux/trace_events.h> #include <linux/slab.h> #include <linux/tboot.h> #include <linux/hrtimer.h> #include <linux/frame.h> #include "kvm_cache_regs.h" #include "x86.h" #include <asm/cpu.h> #include <asm/io.h> #include <asm/desc.h> #include <asm/vmx.h> #include <asm/virtext.h> #include <asm/mce.h> #include <asm/fpu/internal.h> #include <asm/perf_event.h> #include <asm/debugreg.h> #include <asm/kexec.h> #include <asm/apic.h> #include <asm/irq_remapping.h> #include <asm/mmu_context.h> #include "trace.h" #include "pmu.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) #define __ex_clear(x, reg) \ ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); static const struct x86_cpu_id vmx_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_VMX), {} }; MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); static bool __read_mostly enable_vpid = 1; module_param_named(vpid, enable_vpid, bool, 0444); static bool __read_mostly flexpriority_enabled = 1; module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); static bool __read_mostly enable_ept = 1; module_param_named(ept, enable_ept, bool, S_IRUGO); static bool __read_mostly enable_unrestricted_guest = 1; module_param_named(unrestricted_guest, enable_unrestricted_guest, bool, S_IRUGO); static bool __read_mostly enable_ept_ad_bits = 1; module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); static bool __read_mostly emulate_invalid_guest_state = true; module_param(emulate_invalid_guest_state, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); static bool __read_mostly enable_apicv = 1; module_param(enable_apicv, bool, S_IRUGO); static bool __read_mostly enable_shadow_vmcs = 1; module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); /* * If nested=1, nested virtualization is supported, i.e., guests may use * VMX and be a hypervisor for its own guests. If nested=0, guests may not * use VMX instructions. */ static bool __read_mostly nested = 0; module_param(nested, bool, S_IRUGO); static u64 __read_mostly host_xss; static bool __read_mostly enable_pml = 1; module_param_named(pml, enable_pml, bool, S_IRUGO); #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL /* Guest_tsc -> host_tsc conversion requires 64-bit division. 
*/ static int __read_mostly cpu_preemption_timer_multi; static bool __read_mostly enable_preemption_timer = 1; #ifdef CONFIG_X86_64 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); #endif #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE) #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_CR4_GUEST_OWNED_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD) #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 /* * Hyper-V requires all of these, so mark them as supported even though * they are just treated the same as all-context. */ #define VMX_VPID_EXTENT_SUPPORTED_MASK \ (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \ VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \ VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT) /* * These 2 parameters are used to config the controls for Pause-Loop Exiting: * ple_gap: upper bound on the amount of time between two successive * executions of PAUSE in a loop. Also indicate if ple enabled. * According to test, this time is usually smaller than 128 cycles. * ple_window: upper bound on the amount of time a guest is allowed to execute * in a PAUSE loop. Tests indicate that most spinlocks are held for * less than 2^12 cycles * Time is measured based on a counter that runs at the same rate as the TSC, * refer SDM volume 3b section 21.6.13 & 22.1.3. */ #define KVM_VMX_DEFAULT_PLE_GAP 128 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \ INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP; module_param(ple_gap, int, S_IRUGO); static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; module_param(ple_window, int, S_IRUGO); /* Default doubles per-vcpu window every exit. */ static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW; module_param(ple_window_grow, int, S_IRUGO); /* Default resets per-vcpu window every exit to ple_window. */ static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK; module_param(ple_window_shrink, int, S_IRUGO); /* Default is to compute the maximum so we can never overflow. */ static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; module_param(ple_window_max, int, S_IRUGO); extern const ulong vmx_return; #define NR_AUTOLOAD_MSRS 8 #define VMCS02_POOL_SIZE 1 struct vmcs { u32 revision_id; u32 abort; char data[0]; }; /* * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs * loaded on this CPU (so we can clear them if the CPU goes down). */ struct loaded_vmcs { struct vmcs *vmcs; struct vmcs *shadow_vmcs; int cpu; bool launched; bool nmi_known_unmasked; struct list_head loaded_vmcss_on_cpu_link; }; struct shared_msr_entry { unsigned index; u64 data; u64 mask; }; /* * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a * single nested guest (L2), hence the name vmcs12. 
Any VMX implementation has * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is * stored in guest memory specified by VMPTRLD, but is opaque to the guest, * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. * More than one of these structures may exist, if L1 runs multiple L2 guests. * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the * underlying hardware which will be used to run L2. * This structure is packed to ensure that its layout is identical across * machines (necessary for live migration). * If there are changes in this struct, VMCS12_REVISION must be changed. */ typedef u64 natural_width; struct __packed vmcs12 { /* According to the Intel spec, a VMCS region must start with the * following two fields. Then follow implementation-specific data. */ u32 revision_id; u32 abort; u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ u32 padding[7]; /* room for future expansion */ u64 io_bitmap_a; u64 io_bitmap_b; u64 msr_bitmap; u64 vm_exit_msr_store_addr; u64 vm_exit_msr_load_addr; u64 vm_entry_msr_load_addr; u64 tsc_offset; u64 virtual_apic_page_addr; u64 apic_access_addr; u64 posted_intr_desc_addr; u64 vm_function_control; u64 ept_pointer; u64 eoi_exit_bitmap0; u64 eoi_exit_bitmap1; u64 eoi_exit_bitmap2; u64 eoi_exit_bitmap3; u64 eptp_list_address; u64 xss_exit_bitmap; u64 guest_physical_address; u64 vmcs_link_pointer; u64 pml_address; u64 guest_ia32_debugctl; u64 guest_ia32_pat; u64 guest_ia32_efer; u64 guest_ia32_perf_global_ctrl; u64 guest_pdptr0; u64 guest_pdptr1; u64 guest_pdptr2; u64 guest_pdptr3; u64 guest_bndcfgs; u64 host_ia32_pat; u64 host_ia32_efer; u64 host_ia32_perf_global_ctrl; u64 padding64[8]; /* room for future expansion */ /* * To allow migration of L1 (complete with its L2 guests) between * machines of different natural widths (32 or 64 bit), we cannot have * unsigned long fields with no explict size. We use u64 (aliased * natural_width) instead. Luckily, x86 is little-endian. 
*/ natural_width cr0_guest_host_mask; natural_width cr4_guest_host_mask; natural_width cr0_read_shadow; natural_width cr4_read_shadow; natural_width cr3_target_value0; natural_width cr3_target_value1; natural_width cr3_target_value2; natural_width cr3_target_value3; natural_width exit_qualification; natural_width guest_linear_address; natural_width guest_cr0; natural_width guest_cr3; natural_width guest_cr4; natural_width guest_es_base; natural_width guest_cs_base; natural_width guest_ss_base; natural_width guest_ds_base; natural_width guest_fs_base; natural_width guest_gs_base; natural_width guest_ldtr_base; natural_width guest_tr_base; natural_width guest_gdtr_base; natural_width guest_idtr_base; natural_width guest_dr7; natural_width guest_rsp; natural_width guest_rip; natural_width guest_rflags; natural_width guest_pending_dbg_exceptions; natural_width guest_sysenter_esp; natural_width guest_sysenter_eip; natural_width host_cr0; natural_width host_cr3; natural_width host_cr4; natural_width host_fs_base; natural_width host_gs_base; natural_width host_tr_base; natural_width host_gdtr_base; natural_width host_idtr_base; natural_width host_ia32_sysenter_esp; natural_width host_ia32_sysenter_eip; natural_width host_rsp; natural_width host_rip; natural_width paddingl[8]; /* room for future expansion */ u32 pin_based_vm_exec_control; u32 cpu_based_vm_exec_control; u32 exception_bitmap; u32 page_fault_error_code_mask; u32 page_fault_error_code_match; u32 cr3_target_count; u32 vm_exit_controls; u32 vm_exit_msr_store_count; u32 vm_exit_msr_load_count; u32 vm_entry_controls; u32 vm_entry_msr_load_count; u32 vm_entry_intr_info_field; u32 vm_entry_exception_error_code; u32 vm_entry_instruction_len; u32 tpr_threshold; u32 secondary_vm_exec_control; u32 vm_instruction_error; u32 vm_exit_reason; u32 vm_exit_intr_info; u32 vm_exit_intr_error_code; u32 idt_vectoring_info_field; u32 idt_vectoring_error_code; u32 vm_exit_instruction_len; u32 vmx_instruction_info; u32 guest_es_limit; u32 guest_cs_limit; u32 guest_ss_limit; u32 guest_ds_limit; u32 guest_fs_limit; u32 guest_gs_limit; u32 guest_ldtr_limit; u32 guest_tr_limit; u32 guest_gdtr_limit; u32 guest_idtr_limit; u32 guest_es_ar_bytes; u32 guest_cs_ar_bytes; u32 guest_ss_ar_bytes; u32 guest_ds_ar_bytes; u32 guest_fs_ar_bytes; u32 guest_gs_ar_bytes; u32 guest_ldtr_ar_bytes; u32 guest_tr_ar_bytes; u32 guest_interruptibility_info; u32 guest_activity_state; u32 guest_sysenter_cs; u32 host_ia32_sysenter_cs; u32 vmx_preemption_timer_value; u32 padding32[7]; /* room for future expansion */ u16 virtual_processor_id; u16 posted_intr_nv; u16 guest_es_selector; u16 guest_cs_selector; u16 guest_ss_selector; u16 guest_ds_selector; u16 guest_fs_selector; u16 guest_gs_selector; u16 guest_ldtr_selector; u16 guest_tr_selector; u16 guest_intr_status; u16 guest_pml_index; u16 host_es_selector; u16 host_cs_selector; u16 host_ss_selector; u16 host_ds_selector; u16 host_fs_selector; u16 host_gs_selector; u16 host_tr_selector; }; /* * VMCS12_REVISION is an arbitrary id that should be changed if the content or * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and * VMPTRLD verifies that the VMCS region that L1 is loading contains this id. */ #define VMCS12_REVISION 0x11e57ed0 /* * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region * and any VMCS region. Although only sizeof(struct vmcs12) are used by the * current implementation, 4K are reserved to avoid future complications. 
*/ #define VMCS12_SIZE 0x1000 /* Used to remember the last vmcs02 used for some recently used vmcs12s */ struct vmcs02_list { struct list_head list; gpa_t vmptr; struct loaded_vmcs vmcs02; }; /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. */ struct nested_vmx { /* Has the level1 guest done vmxon? */ bool vmxon; gpa_t vmxon_ptr; bool pml_full; /* The guest-physical address of the current VMCS L1 keeps for L2 */ gpa_t current_vmptr; /* * Cache of the guest's VMCS, existing outside of guest memory. * Loaded from guest memory during VMPTRLD. Flushed to guest * memory during VMCLEAR and VMPTRLD. */ struct vmcs12 *cached_vmcs12; /* * Indicates if the shadow vmcs must be updated with the * data hold by vmcs12 */ bool sync_shadow_vmcs; /* vmcs02_list cache of VMCSs recently used to run L2 guests */ struct list_head vmcs02_pool; int vmcs02_num; bool change_vmcs01_virtual_x2apic_mode; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; /* * Guest pages referred to in vmcs02 with host-physical pointers, so * we must keep them pinned while L2 runs. */ struct page *apic_access_page; struct page *virtual_apic_page; struct page *pi_desc_page; struct pi_desc *pi_desc; bool pi_pending; u16 posted_intr_nv; unsigned long *msr_bitmap; struct hrtimer preemption_timer; bool preemption_timer_expired; /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */ u64 vmcs01_debugctl; u16 vpid02; u16 last_vpid; /* * We only store the "true" versions of the VMX capability MSRs. We * generate the "non-true" versions by setting the must-be-1 bits * according to the SDM. */ u32 nested_vmx_procbased_ctls_low; u32 nested_vmx_procbased_ctls_high; u32 nested_vmx_secondary_ctls_low; u32 nested_vmx_secondary_ctls_high; u32 nested_vmx_pinbased_ctls_low; u32 nested_vmx_pinbased_ctls_high; u32 nested_vmx_exit_ctls_low; u32 nested_vmx_exit_ctls_high; u32 nested_vmx_entry_ctls_low; u32 nested_vmx_entry_ctls_high; u32 nested_vmx_misc_low; u32 nested_vmx_misc_high; u32 nested_vmx_ept_caps; u32 nested_vmx_vpid_caps; u64 nested_vmx_basic; u64 nested_vmx_cr0_fixed0; u64 nested_vmx_cr0_fixed1; u64 nested_vmx_cr4_fixed0; u64 nested_vmx_cr4_fixed1; u64 nested_vmx_vmcs_enum; u64 nested_vmx_vmfunc_controls; }; #define POSTED_INTR_ON 0 #define POSTED_INTR_SN 1 /* Posted-Interrupt Descriptor */ struct pi_desc { u32 pir[8]; /* Posted interrupt requested */ union { struct { /* bit 256 - Outstanding Notification */ u16 on : 1, /* bit 257 - Suppress Notification */ sn : 1, /* bit 271:258 - Reserved */ rsvd_1 : 14; /* bit 279:272 - Notification Vector */ u8 nv; /* bit 287:280 - Reserved */ u8 rsvd_2; /* bit 319:288 - Notification Destination */ u32 ndst; }; u64 control; }; u32 rsvd[6]; } __aligned(64); static bool pi_test_and_set_on(struct pi_desc *pi_desc) { return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static bool pi_test_and_clear_on(struct pi_desc *pi_desc) { return test_and_clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) { return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); } static inline void pi_clear_sn(struct pi_desc *pi_desc) { return clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } static inline void pi_set_sn(struct pi_desc *pi_desc) { return set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } static inline void pi_clear_on(struct pi_desc *pi_desc) { 
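/* Atomically clear bit 256 (Outstanding Notification) of the posted-interrupt descriptor; POSTED_INTR_ON is bit 0 of the control word. */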
clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static inline int pi_test_on(struct pi_desc *pi_desc) { return test_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static inline int pi_test_sn(struct pi_desc *pi_desc) { return test_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; u8 fail; u32 exit_intr_info; u32 idt_vectoring_info; ulong rflags; struct shared_msr_entry *guest_msrs; int nmsrs; int save_nmsrs; unsigned long host_idt_base; #ifdef CONFIG_X86_64 u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; u32 secondary_exec_control; /* * loaded_vmcs points to the VMCS currently used in this vcpu. For a * non-nested (L1) guest, it always points to vmcs01. For a nested * guest (L2), it points to a different VMCS. */ struct loaded_vmcs vmcs01; struct loaded_vmcs *loaded_vmcs; bool __launched; /* temporary, used in vmx_vcpu_run */ struct msr_autoload { unsigned nr; struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; struct vmx_msr_entry host[NR_AUTOLOAD_MSRS]; } msr_autoload; struct { int loaded; u16 fs_sel, gs_sel, ldt_sel; #ifdef CONFIG_X86_64 u16 ds_sel, es_sel; #endif int gs_ldt_reload_needed; int fs_reload_needed; u64 msr_host_bndcfgs; unsigned long vmcs_host_cr3; /* May not match real cr3 */ unsigned long vmcs_host_cr4; /* May not match real cr4 */ } host_state; struct { int vm86_active; ulong save_rflags; struct kvm_segment segs[8]; } rmode; struct { u32 bitmask; /* 4 bits per segment (1 bit per field) */ struct kvm_save_segment { u16 selector; unsigned long base; u32 limit; u32 ar; } seg[8]; } segment_cache; int vpid; bool emulation_required; u32 exit_reason; /* Posted interrupt descriptor */ struct pi_desc pi_desc; /* Support for a guest hypervisor (nested VMX) */ struct nested_vmx nested; /* Dynamic PLE window. */ int ple_window; bool ple_window_dirty; /* Support for PML */ #define PML_ENTITY_NUM 512 struct page *pml_pg; /* apic deadline value in host tsc */ u64 hv_deadline_tsc; u64 current_tsc_ratio; u32 host_pkru; /* * Only bits masked by msr_ia32_feature_control_valid_bits can be set in * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included * in msr_ia32_feature_control_valid_bits. */ u64 msr_ia32_feature_control; u64 msr_ia32_feature_control_valid_bits; }; enum segment_cache_field { SEG_FIELD_SEL = 0, SEG_FIELD_BASE = 1, SEG_FIELD_LIMIT = 2, SEG_FIELD_AR = 3, SEG_FIELD_NR = 4 }; static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_vmx, vcpu); } static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) { return &(to_vmx(vcpu)->pi_desc); } #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x) #define FIELD(number, name) [number] = VMCS12_OFFSET(name) #define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \ [number##_HIGH] = VMCS12_OFFSET(name)+4 static unsigned long shadow_read_only_fields[] = { /* * We do NOT shadow fields that are modified when L0 * traps and emulates any vmx instruction (e.g. VMPTRLD, * VMXON...) executed by L1. * For example, VM_INSTRUCTION_ERROR is read * by L1 if a vmx instruction fails (part of the error path). * Note the code assumes this logic. If for some reason * we start shadowing these fields then we need to * force a shadow sync when L0 emulates vmx instructions * (e.g. 
force a sync if VM_INSTRUCTION_ERROR is modified
 * by nested_vmx_failValid)
 */
static unsigned long shadow_read_only_fields[] = {
	VM_EXIT_REASON,
	VM_EXIT_INTR_INFO,
	VM_EXIT_INSTRUCTION_LEN,
	IDT_VECTORING_INFO_FIELD,
	IDT_VECTORING_ERROR_CODE,
	VM_EXIT_INTR_ERROR_CODE,
	EXIT_QUALIFICATION,
	GUEST_LINEAR_ADDRESS,
	GUEST_PHYSICAL_ADDRESS
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static unsigned long shadow_read_write_fields[] = {
	TPR_THRESHOLD,
	GUEST_RIP,
	GUEST_RSP,
	GUEST_CR0,
	GUEST_CR3,
	GUEST_CR4,
	GUEST_INTERRUPTIBILITY_INFO,
	GUEST_RFLAGS,
	GUEST_CS_SELECTOR,
	GUEST_CS_AR_BYTES,
	GUEST_CS_LIMIT,
	GUEST_CS_BASE,
	GUEST_ES_BASE,
	GUEST_BNDCFGS,
	CR0_GUEST_HOST_MASK,
	CR0_READ_SHADOW,
	CR4_READ_SHADOW,
	TSC_OFFSET,
	EXCEPTION_BITMAP,
	CPU_BASED_VM_EXEC_CONTROL,
	VM_ENTRY_EXCEPTION_ERROR_CODE,
	VM_ENTRY_INTR_INFO_FIELD,
	VM_ENTRY_INSTRUCTION_LEN,
	HOST_FS_BASE,
	HOST_GS_BASE,
	HOST_FS_SELECTOR,
	HOST_GS_SELECTOR
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static const unsigned short vmcs_field_to_offset_table[] = {
	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
	FIELD(POSTED_INTR_NV, posted_intr_nv),
	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
	FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
	FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
	FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
	FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
	FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
	FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
	FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
	FIELD(GUEST_INTR_STATUS, guest_intr_status),
	FIELD(GUEST_PML_INDEX, guest_pml_index),
	FIELD(HOST_ES_SELECTOR, host_es_selector),
	FIELD(HOST_CS_SELECTOR, host_cs_selector),
	FIELD(HOST_SS_SELECTOR, host_ss_selector),
	FIELD(HOST_DS_SELECTOR, host_ds_selector),
	FIELD(HOST_FS_SELECTOR, host_fs_selector),
	FIELD(HOST_GS_SELECTOR, host_gs_selector),
	FIELD(HOST_TR_SELECTOR, host_tr_selector),
	FIELD64(IO_BITMAP_A, io_bitmap_a),
	FIELD64(IO_BITMAP_B, io_bitmap_b),
	FIELD64(MSR_BITMAP, msr_bitmap),
	FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
	FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
	FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
	FIELD64(TSC_OFFSET, tsc_offset),
	FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
	FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
	FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
	FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
	FIELD64(EPT_POINTER, ept_pointer),
	FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
	FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
	FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
	FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
	FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
	FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
	FIELD64(PML_ADDRESS, pml_address),
	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
	FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
	FIELD64(GUEST_PDPTR0, guest_pdptr0),
	FIELD64(GUEST_PDPTR1, guest_pdptr1),
	FIELD64(GUEST_PDPTR2, guest_pdptr2),
	FIELD64(GUEST_PDPTR3, guest_pdptr3),
	FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
	FIELD64(HOST_IA32_PAT, host_ia32_pat),
	FIELD64(HOST_IA32_EFER, host_ia32_efer),
	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
	FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
	FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
	FIELD(EXCEPTION_BITMAP,
exception_bitmap), FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask), FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match), FIELD(CR3_TARGET_COUNT, cr3_target_count), FIELD(VM_EXIT_CONTROLS, vm_exit_controls), FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count), FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count), FIELD(VM_ENTRY_CONTROLS, vm_entry_controls), FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count), FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field), FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code), FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len), FIELD(TPR_THRESHOLD, tpr_threshold), FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control), FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error), FIELD(VM_EXIT_REASON, vm_exit_reason), FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info), FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code), FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field), FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code), FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len), FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info), FIELD(GUEST_ES_LIMIT, guest_es_limit), FIELD(GUEST_CS_LIMIT, guest_cs_limit), FIELD(GUEST_SS_LIMIT, guest_ss_limit), FIELD(GUEST_DS_LIMIT, guest_ds_limit), FIELD(GUEST_FS_LIMIT, guest_fs_limit), FIELD(GUEST_GS_LIMIT, guest_gs_limit), FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit), FIELD(GUEST_TR_LIMIT, guest_tr_limit), FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit), FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit), FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes), FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes), FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes), FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes), FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes), FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes), FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes), FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes), FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info), FIELD(GUEST_ACTIVITY_STATE, guest_activity_state), FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs), FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs), FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value), FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask), FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask), FIELD(CR0_READ_SHADOW, cr0_read_shadow), FIELD(CR4_READ_SHADOW, cr4_read_shadow), FIELD(CR3_TARGET_VALUE0, cr3_target_value0), FIELD(CR3_TARGET_VALUE1, cr3_target_value1), FIELD(CR3_TARGET_VALUE2, cr3_target_value2), FIELD(CR3_TARGET_VALUE3, cr3_target_value3), FIELD(EXIT_QUALIFICATION, exit_qualification), FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address), FIELD(GUEST_CR0, guest_cr0), FIELD(GUEST_CR3, guest_cr3), FIELD(GUEST_CR4, guest_cr4), FIELD(GUEST_ES_BASE, guest_es_base), FIELD(GUEST_CS_BASE, guest_cs_base), FIELD(GUEST_SS_BASE, guest_ss_base), FIELD(GUEST_DS_BASE, guest_ds_base), FIELD(GUEST_FS_BASE, guest_fs_base), FIELD(GUEST_GS_BASE, guest_gs_base), FIELD(GUEST_LDTR_BASE, guest_ldtr_base), FIELD(GUEST_TR_BASE, guest_tr_base), FIELD(GUEST_GDTR_BASE, guest_gdtr_base), FIELD(GUEST_IDTR_BASE, guest_idtr_base), FIELD(GUEST_DR7, guest_dr7), FIELD(GUEST_RSP, guest_rsp), FIELD(GUEST_RIP, guest_rip), FIELD(GUEST_RFLAGS, guest_rflags), FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions), FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp), FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip), FIELD(HOST_CR0, host_cr0), FIELD(HOST_CR3, host_cr3), FIELD(HOST_CR4, host_cr4), FIELD(HOST_FS_BASE, host_fs_base), FIELD(HOST_GS_BASE, host_gs_base), 
	FIELD(HOST_TR_BASE, host_tr_base),
	FIELD(HOST_GDTR_BASE, host_gdtr_base),
	FIELD(HOST_IDTR_BASE, host_idtr_base),
	FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
	FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
	FIELD(HOST_RSP, host_rsp),
	FIELD(HOST_RIP, host_rip),
};

static inline short vmcs_field_to_offset(unsigned long field)
{
	BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);

	if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
	    vmcs_field_to_offset_table[field] == 0)
		return -ENOENT;

	return vmcs_field_to_offset_table[field];
}

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
static bool vmx_xsaves_supported(void);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static int alloc_identity_pagetable(struct kvm *kvm);
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

/*
 * We maintain a per-CPU linked-list of vCPUs, so in wakeup_handler() we
 * can find which vCPU should be woken up.
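 *
 * Illustrative sketch of how such a wakeup handler can walk this list
 * (the blocked_vcpu_list member name is an assumption of this sketch,
 * not necessarily the field name used elsewhere):
 *
 *	int cpu = smp_processor_id();
 *	struct kvm_vcpu *vcpu;
 *
 *	spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
 *	list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
 *			    blocked_vcpu_list)
 *		if (pi_test_on(vcpu_to_pi_desc(vcpu)))
 *			kvm_vcpu_kick(vcpu);
 *	spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));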
*/ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); enum { VMX_IO_BITMAP_A, VMX_IO_BITMAP_B, VMX_MSR_BITMAP_LEGACY, VMX_MSR_BITMAP_LONGMODE, VMX_MSR_BITMAP_LEGACY_X2APIC_APICV, VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV, VMX_MSR_BITMAP_LEGACY_X2APIC, VMX_MSR_BITMAP_LONGMODE_X2APIC, VMX_VMREAD_BITMAP, VMX_VMWRITE_BITMAP, VMX_BITMAP_NR }; static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; #define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A]) #define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B]) #define vmx_msr_bitmap_legacy (vmx_bitmap[VMX_MSR_BITMAP_LEGACY]) #define vmx_msr_bitmap_longmode (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE]) #define vmx_msr_bitmap_legacy_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV]) #define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV]) #define vmx_msr_bitmap_legacy_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC]) #define vmx_msr_bitmap_longmode_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC]) #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) static bool cpu_has_load_ia32_efer; static bool cpu_has_load_perf_global_ctrl; static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); static DEFINE_SPINLOCK(vmx_vpid_lock); static struct vmcs_config { int size; int order; u32 basic_cap; u32 revision_id; u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; } vmcs_config; static struct vmx_capability { u32 ept; u32 vpid; } vmx_capability; #define VMX_SEGMENT_FIELD(seg) \ [VCPU_SREG_##seg] = { \ .selector = GUEST_##seg##_SELECTOR, \ .base = GUEST_##seg##_BASE, \ .limit = GUEST_##seg##_LIMIT, \ .ar_bytes = GUEST_##seg##_AR_BYTES, \ } static const struct kvm_vmx_segment_field { unsigned selector; unsigned base; unsigned limit; unsigned ar_bytes; } kvm_vmx_segment_fields[] = { VMX_SEGMENT_FIELD(CS), VMX_SEGMENT_FIELD(DS), VMX_SEGMENT_FIELD(ES), VMX_SEGMENT_FIELD(FS), VMX_SEGMENT_FIELD(GS), VMX_SEGMENT_FIELD(SS), VMX_SEGMENT_FIELD(TR), VMX_SEGMENT_FIELD(LDTR), }; static u64 host_efer; static void ept_save_pdptrs(struct kvm_vcpu *vcpu); /* * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it * away by decrementing the array size. 
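 *
 * For example, a guest that never enables EFER.SCE has MSR_STAR left
 * outside the first save_nmsrs entries by setup_msrs(), so it is never
 * saved or restored on the switch path.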
*/ static const u32 vmx_msr_index[] = { #ifdef CONFIG_X86_64 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, #endif MSR_EFER, MSR_TSC_AUX, MSR_STAR, }; static inline bool is_exception_n(u32 intr_info, u8 vector) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK); } static inline bool is_debug(u32 intr_info) { return is_exception_n(intr_info, DB_VECTOR); } static inline bool is_breakpoint(u32 intr_info) { return is_exception_n(intr_info, BP_VECTOR); } static inline bool is_page_fault(u32 intr_info) { return is_exception_n(intr_info, PF_VECTOR); } static inline bool is_no_device(u32 intr_info) { return is_exception_n(intr_info, NM_VECTOR); } static inline bool is_invalid_opcode(u32 intr_info) { return is_exception_n(intr_info, UD_VECTOR); } static inline bool is_external_interrupt(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); } static inline bool is_machine_check(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); } static inline bool cpu_has_vmx_msr_bitmap(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; } static inline bool cpu_has_vmx_tpr_shadow(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW; } static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu) { return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu); } static inline bool cpu_has_secondary_exec_ctrls(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; } static inline bool cpu_has_vmx_virtualize_apic_accesses(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; } static inline bool cpu_has_vmx_virtualize_x2apic_mode(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; } static inline bool cpu_has_vmx_apic_register_virt(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_APIC_REGISTER_VIRT; } static inline bool cpu_has_vmx_virtual_intr_delivery(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; } /* * Comment's format: document - errata name - stepping - processor name. 
* Refer from * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp */ static u32 vmx_preemption_cpu_tfms[] = { /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */ 0x000206E6, /* 323056.pdf - AAX65 - C2 - Xeon L3406 */ /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */ /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */ 0x00020652, /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */ 0x00020655, /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */ /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */ /* * 320767.pdf - AAP86 - B1 - * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile */ 0x000106E5, /* 321333.pdf - AAM126 - C0 - Xeon 3500 */ 0x000106A0, /* 321333.pdf - AAM126 - C1 - Xeon 3500 */ 0x000106A1, /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */ 0x000106A4, /* 321333.pdf - AAM126 - D0 - Xeon 3500 */ /* 321324.pdf - AAK139 - D0 - Xeon 5500 */ /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */ 0x000106A5, }; static inline bool cpu_has_broken_vmx_preemption_timer(void) { u32 eax = cpuid_eax(0x00000001), i; /* Clear the reserved bits */ eax &= ~(0x3U << 14 | 0xfU << 28); for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++) if (eax == vmx_preemption_cpu_tfms[i]) return true; return false; } static inline bool cpu_has_vmx_preemption_timer(void) { return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VMX_PREEMPTION_TIMER; } static inline bool cpu_has_vmx_posted_intr(void) { return IS_ENABLED(CONFIG_X86_LOCAL_APIC) && vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR; } static inline bool cpu_has_vmx_apicv(void) { return cpu_has_vmx_apic_register_virt() && cpu_has_vmx_virtual_intr_delivery() && cpu_has_vmx_posted_intr(); } static inline bool cpu_has_vmx_flexpriority(void) { return cpu_has_vmx_tpr_shadow() && cpu_has_vmx_virtualize_apic_accesses(); } static inline bool cpu_has_vmx_ept_execute_only(void) { return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT; } static inline bool cpu_has_vmx_ept_2m_page(void) { return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_1g_page(void) { return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_4levels(void) { return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; } static inline bool cpu_has_vmx_ept_mt_wb(void) { return vmx_capability.ept & VMX_EPTP_WB_BIT; } static inline bool cpu_has_vmx_ept_5levels(void) { return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT; } static inline bool cpu_has_vmx_ept_ad_bits(void) { return vmx_capability.ept & VMX_EPT_AD_BIT; } static inline bool cpu_has_vmx_invept_context(void) { return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT; } static inline bool cpu_has_vmx_invept_global(void) { return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT; } static inline bool cpu_has_vmx_invvpid_single(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT; } static inline bool cpu_has_vmx_invvpid_global(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; } static inline bool cpu_has_vmx_invvpid(void) { return vmx_capability.vpid & VMX_VPID_INVVPID_BIT; } static inline bool cpu_has_vmx_ept(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_EPT; } static inline bool cpu_has_vmx_unrestricted_guest(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_UNRESTRICTED_GUEST; } static inline bool cpu_has_vmx_ple(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & 
SECONDARY_EXEC_PAUSE_LOOP_EXITING; } static inline bool cpu_has_vmx_basic_inout(void) { return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT); } static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) { return flexpriority_enabled && lapic_in_kernel(vcpu); } static inline bool cpu_has_vmx_vpid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_VPID; } static inline bool cpu_has_vmx_rdtscp(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_RDTSCP; } static inline bool cpu_has_vmx_invpcid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_INVPCID; } static inline bool cpu_has_vmx_wbinvd_exit(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_WBINVD_EXITING; } static inline bool cpu_has_vmx_shadow_vmcs(void) { u64 vmx_msr; rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); /* check if the cpu supports writing r/o exit information fields */ if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) return false; return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_SHADOW_VMCS; } static inline bool cpu_has_vmx_pml(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML; } static inline bool cpu_has_vmx_tsc_scaling(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_TSC_SCALING; } static inline bool cpu_has_vmx_vmfunc(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_VMFUNC; } static inline bool report_flexpriority(void) { return flexpriority_enabled; } static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu) { return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low); } static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) { return vmcs12->cpu_based_vm_exec_control & bit; } static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) { return (vmcs12->cpu_based_vm_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && (vmcs12->secondary_vm_exec_control & bit); } static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; } static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER; } static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); } static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); } static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML); } static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); } static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID); } static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT); } static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); } static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR; } static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC); } static inline bool nested_cpu_has_eptp_switching(struct vmcs12 
*vmcs12) { return nested_cpu_has_vmfunc(vmcs12) && (vmcs12->vm_function_control & VMX_VMFUNC_EPTP_SWITCHING); } static inline bool is_nmi(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); } static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification); static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification); static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { int i; for (i = 0; i < vmx->nmsrs; ++i) if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) return i; return -1; } static inline void __invvpid(int ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; u64 rsvd : 48; u64 gva; } operand = { vpid, 0, gva }; asm volatile (__ex(ASM_VMX_INVVPID) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:" : : "a"(&operand), "c"(ext) : "cc", "memory"); } static inline void __invept(int ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; } operand = {eptp, gpa}; asm volatile (__ex(ASM_VMX_INVEPT) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:\n" : : "a" (&operand), "c" (ext) : "cc", "memory"); } static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) { int i; i = __find_msr_index(vmx, msr); if (i >= 0) return &vmx->guest_msrs[i]; return NULL; } static void vmcs_clear(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); u8 error; asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0" : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", vmcs, phys_addr); } static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) { vmcs_clear(loaded_vmcs->vmcs); if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) vmcs_clear(loaded_vmcs->shadow_vmcs); loaded_vmcs->cpu = -1; loaded_vmcs->launched = 0; } static void vmcs_load(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); u8 error; asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0" : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n", vmcs, phys_addr); } #ifdef CONFIG_KEXEC_CORE /* * This bitmap is used to indicate whether the vmclear * operation is enabled on all cpus. All disabled by * default. 
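 *
 * The bit for a CPU is only set while its loaded-VMCS list is in a
 * consistent state; callers clear it around list updates (see
 * __loaded_vmcs_clear() and vmx_vcpu_load()), so the crash path only
 * issues VMCLEAR where it is safe and needed.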
 */
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * We should ensure that the update of
	 * loaded_vmcs->loaded_vmcss_on_cpu_link happens before setting
	 * loaded_vmcs->cpu to -1, which is done in loaded_vmcs_init.
	 * Otherwise, another cpu could see cpu = -1 first and then add
	 * the vmcs into the percpu list before it is deleted.
	 */
	smp_wmb();

	loaded_vmcs_init(loaded_vmcs);
	crash_enable_local_vmclear(cpu);
}

static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}

static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else
		vpid_sync_vcpu_global();
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}

static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, "64-bit accessor invalid for 64-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, "64-bit accessor invalid for 32-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, "64-bit accessor invalid for natural width field"); } static __always_inline void vmcs_checkl(unsigned long field) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, "Natural width accessor invalid for 16-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, "Natural width accessor invalid for 64-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, "Natural width accessor invalid for 64-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, "Natural width accessor invalid for 32-bit field"); } static __always_inline unsigned long __vmcs_readl(unsigned long field) { unsigned long value; asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0") : "=a"(value) : "d"(field) : "cc"); return value; } static __always_inline u16 vmcs_read16(unsigned long field) { vmcs_check16(field); return __vmcs_readl(field); } static __always_inline u32 vmcs_read32(unsigned long field) { vmcs_check32(field); return __vmcs_readl(field); } static __always_inline u64 vmcs_read64(unsigned long field) { vmcs_check64(field); #ifdef CONFIG_X86_64 return __vmcs_readl(field); #else return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32); #endif } static __always_inline unsigned long vmcs_readl(unsigned long field) { vmcs_checkl(field); return __vmcs_readl(field); } static noinline void vmwrite_error(unsigned long field, unsigned long value) { printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); dump_stack(); } static __always_inline void __vmcs_writel(unsigned long field, unsigned long value) { u8 error; asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0" : "=q"(error) : "a"(value), "d"(field) : "cc"); if (unlikely(error)) vmwrite_error(field, value); } static __always_inline void vmcs_write16(unsigned long field, u16 value) { vmcs_check16(field); __vmcs_writel(field, value); } static __always_inline void vmcs_write32(unsigned long field, u32 value) { vmcs_check32(field); __vmcs_writel(field, value); } static __always_inline void vmcs_write64(unsigned long field, u64 value) { vmcs_check64(field); __vmcs_writel(field, value); #ifndef CONFIG_X86_64 asm volatile (""); __vmcs_writel(field+1, value >> 32); #endif } static __always_inline void vmcs_writel(unsigned long field, unsigned long value) { vmcs_checkl(field); __vmcs_writel(field, value); } static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_clear_bits does not support 64-bit fields"); __vmcs_writel(field, __vmcs_readl(field) & ~mask); } static __always_inline void vmcs_set_bits(unsigned long field, u32 mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_set_bits does not support 64-bit fields"); __vmcs_writel(field, __vmcs_readl(field) | mask); } static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx) { vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS); } static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_ENTRY_CONTROLS, 
val); vmx->vm_entry_controls_shadow = val; } static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_entry_controls_shadow != val) vm_entry_controls_init(vmx, val); } static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_entry_controls_shadow; } static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); } static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); } static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx) { vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS); } static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_EXIT_CONTROLS, val); vmx->vm_exit_controls_shadow = val; } static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_exit_controls_shadow != val) vm_exit_controls_init(vmx, val); } static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_exit_controls_shadow; } static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); } static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); } static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) { vmx->segment_cache.bitmask = 0; } static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, unsigned field) { bool ret; u32 mask = 1 << (seg * SEG_FIELD_NR + field); if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); vmx->segment_cache.bitmask = 0; } ret = vmx->segment_cache.bitmask & mask; vmx->segment_cache.bitmask |= mask; return ret; } static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) { u16 *p = &vmx->segment_cache.seg[seg].selector; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); return *p; } static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) { ulong *p = &vmx->segment_cache.seg[seg].base; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); return *p; } static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].limit; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); return *p; } static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].ar; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); return *p; } static void update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR); if ((vcpu->guest_debug & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) eb |= 1u << BP_VECTOR; if (to_vmx(vcpu)->rmode.vm86_active) eb = ~0; if (enable_ept) eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ /* When we are running a nested L2 guest and L1 specified for it a * certain exception bitmap, we must trap the same exceptions and pass * them to L1. When running L2, we will only handle the exceptions * specified above if L1 did not want them. 
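	 *
	 * Worked example (illustrative): if vmcs12->exception_bitmap only
	 * has the #DB bit set while L0 itself traps #PF and #UD, the value
	 * written below while L2 runs is roughly
	 *
	 *	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << DB_VECTOR);
	 *
	 * so an L2 #DB exits to L0 first, which can then reflect it to L1.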
*/ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; vmcs_write32(EXCEPTION_BITMAP, eb); } static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit) { vm_entry_controls_clearbit(vmx, entry); vm_exit_controls_clearbit(vmx, exit); } static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); return; } break; } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == m->nr) return; --m->nr; m->guest[i] = m->guest[m->nr]; m->host[i] = m->host[m->nr]; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit, unsigned long guest_val_vmcs, unsigned long host_val_vmcs, u64 guest_val, u64 host_val) { vmcs_write64(guest_val_vmcs, guest_val); vmcs_write64(host_val_vmcs, host_val); vm_entry_controls_setbit(vmx, entry); vm_exit_controls_setbit(vmx, exit); } static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, u64 guest_val, u64 host_val) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER, GUEST_IA32_EFER, HOST_IA32_EFER, guest_val, host_val); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, GUEST_IA32_PERF_GLOBAL_CTRL, HOST_IA32_PERF_GLOBAL_CTRL, guest_val, host_val); return; } break; case MSR_IA32_PEBS_ENABLE: /* PEBS needs a quiescent period after being disabled (to write * a record). Disabling PEBS through VMX MSR swapping doesn't * provide that period, so a CPU could write host's record into * guest's memory. */ wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == NR_AUTOLOAD_MSRS) { printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; } else if (i == m->nr) { ++m->nr; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } m->guest[i].index = msr; m->guest[i].value = guest_val; m->host[i].index = msr; m->host[i].value = host_val; } static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) { u64 guest_efer = vmx->vcpu.arch.efer; u64 ignore_bits = 0; if (!enable_ept) { /* * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing * host CPUID is more efficient than testing guest CPUID * or CR4. Host SMEP is anyway a requirement for guest SMEP. */ if (boot_cpu_has(X86_FEATURE_SMEP)) guest_efer |= EFER_NX; else if (!(guest_efer & EFER_NX)) ignore_bits |= EFER_NX; } /* * LMA and LME handled by hardware; SCE meaningless outside long mode. 
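	 *
	 * For example, for a 32-bit guest (EFER.LMA clear) whose EFER
	 * differs from the host's only in SCE, LMA and LME, folding the
	 * host bits in via ignore_bits below yields a guest_efer equal to
	 * host_efer, so the shared-MSR machinery ends up writing nothing
	 * on the switch path.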
*/ ignore_bits |= EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif clear_atomic_switch_msr(vmx, MSR_EFER); /* * On EPT, we can't emulate NX, so we must switch EFER atomically. * On CPUs that support "load IA32_EFER", always switch EFER * atomically, since it's faster than switching it manually. */ if (cpu_has_load_ia32_efer || (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { if (!(guest_efer & EFER_LMA)) guest_efer &= ~EFER_LME; if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); return false; } else { guest_efer &= ~ignore_bits; guest_efer |= host_efer & ignore_bits; vmx->guest_msrs[efer_offset].data = guest_efer; vmx->guest_msrs[efer_offset].mask = ~ignore_bits; return true; } } #ifdef CONFIG_X86_32 /* * On 32-bit kernels, VM exits still load the FS and GS bases from the * VMCS rather than the segment table. KVM uses this helper to figure * out the current bases to poke them into the VMCS before entry. */ static unsigned long segment_base(u16 selector) { struct desc_struct *table; unsigned long v; if (!(selector & ~SEGMENT_RPL_MASK)) return 0; table = get_current_gdt_ro(); if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) { u16 ldt_selector = kvm_read_ldt(); if (!(ldt_selector & ~SEGMENT_RPL_MASK)) return 0; table = (struct desc_struct *)segment_base(ldt_selector); } v = get_desc_base(&table[selector >> 3]); return v; } #endif static void vmx_save_host_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int i; if (vmx->host_state.loaded) return; vmx->host_state.loaded = 1; /* * Set host fs and gs selectors. Unfortunately, 22.2.3 does not * allow segment selectors with cpl > 0 or ti == 1. 
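	 *
	 * (For example, a selector of 0x33 - index 6, TI = 0, RPL = 3 - has
	 * (sel & 7) != 0, so the code below must write 0 into
	 * HOST_FS_SELECTOR and flag fs for reload on the way back to the
	 * host instead.)
	 */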
*/ vmx->host_state.ldt_sel = kvm_read_ldt(); vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; savesegment(fs, vmx->host_state.fs_sel); if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); vmx->host_state.fs_reload_needed = 0; } else { vmcs_write16(HOST_FS_SELECTOR, 0); vmx->host_state.fs_reload_needed = 1; } savesegment(gs, vmx->host_state.gs_sel); if (!(vmx->host_state.gs_sel & 7)) vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); else { vmcs_write16(HOST_GS_SELECTOR, 0); vmx->host_state.gs_ldt_reload_needed = 1; } #ifdef CONFIG_X86_64 savesegment(ds, vmx->host_state.ds_sel); savesegment(es, vmx->host_state.es_sel); #endif #ifdef CONFIG_X86_64 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); #else vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); #endif #ifdef CONFIG_X86_64 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); if (is_long_mode(&vmx->vcpu)) wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (boot_cpu_has(X86_FEATURE_MPX)) rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); for (i = 0; i < vmx->save_nmsrs; ++i) kvm_set_shared_msr(vmx->guest_msrs[i].index, vmx->guest_msrs[i].data, vmx->guest_msrs[i].mask); } static void __vmx_load_host_state(struct vcpu_vmx *vmx) { if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); #ifdef CONFIG_X86_64 load_gs_index(vmx->host_state.gs_sel); #else loadsegment(gs, vmx->host_state.gs_sel); #endif } if (vmx->host_state.fs_reload_needed) loadsegment(fs, vmx->host_state.fs_sel); #ifdef CONFIG_X86_64 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) { loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } #endif invalidate_tss_limit(); #ifdef CONFIG_X86_64 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif if (vmx->host_state.msr_host_bndcfgs) wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); load_fixmap_gdt(raw_smp_processor_id()); } static void vmx_load_host_state(struct vcpu_vmx *vmx) { preempt_disable(); __vmx_load_host_state(vmx); preempt_enable(); } static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); struct pi_desc old, new; unsigned int dest; if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || !kvm_vcpu_apicv_active(vcpu)) return; do { old.control = new.control = pi_desc->control; /* * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there * are two possible cases: * 1. After running 'pre_block', context switch * happened. For this case, 'sn' was set in * vmx_vcpu_put(), so we need to clear it here. * 2. After running 'pre_block', we were blocked, * and woken up by some other guy. For this case, * we don't need to do anything, 'pi_post_block' * will do everything for us. However, we cannot * check whether it is case #1 or case #2 here * (maybe, not needed), so we also clear sn here, * I think it is not a big deal. 
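	 *
	 * Note on the ndst update below: in x2APIC mode the field holds
	 * the 32-bit APIC ID directly, while in xAPIC mode the 8-bit
	 * destination sits in bits 15:8, hence the (dest << 8) & 0xFF00
	 * encoding.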
		 */
		if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
			if (vcpu->cpu != cpu) {
				dest = cpu_physical_id(cpu);

				if (x2apic_enabled())
					new.ndst = dest;
				else
					new.ndst = (dest << 8) & 0xFF00;
			}

			/* set 'NV' to 'notification vector' */
			new.nv = POSTED_INTR_VECTOR;
		}

		/* Allow posting non-urgent interrupts */
		new.sn = 0;
	} while (cmpxchg(&pi_desc->control, old.control,
			new.control) != old.control);
}

static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;

	if (!already_loaded) {
		loaded_vmcs_clear(vmx->loaded_vmcs);
		local_irq_disable();
		crash_disable_local_vmclear(cpu);

		/*
		 * Read loaded_vmcs->cpu should be before fetching
		 * loaded_vmcs->loaded_vmcss_on_cpu_link.
		 * See the comments in __loaded_vmcs_clear().
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		crash_enable_local_vmclear(cpu);
		local_irq_enable();
	}

	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}

	if (!already_loaded) {
		void *gdt = get_current_gdt_ro();
		unsigned long sysenter_esp;

		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.  See 22.2.4.
		 */
		vmcs_writel(HOST_TR_BASE,
			    (unsigned long)this_cpu_ptr(&cpu_tss));
		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */

		/*
		 * VM exits change the host TR limit to 0x67 after a VM
		 * exit.  This is okay, since 0x67 covers everything except
		 * the IO bitmap, and we have code to handle the IO bitmap
		 * being lost after a VM exit.
		 */
		BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		vmx->loaded_vmcs->cpu = cpu;
	}

	/* Setup TSC multiplier */
	if (kvm_has_tsc_control &&
	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
		decache_tsc_multiplier(vmx);

	vmx_vcpu_pi_load(vcpu, cpu);
	vmx->host_pkru = read_pkru();
}

static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
		!irq_remapping_cap(IRQ_POSTING_CAP)  ||
		!kvm_vcpu_apicv_active(vcpu))
		return;

	/* Set SN when the vCPU is preempted */
	if (vcpu->preempted)
		pi_set_sn(pi_desc);
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_vcpu_pi_put(vcpu);

	__vmx_load_host_state(to_vmx(vcpu));
}

static bool emulation_required(struct kvm_vcpu *vcpu)
{
	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
 * its hypervisor (cr0_read_shadow).
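 *
 * Worked example: with cr0_guest_host_mask = X86_CR0_TS (L1 owns TS),
 * guest_cr0.TS = 1 and cr0_read_shadow.TS = 0, the combination below
 * yields TS = 0 - the nested guest reads the shadowed value, not the
 * one actually in force.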
*/ static inline unsigned long nested_read_cr0(struct vmcs12 *fields) { return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) | (fields->cr0_read_shadow & fields->cr0_guest_host_mask); } static inline unsigned long nested_read_cr4(struct vmcs12 *fields) { return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) | (fields->cr4_read_shadow & fields->cr4_guest_host_mask); } static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags, save_rflags; if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); rflags = vmcs_readl(GUEST_RFLAGS); if (to_vmx(vcpu)->rmode.vm86_active) { rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; save_rflags = to_vmx(vcpu)->rmode.save_rflags; rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; } to_vmx(vcpu)->rflags = rflags; } return to_vmx(vcpu)->rflags; } static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { unsigned long old_rflags = vmx_get_rflags(vcpu); __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); to_vmx(vcpu)->rflags = rflags; if (to_vmx(vcpu)->rmode.vm86_active) { to_vmx(vcpu)->rmode.save_rflags = rflags; rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; } vmcs_writel(GUEST_RFLAGS, rflags); if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM) to_vmx(vcpu)->emulation_required = emulation_required(vcpu); } static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) { u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); int ret = 0; if (interruptibility & GUEST_INTR_STATE_STI) ret |= KVM_X86_SHADOW_INT_STI; if (interruptibility & GUEST_INTR_STATE_MOV_SS) ret |= KVM_X86_SHADOW_INT_MOV_SS; return ret; } static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) { u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); u32 interruptibility = interruptibility_old; interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); if (mask & KVM_X86_SHADOW_INT_MOV_SS) interruptibility |= GUEST_INTR_STATE_MOV_SS; else if (mask & KVM_X86_SHADOW_INT_STI) interruptibility |= GUEST_INTR_STATE_STI; if ((interruptibility != interruptibility_old)) vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); } static void skip_emulated_instruction(struct kvm_vcpu *vcpu) { unsigned long rip; rip = kvm_rip_read(vcpu); rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_rip_write(vcpu, rip); /* skipping an emulated instruction also counts */ vmx_set_interrupt_shadow(vcpu, 0); } static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, unsigned long exit_qual) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned int nr = vcpu->arch.exception.nr; u32 intr_info = nr | INTR_INFO_VALID_MASK; if (vcpu->arch.exception.has_error_code) { vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; intr_info |= INTR_INFO_DELIVER_CODE_MASK; } if (kvm_exception_is_soft(nr)) intr_info |= INTR_TYPE_SOFT_EXCEPTION; else intr_info |= INTR_TYPE_HARD_EXCEPTION; if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && vmx_get_nmi_mask(vcpu)) intr_info |= INTR_INFO_UNBLOCK_NMI; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); } /* * KVM wants to inject page-faults which it got to the guest. This function * checks whether in a nested guest, we need to inject them to L1 or L2. 
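 *
 * For example, when vmcs12's PFEC_MASK/PFEC_MATCH settings make
 * nested_vmx_is_page_fault_vmexit() return true for the error code at
 * hand, the #PF is turned into an EXIT_REASON_EXCEPTION_NMI vmexit to
 * L1 with CR2 reported in the exit qualification; otherwise it is
 * injected directly into L2.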
*/ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned int nr = vcpu->arch.exception.nr; if (nr == PF_VECTOR) { if (vcpu->arch.exception.nested_apf) { *exit_qual = vcpu->arch.apf.nested_apf_token; return 1; } /* * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception. * The fix is to add the ancillary datum (CR2 or DR6) to structs * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 * can be written only when inject_pending_event runs. This should be * conditional on a new capability---if the capability is disabled, * kvm_multiple_exception would write the ancillary information to * CR2 or DR6, for backwards ABI-compatibility. */ if (nested_vmx_is_page_fault_vmexit(vmcs12, vcpu->arch.exception.error_code)) { *exit_qual = vcpu->arch.cr2; return 1; } } else { if (vmcs12->exception_bitmap & (1u << nr)) { if (nr == DB_VECTOR) *exit_qual = vcpu->arch.dr6; else *exit_qual = 0; return 1; } } return 0; } static void vmx_queue_exception(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned nr = vcpu->arch.exception.nr; bool has_error_code = vcpu->arch.exception.has_error_code; u32 error_code = vcpu->arch.exception.error_code; u32 intr_info = nr | INTR_INFO_VALID_MASK; if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; } if (vmx->rmode.vm86_active) { int inc_eip = 0; if (kvm_exception_is_soft(nr)) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } if (kvm_exception_is_soft(nr)) { vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); intr_info |= INTR_TYPE_SOFT_EXCEPTION; } else intr_info |= INTR_TYPE_HARD_EXCEPTION; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); } static bool vmx_rdtscp_supported(void) { return cpu_has_vmx_rdtscp(); } static bool vmx_invpcid_supported(void) { return cpu_has_vmx_invpcid() && enable_ept; } /* * Swap MSR entry in host/guest MSR entry array. */ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) { struct shared_msr_entry tmp; tmp = vmx->guest_msrs[to]; vmx->guest_msrs[to] = vmx->guest_msrs[from]; vmx->guest_msrs[from] = tmp; } static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) { unsigned long *msr_bitmap; if (is_guest_mode(vcpu)) msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap; else if (cpu_has_secondary_exec_ctrls() && (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv; else msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv; } else { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode_x2apic; else msr_bitmap = vmx_msr_bitmap_legacy_x2apic; } } else { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode; else msr_bitmap = vmx_msr_bitmap_legacy; } vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); } /* * Set up the vmcs to automatically save and restore system * msrs. Don't touch the 64-bit msrs if the guest is in legacy * mode, as fiddling with msrs is very expensive. 
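 *
 * For example, a 64-bit guest with EFER.SCE set gets MSR_SYSCALL_MASK,
 * MSR_LSTAR, MSR_CSTAR and MSR_STAR compacted to the front of
 * guest_msrs[] with save_nmsrs set accordingly, while a legacy-mode
 * guest typically keeps at most the EFER entry in the live set.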
*/ static void setup_msrs(struct vcpu_vmx *vmx) { int save_nmsrs, index; save_nmsrs = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) { index = __find_msr_index(vmx, MSR_SYSCALL_MASK); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_LSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_CSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_TSC_AUX); if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) move_msr_up(vmx, index, save_nmsrs++); /* * MSR_STAR is only needed on long mode guests, and only * if efer.sce is enabled. */ index = __find_msr_index(vmx, MSR_STAR); if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) move_msr_up(vmx, index, save_nmsrs++); } #endif index = __find_msr_index(vmx, MSR_EFER); if (index >= 0 && update_transition_efer(vmx, index)) move_msr_up(vmx, index, save_nmsrs++); vmx->save_nmsrs = save_nmsrs; if (cpu_has_vmx_msr_bitmap()) vmx_set_msr_bitmap(&vmx->vcpu); } /* * reads and returns guest's timestamp counter "register" * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 */ static u64 guest_read_tsc(struct kvm_vcpu *vcpu) { u64 host_tsc, tsc_offset; host_tsc = rdtsc(); tsc_offset = vmcs_read64(TSC_OFFSET); return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; } /* * writes 'offset' into guest's timestamp counter offset register */ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { if (is_guest_mode(vcpu)) { /* * We're here if L1 chose not to trap WRMSR to TSC. According * to the spec, this should set L1's TSC; The offset that L1 * set for L2 remains unchanged, and still needs to be added * to the newly set TSC to get L2's TSC. */ struct vmcs12 *vmcs12; /* recalculate vmcs02.TSC_OFFSET: */ vmcs12 = get_vmcs12(vcpu); vmcs_write64(TSC_OFFSET, offset + (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? vmcs12->tsc_offset : 0)); } else { trace_kvm_write_tsc_offset(vcpu->vcpu_id, vmcs_read64(TSC_OFFSET), offset); vmcs_write64(TSC_OFFSET, offset); } } /* * nested_vmx_allowed() checks whether a guest should be allowed to use VMX * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for * all guests if the "nested" module option is off, and can also be disabled * for a single guest by disabling its VMX cpuid bit. */ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) { return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); } /* * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be * returned for the various VMX controls MSRs when nested VMX is enabled. * The same values should also be used to verify that vmcs12 control fields are * valid during nested entry from L1 to L2. * Each of these control msrs has a low and high 32-bit half: A low bit is on * if the corresponding bit in the (32-bit) control field *must* be on, and a * bit in the high half is on if the corresponding bit in the control field * may be on. See also vmx_control_verify(). */ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) { /* * Note that as a general rule, the high half of the MSRs (bits in * the control fields which may be 1) should be initialized by the * intersection of the underlying hardware's MSR (i.e., features which * can be supported) and the list of features we want to expose - * because they are known to be properly supported in our code. 
* Also, usually, the low half of the MSRs (bits which must be 1) can * be set to 0, meaning that L1 may turn off any of these bits. The * reason is that if one of these bits is necessary, it will appear * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control * fields of vmcs01 and vmcs02, will turn these bits off - and * nested_vmx_exit_reflected() will not pass related exits to L1. * These rules have exceptions below. */ /* pin-based controls */ rdmsr(MSR_IA32_VMX_PINBASED_CTLS, vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high); vmx->nested.nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; vmx->nested.nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | PIN_BASED_VMX_PREEMPTION_TIMER; if (kvm_vcpu_apicv_active(&vmx->vcpu)) vmx->nested.nested_vmx_pinbased_ctls_high |= PIN_BASED_POSTED_INTR; /* exit controls */ rdmsr(MSR_IA32_VMX_EXIT_CTLS, vmx->nested.nested_vmx_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high); vmx->nested.nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_exit_ctls_high &= #ifdef CONFIG_X86_64 VM_EXIT_HOST_ADDR_SPACE_SIZE | #endif VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; if (kvm_mpx_supported()) vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; /* We support free control of debug control saving. */ vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; /* entry controls */ rdmsr(MSR_IA32_VMX_ENTRY_CTLS, vmx->nested.nested_vmx_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high); vmx->nested.nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_entry_ctls_high &= #ifdef CONFIG_X86_64 VM_ENTRY_IA32E_MODE | #endif VM_ENTRY_LOAD_IA32_PAT; vmx->nested.nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); if (kvm_mpx_supported()) vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; /* We support free control of debug control loading. */ vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; /* cpu-based controls */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx->nested.nested_vmx_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high); vmx->nested.nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_procbased_ctls_high &= CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; /* * We can allow some features even when not supported by the * hardware. For example, L1 can specify an MSR bitmap - and we * can use it to avoid exits to L1 - even when L0 runs L2 * without MSR bitmaps. 
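 * (When L1 does use the control, the nested code merges L1's bitmap
 * with L0's before running L2, so advertising it here is safe.)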
*/ vmx->nested.nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | CPU_BASED_USE_MSR_BITMAPS; /* We support free control of CR3 access interception. */ vmx->nested.nested_vmx_procbased_ctls_low &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); /* * secondary cpu-based controls. Do not include those that * depend on CPUID bits, they are added later by vmx_cpuid_update. */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high); vmx->nested.nested_vmx_secondary_ctls_low = 0; vmx->nested.nested_vmx_secondary_ctls_high &= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_DESC | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_WBINVD_EXITING; if (enable_ept) { /* nested EPT: emulate EPT also to L1 */ vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT; if (cpu_has_vmx_ept_execute_only()) vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXECUTE_ONLY_BIT; vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept; vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | VMX_EPT_1GB_PAGE_BIT; if (enable_ept_ad_bits) { vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_PML; vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT; } } else vmx->nested.nested_vmx_ept_caps = 0; if (cpu_has_vmx_vmfunc()) { vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_VMFUNC; /* * Advertise EPTP switching unconditionally * since we emulate it */ vmx->nested.nested_vmx_vmfunc_controls = VMX_VMFUNC_EPTP_SWITCHING; } /* * Old versions of KVM use the single-context version without * checking for support, so declare that it is supported even * though it is treated as global context. The alternative is * not failing the single-context invvpid, and it is worse. */ if (enable_vpid) { vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_VPID; vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | VMX_VPID_EXTENT_SUPPORTED_MASK; } else vmx->nested.nested_vmx_vpid_caps = 0; if (enable_unrestricted_guest) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_UNRESTRICTED_GUEST; /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, vmx->nested.nested_vmx_misc_low, vmx->nested.nested_vmx_misc_high); vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA; vmx->nested.nested_vmx_misc_low |= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | VMX_MISC_ACTIVITY_HLT; vmx->nested.nested_vmx_misc_high = 0; /* * This MSR reports some information about VMX support. We * should return information about the VMX we emulate for the * guest, and the VMCS structure we give it - not about the * VMX support of the underlying hardware. */ vmx->nested.nested_vmx_basic = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); if (cpu_has_vmx_basic_inout()) vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT; /* * These MSRs specify bits which the guest must keep fixed on * while L1 is in VMXON mode (in L1's root mode, or running an L2). * We picked the standard core2 setting. 
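 *
 * By convention, a bit set in FIXED0 must stay 1 and a bit clear in
 * FIXED1 must stay 0.  With CR0 fixed0 = PE|PG|NE below, an L1 in VMX
 * operation can never leave protected mode or disable paging.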
*/ #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) #define VMXON_CR4_ALWAYSON X86_CR4_VMXE vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON; vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON; /* These MSRs specify bits which the guest must keep fixed off. */ rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1); rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1); /* highest index: VMX_PREEMPTION_TIMER_VALUE */ vmx->nested.nested_vmx_vmcs_enum = 0x2e; } /* * if fixed0[i] == 1: val[i] must be 1 * if fixed1[i] == 0: val[i] must be 0 */ static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1) { return ((val & fixed1) | fixed0) == val; } static inline bool vmx_control_verify(u32 control, u32 low, u32 high) { return fixed_bits_valid(control, low, high); } static inline u64 vmx_control_msr(u32 low, u32 high) { return low | ((u64)high << 32); } static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) { superset &= mask; subset &= mask; return (superset | subset) == superset; } static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) { const u64 feature_and_reserved = /* feature (except bit 48; see below) */ BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | /* reserved */ BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); u64 vmx_basic = vmx->nested.nested_vmx_basic; if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) return -EINVAL; /* * KVM does not emulate a version of VMX that constrains physical * addresses of VMX structures (e.g. VMCS) to 32-bits. */ if (data & BIT_ULL(48)) return -EINVAL; if (vmx_basic_vmcs_revision_id(vmx_basic) != vmx_basic_vmcs_revision_id(data)) return -EINVAL; if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data)) return -EINVAL; vmx->nested.nested_vmx_basic = data; return 0; } static int vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) { u64 supported; u32 *lowp, *highp; switch (msr_index) { case MSR_IA32_VMX_TRUE_PINBASED_CTLS: lowp = &vmx->nested.nested_vmx_pinbased_ctls_low; highp = &vmx->nested.nested_vmx_pinbased_ctls_high; break; case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: lowp = &vmx->nested.nested_vmx_procbased_ctls_low; highp = &vmx->nested.nested_vmx_procbased_ctls_high; break; case MSR_IA32_VMX_TRUE_EXIT_CTLS: lowp = &vmx->nested.nested_vmx_exit_ctls_low; highp = &vmx->nested.nested_vmx_exit_ctls_high; break; case MSR_IA32_VMX_TRUE_ENTRY_CTLS: lowp = &vmx->nested.nested_vmx_entry_ctls_low; highp = &vmx->nested.nested_vmx_entry_ctls_high; break; case MSR_IA32_VMX_PROCBASED_CTLS2: lowp = &vmx->nested.nested_vmx_secondary_ctls_low; highp = &vmx->nested.nested_vmx_secondary_ctls_high; break; default: BUG(); } supported = vmx_control_msr(*lowp, *highp); /* Check must-be-1 bits are still 1. */ if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) return -EINVAL; /* Check must-be-0 bits are still 0. 
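 * (the restored high word may only clear, never set, allowed-1 bits
 * relative to what is currently supported)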
*/ if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) return -EINVAL; *lowp = data; *highp = data >> 32; return 0; } static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) { const u64 feature_and_reserved_bits = /* feature */ BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) | BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | /* reserved */ GENMASK_ULL(13, 9) | BIT_ULL(31); u64 vmx_misc; vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low, vmx->nested.nested_vmx_misc_high); if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) return -EINVAL; if ((vmx->nested.nested_vmx_pinbased_ctls_high & PIN_BASED_VMX_PREEMPTION_TIMER) && vmx_misc_preemption_timer_rate(data) != vmx_misc_preemption_timer_rate(vmx_misc)) return -EINVAL; if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc)) return -EINVAL; if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc)) return -EINVAL; if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc)) return -EINVAL; vmx->nested.nested_vmx_misc_low = data; vmx->nested.nested_vmx_misc_high = data >> 32; return 0; } static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) { u64 vmx_ept_vpid_cap; vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps, vmx->nested.nested_vmx_vpid_caps); /* Every bit is either reserved or a feature bit. */ if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) return -EINVAL; vmx->nested.nested_vmx_ept_caps = data; vmx->nested.nested_vmx_vpid_caps = data >> 32; return 0; } static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) { u64 *msr; switch (msr_index) { case MSR_IA32_VMX_CR0_FIXED0: msr = &vmx->nested.nested_vmx_cr0_fixed0; break; case MSR_IA32_VMX_CR4_FIXED0: msr = &vmx->nested.nested_vmx_cr4_fixed0; break; default: BUG(); } /* * 1 bits (which indicates bits which "must-be-1" during VMX operation) * must be 1 in the restored value. */ if (!is_bitwise_subset(data, *msr, -1ULL)) return -EINVAL; *msr = data; return 0; } /* * Called when userspace is restoring VMX MSRs. * * Returns 0 on success, non-0 otherwise. */ static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) { struct vcpu_vmx *vmx = to_vmx(vcpu); switch (msr_index) { case MSR_IA32_VMX_BASIC: return vmx_restore_vmx_basic(vmx, data); case MSR_IA32_VMX_PINBASED_CTLS: case MSR_IA32_VMX_PROCBASED_CTLS: case MSR_IA32_VMX_EXIT_CTLS: case MSR_IA32_VMX_ENTRY_CTLS: /* * The "non-true" VMX capability MSRs are generated from the * "true" MSRs, so we do not support restoring them directly. * * If userspace wants to emulate VMX_BASIC[55]=0, userspace * should restore the "true" MSRs with the must-be-1 bits * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND * DEFAULT SETTINGS". */ return -EINVAL; case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: case MSR_IA32_VMX_TRUE_EXIT_CTLS: case MSR_IA32_VMX_TRUE_ENTRY_CTLS: case MSR_IA32_VMX_PROCBASED_CTLS2: return vmx_restore_control_msr(vmx, msr_index, data); case MSR_IA32_VMX_MISC: return vmx_restore_vmx_misc(vmx, data); case MSR_IA32_VMX_CR0_FIXED0: case MSR_IA32_VMX_CR4_FIXED0: return vmx_restore_fixed0_msr(vmx, msr_index, data); case MSR_IA32_VMX_CR0_FIXED1: case MSR_IA32_VMX_CR4_FIXED1: /* * These MSRs are generated based on the vCPU's CPUID, so we * do not support restoring them directly. 
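 * (Userspace that wants different fixed-bit values should adjust the
 * guest's CPUID bits, from which these MSRs are derived.)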
*/ return -EINVAL; case MSR_IA32_VMX_EPT_VPID_CAP: return vmx_restore_vmx_ept_vpid_cap(vmx, data); case MSR_IA32_VMX_VMCS_ENUM: vmx->nested.nested_vmx_vmcs_enum = data; return 0; default: /* * The rest of the VMX capability MSRs do not support restore. */ return -EINVAL; } } /* Returns 0 on success, non-0 otherwise. */ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { struct vcpu_vmx *vmx = to_vmx(vcpu); switch (msr_index) { case MSR_IA32_VMX_BASIC: *pdata = vmx->nested.nested_vmx_basic; break; case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high); if (msr_index == MSR_IA32_VMX_PINBASED_CTLS) *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: case MSR_IA32_VMX_PROCBASED_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high); if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS) *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_TRUE_EXIT_CTLS: case MSR_IA32_VMX_EXIT_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high); if (msr_index == MSR_IA32_VMX_EXIT_CTLS) *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_TRUE_ENTRY_CTLS: case MSR_IA32_VMX_ENTRY_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high); if (msr_index == MSR_IA32_VMX_ENTRY_CTLS) *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_MISC: *pdata = vmx_control_msr( vmx->nested.nested_vmx_misc_low, vmx->nested.nested_vmx_misc_high); break; case MSR_IA32_VMX_CR0_FIXED0: *pdata = vmx->nested.nested_vmx_cr0_fixed0; break; case MSR_IA32_VMX_CR0_FIXED1: *pdata = vmx->nested.nested_vmx_cr0_fixed1; break; case MSR_IA32_VMX_CR4_FIXED0: *pdata = vmx->nested.nested_vmx_cr4_fixed0; break; case MSR_IA32_VMX_CR4_FIXED1: *pdata = vmx->nested.nested_vmx_cr4_fixed1; break; case MSR_IA32_VMX_VMCS_ENUM: *pdata = vmx->nested.nested_vmx_vmcs_enum; break; case MSR_IA32_VMX_PROCBASED_CTLS2: *pdata = vmx_control_msr( vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high); break; case MSR_IA32_VMX_EPT_VPID_CAP: *pdata = vmx->nested.nested_vmx_ept_caps | ((u64)vmx->nested.nested_vmx_vpid_caps << 32); break; case MSR_IA32_VMX_VMFUNC: *pdata = vmx->nested.nested_vmx_vmfunc_controls; break; default: return 1; } return 0; } static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, uint64_t val) { uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; return !(val & ~valid_bits); } /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. 
*/
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct shared_msr_entry *msr;

	switch (msr_info->index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		msr_info->data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		msr_info->data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(to_vmx(vcpu));
		msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_info);
	case MSR_IA32_TSC:
		msr_info->data = guest_read_tsc(vcpu);
		break;
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported() ||
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
			return 1;
		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
		break;
	case MSR_IA32_MCG_EXT_CTL:
		if (!msr_info->host_initiated &&
		    !(to_vmx(vcpu)->msr_ia32_feature_control &
		      FEATURE_CONTROL_LMCE))
			return 1;
		msr_info->data = vcpu->arch.mcg_ext_ctl;
		break;
	case MSR_IA32_FEATURE_CONTROL:
		msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested_vmx_allowed(vcpu))
			return 1;
		return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
	case MSR_IA32_XSS:
		if (!vmx_xsaves_supported())
			return 1;
		msr_info->data = vcpu->arch.ia32_xss;
		break;
	case MSR_TSC_AUX:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
			return 1;
		/* Otherwise falls through */
	default:
		msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
		if (msr) {
			msr_info->data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_info);
	}

	return 0;
}

static void vmx_leave_nested(struct kvm_vcpu *vcpu);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
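 *
 * Host-initiated writes (msr_info->host_initiated) may set state the
 * guest itself cannot, e.g. the otherwise read-only VMX capability MSRs.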
*/ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr; int ret = 0; u32 msr_index = msr_info->index; u64 data = msr_info->data; switch (msr_index) { case MSR_EFER: ret = kvm_set_msr_common(vcpu, msr_info); break; #ifdef CONFIG_X86_64 case MSR_FS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_FS_BASE, data); break; case MSR_GS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_GS_BASE, data); break; case MSR_KERNEL_GS_BASE: vmx_load_host_state(vmx); vmx->msr_guest_kernel_gs_base = data; break; #endif case MSR_IA32_SYSENTER_CS: vmcs_write32(GUEST_SYSENTER_CS, data); break; case MSR_IA32_SYSENTER_EIP: vmcs_writel(GUEST_SYSENTER_EIP, data); break; case MSR_IA32_SYSENTER_ESP: vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported() || (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) return 1; if (is_noncanonical_address(data & PAGE_MASK, vcpu) || (data & MSR_IA32_BNDCFGS_RSVD)) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) return 1; vmcs_write64(GUEST_IA32_PAT, data); vcpu->arch.pat = data; break; } ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_TSC_ADJUST: ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_MCG_EXT_CTL: if ((!msr_info->host_initiated && !(to_vmx(vcpu)->msr_ia32_feature_control & FEATURE_CONTROL_LMCE)) || (data & ~MCG_EXT_CTL_LMCE_EN)) return 1; vcpu->arch.mcg_ext_ctl = data; break; case MSR_IA32_FEATURE_CONTROL: if (!vmx_feature_control_msr_valid(vcpu, data) || (to_vmx(vcpu)->msr_ia32_feature_control & FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) return 1; vmx->msr_ia32_feature_control = data; if (msr_info->host_initiated && data == 0) vmx_leave_nested(vcpu); break; case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: if (!msr_info->host_initiated) return 1; /* they are read-only */ if (!nested_vmx_allowed(vcpu)) return 1; return vmx_set_vmx_msr(vcpu, msr_index, data); case MSR_IA32_XSS: if (!vmx_xsaves_supported()) return 1; /* * The only supported bit as of Skylake is bit 8, but * it is not supported on KVM. 
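 * (Bit 8 is XSAVES-managed Processor Trace state, which KVM does not
 * virtualize; hence only a value of 0 is accepted below.)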
*/ if (data != 0) return 1; vcpu->arch.ia32_xss = data; if (vcpu->arch.ia32_xss != host_xss) add_atomic_switch_msr(vmx, MSR_IA32_XSS, vcpu->arch.ia32_xss, host_xss); else clear_atomic_switch_msr(vmx, MSR_IA32_XSS); break; case MSR_TSC_AUX: if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) return 1; /* Check reserved bit, higher 32 bits should be zero */ if ((data >> 32) != 0) return 1; /* Otherwise falls through */ default: msr = find_msr_entry(vmx, msr_index); if (msr) { u64 old_msr_data = msr->data; msr->data = data; if (msr - vmx->guest_msrs < vmx->save_nmsrs) { preempt_disable(); ret = kvm_set_shared_msr(msr->index, msr->data, msr->mask); preempt_enable(); if (ret) msr->data = old_msr_data; } break; } ret = kvm_set_msr_common(vcpu, msr_info); } return ret; } static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); switch (reg) { case VCPU_REGS_RSP: vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); break; case VCPU_REGS_RIP: vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); break; case VCPU_EXREG_PDPTR: if (enable_ept) ept_save_pdptrs(vcpu); break; default: break; } } static __init int cpu_has_kvm_support(void) { return cpu_has_vmx(); } static __init int vmx_disabled_by_bios(void) { u64 msr; rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); if (msr & FEATURE_CONTROL_LOCKED) { /* launched w/ TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && tboot_enabled()) return 1; /* launched w/o TXT and VMX only enabled w/ TXT */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && !tboot_enabled()) { printk(KERN_WARNING "kvm: disable TXT in the BIOS or " "activate TXT before enabling KVM\n"); return 1; } /* launched w/o TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && !tboot_enabled()) return 1; } return 0; } static void kvm_cpu_vmxon(u64 addr) { cr4_set_bits(X86_CR4_VMXE); intel_pt_handle_vmx(1); asm volatile (ASM_VMX_VMXON_RAX : : "a"(&addr), "m"(addr) : "memory", "cc"); } static int hardware_enable(void) { int cpu = raw_smp_processor_id(); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); u64 old, test_bits; if (cr4_read_shadow() & X86_CR4_VMXE) return -EBUSY; INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); /* * Now we can enable the vmclear operation in kdump * since the loaded_vmcss_on_cpu list on this cpu * has been initialized. * * Though the cpu is not in VMX operation now, there * is no problem to enable the vmclear operation * for the loaded_vmcss_on_cpu list is empty! */ crash_enable_local_vmclear(cpu); rdmsrl(MSR_IA32_FEATURE_CONTROL, old); test_bits = FEATURE_CONTROL_LOCKED; test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; if (tboot_enabled()) test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; if ((old & test_bits) != test_bits) { /* enable and lock */ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); } kvm_cpu_vmxon(phys_addr); ept_sync_global(); return 0; } static void vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v, *n; list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) __loaded_vmcs_clear(v); } /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() * tricks. 
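 * (The __ex()/fault-on-reboot wrapper makes a VMXOFF that faults during
 * an emergency reboot, when VMX may already be off, tolerable instead
 * of an oops.)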
*/ static void kvm_cpu_vmxoff(void) { asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); intel_pt_handle_vmx(0); cr4_clear_bits(X86_CR4_VMXE); } static void hardware_disable(void) { vmclear_local_loaded_vmcss(); kvm_cpu_vmxoff(); } static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result) { u32 vmx_msr_low, vmx_msr_high; u32 ctl = ctl_min | ctl_opt; rdmsr(msr, vmx_msr_low, vmx_msr_high); ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ /* Ensure minimum (required) set of control bits are supported. */ if (ctl_min & ~ctl) return -EIO; *result = ctl; return 0; } static __init bool allow_1_setting(u32 msr, u32 ctl) { u32 vmx_msr_low, vmx_msr_high; rdmsr(msr, vmx_msr_low, vmx_msr_high); return vmx_msr_high & ctl; } static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) { u32 vmx_msr_low, vmx_msr_high; u32 min, opt, min2, opt2; u32 _pin_based_exec_control = 0; u32 _cpu_based_exec_control = 0; u32 _cpu_based_2nd_exec_control = 0; u32 _vmexit_control = 0; u32 _vmentry_control = 0; min = CPU_BASED_HLT_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_INVLPG_EXITING | CPU_BASED_RDPMC_EXITING; if (!kvm_mwait_in_guest()) min |= CPU_BASED_MWAIT_EXITING | CPU_BASED_MONITOR_EXITING; opt = CPU_BASED_TPR_SHADOW | CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, &_cpu_based_exec_control) < 0) return -EIO; #ifdef CONFIG_X86_64 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & ~CPU_BASED_CR8_STORE_EXITING; #endif if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { min2 = 0; opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_WBINVD_EXITING | SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_PAUSE_LOOP_EXITING | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_SHADOW_VMCS | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_RDSEED | SECONDARY_EXEC_RDRAND | SECONDARY_EXEC_ENABLE_PML | SECONDARY_EXEC_TSC_SCALING | SECONDARY_EXEC_ENABLE_VMFUNC; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) return -EIO; } #ifndef CONFIG_X86_64 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; #endif if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_2nd_exec_control &= ~( SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { /* CR3 accesses and invlpg don't need to cause VM Exits when EPT enabled */ _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_INVLPG_EXITING); rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, vmx_capability.ept, vmx_capability.vpid); } min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; #ifdef CONFIG_X86_64 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; #endif opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT | VM_EXIT_CLEAR_BNDCFGS; if 
(adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, &_vmexit_control) < 0) return -EIO; min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, &_pin_based_exec_control) < 0) return -EIO; if (cpu_has_broken_vmx_preemption_timer()) _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; min = VM_ENTRY_LOAD_DEBUG_CONTROLS; opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, &_vmentry_control) < 0) return -EIO; rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) return -EIO; #ifdef CONFIG_X86_64 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ if (vmx_msr_high & (1u<<16)) return -EIO; #endif /* Require Write-Back (WB) memory type for VMCS accesses. */ if (((vmx_msr_high >> 18) & 15) != 6) return -EIO; vmcs_conf->size = vmx_msr_high & 0x1fff; vmcs_conf->order = get_order(vmcs_conf->size); vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; vmcs_conf->revision_id = vmx_msr_low; vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; vmcs_conf->vmexit_ctrl = _vmexit_control; vmcs_conf->vmentry_ctrl = _vmentry_control; cpu_has_load_ia32_efer = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_EFER) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_EFER); cpu_has_load_perf_global_ctrl = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); /* * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL * but due to errata below it can't be used. Workaround is to use * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL. * * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32] * * AAK155 (model 26) * AAP115 (model 30) * AAT100 (model 37) * BC86,AAY89,BD102 (model 44) * BA97 (model 46) * */ if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) { switch (boot_cpu_data.x86_model) { case 26: case 30: case 37: case 44: case 46: cpu_has_load_perf_global_ctrl = false; printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " "does not work properly. 
Using workaround\n");
			break;
		default:
			break;
		}
	}

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		rdmsrl(MSR_IA32_XSS, host_xss);

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

/*
 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
 */
static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	if (!loaded_vmcs->vmcs)
		return;
	loaded_vmcs_clear(loaded_vmcs);
	free_vmcs(loaded_vmcs->vmcs);
	loaded_vmcs->vmcs = NULL;
	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		free_vmcs(per_cpu(vmxarea, cpu));
		per_cpu(vmxarea, cpu) = NULL;
	}
}

enum vmcs_field_type {
	VMCS_FIELD_TYPE_U16 = 0,
	VMCS_FIELD_TYPE_U64 = 1,
	VMCS_FIELD_TYPE_U32 = 2,
	VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
};

static inline int vmcs_field_type(unsigned long field)
{
	if (0x1 & field)	/* the *_HIGH fields are all 32 bit */
		return VMCS_FIELD_TYPE_U32;
	return (field >> 13) & 0x3;
}

static inline int vmcs_field_readonly(unsigned long field)
{
	return (((field >> 10) & 0x3) == 1);
}

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	/* No checks for read only fields yet */

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		switch (shadow_read_write_fields[i]) {
		case GUEST_BNDCFGS:
			if (!kvm_mpx_supported())
				continue;
			break;
		default:
			break;
		}

		if (j < i)
			shadow_read_write_fields[j] =
				shadow_read_write_fields[i];
		j++;
	}
	max_shadow_read_write_fields = j;

	/* shadowed fields guest access without vmexit */
	for (i = 0; i < max_shadow_read_write_fields; i++) {
		unsigned long field = shadow_read_write_fields[i];

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) {
			clear_bit(field + 1, vmx_vmwrite_bitmap);
			clear_bit(field + 1, vmx_vmread_bitmap);
		}
	}
	for (i = 0; i < max_shadow_read_only_fields; i++) {
		unsigned long field = shadow_read_only_fields[i];

		clear_bit(field, vmx_vmread_bitmap);
		if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64)
			clear_bit(field + 1, vmx_vmread_bitmap);
	}
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
		struct kvm_segment *save)
{
	if (!emulate_invalid_guest_state) {
		/*
		 * CS and SS RPL should be equal during guest entry according
		 * to VMX spec, but in reality it is not always so. Since vcpu
		 * is in the middle of the transition from real mode to
		 * protected mode it is safe to assume that RPL 0 is a good
		 * default value.
		 */
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
			save->selector &= ~SEGMENT_RPL_MASK;
		save->dpl = save->selector & SEGMENT_RPL_MASK;
		save->s = 1;
	}
	vmx_set_segment(vcpu, save, seg);
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Update the real mode segment cache.  It may not be up-to-date if a
	 * segment register was written while the vcpu was in guest mode.
*/ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 0; vmx_segment_cache_clear(vmx); vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); flags = vmcs_readl(GUEST_RFLAGS); flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); update_exception_bitmap(vcpu); fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); } static void fix_rmode_seg(int seg, struct kvm_segment *save) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; struct kvm_segment var = *save; var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; if (!emulate_invalid_guest_state) { var.selector = var.base >> 4; var.base = var.base & 0xffff0; var.limit = 0xffff; var.g = 0; var.db = 0; var.present = 1; var.s = 1; var.l = 0; var.unusable = 0; var.type = 0x3; var.avl = 0; if (save->base & 0xf) printk_once(KERN_WARNING "kvm: segment base is not " "paragraph aligned when entering " "protected mode (seg=%d)", seg); } vmcs_write16(sf->selector, var.selector); vmcs_writel(sf->base, var.base); vmcs_write32(sf->limit, var.limit); vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); } static void enter_rmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 1; /* * Very old userspace does not call KVM_SET_TSS_ADDR before entering * vcpu. Warn the user that an update is overdue. 
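 * (Real mode is run as vm86, where the CPU consults the TSS's I/O
 * permission bitmap, so a sane GUEST_TR_BASE is required below.)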
*/ if (!vcpu->kvm->arch.tss_addr) printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " "called before entering vcpu\n"); vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); flags = vmcs_readl(GUEST_RFLAGS); vmx->rmode.save_rflags = flags; flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); update_exception_bitmap(vcpu); fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); kvm_mmu_reset_context(vcpu); } static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); if (!msr) return; /* * Force kernel_gs_base reloading before EFER changes, as control * of this msr depends on is_long_mode(). */ vmx_load_host_state(to_vmx(vcpu)); vcpu->arch.efer = efer; if (efer & EFER_LMA) { vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer; } else { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer & ~EFER_LME; } setup_msrs(vmx); } #ifdef CONFIG_X86_64 static void enter_lmode(struct kvm_vcpu *vcpu) { u32 guest_tr_ar; vmx_segment_cache_clear(to_vmx(vcpu)); guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", __func__); vmcs_write32(GUEST_TR_AR_BYTES, (guest_tr_ar & ~VMX_AR_TYPE_MASK) | VMX_AR_TYPE_BUSY_64_TSS); } vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); } static void exit_lmode(struct kvm_vcpu *vcpu) { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); } #endif static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid) { if (enable_ept) { if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa)); } else { vpid_sync_context(vpid); } } static void vmx_flush_tlb(struct kvm_vcpu *vcpu) { __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); } static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) { if (enable_ept) vmx_flush_tlb(vcpu); } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; vcpu->arch.cr0 &= ~cr0_guest_owned_bits; vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; } static void vmx_decache_cr3(struct kvm_vcpu *vcpu) { if (enable_ept && is_paging(vcpu)) vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); } static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; vcpu->arch.cr4 &= ~cr4_guest_owned_bits; vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; } static void ept_load_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty)) return; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); } } static void ept_save_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); } __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); } static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) { u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0; u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1; struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_UNRESTRICTED_GUEST && nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) fixed0 &= ~(X86_CR0_PE | X86_CR0_PG); return fixed_bits_valid(val, fixed0, fixed1); } static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) { u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0; u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1; return fixed_bits_valid(val, fixed0, fixed1); } static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val) { u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed0; u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed1; return fixed_bits_valid(val, fixed0, fixed1); } /* No difference in the restrictions on guest and host CR4 in VMX operation. 
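 * (Both aliases below resolve to nested_cr4_valid().  Contrast CR0,
 * where unrestricted guest relaxes the PE/PG requirements for the
 * guest side only; see nested_guest_cr0_valid() above.)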
*/ #define nested_guest_cr4_valid nested_cr4_valid #define nested_host_cr4_valid nested_cr4_valid static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, struct kvm_vcpu *vcpu) { if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) vmx_decache_cr3(vcpu); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | (CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } else if (!is_paging(vcpu)) { /* From nonpaging to paging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } if (!(cr0 & X86_CR0_WP)) *hw_cr0 &= ~X86_CR0_WP; } static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long hw_cr0; hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK); if (enable_unrestricted_guest) hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; else { hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) enter_pmode(vcpu); if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) enter_rmode(vcpu); } #ifdef CONFIG_X86_64 if (vcpu->arch.efer & EFER_LME) { if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) enter_lmode(vcpu); if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) exit_lmode(vcpu); } #endif if (enable_ept) ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; /* depends on vcpu->arch.cr0 to be set to a new value */ vmx->emulation_required = emulation_required(vcpu); } static int get_ept_level(struct kvm_vcpu *vcpu) { if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) return 5; return 4; } static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) { u64 eptp = VMX_EPTP_MT_WB; eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; if (enable_ept_ad_bits && (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) eptp |= VMX_EPTP_AD_ENABLE_BIT; eptp |= (root_hpa & PAGE_MASK); return eptp; } static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { unsigned long guest_cr3; u64 eptp; guest_cr3 = cr3; if (enable_ept) { eptp = construct_eptp(vcpu, cr3); vmcs_write64(EPT_POINTER, eptp); if (is_paging(vcpu) || is_guest_mode(vcpu)) guest_cr3 = kvm_read_cr3(vcpu); else guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; ept_load_pdptrs(vcpu); } vmx_flush_tlb(vcpu); vmcs_writel(GUEST_CR3, guest_cr3); } static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { /* * Pass through host's Machine Check Enable value to hw_cr4, which * is in force while we are in guest mode. Do not let guests control * this bit, even if host CR4.MCE == 0. */ unsigned long hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE) | (to_vmx(vcpu)->rmode.vm86_active ? KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); if (cr4 & X86_CR4_VMXE) { /* * To use VMXON (and later other VMX instructions), a guest * must first be able to turn on cr4.VMXE (see handle_vmon()). * So basically the check on whether to allow nested VMX * is here. 
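 * Returning 1 makes the caller inject #GP, matching real hardware
 * without VMX, where setting CR4.VMXE also faults.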
*/ if (!nested_vmx_allowed(vcpu)) return 1; } if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) return 1; vcpu->arch.cr4 = cr4; if (enable_ept) { if (!is_paging(vcpu)) { hw_cr4 &= ~X86_CR4_PAE; hw_cr4 |= X86_CR4_PSE; } else if (!(cr4 & X86_CR4_PAE)) { hw_cr4 &= ~X86_CR4_PAE; } } if (!enable_unrestricted_guest && !is_paging(vcpu)) /* * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in * hardware. To emulate this behavior, SMEP/SMAP/PKU needs * to be manually disabled when guest switches to non-paging * mode. * * If !enable_unrestricted_guest, the CPU is always running * with CR0.PG=1 and CR4 needs to be modified. * If enable_unrestricted_guest, the CPU automatically * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. */ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); return 0; } static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 ar; if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { *var = vmx->rmode.segs[seg]; if (seg == VCPU_SREG_TR || var->selector == vmx_read_guest_seg_selector(vmx, seg)) return; var->base = vmx_read_guest_seg_base(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); return; } var->base = vmx_read_guest_seg_base(vmx, seg); var->limit = vmx_read_guest_seg_limit(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); ar = vmx_read_guest_seg_ar(vmx, seg); var->unusable = (ar >> 16) & 1; var->type = ar & 15; var->s = (ar >> 4) & 1; var->dpl = (ar >> 5) & 3; /* * Some userspaces do not preserve unusable property. Since usable * segment has to be present according to VMX spec we can use present * property to amend userspace bug by making unusable segment always * nonpresent. vmx_segment_access_rights() already marks nonpresent * segment as unusable. */ var->present = !var->unusable; var->avl = (ar >> 12) & 1; var->l = (ar >> 13) & 1; var->db = (ar >> 14) & 1; var->g = (ar >> 15) & 1; } static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment s; if (to_vmx(vcpu)->rmode.vm86_active) { vmx_get_segment(vcpu, &s, seg); return s.base; } return vmx_read_guest_seg_base(to_vmx(vcpu), seg); } static int vmx_get_cpl(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (unlikely(vmx->rmode.vm86_active)) return 0; else { int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); return VMX_AR_DPL(ar); } } static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; if (var->unusable || !var->present) ar = 1 << 16; else { ar = var->type & 15; ar |= (var->s & 1) << 4; ar |= (var->dpl & 3) << 5; ar |= (var->present & 1) << 7; ar |= (var->avl & 1) << 12; ar |= (var->l & 1) << 13; ar |= (var->db & 1) << 14; ar |= (var->g & 1) << 15; } return ar; } static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; vmx_segment_cache_clear(vmx); if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { vmx->rmode.segs[seg] = *var; if (seg == VCPU_SREG_TR) vmcs_write16(sf->selector, var->selector); else if (var->s) fix_rmode_seg(seg, &vmx->rmode.segs[seg]); goto out; } vmcs_writel(sf->base, var->base); vmcs_write32(sf->limit, var->limit); vmcs_write16(sf->selector, var->selector); /* * Fix the "Accessed" bit in AR field of segment registers for older * qemu binaries. 
* IA32 arch specifies that at the time of processor reset the * "Accessed" bit in the AR field of segment registers is 1. And qemu * is setting it to 0 in the userland code. This causes invalid guest * state vmexit when "unrestricted guest" mode is turned on. * Fix for this setup issue in cpu_reset is being pushed in the qemu * tree. Newer qemu binaries with that qemu fix would not need this * kvm hack. */ if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) var->type |= 0x1; /* Accessed */ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); out: vmx->emulation_required = emulation_required(vcpu); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); *db = (ar >> 14) & 1; *l = (ar >> 13) & 1; } static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_IDTR_LIMIT); dt->address = vmcs_readl(GUEST_IDTR_BASE); } static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_IDTR_LIMIT, dt->size); vmcs_writel(GUEST_IDTR_BASE, dt->address); } static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_GDTR_LIMIT); dt->address = vmcs_readl(GUEST_GDTR_BASE); } static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_GDTR_LIMIT, dt->size); vmcs_writel(GUEST_GDTR_BASE, dt->address); } static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; u32 ar; vmx_get_segment(vcpu, &var, seg); var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; ar = vmx_segment_access_rights(&var); if (var.base != (var.selector << 4)) return false; if (var.limit != 0xffff) return false; if (ar != 0xf3) return false; return true; } static bool code_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment cs; unsigned int cs_rpl; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs_rpl = cs.selector & SEGMENT_RPL_MASK; if (cs.unusable) return false; if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) return false; if (!cs.s) return false; if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { if (cs.dpl > cs_rpl) return false; } else { if (cs.dpl != cs_rpl) return false; } if (!cs.present) return false; /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ return true; } static bool stack_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ss; unsigned int ss_rpl; vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); ss_rpl = ss.selector & SEGMENT_RPL_MASK; if (ss.unusable) return true; if (ss.type != 3 && ss.type != 7) return false; if (!ss.s) return false; if (ss.dpl != ss_rpl) /* DPL != RPL */ return false; if (!ss.present) return false; return true; } static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; unsigned int rpl; vmx_get_segment(vcpu, &var, seg); rpl = var.selector & SEGMENT_RPL_MASK; if (var.unusable) return true; if (!var.s) return false; if (!var.present) return false; if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { if (var.dpl < rpl) /* DPL < RPL */ return false; } /* TODO: Add other members to kvm_segment_field to allow checking for other access * rights flags */ return true; } static bool tr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment tr; vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); if (tr.unusable) return false; if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ return false; if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode 
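 * (type 3 is a busy 16-bit TSS, type 11 a busy 32/64-bit TSS; in
 * IA32e mode only type 11 is architecturally valid)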
*/ return false; if (!tr.present) return false; return true; } static bool ldtr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ldtr; vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); if (ldtr.unusable) return true; if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ return false; if (ldtr.type != 2) return false; if (!ldtr.present) return false; return true; } static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) { struct kvm_segment cs, ss; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); return ((cs.selector & SEGMENT_RPL_MASK) == (ss.selector & SEGMENT_RPL_MASK)); } /* * Check if guest state is valid. Returns true if valid, false if * not. * We assume that registers are always usable */ static bool guest_state_valid(struct kvm_vcpu *vcpu) { if (enable_unrestricted_guest) return true; /* real mode guest state checks */ if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) return false; } else { /* protected mode guest state checks */ if (!cs_ss_rpl_check(vcpu)) return false; if (!code_segment_valid(vcpu)) return false; if (!stack_segment_valid(vcpu)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_GS)) return false; if (!tr_valid(vcpu)) return false; if (!ldtr_valid(vcpu)) return false; } /* TODO: * - Add checks on RIP * - Add checks on RFLAGS */ return true; } static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) { return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu)); } static int init_rmode_tss(struct kvm *kvm) { gfn_t fn; u16 data = 0; int idx, r; idx = srcu_read_lock(&kvm->srcu); fn = kvm->arch.tss_addr >> PAGE_SHIFT; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; r = kvm_write_guest_page(kvm, fn++, &data, TSS_IOPB_BASE_OFFSET, sizeof(u16)); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = ~0; r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, sizeof(u8)); out: srcu_read_unlock(&kvm->srcu, idx); return r; } static int init_rmode_identity_map(struct kvm *kvm) { int i, idx, r = 0; kvm_pfn_t identity_map_pfn; u32 tmp; if (!enable_ept) return 0; /* Protect kvm->arch.ept_identity_pagetable_done. 
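 * (slots_lock also covers the memslot setup in
 * alloc_identity_pagetable(), which expects to be called with it held)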
*/ mutex_lock(&kvm->slots_lock); if (likely(kvm->arch.ept_identity_pagetable_done)) goto out2; identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; r = alloc_identity_pagetable(kvm); if (r < 0) goto out2; idx = srcu_read_lock(&kvm->srcu); r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); if (r < 0) goto out; /* Set up identity-mapping pagetable for EPT in real mode */ for (i = 0; i < PT32_ENT_PER_PAGE; i++) { tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); r = kvm_write_guest_page(kvm, identity_map_pfn, &tmp, i * sizeof(tmp), sizeof(tmp)); if (r < 0) goto out; } kvm->arch.ept_identity_pagetable_done = true; out: srcu_read_unlock(&kvm->srcu, idx); out2: mutex_unlock(&kvm->slots_lock); return r; } static void seg_setup(int seg) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; unsigned int ar; vmcs_write16(sf->selector, 0); vmcs_writel(sf->base, 0); vmcs_write32(sf->limit, 0xffff); ar = 0x93; if (seg == VCPU_SREG_CS) ar |= 0x08; /* code segment */ vmcs_write32(sf->ar_bytes, ar); } static int alloc_apic_access_page(struct kvm *kvm) { struct page *page; int r = 0; mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page_done) goto out; r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); if (r) goto out; page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); if (is_error_page(page)) { r = -EFAULT; goto out; } /* * Do not pin the page in memory, so that memory hot-unplug * is able to migrate it. */ put_page(page); kvm->arch.apic_access_page_done = true; out: mutex_unlock(&kvm->slots_lock); return r; } static int alloc_identity_pagetable(struct kvm *kvm) { /* Called with kvm->slots_lock held. */ int r = 0; BUG_ON(kvm->arch.ept_identity_pagetable_done); r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, kvm->arch.ept_identity_map_addr, PAGE_SIZE); return r; } static int allocate_vpid(void) { int vpid; if (!enable_vpid) return 0; spin_lock(&vmx_vpid_lock); vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); if (vpid < VMX_NR_VPIDS) __set_bit(vpid, vmx_vpid_bitmap); else vpid = 0; spin_unlock(&vmx_vpid_lock); return vpid; } static void free_vpid(int vpid) { if (!enable_vpid || vpid == 0) return; spin_lock(&vmx_vpid_lock); __clear_bit(vpid, vmx_vpid_bitmap); spin_unlock(&vmx_vpid_lock); } #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) return; /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { if (type & MSR_TYPE_R) /* read-low */ __clear_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_W) /* write-low */ __clear_bit(msr, msr_bitmap + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R) /* read-high */ __clear_bit(msr, msr_bitmap + 0x400 / f); if (type & MSR_TYPE_W) /* write-high */ __clear_bit(msr, msr_bitmap + 0xc00 / f); } } /* * If a msr is allowed by L0, we should check whether it is allowed by L1. * The corresponding bit will be cleared unless both of L0 and L1 allow it. 
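 * A clear bit means "do not intercept", so the merged bitmap is in
 * effect the bitwise OR of L0's and L1's intercept bitmaps.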
*/ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, unsigned long *msr_bitmap_nested, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) { WARN_ON(1); return; } /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { if (type & MSR_TYPE_R && !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) /* read-low */ __clear_bit(msr, msr_bitmap_nested + 0x000 / f); if (type & MSR_TYPE_W && !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) /* write-low */ __clear_bit(msr, msr_bitmap_nested + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R && !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) /* read-high */ __clear_bit(msr, msr_bitmap_nested + 0x400 / f); if (type & MSR_TYPE_W && !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) /* write-high */ __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); } } static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) { if (!longmode_only) __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr, MSR_TYPE_R | MSR_TYPE_W); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr, MSR_TYPE_R | MSR_TYPE_W); } static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active) { if (apicv_active) { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv, msr, type); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv, msr, type); } else { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, type); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, type); } } static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) { return enable_apicv; } static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); gfn_t gfn; /* * Don't need to mark the APIC access page dirty; it is never * written to by the CPU during APIC virtualization. */ if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; kvm_vcpu_mark_page_dirty(vcpu, gfn); } if (nested_cpu_has_posted_intr(vmcs12)) { gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; kvm_vcpu_mark_page_dirty(vcpu, gfn); } } static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int max_irr; void *vapic_page; u16 status; if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) return; vmx->nested.pi_pending = false; if (!pi_test_and_clear_on(vmx->nested.pi_desc)) return; max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); if (max_irr != 256) { vapic_page = kmap(vmx->nested.virtual_apic_page); __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page); kunmap(vmx->nested.virtual_apic_page); status = vmcs_read16(GUEST_INTR_STATUS); if ((u8)max_irr > ((u8)status & 0xff)) { status &= ~0xff; status |= (u8)max_irr; vmcs_write16(GUEST_INTR_STATUS, status); } } nested_mark_vmcs12_pages_dirty(vcpu); } static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, bool nested) { #ifdef CONFIG_SMP int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; if (vcpu->mode == IN_GUEST_MODE) { struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Currently, we don't support urgent interrupt, * all interrupts are recognized as non-urgent * interrupt, so we cannot post interrupts when * 'SN' is set. 
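		 * (SN is the "suppress notification" bit in the
		 * posted-interrupt descriptor; while it is set, remote CPUs
		 * must not send the notification IPI.)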
* * If the vcpu is in guest mode, it means it is * running instead of being scheduled out and * waiting in the run queue, and that's the only * case when 'SN' is set currently, warning if * 'SN' is set. */ WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); return true; } #endif return false; } static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { /* the PIR and ON have been set by L1. */ kvm_vcpu_trigger_posted_interrupt(vcpu, true); /* * If a posted intr is not recognized by hardware, * we will accomplish it in the next vmentry. */ vmx->nested.pi_pending = true; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } return -1; } /* * Send interrupt to vcpu via posted interrupt way. * 1. If target vcpu is running(non-root mode), send posted interrupt * notification to vcpu and hardware will sync PIR to vIRR atomically. * 2. If target vcpu isn't running(root mode), kick it to pick up the * interrupt from PIR in next vmentry. */ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); int r; r = vmx_deliver_nested_posted_interrupt(vcpu, vector); if (!r) return; if (pi_test_and_set_pir(vector, &vmx->pi_desc)) return; /* If a previous notification has sent the IPI, nothing to do. */ if (pi_test_and_set_on(&vmx->pi_desc)) return; if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) kvm_vcpu_kick(vcpu); } /* * Set up the vmcs's constant host-state fields, i.e., host-state fields that * will not change in the lifetime of the guest. * Note that host-state that does change is set elsewhere. E.g., host-state * that is set differently for each CPU is set in vmx_vcpu_load(), not here. */ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) { u32 low32, high32; unsigned long tmpl; struct desc_ptr dt; unsigned long cr0, cr3, cr4; cr0 = read_cr0(); WARN_ON(cr0 & X86_CR0_TS); vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ /* * Save the most likely value for this task's CR3 in the VMCS. * We can't use __get_current_cr3_fast() because we're not atomic. */ cr3 = __read_cr3(); vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ vmx->host_state.vmcs_host_cr3 = cr3; /* Save the most likely value for this task's CR4 in the VMCS. */ cr4 = cr4_read_shadow(); vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ vmx->host_state.vmcs_host_cr4 = cr4; vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ #ifdef CONFIG_X86_64 /* * Load null selectors, so we can avoid reloading them in * __vmx_load_host_state(), in case userspace uses the null selectors * too (the expected case). 
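	 * (Loading a null selector into DS/ES does not fault on 64-bit, and
	 * their base/limit are ignored there anyway; FS/GS bases are
	 * maintained separately via their base registers/MSRs.)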
 */
	vmcs_write16(HOST_DS_SELECTOR, 0);
	vmcs_write16(HOST_ES_SELECTOR, 0);
#else
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#endif
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

	native_store_idt(&dt);
	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
	vmx->host_idt_base = dt.address;

	vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */

	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */

	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
		rdmsr(MSR_IA32_CR_PAT, low32, high32);
		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
	}
}

static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
	vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
	if (enable_ept)
		vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
	if (is_guest_mode(&vmx->vcpu))
		vmx->vcpu.arch.cr4_guest_owned_bits &=
			~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
}

static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
{
	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;

	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
	/* Enable the preemption timer dynamically */
	pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
	return pin_based_exec_ctrl;
}

static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
	if (cpu_has_secondary_exec_ctrls()) {
		if (kvm_vcpu_apicv_active(vcpu))
			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
				      SECONDARY_EXEC_APIC_REGISTER_VIRT |
				      SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
		else
			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
					SECONDARY_EXEC_APIC_REGISTER_VIRT |
					SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
	}

	if (cpu_has_vmx_msr_bitmap())
		vmx_set_msr_bitmap(vcpu);
}

static u32 vmx_exec_control(struct vcpu_vmx *vmx)
{
	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;

	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
		exec_control &= ~CPU_BASED_MOV_DR_EXITING;

	if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
		exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_STORE_EXITING |
				CPU_BASED_CR8_LOAD_EXITING;
#endif
	}
	if (!enable_ept)
		exec_control |= CPU_BASED_CR3_STORE_EXITING |
				CPU_BASED_CR3_LOAD_EXITING  |
				CPU_BASED_INVLPG_EXITING;
	return exec_control;
}

static bool vmx_rdrand_supported(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDRAND;
}

static bool vmx_rdseed_supported(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDSEED;
}

static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
{
	struct kvm_vcpu *vcpu = &vmx->vcpu;

	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;

	if (!cpu_need_virtualize_apic_accesses(vcpu))
		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	if (vmx->vpid == 0)
		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
	if (!enable_ept) {
		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
		enable_unrestricted_guest = 0;
		/* Enabling INVPCID for non-EPT guests may cause a performance regression.
*/ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; } if (!enable_unrestricted_guest) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; if (!ple_gap) exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; if (!kvm_vcpu_apicv_active(vcpu)) exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD (handle_vmptrld). We can NOT enable shadow_vmcs here because we don't have yet a current VMCS12 */ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; if (!enable_pml) exec_control &= ~SECONDARY_EXEC_ENABLE_PML; if (vmx_xsaves_supported()) { /* Exposing XSAVES only when XSAVE is exposed */ bool xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); if (!xsaves_enabled) exec_control &= ~SECONDARY_EXEC_XSAVES; if (nested) { if (xsaves_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_XSAVES; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_XSAVES; } } if (vmx_rdtscp_supported()) { bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP); if (!rdtscp_enabled) exec_control &= ~SECONDARY_EXEC_RDTSCP; if (nested) { if (rdtscp_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_RDTSCP; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_RDTSCP; } } if (vmx_invpcid_supported()) { /* Exposing INVPCID only when PCID is exposed */ bool invpcid_enabled = guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) && guest_cpuid_has(vcpu, X86_FEATURE_PCID); if (!invpcid_enabled) { exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID); } if (nested) { if (invpcid_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_INVPCID; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_ENABLE_INVPCID; } } if (vmx_rdrand_supported()) { bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND); if (rdrand_enabled) exec_control &= ~SECONDARY_EXEC_RDRAND; if (nested) { if (rdrand_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_RDRAND; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_RDRAND; } } if (vmx_rdseed_supported()) { bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED); if (rdseed_enabled) exec_control &= ~SECONDARY_EXEC_RDSEED; if (nested) { if (rdseed_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_RDSEED; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_RDSEED; } } vmx->secondary_exec_control = exec_control; } static void ept_set_mmio_spte_mask(void) { /* * EPT Misconfigurations can be generated if the value of bits 2:0 * of an EPT paging-structure entry is 110b (write/execute). */ kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK, VMX_EPT_MISCONFIG_WX_VALUE); } #define VMX_XSS_EXIT_BITMAP 0 /* * Sets up the vmcs for emulated real mode. 
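 * The mostly-constant control and host-state fields are written here, once
 * per vCPU; the per-reset guest register state is programmed separately in
 * vmx_vcpu_reset() below.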
*/ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { #ifdef CONFIG_X86_64 unsigned long a; #endif int i; /* I/O */ vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a)); vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b)); if (enable_shadow_vmcs) { vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); } if (cpu_has_vmx_msr_bitmap()) vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ /* Control */ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); vmx->hv_deadline_tsc = -1; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); if (cpu_has_secondary_exec_ctrls()) { vmx_compute_secondary_exec_control(vmx); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, vmx->secondary_exec_control); } if (kvm_vcpu_apicv_active(&vmx->vcpu)) { vmcs_write64(EOI_EXIT_BITMAP0, 0); vmcs_write64(EOI_EXIT_BITMAP1, 0); vmcs_write64(EOI_EXIT_BITMAP2, 0); vmcs_write64(EOI_EXIT_BITMAP3, 0); vmcs_write16(GUEST_INTR_STATUS, 0); vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); } if (ple_gap) { vmcs_write32(PLE_GAP, ple_gap); vmx->ple_window = ple_window; vmx->ple_window_dirty = true; } vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ vmx_set_constant_host_state(vmx); #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ rdmsrl(MSR_GS_BASE, a); vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ #else vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ #endif if (cpu_has_vmx_vmfunc()) vmcs_write64(VM_FUNCTION_CONTROL, 0); vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { u32 index = vmx_msr_index[i]; u32 data_low, data_high; int j = vmx->nmsrs; if (rdmsr_safe(index, &data_low, &data_high) < 0) continue; if (wrmsr_safe(index, data_low, data_high) < 0) continue; vmx->guest_msrs[j].index = i; vmx->guest_msrs[j].data = 0; vmx->guest_msrs[j].mask = -1ull; ++vmx->nmsrs; } vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); /* 22.2.1, 20.8.1 */ vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS); set_cr4_guest_host_mask(vmx); if (vmx_xsaves_supported()) vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); if (enable_pml) { ASSERT(vmx->pml_pg); vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } return 0; } static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct msr_data apic_base_msr; u64 cr0; vmx->rmode.vm86_active = 0; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(vcpu, 0); if (!init_event) { apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_reset_bsp(vcpu)) apic_base_msr.data |= MSR_IA32_APICBASE_BSP; apic_base_msr.host_initiated = true; kvm_set_apic_base(vcpu, 
&apic_base_msr); } vmx_segment_cache_clear(vmx); seg_setup(VCPU_SREG_CS); vmcs_write16(GUEST_CS_SELECTOR, 0xf000); vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); seg_setup(VCPU_SREG_DS); seg_setup(VCPU_SREG_ES); seg_setup(VCPU_SREG_FS); seg_setup(VCPU_SREG_GS); seg_setup(VCPU_SREG_SS); vmcs_write16(GUEST_TR_SELECTOR, 0); vmcs_writel(GUEST_TR_BASE, 0); vmcs_write32(GUEST_TR_LIMIT, 0xffff); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); vmcs_write16(GUEST_LDTR_SELECTOR, 0); vmcs_writel(GUEST_LDTR_BASE, 0); vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); if (!init_event) { vmcs_write32(GUEST_SYSENTER_CS, 0); vmcs_writel(GUEST_SYSENTER_ESP, 0); vmcs_writel(GUEST_SYSENTER_EIP, 0); vmcs_write64(GUEST_IA32_DEBUGCTL, 0); } vmcs_writel(GUEST_RFLAGS, 0x02); kvm_rip_write(vcpu, 0xfff0); vmcs_writel(GUEST_GDTR_BASE, 0); vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); vmcs_writel(GUEST_IDTR_BASE, 0); vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); setup_msrs(vmx); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ if (cpu_has_vmx_tpr_shadow() && !init_event) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); if (cpu_need_tpr_shadow(vcpu)) vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, __pa(vcpu->arch.apic->regs)); vmcs_write32(TPR_THRESHOLD, 0); } kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); if (kvm_vcpu_apicv_active(vcpu)) memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); if (vmx->vpid != 0) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; vmx->vcpu.arch.cr0 = cr0; vmx_set_cr0(vcpu, cr0); /* enter rmode */ vmx_set_cr4(vcpu, 0); vmx_set_efer(vcpu, 0); update_exception_bitmap(vcpu); vpid_sync_context(vmx->vpid); } /* * In nested virtualization, check if L1 asked to exit on external interrupts. * For most existing hypervisors, this will always return true. 
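 * ("Asked to exit" means L1 set PIN_BASED_EXT_INTR_MASK in vmcs12, i.e.
 * physical interrupts arriving while L2 runs should cause a VM exit to L1.)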
*/ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_EXT_INTR_MASK; } /* * In nested virtualization, check if L1 has set * VM_EXIT_ACK_INTR_ON_EXIT */ static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_ACK_INTR_ON_EXIT; } static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING; } static void enable_irq_window(struct kvm_vcpu *vcpu) { vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_VIRTUAL_INTR_PENDING); } static void enable_nmi_window(struct kvm_vcpu *vcpu) { if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { enable_irq_window(vcpu); return; } vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_VIRTUAL_NMI_PENDING); } static void vmx_inject_irq(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); uint32_t intr; int irq = vcpu->arch.interrupt.nr; trace_kvm_inj_virq(irq); ++vcpu->stat.irq_injections; if (vmx->rmode.vm86_active) { int inc_eip = 0; if (vcpu->arch.interrupt.soft) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } intr = irq | INTR_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { intr |= INTR_TYPE_SOFT_INTR; vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); } else intr |= INTR_TYPE_EXT_INTR; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); } static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); ++vcpu->stat.nmi_injections; vmx->loaded_vmcs->nmi_known_unmasked = false; if (vmx->rmode.vm86_active) { if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); } static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); bool masked; if (vmx->loaded_vmcs->nmi_known_unmasked) return false; masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; vmx->loaded_vmcs->nmi_known_unmasked = !masked; return masked; } static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) { struct vcpu_vmx *vmx = to_vmx(vcpu); vmx->loaded_vmcs->nmi_known_unmasked = !masked; if (masked) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); else vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); } static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) { if (to_vmx(vcpu)->nested.nested_run_pending) return 0; return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | GUEST_INTR_STATE_NMI)); } static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) { return (!to_vmx(vcpu)->nested.nested_run_pending && vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); } static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) { int ret; ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, PAGE_SIZE * 3); if (ret) return ret; kvm->arch.tss_addr = addr; return init_rmode_tss(kvm); } static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) { switch (vec) { case BP_VECTOR: /* * Update instruction length as we may reinject the exception * from user space while in guest debugging mode. 
*/ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return false; /* fall through */ case DB_VECTOR: if (vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) return false; /* fall through */ case DE_VECTOR: case OF_VECTOR: case BR_VECTOR: case UD_VECTOR: case DF_VECTOR: case SS_VECTOR: case GP_VECTOR: case MF_VECTOR: return true; break; } return false; } static int handle_rmode_exception(struct kvm_vcpu *vcpu, int vec, u32 err_code) { /* * Instruction with address size override prefix opcode 0x67 * Cause the #SS fault with 0 error code in VM86 mode. */ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; return kvm_vcpu_halt(vcpu); } return 1; } return 0; } /* * Forward all other exceptions that are valid in real mode. * FIXME: Breaks guest debugging in real mode, needs to be fixed with * the required debugging infrastructure rework. */ kvm_queue_exception(vcpu, vec); return 1; } /* * Trigger machine check on the host. We assume all the MSRs are already set up * by the CPU and that we still run on the same CPU as the MCE occurred on. * We pass a fake environment to the machine check handler because we want * the guest to be always treated like user space, no matter what context * it used internally. */ static void kvm_machine_check(void) { #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) struct pt_regs regs = { .cs = 3, /* Fake ring 3 no matter what the guest ran on */ .flags = X86_EFLAGS_IF, }; do_machine_check(&regs, 0); #endif } static int handle_machine_check(struct kvm_vcpu *vcpu) { /* already handled by vcpu_run */ return 1; } static int handle_exception(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_run *kvm_run = vcpu->run; u32 intr_info, ex_no, error_code; unsigned long cr2, rip, dr6; u32 vect_info; enum emulation_result er; vect_info = vmx->idt_vectoring_info; intr_info = vmx->exit_intr_info; if (is_machine_check(intr_info)) return handle_machine_check(vcpu); if (is_nmi(intr_info)) return 1; /* already handled by vmx_vcpu_run() */ if (is_invalid_opcode(intr_info)) { if (is_guest_mode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; } error_code = 0; if (intr_info & INTR_INFO_DELIVER_CODE_MASK) error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); /* * The #PF with PFEC.RSVD = 1 indicates the guest is accessing * MMIO, it is better to report an internal error. * See the comments in vmx_handle_exit. 
*/ if ((vect_info & VECTORING_INFO_VALID_MASK) && !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; vcpu->run->internal.ndata = 3; vcpu->run->internal.data[0] = vect_info; vcpu->run->internal.data[1] = intr_info; vcpu->run->internal.data[2] = error_code; return 0; } if (is_page_fault(intr_info)) { cr2 = vmcs_readl(EXIT_QUALIFICATION); /* EPT won't cause page fault directly */ WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept); return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0, true); } ex_no = intr_info & INTR_INFO_VECTOR_MASK; if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) return handle_rmode_exception(vcpu, ex_no, error_code); switch (ex_no) { case AC_VECTOR: kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); return 1; case DB_VECTOR: dr6 = vmcs_readl(EXIT_QUALIFICATION); if (!(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; if (!(dr6 & ~DR6_RESERVED)) /* icebp */ skip_emulated_instruction(vcpu); kvm_queue_exception(vcpu, DB_VECTOR); return 1; } kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); /* fall through */ case BP_VECTOR: /* * Update instruction length as we may reinject #BP from * user space while in guest debugging mode. Reading it for * #DB as well causes no harm, it is not used in that case. */ vmx->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_run->exit_reason = KVM_EXIT_DEBUG; rip = kvm_rip_read(vcpu); kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; kvm_run->debug.arch.exception = ex_no; break; default: kvm_run->exit_reason = KVM_EXIT_EXCEPTION; kvm_run->ex.exception = ex_no; kvm_run->ex.error_code = error_code; break; } return 0; } static int handle_external_interrupt(struct kvm_vcpu *vcpu) { ++vcpu->stat.irq_exits; return 1; } static int handle_triple_fault(struct kvm_vcpu *vcpu) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; vcpu->mmio_needed = 0; return 0; } static int handle_io(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int size, in, string, ret; unsigned port; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); string = (exit_qualification & 16) != 0; in = (exit_qualification & 8) != 0; ++vcpu->stat.io_exits; if (string || in) return emulate_instruction(vcpu, 0) == EMULATE_DONE; port = exit_qualification >> 16; size = (exit_qualification & 7) + 1; ret = kvm_skip_emulated_instruction(vcpu); /* * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered * KVM_EXIT_DEBUG here. */ return kvm_fast_pio_out(vcpu, size, port) && ret; } static void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) { /* * Patch in the VMCALL instruction: */ hypercall[0] = 0x0f; hypercall[1] = 0x01; hypercall[2] = 0xc1; } /* called to set cr0 as appropriate for a mov-to-cr0 exit. */ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned long orig_val = val; /* * We get here when L2 changed cr0 in a way that did not change * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), * but did change L0 shadowed bits. So we first calculate the * effective cr0 value that L1 would like to write into the * hardware. It consists of the L2-owned bits from the new * value combined with the L1-owned bits from L1's guest_cr0. 
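		 * Concretely, for every bit set in cr0_guest_host_mask the
		 * value is taken from vmcs12->guest_cr0 (L1's choice), and
		 * for every clear bit it is taken from the value L2 just
		 * wrote, which is exactly what the expression below computes.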
*/ val = (val & ~vmcs12->cr0_guest_host_mask) | (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); if (!nested_guest_cr0_valid(vcpu, val)) return 1; if (kvm_set_cr0(vcpu, val)) return 1; vmcs_writel(CR0_READ_SHADOW, orig_val); return 0; } else { if (to_vmx(vcpu)->nested.vmxon && !nested_host_cr0_valid(vcpu, val)) return 1; return kvm_set_cr0(vcpu, val); } } static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned long orig_val = val; /* analogously to handle_set_cr0 */ val = (val & ~vmcs12->cr4_guest_host_mask) | (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); if (kvm_set_cr4(vcpu, val)) return 1; vmcs_writel(CR4_READ_SHADOW, orig_val); return 0; } else return kvm_set_cr4(vcpu, val); } static int handle_cr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification, val; int cr; int reg; int err; int ret; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); cr = exit_qualification & 15; reg = (exit_qualification >> 8) & 15; switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ val = kvm_register_readl(vcpu, reg); trace_kvm_cr_write(cr, val); switch (cr) { case 0: err = handle_set_cr0(vcpu, val); return kvm_complete_insn_gp(vcpu, err); case 3: err = kvm_set_cr3(vcpu, val); return kvm_complete_insn_gp(vcpu, err); case 4: err = handle_set_cr4(vcpu, val); return kvm_complete_insn_gp(vcpu, err); case 8: { u8 cr8_prev = kvm_get_cr8(vcpu); u8 cr8 = (u8)val; err = kvm_set_cr8(vcpu, cr8); ret = kvm_complete_insn_gp(vcpu, err); if (lapic_in_kernel(vcpu)) return ret; if (cr8_prev <= cr8) return ret; /* * TODO: we might be squashing a * KVM_GUESTDBG_SINGLESTEP-triggered * KVM_EXIT_DEBUG here. */ vcpu->run->exit_reason = KVM_EXIT_SET_TPR; return 0; } } break; case 2: /* clts */ WARN_ONCE(1, "Guest should always own CR0.TS"); vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); return kvm_skip_emulated_instruction(vcpu); case 1: /*mov from cr*/ switch (cr) { case 3: val = kvm_read_cr3(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); return kvm_skip_emulated_instruction(vcpu); case 8: val = kvm_get_cr8(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); return kvm_skip_emulated_instruction(vcpu); } break; case 3: /* lmsw */ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); kvm_lmsw(vcpu, val); return kvm_skip_emulated_instruction(vcpu); default: break; } vcpu->run->exit_reason = 0; vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", (int)(exit_qualification >> 4) & 3, cr); return 0; } static int handle_dr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int dr, dr7, reg; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); dr = exit_qualification & DEBUG_REG_ACCESS_NUM; /* First, if DR does not exist, trigger UD */ if (!kvm_require_dr(vcpu, dr)) return 1; /* Do not handle if the CPL > 0, will trigger GP on re-entry */ if (!kvm_require_cpl(vcpu, 0)) return 1; dr7 = vmcs_readl(GUEST_DR7); if (dr7 & DR7_GD) { /* * As the vm-exit takes precedence over the debug trap, we * need to emulate the latter, either for the host or the * guest debugging itself. 
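		 * (If userspace debugs the guest with hardware breakpoints,
		 * the exit is forwarded as KVM_EXIT_DEBUG; otherwise a #DB
		 * with DR6.BD set is injected back into the guest, as done
		 * below.)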
*/ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; vcpu->run->debug.arch.dr7 = dr7; vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); vcpu->run->debug.arch.exception = DB_VECTOR; vcpu->run->exit_reason = KVM_EXIT_DEBUG; return 0; } else { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= DR6_BD | DR6_RTM; kvm_queue_exception(vcpu, DB_VECTOR); return 1; } } if (vcpu->guest_debug == 0) { vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); /* * No more DR vmexits; force a reload of the debug registers * and reenter on this instruction. The next vmexit will * retrieve the full state of the debug registers. */ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; return 1; } reg = DEBUG_REG_ACCESS_REG(exit_qualification); if (exit_qualification & TYPE_MOV_FROM_DR) { unsigned long val; if (kvm_get_dr(vcpu, dr, &val)) return 1; kvm_register_write(vcpu, reg, val); } else if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) return 1; return kvm_skip_emulated_instruction(vcpu); } static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) { return vcpu->arch.dr6; } static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) { } static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) { get_debugreg(vcpu->arch.db[0], 0); get_debugreg(vcpu->arch.db[1], 1); get_debugreg(vcpu->arch.db[2], 2); get_debugreg(vcpu->arch.db[3], 3); get_debugreg(vcpu->arch.dr6, 6); vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); } static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) { vmcs_writel(GUEST_DR7, val); } static int handle_cpuid(struct kvm_vcpu *vcpu) { return kvm_emulate_cpuid(vcpu); } static int handle_rdmsr(struct kvm_vcpu *vcpu) { u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; struct msr_data msr_info; msr_info.index = ecx; msr_info.host_initiated = false; if (vmx_get_msr(vcpu, &msr_info)) { trace_kvm_msr_read_ex(ecx); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_read(ecx, msr_info.data); /* FIXME: handling of bits 32:63 of rax, rdx */ vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; return kvm_skip_emulated_instruction(vcpu); } static int handle_wrmsr(struct kvm_vcpu *vcpu) { struct msr_data msr; u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); msr.data = data; msr.index = ecx; msr.host_initiated = false; if (kvm_set_msr(vcpu, &msr) != 0) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_write(ecx, data); return kvm_skip_emulated_instruction(vcpu); } static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) { kvm_apic_update_ppr(vcpu); return 1; } static int handle_interrupt_window(struct kvm_vcpu *vcpu) { vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_VIRTUAL_INTR_PENDING); kvm_make_request(KVM_REQ_EVENT, vcpu); ++vcpu->stat.irq_window_exits; return 1; } static int handle_halt(struct kvm_vcpu *vcpu) { return kvm_emulate_halt(vcpu); } static int handle_vmcall(struct kvm_vcpu *vcpu) { return kvm_emulate_hypercall(vcpu); } static int handle_invd(struct kvm_vcpu *vcpu) { return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_invlpg(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); kvm_mmu_invlpg(vcpu, exit_qualification); return kvm_skip_emulated_instruction(vcpu); } 
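/*
 * A minimal sketch (added for illustration; not part of the original file,
 * the "demo" helpers are hypothetical) of the EDX:EAX convention used by
 * handle_rdmsr() and handle_wrmsr() above: the low half of the 64-bit MSR
 * value travels in RAX[31:0], the high half in RDX[31:0].
 */
static inline u64 msr_demo_join(u32 eax, u32 edx)
{
	return ((u64)edx << 32) | eax;
}

static inline void msr_demo_split(u64 data, u32 *eax, u32 *edx)
{
	*eax = data & -1u;		/* bits 31:0 */
	*edx = (data >> 32) & -1u;	/* bits 63:32 */
}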
static int handle_rdpmc(struct kvm_vcpu *vcpu) { int err; err = kvm_rdpmc(vcpu); return kvm_complete_insn_gp(vcpu, err); } static int handle_wbinvd(struct kvm_vcpu *vcpu) { return kvm_emulate_wbinvd(vcpu); } static int handle_xsetbv(struct kvm_vcpu *vcpu) { u64 new_bv = kvm_read_edx_eax(vcpu); u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); if (kvm_set_xcr(vcpu, index, new_bv) == 0) return kvm_skip_emulated_instruction(vcpu); return 1; } static int handle_xsaves(struct kvm_vcpu *vcpu) { kvm_skip_emulated_instruction(vcpu); WARN(1, "this should never happen\n"); return 1; } static int handle_xrstors(struct kvm_vcpu *vcpu) { kvm_skip_emulated_instruction(vcpu); WARN(1, "this should never happen\n"); return 1; } static int handle_apic_access(struct kvm_vcpu *vcpu) { if (likely(fasteoi)) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int access_type, offset; access_type = exit_qualification & APIC_ACCESS_TYPE; offset = exit_qualification & APIC_ACCESS_OFFSET; /* * Sane guest uses MOV to write EOI, with written value * not cared. So make a short-circuit here by avoiding * heavy instruction emulation. */ if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && (offset == APIC_EOI)) { kvm_lapic_set_eoi(vcpu); return kvm_skip_emulated_instruction(vcpu); } } return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int vector = exit_qualification & 0xff; /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ kvm_apic_set_eoi_accelerated(vcpu, vector); return 1; } static int handle_apic_write(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 offset = exit_qualification & 0xfff; /* APIC-write VM exit is trap-like and thus no need to adjust IP */ kvm_apic_write_nodecode(vcpu, offset); return 1; } static int handle_task_switch(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qualification; bool has_error_code = false; u32 error_code = 0; u16 tss_selector; int reason, type, idt_v, idt_index; idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); reason = (u32)exit_qualification >> 30; if (reason == TASK_SWITCH_GATE && idt_v) { switch (type) { case INTR_TYPE_NMI_INTR: vcpu->arch.nmi_injected = false; vmx_set_nmi_mask(vcpu, true); break; case INTR_TYPE_EXT_INTR: case INTR_TYPE_SOFT_INTR: kvm_clear_interrupt_queue(vcpu); break; case INTR_TYPE_HARD_EXCEPTION: if (vmx->idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { has_error_code = true; error_code = vmcs_read32(IDT_VECTORING_ERROR_CODE); } /* fall through */ case INTR_TYPE_SOFT_EXCEPTION: kvm_clear_exception_queue(vcpu); break; default: break; } } tss_selector = exit_qualification; if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && type != INTR_TYPE_EXT_INTR && type != INTR_TYPE_NMI_INTR)) skip_emulated_instruction(vcpu); if (kvm_task_switch(vcpu, tss_selector, type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason, has_error_code, error_code) == EMULATE_FAIL) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; return 0; } /* * TODO: What about debug traps on tss switch? * Are we supposed to inject them and update dr6? 
*/ return 1; } static int handle_ept_violation(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; gpa_t gpa; u64 error_code; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); /* * EPT violation happened while executing iret from NMI, * "blocked by NMI" bit has to be set before next VM entry. * There are errata that may cause this bit to not be set: * AAK134, BY25. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); trace_kvm_page_fault(gpa, exit_qualification); /* Is it a read fault? */ error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) ? PFERR_USER_MASK : 0; /* Is it a write fault? */ error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) ? PFERR_WRITE_MASK : 0; /* Is it a fetch fault? */ error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) ? PFERR_FETCH_MASK : 0; /* ept page table entry is present? */ error_code |= (exit_qualification & (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | EPT_VIOLATION_EXECUTABLE)) ? PFERR_PRESENT_MASK : 0; error_code |= (exit_qualification & 0x100) != 0 ? PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; vcpu->arch.exit_qualification = exit_qualification; return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); } static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { int ret; gpa_t gpa; /* * A nested guest cannot optimize MMIO vmexits, because we have an * nGPA here instead of the required GPA. */ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); if (!is_guest_mode(vcpu) && !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { trace_kvm_fast_mmio(gpa); return kvm_skip_emulated_instruction(vcpu); } ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); if (ret >= 0) return ret; /* It is the real ept misconfig */ WARN_ON(1); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; return 0; } static int handle_nmi_window(struct kvm_vcpu *vcpu) { vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_VIRTUAL_NMI_PENDING); ++vcpu->stat.nmi_window_exits; kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); enum emulation_result err = EMULATE_DONE; int ret = 1; u32 cpu_exec_ctrl; bool intr_window_requested; unsigned count = 130; cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; while (vmx->emulation_required && count-- != 0) { if (intr_window_requested && vmx_interrupt_allowed(vcpu)) return handle_interrupt_window(&vmx->vcpu); if (kvm_test_request(KVM_REQ_EVENT, vcpu)) return 1; err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); if (err == EMULATE_USER_EXIT) { ++vcpu->stat.mmio_exits; ret = 0; goto out; } if (err != EMULATE_DONE) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; return 0; } if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; ret = kvm_vcpu_halt(vcpu); goto out; } if (signal_pending(current)) goto out; if (need_resched()) schedule(); } out: return ret; } static int __grow_ple_window(int val) { if (ple_window_grow < 1) return ple_window; val = min(val, ple_window_actual_max); if (ple_window_grow < ple_window) val *= ple_window_grow; else val += ple_window_grow; return val; } static int 
__shrink_ple_window(int val, int modifier, int minimum) { if (modifier < 1) return ple_window; if (modifier < ple_window) val /= modifier; else val -= modifier; return max(val, minimum); } static void grow_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __grow_ple_window(old); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); } static void shrink_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __shrink_ple_window(old, ple_window_shrink, ple_window); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); } /* * ple_window_actual_max is computed to be one grow_ple_window() below * ple_window_max. (See __grow_ple_window for the reason.) * This prevents overflows, because ple_window_max is int. * ple_window_max effectively rounded down to a multiple of ple_window_grow in * this process. * ple_window_max is also prevented from setting vmx->ple_window < ple_window. */ static void update_ple_window_actual_max(void) { ple_window_actual_max = __shrink_ple_window(max(ple_window_max, ple_window), ple_window_grow, INT_MIN); } /* * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. */ static void wakeup_handler(void) { struct kvm_vcpu *vcpu; int cpu = smp_processor_id(); spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), blocked_vcpu_list) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); if (pi_test_on(pi_desc) == 1) kvm_vcpu_kick(vcpu); } spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); } void vmx_enable_tdp(void) { kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK, enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull, enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK, cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK, VMX_EPT_RWX_MASK, 0ull); ept_set_mmio_spte_mask(); kvm_enable_tdp(); } static __init int hardware_setup(void) { int r = -ENOMEM, i, msr; rdmsrl_safe(MSR_EFER, &host_efer); for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) kvm_define_shared_msr(i, vmx_msr_index[i]); for (i = 0; i < VMX_BITMAP_NR; i++) { vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_bitmap[i]) goto out; } vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); /* * Allow direct access to the PC debug port (it is often used for I/O * delays, but the vmexits simply slow things down). 
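	 * (A set bit in an I/O bitmap means "exit on access", so the memsets
	 * below intercept every port and clear_bit(0x80, ...) then exempts
	 * the debug port alone.)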
*/ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); clear_bit(0x80, vmx_io_bitmap_a); memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); if (setup_vmcs_config(&vmcs_config) < 0) { r = -EIO; goto out; } if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) enable_vpid = 0; if (!cpu_has_vmx_shadow_vmcs()) enable_shadow_vmcs = 0; if (enable_shadow_vmcs) init_vmcs_shadow_fields(); if (!cpu_has_vmx_ept() || !cpu_has_vmx_ept_4levels() || !cpu_has_vmx_ept_mt_wb()) { enable_ept = 0; enable_unrestricted_guest = 0; enable_ept_ad_bits = 0; } if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) enable_ept_ad_bits = 0; if (!cpu_has_vmx_unrestricted_guest()) enable_unrestricted_guest = 0; if (!cpu_has_vmx_flexpriority()) flexpriority_enabled = 0; /* * set_apic_access_page_addr() is used to reload apic access * page upon invalidation. No need to do anything if not * using the APIC_ACCESS_ADDR VMCS field. */ if (!flexpriority_enabled) kvm_x86_ops->set_apic_access_page_addr = NULL; if (!cpu_has_vmx_tpr_shadow()) kvm_x86_ops->update_cr8_intercept = NULL; if (enable_ept && !cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); if (!cpu_has_vmx_ple()) ple_gap = 0; if (!cpu_has_vmx_apicv()) { enable_apicv = 0; kvm_x86_ops->sync_pir_to_irr = NULL; } if (cpu_has_vmx_tsc_scaling()) { kvm_has_tsc_control = true; kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; kvm_tsc_scaling_ratio_frac_bits = 48; } vmx_disable_intercept_for_msr(MSR_FS_BASE, false); vmx_disable_intercept_for_msr(MSR_GS_BASE, false); vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); memcpy(vmx_msr_bitmap_legacy_x2apic_apicv, vmx_msr_bitmap_legacy, PAGE_SIZE); memcpy(vmx_msr_bitmap_longmode_x2apic_apicv, vmx_msr_bitmap_longmode, PAGE_SIZE); memcpy(vmx_msr_bitmap_legacy_x2apic, vmx_msr_bitmap_legacy, PAGE_SIZE); memcpy(vmx_msr_bitmap_longmode_x2apic, vmx_msr_bitmap_longmode, PAGE_SIZE); set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ for (msr = 0x800; msr <= 0x8ff; msr++) { if (msr == 0x839 /* TMCCT */) continue; vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true); } /* * TPR reads and writes can be virtualized even if virtual interrupt * delivery is not in use. */ vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true); vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false); /* EOI */ vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true); /* SELF-IPI */ vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true); if (enable_ept) vmx_enable_tdp(); else kvm_disable_tdp(); update_ple_window_actual_max(); /* * Only enable PML when hardware supports PML feature, and both EPT * and EPT A/D bit features are enabled -- PML depends on them to work. 
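	 * (PML logs a guest-physical address only when hardware sets the
	 * dirty bit in an EPT entry, hence the hard dependency on A/D bit
	 * support.)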
*/ if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) enable_pml = 0; if (!enable_pml) { kvm_x86_ops->slot_enable_log_dirty = NULL; kvm_x86_ops->slot_disable_log_dirty = NULL; kvm_x86_ops->flush_log_dirty = NULL; kvm_x86_ops->enable_log_dirty_pt_masked = NULL; } if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { u64 vmx_msr; rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); cpu_preemption_timer_multi = vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; } else { kvm_x86_ops->set_hv_timer = NULL; kvm_x86_ops->cancel_hv_timer = NULL; } kvm_set_posted_intr_wakeup_handler(wakeup_handler); kvm_mce_cap_supported |= MCG_LMCE_P; return alloc_kvm_area(); out: for (i = 0; i < VMX_BITMAP_NR; i++) free_page((unsigned long)vmx_bitmap[i]); return r; } static __exit void hardware_unsetup(void) { int i; for (i = 0; i < VMX_BITMAP_NR; i++) free_page((unsigned long)vmx_bitmap[i]); free_kvm_area(); } /* * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE * exiting, so only get here on cpu with PAUSE-Loop-Exiting. */ static int handle_pause(struct kvm_vcpu *vcpu) { if (ple_gap) grow_ple_window(vcpu); /* * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" * VM-execution control is ignored if CPL > 0. OTOH, KVM * never set PAUSE_EXITING and just set PLE if supported, * so the vcpu must be CPL=0 if it gets a PAUSE exit. */ kvm_vcpu_on_spin(vcpu, true); return kvm_skip_emulated_instruction(vcpu); } static int handle_nop(struct kvm_vcpu *vcpu) { return kvm_skip_emulated_instruction(vcpu); } static int handle_mwait(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); return handle_nop(vcpu); } static int handle_invalid_op(struct kvm_vcpu *vcpu) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } static int handle_monitor_trap(struct kvm_vcpu *vcpu) { return 1; } static int handle_monitor(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); return handle_nop(vcpu); } /* * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. * We could reuse a single VMCS for all the L2 guests, but we also want the * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this * allows keeping them loaded on the processor, and in the future will allow * optimizations where prepare_vmcs02 doesn't need to set all the fields on * every entry if they never change. * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first. * * The following functions allocate and free a vmcs02 in this pool. */ /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) { struct vmcs02_list *item; list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) if (item->vmptr == vmx->nested.current_vmptr) { list_move(&item->list, &vmx->nested.vmcs02_pool); return &item->vmcs02; } if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { /* Recycle the least recently used VMCS. 
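		 * (The pool is kept in most-recently-used order by the
		 * list_move() above, so the tail entry fetched below is the
		 * oldest one.)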
 */
		item = list_last_entry(&vmx->nested.vmcs02_pool,
				       struct vmcs02_list, list);
		item->vmptr = vmx->nested.current_vmptr;
		list_move(&item->list, &vmx->nested.vmcs02_pool);
		return &item->vmcs02;
	}

	/* Create a new VMCS */
	item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
	if (!item)
		return NULL;
	item->vmcs02.vmcs = alloc_vmcs();
	item->vmcs02.shadow_vmcs = NULL;
	if (!item->vmcs02.vmcs) {
		kfree(item);
		return NULL;
	}
	loaded_vmcs_init(&item->vmcs02);
	item->vmptr = vmx->nested.current_vmptr;
	list_add(&(item->list), &(vmx->nested.vmcs02_pool));
	vmx->nested.vmcs02_num++;
	return &item->vmcs02;
}

/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	struct vmcs02_list *item;

	list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
		if (item->vmptr == vmptr) {
			free_loaded_vmcs(&item->vmcs02);
			list_del(&item->list);
			kfree(item);
			vmx->nested.vmcs02_num--;
			return;
		}
}

/*
 * Free all VMCSs saved for this vcpu, except the one pointed by
 * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
 * must be &vmx->vmcs01.
 */
static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
{
	struct vmcs02_list *item, *n;

	WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
	list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
		/*
		 * Something will leak if the above WARN triggers.  Better than
		 * a use-after-free.
		 */
		if (vmx->loaded_vmcs == &item->vmcs02)
			continue;

		free_loaded_vmcs(&item->vmcs02);
		list_del(&item->list);
		kfree(item);
		vmx->nested.vmcs02_num--;
	}
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction, as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions".
 */
static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
}

static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
}

static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
					u32 vm_instruction_error)
{
	if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
		/*
		 * failValid writes the error number to the current VMCS, which
		 * can't be done if there isn't a current VMCS.
		 */
		nested_vmx_failInvalid(vcpu);
		return;
	}
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed
	 */
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: should not simply reset the guest here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}

/*
 * Decode the memory-address operand of a vmx instruction, as recorded on an
 * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and throws
 * #UD or #GP.
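 *
 * For reference (an added summary of the decode just below; see also the
 * SDM's VM-exit instruction-information field layout), vmx_instruction_info
 * is unpacked as:
 *	bits  1:0	scaling (the index register is shifted left by this)
 *	bits  9:7	address size (1 means 32-bit; the result is then
 *			truncated to 32 bits)
 *	bit  10		set if the operand is a register rather than memory
 *	bits 17:15	segment register
 *	bits 21:18	index register, valid only if bit 22 is clear
 *	bits 26:23	base register, valid only if bit 27 is clear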
*/ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, gva_t *ret) { gva_t off; bool exn; struct kvm_segment s; /* * According to Vol. 3B, "Information for VM Exits Due to Instruction * Execution", on an exit, vmx_instruction_info holds most of the * addressing components of the operand. Only the displacement part * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). * For how an actual address is calculated from all these components, * refer to Vol. 1, "Operand Addressing". */ int scaling = vmx_instruction_info & 3; int addr_size = (vmx_instruction_info >> 7) & 7; bool is_reg = vmx_instruction_info & (1u << 10); int seg_reg = (vmx_instruction_info >> 15) & 7; int index_reg = (vmx_instruction_info >> 18) & 0xf; bool index_is_valid = !(vmx_instruction_info & (1u << 22)); int base_reg = (vmx_instruction_info >> 23) & 0xf; bool base_is_valid = !(vmx_instruction_info & (1u << 27)); if (is_reg) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } /* Addr = segment_base + offset */ /* offset = base + [index * scale] + displacement */ off = exit_qualification; /* holds the displacement */ if (base_is_valid) off += kvm_register_read(vcpu, base_reg); if (index_is_valid) off += kvm_register_read(vcpu, index_reg)<<scaling; vmx_get_segment(vcpu, &s, seg_reg); *ret = s.base + off; if (addr_size == 1) /* 32 bit */ *ret &= 0xffffffff; /* Checks for #GP/#SS exceptions. */ exn = false; if (is_long_mode(vcpu)) { /* Long mode: #GP(0)/#SS(0) if the memory address is in a * non-canonical form. This is the only check on the memory * destination for long mode! */ exn = is_noncanonical_address(*ret, vcpu); } else if (is_protmode(vcpu)) { /* Protected mode: apply checks for segment validity in the * following order: * - segment type check (#GP(0) may be thrown) * - usability check (#GP(0)/#SS(0)) * - limit check (#GP(0)/#SS(0)) */ if (wr) /* #GP(0) if the destination operand is located in a * read-only data segment or any code segment. */ exn = ((s.type & 0xa) == 0 || (s.type & 8)); else /* #GP(0) if the source operand is located in an * execute-only code segment */ exn = ((s.type & 0xa) == 8); if (exn) { kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return 1; } /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. */ exn = (s.unusable != 0); /* Protected mode: #GP(0)/#SS(0) if the memory * operand is outside the segment limit. */ exn = exn || (off + sizeof(u64) > s.limit); } if (exn) { kvm_queue_exception_e(vcpu, seg_reg == VCPU_SREG_SS ? 
SS_VECTOR : GP_VECTOR, 0);
		return 1;
	}
	return 0;
}

static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
{
	gva_t gva;
	struct x86_exception e;

	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
		return 1;

	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
				sizeof(*vmpointer), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	return 0;
}

static int enter_vmx_operation(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs *shadow_vmcs;

	if (cpu_has_vmx_msr_bitmap()) {
		vmx->nested.msr_bitmap =
				(unsigned long *)__get_free_page(GFP_KERNEL);
		if (!vmx->nested.msr_bitmap)
			goto out_msr_bitmap;
	}

	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
	if (!vmx->nested.cached_vmcs12)
		goto out_cached_vmcs12;

	if (enable_shadow_vmcs) {
		shadow_vmcs = alloc_vmcs();
		if (!shadow_vmcs)
			goto out_shadow_vmcs;
		/* mark vmcs as shadow */
		shadow_vmcs->revision_id |= (1u << 31);
		/* init shadow vmcs */
		vmcs_clear(shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = shadow_vmcs;
	}

	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
	vmx->nested.vmcs02_num = 0;

	hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;

	vmx->nested.vmxon = true;
	return 0;

out_shadow_vmcs:
	kfree(vmx->nested.cached_vmcs12);

out_cached_vmcs12:
	free_page((unsigned long)vmx->nested.msr_bitmap);

out_msr_bitmap:
	return -ENOMEM;
}

/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not save or even
 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
 * do not currently need to store anything in that guest-allocated memory
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
 * argument is different from the VMXON pointer (which the spec says they do).
 */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
	int ret;
	gpa_t vmptr;
	struct page *page;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

	/*
	 * The Intel VMX Instruction Reference lists a bunch of bits that are
	 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
	 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
	 * Otherwise, we should fail with #UD.  But most faulting conditions
	 * have already been checked by hardware, prior to the VM-exit for
	 * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
	 * that bit set to 1 in non-root mode.
*/ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (vmx->nested.vmxon) { nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); return kvm_skip_emulated_instruction(vcpu); } if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { kvm_inject_gp(vcpu, 0); return 1; } if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; /* * SDM 3: 24.11.5 * The first 4 bytes of VMXON region contain the supported * VMCS revision identifier * * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; * which replaces physical address width with 32 */ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } page = kvm_vcpu_gpa_to_page(vcpu, vmptr); if (is_error_page(page)) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } if (*(u32 *)kmap(page) != VMCS12_REVISION) { kunmap(page); kvm_release_page_clean(page); nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } kunmap(page); kvm_release_page_clean(page); vmx->nested.vmxon_ptr = vmptr; ret = enter_vmx_operation(vcpu); if (ret) return ret; nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } /* * Intel's VMX Instruction Reference specifies a common set of prerequisites * for running VMX instructions (except VMXON, whose prerequisites are * slightly different). It also specifies what exception to inject otherwise. * Note that many of these exceptions have priority over VM exits, so they * don't have to be checked again here. */ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) { if (!to_vmx(vcpu)->nested.vmxon) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } return 1; } static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) { vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); vmcs_write64(VMCS_LINK_POINTER, -1ull); } static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) { if (vmx->nested.current_vmptr == -1ull) return; if (enable_shadow_vmcs) { /* copy to memory all shadowed fields in case they were modified */ copy_shadow_to_vmcs12(vmx); vmx->nested.sync_shadow_vmcs = false; vmx_disable_shadow_vmcs(vmx); } vmx->nested.posted_intr_nv = -1; /* Flush VMCS12 to guest memory */ kvm_vcpu_write_guest_page(&vmx->vcpu, vmx->nested.current_vmptr >> PAGE_SHIFT, vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); vmx->nested.current_vmptr = -1ull; } /* * Free whatever needs to be freed from vmx->nested when L1 goes down, or * just stops using VMX. 
*/ static void free_nested(struct vcpu_vmx *vmx) { if (!vmx->nested.vmxon) return; vmx->nested.vmxon = false; free_vpid(vmx->nested.vpid02); vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; if (vmx->nested.msr_bitmap) { free_page((unsigned long)vmx->nested.msr_bitmap); vmx->nested.msr_bitmap = NULL; } if (enable_shadow_vmcs) { vmx_disable_shadow_vmcs(vmx); vmcs_clear(vmx->vmcs01.shadow_vmcs); free_vmcs(vmx->vmcs01.shadow_vmcs); vmx->vmcs01.shadow_vmcs = NULL; } kfree(vmx->nested.cached_vmcs12); /* Unpin physical memory we referred to in current vmcs02 */ if (vmx->nested.apic_access_page) { kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { kvm_release_page_dirty(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } if (vmx->nested.pi_desc_page) { kunmap(vmx->nested.pi_desc_page); kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; vmx->nested.pi_desc = NULL; } nested_free_all_saved_vmcss(vmx); } /* Emulate the VMXOFF instruction */ static int handle_vmoff(struct kvm_vcpu *vcpu) { if (!nested_vmx_check_permission(vcpu)) return 1; free_nested(to_vmx(vcpu)); nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } /* Emulate the VMCLEAR instruction */ static int handle_vmclear(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 zero = 0; gpa_t vmptr; if (!nested_vmx_check_permission(vcpu)) return 1; if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); return kvm_skip_emulated_instruction(vcpu); } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); return kvm_skip_emulated_instruction(vcpu); } if (vmptr == vmx->nested.current_vmptr) nested_release_vmcs12(vmx); kvm_vcpu_write_guest(vcpu, vmptr + offsetof(struct vmcs12, launch_state), &zero, sizeof(zero)); nested_free_vmcs02(vmx, vmptr); nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); /* Emulate the VMLAUNCH instruction */ static int handle_vmlaunch(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, true); } /* Emulate the VMRESUME instruction */ static int handle_vmresume(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, false); } /* * Read a vmcs12 field. Since these can have varying lengths and we return * one type, we chose the biggest type (u64) and zero-extend the return value * to that size. Note that the caller, handle_vmread, might need to use only * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of * 64-bit fields are to be returned). 
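 *
 * Illustrative use (a sketch; this mirrors what handle_vmread does below):
 *
 *	u64 value;
 *
 *	if (vmcs12_read_any(vcpu, GUEST_ES_SELECTOR, &value) < 0)
 *		return -ENOENT;		(unsupported field)
 *
 * value now holds the 16-bit selector, zero-extended to 64 bits.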
*/ static inline int vmcs12_read_any(struct kvm_vcpu *vcpu, unsigned long field, u64 *ret) { short offset = vmcs_field_to_offset(field); char *p; if (offset < 0) return offset; p = ((char *)(get_vmcs12(vcpu))) + offset; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_NATURAL_WIDTH: *ret = *((natural_width *)p); return 0; case VMCS_FIELD_TYPE_U16: *ret = *((u16 *)p); return 0; case VMCS_FIELD_TYPE_U32: *ret = *((u32 *)p); return 0; case VMCS_FIELD_TYPE_U64: *ret = *((u64 *)p); return 0; default: WARN_ON(1); return -ENOENT; } } static inline int vmcs12_write_any(struct kvm_vcpu *vcpu, unsigned long field, u64 field_value){ short offset = vmcs_field_to_offset(field); char *p = ((char *) get_vmcs12(vcpu)) + offset; if (offset < 0) return offset; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: *(u16 *)p = field_value; return 0; case VMCS_FIELD_TYPE_U32: *(u32 *)p = field_value; return 0; case VMCS_FIELD_TYPE_U64: *(u64 *)p = field_value; return 0; case VMCS_FIELD_TYPE_NATURAL_WIDTH: *(natural_width *)p = field_value; return 0; default: WARN_ON(1); return -ENOENT; } } static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) { int i; unsigned long field; u64 field_value; struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; const unsigned long *fields = shadow_read_write_fields; const int num_fields = max_shadow_read_write_fields; preempt_disable(); vmcs_load(shadow_vmcs); for (i = 0; i < num_fields; i++) { field = fields[i]; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: field_value = vmcs_read16(field); break; case VMCS_FIELD_TYPE_U32: field_value = vmcs_read32(field); break; case VMCS_FIELD_TYPE_U64: field_value = vmcs_read64(field); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: field_value = vmcs_readl(field); break; default: WARN_ON(1); continue; } vmcs12_write_any(&vmx->vcpu, field, field_value); } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); preempt_enable(); } static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) { const unsigned long *fields[] = { shadow_read_write_fields, shadow_read_only_fields }; const int max_fields[] = { max_shadow_read_write_fields, max_shadow_read_only_fields }; int i, q; unsigned long field; u64 field_value = 0; struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; vmcs_load(shadow_vmcs); for (q = 0; q < ARRAY_SIZE(fields); q++) { for (i = 0; i < max_fields[q]; i++) { field = fields[q][i]; vmcs12_read_any(&vmx->vcpu, field, &field_value); switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: vmcs_write16(field, (u16)field_value); break; case VMCS_FIELD_TYPE_U32: vmcs_write32(field, (u32)field_value); break; case VMCS_FIELD_TYPE_U64: vmcs_write64(field, (u64)field_value); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: vmcs_writel(field, (long)field_value); break; default: WARN_ON(1); break; } } } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); } /* * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was * used before) all generate the same failure when it is missing. 
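 *
 * The failure is VMfailInvalid, i.e. CF is set and the remaining
 * arithmetic flags are cleared.  A sketch of the resulting handler
 * pattern, as used by handle_vmread/handle_vmwrite below:
 *
 *	if (!nested_vmx_check_vmcs12(vcpu))
 *		return kvm_skip_emulated_instruction(vcpu);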
*/ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmx->nested.current_vmptr == -1ull) { nested_vmx_failInvalid(vcpu); return 0; } return 1; } static int handle_vmread(struct kvm_vcpu *vcpu) { unsigned long field; u64 field_value; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t gva = 0; if (!nested_vmx_check_permission(vcpu)) return 1; if (!nested_vmx_check_vmcs12(vcpu)) return kvm_skip_emulated_instruction(vcpu); /* Decode instruction info and find the field to read */ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); /* Read the field, zero-extended to a u64 field_value */ if (vmcs12_read_any(vcpu, field, &field_value) < 0) { nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); return kvm_skip_emulated_instruction(vcpu); } /* * Now copy part of this value to register or memory, as requested. * Note that the number of bits actually copied is 32 or 64 depending * on the guest's mode (32 or 64 bit), not on the given field's length. */ if (vmx_instruction_info & (1u << 10)) { kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), field_value); } else { if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &gva)) return 1; /* _system ok, as hardware has verified cpl=0 */ kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); } nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } static int handle_vmwrite(struct kvm_vcpu *vcpu) { unsigned long field; gva_t gva; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); /* The value to write might be 32 or 64 bits, depending on L1's long * mode, and eventually we need to write that into a field of several * possible lengths. The code below first zero-extends the value to 64 * bit (field_value), and then copies only the appropriate number of * bits into the vmcs12 field. */ u64 field_value = 0; struct x86_exception e; if (!nested_vmx_check_permission(vcpu)) return 1; if (!nested_vmx_check_vmcs12(vcpu)) return kvm_skip_emulated_instruction(vcpu); if (vmx_instruction_info & (1u << 10)) field_value = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 3) & 0xf)); else { if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_64_bit_mode(vcpu) ? 
8 : 4), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } } field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); if (vmcs_field_readonly(field)) { nested_vmx_failValid(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); return kvm_skip_emulated_instruction(vcpu); } if (vmcs12_write_any(vcpu, field, field_value) < 0) { nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); return kvm_skip_emulated_instruction(vcpu); } nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) { vmx->nested.current_vmptr = vmptr; if (enable_shadow_vmcs) { vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); vmcs_write64(VMCS_LINK_POINTER, __pa(vmx->vmcs01.shadow_vmcs)); vmx->nested.sync_shadow_vmcs = true; } } /* Emulate the VMPTRLD instruction */ static int handle_vmptrld(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t vmptr; if (!nested_vmx_check_permission(vcpu)) return 1; if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); return kvm_skip_emulated_instruction(vcpu); } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); return kvm_skip_emulated_instruction(vcpu); } if (vmx->nested.current_vmptr != vmptr) { struct vmcs12 *new_vmcs12; struct page *page; page = kvm_vcpu_gpa_to_page(vcpu, vmptr); if (is_error_page(page)) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } new_vmcs12 = kmap(page); if (new_vmcs12->revision_id != VMCS12_REVISION) { kunmap(page); kvm_release_page_clean(page); nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); return kvm_skip_emulated_instruction(vcpu); } nested_release_vmcs12(vmx); /* * Load VMCS12 from guest memory since it is not already * cached. 
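		 * Keeping a private copy is a matter of correctness, not
		 * just speed: L1 must not be able to change the VMCS12
		 * underneath L0 while L2 runs.  The copy is written back
		 * when it is released, roughly as follows (see
		 * nested_release_vmcs12() above):
		 *
		 *	kvm_vcpu_write_guest_page(&vmx->vcpu,
		 *		vmx->nested.current_vmptr >> PAGE_SHIFT,
		 *		vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
		 *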
*/ memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); kunmap(page); kvm_release_page_clean(page); set_current_vmptr(vmx, vmptr); } nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } /* Emulate the VMPTRST instruction */ static int handle_vmptrst(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t vmcs_gva; struct x86_exception e; if (!nested_vmx_check_permission(vcpu)) return 1; if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &vmcs_gva)) return 1; /* ok to use *_system, as hardware has verified cpl=0 */ if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, (void *)&to_vmx(vcpu)->nested.current_vmptr, sizeof(u64), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } /* Emulate the INVEPT instruction */ static int handle_invept(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 vmx_instruction_info, types; unsigned long type; gva_t gva; struct x86_exception e; struct { u64 eptp, gpa; } operand; if (!(vmx->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) || !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (!nested_vmx_check_permission(vcpu)) return 1; vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; if (type >= 32 || !(types & (1 << type))) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (type) { case VMX_EPT_EXTENT_GLOBAL: /* * TODO: track mappings and invalidate * single context requests appropriately */ case VMX_EPT_EXTENT_CONTEXT: kvm_mmu_sync_roots(vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); nested_vmx_succeed(vcpu); break; default: BUG_ON(1); break; } return kvm_skip_emulated_instruction(vcpu); } static int handle_invvpid(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 vmx_instruction_info; unsigned long type, types; gva_t gva; struct x86_exception e; struct { u64 vpid; u64 gla; } operand; if (!(vmx->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_VPID) || !(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (!nested_vmx_check_permission(vcpu)) return 1; vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (vmx->nested.nested_vmx_vpid_caps & VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; if (type >= 32 || !(types & (1 << type))) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } /* according to the intel vmx instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; if 
(kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } if (operand.vpid >> 16) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: if (is_noncanonical_address(operand.gla, vcpu)) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } /* fall through */ case VMX_VPID_EXTENT_SINGLE_CONTEXT: case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: if (!operand.vpid) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } break; case VMX_VPID_EXTENT_ALL_CONTEXT: break; default: WARN_ON_ONCE(1); return kvm_skip_emulated_instruction(vcpu); } __vmx_flush_tlb(vcpu, vmx->nested.vpid02); nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } static int handle_pml_full(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; trace_kvm_pml_full(vcpu->vcpu_id); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); /* * PML buffer FULL happened while executing iret from NMI, * "blocked by NMI" bit has to be set before next VM entry. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); /* * PML buffer already flushed at beginning of VMEXIT. Nothing to do * here.., and there's no userspace involvement needed for PML. */ return 1; } static int handle_preemption_timer(struct kvm_vcpu *vcpu) { kvm_lapic_expired_hv_timer(vcpu); return 1; } static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) { struct vcpu_vmx *vmx = to_vmx(vcpu); int maxphyaddr = cpuid_maxphyaddr(vcpu); /* Check for memory type validity */ switch (address & VMX_EPTP_MT_MASK) { case VMX_EPTP_MT_UC: if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_UC_BIT)) return false; break; case VMX_EPTP_MT_WB: if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_WB_BIT)) return false; break; default: return false; } /* only 4 levels page-walk length are valid */ if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4) return false; /* Reserved bits should not be set */ if (address >> maxphyaddr || ((address >> 7) & 0x1f)) return false; /* AD, if set, should be supported */ if (address & VMX_EPTP_AD_ENABLE_BIT) { if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_AD_BIT)) return false; } return true; } static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { u32 index = vcpu->arch.regs[VCPU_REGS_RCX]; u64 address; bool accessed_dirty; struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (!nested_cpu_has_eptp_switching(vmcs12) || !nested_cpu_has_ept(vmcs12)) return 1; if (index >= VMFUNC_EPTP_ENTRIES) return 1; if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, &address, index * 8, 8)) return 1; accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT); /* * If the (L2) guest does a vmfunc to the currently * active ept pointer, we don't have to do anything else */ if (vmcs12->ept_pointer != address) { if (!valid_ept_address(vcpu, address)) return 1; kvm_mmu_unload(vcpu); mmu->ept_ad = accessed_dirty; mmu->base_role.ad_disabled = !accessed_dirty; vmcs12->ept_pointer = address; /* * TODO: Check what's the correct approach in case * mmu reload fails. 
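		 * (kvm_mmu_reload() can fail, e.g. with -ENOMEM when
		 * allocating new root pages.  A hypothetical stricter
		 * variant could bail out and make the VMFUNC fail instead:
		 *
		 *	if (kvm_mmu_reload(vcpu))
		 *		return 1;
		 *
		 * This is an illustration only, not what the code does.)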
Currently, we just let the next * reload potentially fail */ kvm_mmu_reload(vcpu); } return 0; } static int handle_vmfunc(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12; u32 function = vcpu->arch.regs[VCPU_REGS_RAX]; /* * VMFUNC is only supported for nested guests, but we always enable the * secondary control for simplicity; for non-nested mode, fake that we * didn't by injecting #UD. */ if (!is_guest_mode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } vmcs12 = get_vmcs12(vcpu); if ((vmcs12->vm_function_control & (1 << function)) == 0) goto fail; switch (function) { case 0: if (nested_vmx_eptp_switching(vcpu, vmcs12)) goto fail; break; default: goto fail; } return kvm_skip_emulated_instruction(vcpu); fail: nested_vmx_vmexit(vcpu, vmx->exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; } /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs * to be done to userspace and return 0. */ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_EXCEPTION_NMI] = handle_exception, [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, [EXIT_REASON_IO_INSTRUCTION] = handle_io, [EXIT_REASON_CR_ACCESS] = handle_cr, [EXIT_REASON_DR_ACCESS] = handle_dr, [EXIT_REASON_CPUID] = handle_cpuid, [EXIT_REASON_MSR_READ] = handle_rdmsr, [EXIT_REASON_MSR_WRITE] = handle_wrmsr, [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, [EXIT_REASON_HLT] = handle_halt, [EXIT_REASON_INVD] = handle_invd, [EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_RDPMC] = handle_rdpmc, [EXIT_REASON_VMCALL] = handle_vmcall, [EXIT_REASON_VMCLEAR] = handle_vmclear, [EXIT_REASON_VMLAUNCH] = handle_vmlaunch, [EXIT_REASON_VMPTRLD] = handle_vmptrld, [EXIT_REASON_VMPTRST] = handle_vmptrst, [EXIT_REASON_VMREAD] = handle_vmread, [EXIT_REASON_VMRESUME] = handle_vmresume, [EXIT_REASON_VMWRITE] = handle_vmwrite, [EXIT_REASON_VMOFF] = handle_vmoff, [EXIT_REASON_VMON] = handle_vmon, [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_APIC_WRITE] = handle_apic_write, [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, [EXIT_REASON_WBINVD] = handle_wbinvd, [EXIT_REASON_XSETBV] = handle_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, [EXIT_REASON_INVEPT] = handle_invept, [EXIT_REASON_INVVPID] = handle_invvpid, [EXIT_REASON_RDRAND] = handle_invalid_op, [EXIT_REASON_RDSEED] = handle_invalid_op, [EXIT_REASON_XSAVES] = handle_xsaves, [EXIT_REASON_XRSTORS] = handle_xrstors, [EXIT_REASON_PML_FULL] = handle_pml_full, [EXIT_REASON_VMFUNC] = handle_vmfunc, [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, }; static const int kvm_vmx_max_exit_handlers = ARRAY_SIZE(kvm_vmx_exit_handlers); static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { unsigned long exit_qualification; gpa_t bitmap, last_bitmap; unsigned int port; int size; u8 b; if 
(!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;

	last_bitmap = (gpa_t)-1;
	b = -1;

	while (size > 0) {
		if (port < 0x8000)
			bitmap = vmcs12->io_bitmap_a;
		else if (port < 0x10000)
			bitmap = vmcs12->io_bitmap_b;
		else
			return true;
		bitmap += (port & 0x7fff) / 8;

		if (last_bitmap != bitmap)
			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
				return true;
		if (b & (1 << (port & 7)))
			return true;

		port++;
		size--;
		last_bitmap = bitmap;
	}

	return false;
}

/*
 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
 * rather than handle it ourselves in L0.  I.e., check whether the MSR
 * bitmap that L1 supplied marks the current event (a read or write of a
 * specific MSR) for interception.  This may be the case even when L0
 * doesn't use MSR bitmaps.
 */
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12, u32 exit_reason)
{
	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
	gpa_t bitmap;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return true;

	/*
	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
	 * for the four combinations of read/write and low/high MSR numbers.
	 * First we need to figure out which of the four to use:
	 */
	bitmap = vmcs12->msr_bitmap;
	if (exit_reason == EXIT_REASON_MSR_WRITE)
		bitmap += 2048;
	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
		bitmap += 1024;
	}

	/* Then read the msr_index'th bit from this bitmap: */
	if (msr_index < 1024*8) {
		unsigned char b;
		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
			return true;
		return 1 & (b >> (msr_index & 7));
	} else
		return true; /* let L1 handle the wrong parameter */
}

/*
 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
 * rather than handle it ourselves in L0.  I.e., check if L1 wanted to
 * intercept (via guest_host_mask etc.) the current event.
 */
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	int cr = exit_qualification & 15;
	int reg;
	unsigned long val;

	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		reg = (exit_qualification >> 8) & 15;
		val = kvm_register_readl(vcpu, reg);
		switch (cr) {
		case 0:
			if (vmcs12->cr0_guest_host_mask &
			    (val ^ vmcs12->cr0_read_shadow))
				return true;
			break;
		case 3:
			if ((vmcs12->cr3_target_count >= 1 &&
					vmcs12->cr3_target_value0 == val) ||
				(vmcs12->cr3_target_count >= 2 &&
					vmcs12->cr3_target_value1 == val) ||
				(vmcs12->cr3_target_count >= 3 &&
					vmcs12->cr3_target_value2 == val) ||
				(vmcs12->cr3_target_count >= 4 &&
					vmcs12->cr3_target_value3 == val))
				return false;
			if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
				return true;
			break;
		case 4:
			if (vmcs12->cr4_guest_host_mask &
			    (vmcs12->cr4_read_shadow ^ val))
				return true;
			break;
		case 8:
			if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
				return true;
			break;
		}
		break;
	case 2: /* clts */
		if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
		    (vmcs12->cr0_read_shadow & X86_CR0_TS))
			return true;
		break;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR3_STORE_EXITING)
				return true;
			break;
		case 8:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR8_STORE_EXITING)
				return true;
			break;
		}
		break;
	case 3: /* lmsw */
		/*
		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
		 * cr0.  Other attempted changes are ignored, with no exit.
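		 *
		 * Worked example (illustrative): if L1 owns TS
		 * (cr0_guest_host_mask = X86_CR0_TS = 0x8), the read shadow
		 * has TS = 0 and the lmsw source value has TS = 1, then
		 * 0x8 & 0xe & (0x8 ^ 0x0) is non-zero and the exit is
		 * reflected to L1.  Bit 0 (PE) is special-cased below
		 * because lmsw can set but never clear it.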
*/ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; if (vmcs12->cr0_guest_host_mask & 0xe & (val ^ vmcs12->cr0_read_shadow)) return true; if ((vmcs12->cr0_guest_host_mask & 0x1) && !(vmcs12->cr0_read_shadow & 0x1) && (val & 0x1)) return true; break; } return false; } /* * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we * should handle it ourselves in L0 (and then continue L2). Only call this * when in is_guest_mode (L2). */ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) { u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, vmcs_readl(EXIT_QUALIFICATION), vmx->idt_vectoring_info, intr_info, vmcs_read32(VM_EXIT_INTR_ERROR_CODE), KVM_ISA_VMX); /* * The host physical addresses of some pages of guest memory * are loaded into VMCS02 (e.g. L1's Virtual APIC Page). The CPU * may write to these pages via their host physical address while * L2 is running, bypassing any address-translation-based dirty * tracking (e.g. EPT write protection). * * Mark them dirty on every exit from L2 to prevent them from * getting out of sync with dirty tracking. */ nested_mark_vmcs12_pages_dirty(vcpu); if (vmx->nested.nested_run_pending) return false; if (unlikely(vmx->fail)) { pr_info_ratelimited("%s failed vm entry %x\n", __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); return true; } switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: if (is_nmi(intr_info)) return false; else if (is_page_fault(intr_info)) return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; else if (is_no_device(intr_info) && !(vmcs12->guest_cr0 & X86_CR0_TS)) return false; else if (is_debug(intr_info) && vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) return false; else if (is_breakpoint(intr_info) && vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return false; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); case EXIT_REASON_EXTERNAL_INTERRUPT: return false; case EXIT_REASON_TRIPLE_FAULT: return true; case EXIT_REASON_PENDING_INTERRUPT: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); case EXIT_REASON_NMI_WINDOW: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); case EXIT_REASON_TASK_SWITCH: return true; case EXIT_REASON_CPUID: return true; case EXIT_REASON_HLT: return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); case EXIT_REASON_INVD: return true; case EXIT_REASON_INVLPG: return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); case EXIT_REASON_RDPMC: return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); case EXIT_REASON_RDRAND: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND); case EXIT_REASON_RDSEED: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED); case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: /* * VMX instructions trap unconditionally. This allows L1 to * emulate them for its L2 guest, i.e., allows 3-level nesting! 
		 */
		return true;
	case EXIT_REASON_CR_ACCESS:
		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
	case EXIT_REASON_DR_ACCESS:
		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
	case EXIT_REASON_IO_INSTRUCTION:
		return nested_vmx_exit_handled_io(vcpu, vmcs12);
	case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
	case EXIT_REASON_INVALID_STATE:
		return true;
	case EXIT_REASON_MWAIT_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
	case EXIT_REASON_MONITOR_TRAP_FLAG:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
	case EXIT_REASON_MONITOR_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
	case EXIT_REASON_PAUSE_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
			nested_cpu_has2(vmcs12,
				SECONDARY_EXEC_PAUSE_LOOP_EXITING);
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return false;
	case EXIT_REASON_TPR_BELOW_THRESHOLD:
		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
	case EXIT_REASON_APIC_ACCESS:
		return nested_cpu_has2(vmcs12,
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
	case EXIT_REASON_APIC_WRITE:
	case EXIT_REASON_EOI_INDUCED:
		/* apic_write and eoi_induced should exit unconditionally. */
		return true;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault()
		 */
		return false;
	case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never directly uses L1's EPT, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table are L0's fault.
		 */
		return false;
	case EXIT_REASON_INVPCID:
		return
			nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
			nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_WBINVD:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
	case EXIT_REASON_XSETBV:
		return true;
	case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
		/*
		 * This should never happen, since it is not possible to
		 * set XSS to a non-zero value---neither in L1 nor in L2.
		 * If it were, XSS would have to be checked against
		 * the XSS exit bitmap in vmcs12.
		 */
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
	case EXIT_REASON_PREEMPTION_TIMER:
		return false;
	case EXIT_REASON_PML_FULL:
		/* We emulate PML support to L1. */
		return false;
	case EXIT_REASON_VMFUNC:
		/* VM functions are emulated through L2->L0 vmexits. */
		return false;
	default:
		return true;
	}
}

static int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	/*
	 * At this point, the exit interruption info in exit_intr_info
	 * is only valid for EXCEPTION_NMI exits.  For EXTERNAL_INTERRUPT
	 * we need to query the in-kernel LAPIC.
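	 *
	 * A sketch of the test applied below: the error code is forwarded
	 * into vmcs12 only when the interruption info has both the "valid"
	 * and "deliver error code" bits set, i.e.
	 *
	 *	mask = INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK;
	 *	if ((exit_intr_info & mask) == mask)
	 *		vmcs12->vm_exit_intr_error_code =
	 *			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);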
*/ WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT); if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); } nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, vmcs_readl(EXIT_QUALIFICATION)); return 1; } static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) { *info1 = vmcs_readl(EXIT_QUALIFICATION); *info2 = vmcs_read32(VM_EXIT_INTR_INFO); } static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) { if (vmx->pml_pg) { __free_page(vmx->pml_pg); vmx->pml_pg = NULL; } } static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 *pml_buf; u16 pml_idx; pml_idx = vmcs_read16(GUEST_PML_INDEX); /* Do nothing if PML buffer is empty */ if (pml_idx == (PML_ENTITY_NUM - 1)) return; /* PML index always points to next available PML buffer entity */ if (pml_idx >= PML_ENTITY_NUM) pml_idx = 0; else pml_idx++; pml_buf = page_address(vmx->pml_pg); for (; pml_idx < PML_ENTITY_NUM; pml_idx++) { u64 gpa; gpa = pml_buf[pml_idx]; WARN_ON(gpa & (PAGE_SIZE - 1)); kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); } /* reset PML index */ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } /* * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap. * Called before reporting dirty_bitmap to userspace. */ static void kvm_flush_pml_buffers(struct kvm *kvm) { int i; struct kvm_vcpu *vcpu; /* * We only need to kick vcpu out of guest mode here, as PML buffer * is flushed at beginning of all VMEXITs, and it's obvious that only * vcpus running in guest are possible to have unflushed GPAs in PML * buffer. */ kvm_for_each_vcpu(i, vcpu, kvm) kvm_vcpu_kick(vcpu); } static void vmx_dump_sel(char *name, uint32_t sel) { pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n", name, vmcs_read16(sel), vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); } static void vmx_dump_dtsel(char *name, uint32_t limit) { pr_err("%s limit=0x%08x, base=0x%016lx\n", name, vmcs_read32(limit), vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); } static void dump_vmcs(void) { u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS); u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS); u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL); u32 secondary_exec_control = 0; unsigned long cr4 = vmcs_readl(GUEST_CR4); u64 efer = vmcs_read64(GUEST_IA32_EFER); int i, n; if (cpu_has_secondary_exec_ctrls()) secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); pr_err("*** Guest State ***\n"); pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), vmcs_readl(CR0_GUEST_HOST_MASK)); pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK)); pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3)); if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) && (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) { pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n", vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1)); pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n", vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3)); } pr_err("RSP = 0x%016lx RIP = 0x%016lx\n", 
vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", vmcs_readl(GUEST_SYSENTER_ESP), vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", efer, vmcs_read64(GUEST_IA32_PAT)); pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", vmcs_read64(GUEST_IA32_DEBUGCTL), vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) pr_err("PerfGlobCtl = 0x%016llx\n", vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); pr_err("Interruptibility = %08x ActivityState = %08x\n", vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), vmcs_read32(GUEST_ACTIVITY_STATE)); if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) pr_err("InterruptStatus = %04x\n", vmcs_read16(GUEST_INTR_STATUS)); pr_err("*** Host State ***\n"); pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), vmcs_read16(HOST_TR_SELECTOR)); pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), vmcs_readl(HOST_TR_BASE)); pr_err("GDTBase=%016lx IDTBase=%016lx\n", vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), vmcs_readl(HOST_CR4)); pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", vmcs_readl(HOST_IA32_SYSENTER_ESP), vmcs_read32(HOST_IA32_SYSENTER_CS), vmcs_readl(HOST_IA32_SYSENTER_EIP)); if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_EFER), vmcs_read64(HOST_IA32_PAT)); if (vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) pr_err("PerfGlobCtl = 0x%016llx\n", vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); pr_err("*** Control State ***\n"); pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", vmcs_read32(EXCEPTION_BITMAP), vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", vmcs_read32(VM_EXIT_INTR_INFO), vmcs_read32(VM_EXIT_INTR_ERROR_CODE), vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); pr_err(" 
       reason=%08x qualification=%016lx\n",
	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
		pr_err("TSC Multiplier = 0x%016llx\n",
		       vmcs_read64(TSC_MULTIPLIER));
	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
		pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
	n = vmcs_read32(CR3_TARGET_COUNT);
	for (i = 0; i + 1 < n; i += 4)
		pr_err("CR3 target%u=%016lx target%u=%016lx\n",
		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
		       i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
	if (i < n)
		pr_err("CR3 target%u=%016lx\n",
		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
		pr_err("PLE Gap=%08x Window=%08x\n",
		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
		pr_err("Virtual processor ID = 0x%04x\n",
		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
}

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int vmx_handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason = vmx->exit_reason;
	u32 vectoring_info = vmx->idt_vectoring_info;

	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);

	/*
	 * Flush the PML buffer of logged GPAs; this keeps dirty_bitmap as
	 * up to date as possible.  Another benefit: in
	 * kvm_vm_ioctl_get_dirty_log(), before querying dirty_bitmap we
	 * only need to kick all vcpus out of guest mode, since once a vcpu
	 * is in root mode its PML buffer must already have been flushed.
	 */
	if (enable_pml)
		vmx_flush_pml_buffer(vcpu);

	/* If guest state is invalid, start emulating */
	if (vmx->emulation_required)
		return handle_invalid_guest_state(vcpu);

	if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
		return nested_vmx_reflect_vmexit(vcpu, exit_reason);

	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
		dump_vmcs();
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= exit_reason;
		return 0;
	}

	if (unlikely(vmx->fail)) {
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= vmcs_read32(VM_INSTRUCTION_ERROR);
		return 0;
	}

	/*
	 * Note:
	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by
	 * an event delivery, since that indicates the guest is accessing
	 * MMIO.  The VM exit would simply be triggered again after
	 * returning to the guest, causing an infinite loop.
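	 *
	 * Concrete example (illustrative): if the guest's IDT or exception
	 * stack points at MMIO, re-delivering the recorded event would
	 * fault on the same EPT misconfiguration forever.  Instead, a
	 * KVM_EXIT_INTERNAL_ERROR is handed to userspace below, carrying
	 * the diagnostics roughly as:
	 *
	 *	run->internal.data[0] = vectoring_info;
	 *	run->internal.data[1] = exit_reason;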
*/ if ((vectoring_info & VECTORING_INFO_VALID_MASK) && (exit_reason != EXIT_REASON_EXCEPTION_NMI && exit_reason != EXIT_REASON_EPT_VIOLATION && exit_reason != EXIT_REASON_PML_FULL && exit_reason != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; vcpu->run->internal.ndata = 3; vcpu->run->internal.data[0] = vectoring_info; vcpu->run->internal.data[1] = exit_reason; vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; if (exit_reason == EXIT_REASON_EPT_MISCONFIG) { vcpu->run->internal.ndata++; vcpu->run->internal.data[3] = vmcs_read64(GUEST_PHYSICAL_ADDRESS); } return 0; } if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); else { vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason); kvm_queue_exception(vcpu, UD_VECTOR); return 1; } } static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (is_guest_mode(vcpu) && nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return; if (irr == -1 || tpr < irr) { vmcs_write32(TPR_THRESHOLD, 0); return; } vmcs_write32(TPR_THRESHOLD, irr); } static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) { u32 sec_exec_control; /* Postpone execution until vmcs01 is the current VMCS. */ if (is_guest_mode(vcpu)) { to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true; return; } if (!cpu_has_vmx_virtualize_x2apic_mode()) return; if (!cpu_need_tpr_shadow(vcpu)) return; sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); if (set) { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; } else { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; vmx_flush_tlb_ept_only(vcpu); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); vmx_set_msr_bitmap(vcpu); } static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) { struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Currently we do not handle the nested case where L2 has an * APIC access page of its own; that page is still pinned. * Hence, we skip the case where the VCPU is in guest mode _and_ * L1 prepared an APIC access page for L2. * * For the case where L1 and L2 share the same APIC access page * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear * in the vmcs12), this function will only update either the vmcs01 * or the vmcs02. If the former, the vmcs02 will be updated by * prepare_vmcs02. If the latter, the vmcs01 will be updated in * the next L2->L1 exit. 
*/ if (!is_guest_mode(vcpu) || !nested_cpu_has2(get_vmcs12(&vmx->vcpu), SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { vmcs_write64(APIC_ACCESS_ADDR, hpa); vmx_flush_tlb_ept_only(vcpu); } } static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) { u16 status; u8 old; if (max_isr == -1) max_isr = 0; status = vmcs_read16(GUEST_INTR_STATUS); old = status >> 8; if (max_isr != old) { status &= 0xff; status |= max_isr << 8; vmcs_write16(GUEST_INTR_STATUS, status); } } static void vmx_set_rvi(int vector) { u16 status; u8 old; if (vector == -1) vector = 0; status = vmcs_read16(GUEST_INTR_STATUS); old = (u8)status & 0xff; if ((u8)vector != old) { status &= ~0xff; status |= (u8)vector; vmcs_write16(GUEST_INTR_STATUS, status); } } static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) { if (!is_guest_mode(vcpu)) { vmx_set_rvi(max_irr); return; } if (max_irr == -1) return; /* * In guest mode. If a vmexit is needed, vmx_check_nested_events * handles it. */ if (nested_exit_on_intr(vcpu)) return; /* * Else, fall back to pre-APICv interrupt injection since L2 * is run without virtual interrupt delivery. */ if (!kvm_event_needs_reinjection(vcpu) && vmx_interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, max_irr, false); vmx_inject_irq(vcpu); } } static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int max_irr; WARN_ON(!vcpu->arch.apicv_active); if (pi_test_on(&vmx->pi_desc)) { pi_clear_on(&vmx->pi_desc); /* * IOMMU can write to PIR.ON, so the barrier matters even on UP. * But on x86 this is just a compiler barrier anyway. */ smp_mb__after_atomic(); max_irr = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); } else { max_irr = kvm_lapic_find_highest_irr(vcpu); } vmx_hwapic_irr_update(vcpu, max_irr); return max_irr; } static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) { if (!kvm_vcpu_apicv_active(vcpu)) return; vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); } static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); pi_clear_on(&vmx->pi_desc); memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); } static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) { u32 exit_intr_info = 0; u16 basic_exit_reason = (u16)vmx->exit_reason; if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI)) return; if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); vmx->exit_intr_info = exit_intr_info; /* if exit due to PF check for async PF */ if (is_page_fault(exit_intr_info)) vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); /* Handle machine checks before interrupts are enabled */ if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || is_machine_check(exit_intr_info)) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ if (is_nmi(exit_intr_info)) { kvm_before_handle_nmi(&vmx->vcpu); asm("int $2"); kvm_after_handle_nmi(&vmx->vcpu); } } static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) { u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); register void *__sp asm(_ASM_SP); if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { unsigned int vector; unsigned long entry; gate_desc *desc; struct vcpu_vmx *vmx 
= to_vmx(vcpu); #ifdef CONFIG_X86_64 unsigned long tmp; #endif vector = exit_intr_info & INTR_INFO_VECTOR_MASK; desc = (gate_desc *)vmx->host_idt_base + vector; entry = gate_offset(desc); asm volatile( #ifdef CONFIG_X86_64 "mov %%" _ASM_SP ", %[sp]\n\t" "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" "push $%c[ss]\n\t" "push %[sp]\n\t" #endif "pushf\n\t" __ASM_SIZE(push) " $%c[cs]\n\t" "call *%[entry]\n\t" : #ifdef CONFIG_X86_64 [sp]"=&r"(tmp), #endif "+r"(__sp) : [entry]"r"(entry), [ss]"i"(__KERNEL_DS), [cs]"i"(__KERNEL_CS) ); } } STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); static bool vmx_has_high_real_mode_segbase(void) { return enable_unrestricted_guest || emulate_invalid_guest_state; } static bool vmx_mpx_supported(void) { return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) && (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS); } static bool vmx_xsaves_supported(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_XSAVES; } static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) { u32 exit_intr_info; bool unblock_nmi; u8 vector; bool idtv_info_valid; idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; if (vmx->loaded_vmcs->nmi_known_unmasked) return; /* * Can't use vmx->exit_intr_info since we're not sure what * the exit reason is. */ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; vector = exit_intr_info & INTR_INFO_VECTOR_MASK; /* * SDM 3: 27.7.1.2 (September 2008) * Re-set bit "block by NMI" before VM entry if vmexit caused by * a guest IRET fault. * SDM 3: 23.2.2 (September 2008) * Bit 12 is undefined in any of the following cases: * If the VM exit sets the valid bit in the IDT-vectoring * information field. * If the VM exit is due to a double fault. */ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && vector != DF_VECTOR && !idtv_info_valid) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); else vmx->loaded_vmcs->nmi_known_unmasked = !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI); } static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, u32 idt_vectoring_info, int instr_len_field, int error_code_field) { u8 vector; int type; bool idtv_info_valid; idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); if (!idtv_info_valid) return; kvm_make_request(KVM_REQ_EVENT, vcpu); vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; switch (type) { case INTR_TYPE_NMI_INTR: vcpu->arch.nmi_injected = true; /* * SDM 3: 27.7.1.2 (September 2008) * Clear bit "block by NMI" before VM entry if a NMI * delivery faulted. 
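		 *
		 * Scenario (illustrative): delivery of an NMI faults, e.g.
		 * with a #PF while pushing the exception frame; the NMI is
		 * recorded in the IDT-vectoring info and will be
		 * re-injected, but hardware has left "blocked by NMI" set.
		 * Dropping the mask, as done just below, lets the
		 * re-injected NMI through:
		 *
		 *	vmx_set_nmi_mask(vcpu, false);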
*/ vmx_set_nmi_mask(vcpu, false); break; case INTR_TYPE_SOFT_EXCEPTION: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(error_code_field); kvm_requeue_exception_e(vcpu, vector, err); } else kvm_requeue_exception(vcpu, vector); break; case INTR_TYPE_SOFT_INTR: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_EXT_INTR: kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); break; default: break; } } static void vmx_complete_interrupts(struct vcpu_vmx *vmx) { __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_ERROR_CODE); } static void vmx_cancel_injection(struct kvm_vcpu *vcpu) { __vmx_complete_interrupts(vcpu, vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) { int i, nr_msrs; struct perf_guest_switch_msr *msrs; msrs = perf_guest_get_msrs(&nr_msrs); if (!msrs) return; for (i = 0; i < nr_msrs; i++) if (msrs[i].host == msrs[i].guest) clear_atomic_switch_msr(vmx, msrs[i].msr); else add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, msrs[i].host); } static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 tscl; u32 delta_tsc; if (vmx->hv_deadline_tsc == -1) return; tscl = rdtsc(); if (vmx->hv_deadline_tsc > tscl) /* sure to be 32 bit only because checked on set_hv_timer */ delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> cpu_preemption_timer_multi); else delta_tsc = 0; vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); } static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long debugctlmsr, cr3, cr4; /* Don't enter VMX if guest state is invalid, let the exit handler start emulation until we arrive back to a valid state */ if (vmx->emulation_required) return; if (vmx->ple_window_dirty) { vmx->ple_window_dirty = false; vmcs_write32(PLE_WINDOW, vmx->ple_window); } if (vmx->nested.sync_shadow_vmcs) { copy_vmcs12_to_shadow(vmx); vmx->nested.sync_shadow_vmcs = false; } if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); cr3 = __get_current_cr3_fast(); if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) { vmcs_writel(HOST_CR3, cr3); vmx->host_state.vmcs_host_cr3 = cr3; } cr4 = cr4_read_shadow(); if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { vmcs_writel(HOST_CR4, cr4); vmx->host_state.vmcs_host_cr4 = cr4; } /* When single-stepping over STI and MOV SS, we must clear the * corresponding interruptibility bits in the guest state. Otherwise * vmentry fails as it then expects bit 14 (BS) in pending debug * exceptions being set, but that's not correct for the guest debugging * case. 
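	 *
	 * Concretely (illustrative; per the SDM's VM-entry checks):
	 * GUEST_INTERRUPTIBILITY_INFO bit 0 is the STI shadow and bit 1
	 * the MOV-SS shadow; with RFLAGS.TF set for single-stepping, entry
	 * demands GUEST_PENDING_DBG_EXCEPTIONS.BS (bit 14) whenever either
	 * shadow bit is set.  Rather than fake a pending BS exception, we
	 * simply clear the shadow, as done just below.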
*/ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vmx_set_interrupt_shadow(vcpu, 0); if (static_cpu_has(X86_FEATURE_PKU) && kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && vcpu->arch.pkru != vmx->host_pkru) __write_pkru(vcpu->arch.pkru); atomic_switch_perf_msrs(vmx); debugctlmsr = get_debugctlmsr(); vmx_arm_hv_timer(vcpu); vmx->__launched = vmx->loaded_vmcs->launched; asm( /* Store host registers */ "push %%" _ASM_DX "; push %%" _ASM_BP ";" "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */ "push %%" _ASM_CX " \n\t" "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t" "je 1f \n\t" "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t" __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" "1: \n\t" /* Reload cr2 if changed */ "mov %c[cr2](%0), %%" _ASM_AX " \n\t" "mov %%cr2, %%" _ASM_DX " \n\t" "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t" "je 2f \n\t" "mov %%" _ASM_AX", %%cr2 \n\t" "2: \n\t" /* Check if vmlaunch of vmresume is needed */ "cmpl $0, %c[launched](%0) \n\t" /* Load guest registers. Don't clobber flags. */ "mov %c[rax](%0), %%" _ASM_AX " \n\t" "mov %c[rbx](%0), %%" _ASM_BX " \n\t" "mov %c[rdx](%0), %%" _ASM_DX " \n\t" "mov %c[rsi](%0), %%" _ASM_SI " \n\t" "mov %c[rdi](%0), %%" _ASM_DI " \n\t" "mov %c[rbp](%0), %%" _ASM_BP " \n\t" #ifdef CONFIG_X86_64 "mov %c[r8](%0), %%r8 \n\t" "mov %c[r9](%0), %%r9 \n\t" "mov %c[r10](%0), %%r10 \n\t" "mov %c[r11](%0), %%r11 \n\t" "mov %c[r12](%0), %%r12 \n\t" "mov %c[r13](%0), %%r13 \n\t" "mov %c[r14](%0), %%r14 \n\t" "mov %c[r15](%0), %%r15 \n\t" #endif "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */ /* Enter guest mode */ "jne 1f \n\t" __ex(ASM_VMX_VMLAUNCH) "\n\t" "jmp 2f \n\t" "1: " __ex(ASM_VMX_VMRESUME) "\n\t" "2: " /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" "mov %%" _ASM_AX ", %c[rax](%0) \n\t" "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" __ASM_SIZE(pop) " %c[rcx](%0) \n\t" "mov %%" _ASM_DX ", %c[rdx](%0) \n\t" "mov %%" _ASM_SI ", %c[rsi](%0) \n\t" "mov %%" _ASM_DI ", %c[rdi](%0) \n\t" "mov %%" _ASM_BP ", %c[rbp](%0) \n\t" #ifdef CONFIG_X86_64 "mov %%r8, %c[r8](%0) \n\t" "mov %%r9, %c[r9](%0) \n\t" "mov %%r10, %c[r10](%0) \n\t" "mov %%r11, %c[r11](%0) \n\t" "mov %%r12, %c[r12](%0) \n\t" "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" #endif "mov %%cr2, %%" _ASM_AX " \n\t" "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" "setbe %c[fail](%0) \n\t" ".pushsection .rodata \n\t" ".global vmx_return \n\t" "vmx_return: " _ASM_PTR " 2b \n\t" ".popsection" : : "c"(vmx), "d"((unsigned long)HOST_RSP), [launched]"i"(offsetof(struct vcpu_vmx, __launched)), [fail]"i"(offsetof(struct vcpu_vmx, fail)), [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), #ifdef CONFIG_X86_64 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), [r12]"i"(offsetof(struct vcpu_vmx, 
		vcpu.arch.regs[VCPU_REGS_R12])),
		[r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
		[wordsize]"i"(sizeof(ulong))
	      : "cc", "memory"
#ifdef CONFIG_X86_64
		, "rax", "rbx", "rdi", "rsi"
		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#else
		, "eax", "ebx", "edi", "esi"
#endif
	      );

	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
	if (debugctlmsr)
		update_debugctlmsr(debugctlmsr);

#ifndef CONFIG_X86_64
	/*
	 * The sysexit path does not restore ds/es, so we must set them to
	 * a reasonable value ourselves.
	 *
	 * We can't defer this to vmx_load_host_state() since that function
	 * may be executed in interrupt context, which saves and restores
	 * segments around it, nullifying its effect.
	 */
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);
#endif

	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR3));
	vcpu->arch.regs_dirty = 0;

	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

	vmx->loaded_vmcs->launched = 1;

	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);

	/*
	 * Eager FPU is enabled if PKEY is supported and CR4.PKE is switched
	 * back on in the host, so it is safe to read the guest PKRU from the
	 * current XSAVE.
	 */
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
		vcpu->arch.pkru = __read_pkru();
		if (vcpu->arch.pkru != vmx->host_pkru)
			__write_pkru(vmx->host_pkru);
	}

	/*
	 * The KVM_REQ_EVENT optimization bit is only on for one entry, and if
	 * we did not inject a still-pending event to L1 now because of
	 * nested_run_pending, we need to re-enable this bit.
	 */
	if (vmx->nested.nested_run_pending)
		kvm_make_request(KVM_REQ_EVENT, vcpu);

	vmx->nested.nested_run_pending = 0;

	vmx_complete_atomic_exit(vmx);
	vmx_recover_nmi_blocking(vmx);
	vmx_complete_interrupts(vmx);
}
STACK_FRAME_NON_STANDARD(vmx_vcpu_run);

static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_put(vcpu);
	vmx_vcpu_load(vcpu, cpu);
	vcpu->cpu = cpu;
	put_cpu();
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
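 *
 * A minimal, never-compiled sketch of the vmx_switch_vmcs() pattern above,
 * with hypothetical stand-in types and no-op pin/load helpers. The point is
 * the ordering: swap the pointer with preemption disabled, fully put the
 * vcpu on the old VMCS, then reload it on the same CPU.
 */
#if 0	/* illustrative sketch only, not built */
struct vmcs_ref { void *vmcs; };
struct vcpu_state { struct vmcs_ref *loaded; int cpu; };

static int  cpu_pin(void)   { return 0; }	/* get_cpu() stand-in */
static void cpu_unpin(void) { }			/* put_cpu() stand-in */
static void vcpu_unload(struct vcpu_state *v)        { (void)v; }
static void vcpu_reload(struct vcpu_state *v, int c) { v->cpu = c; }

static void switch_vmcs(struct vcpu_state *v, struct vmcs_ref *next)
{
        int cpu;

        if (v->loaded == next)
                return;
        cpu = cpu_pin();
        v->loaded = next;	/* must happen before the reload below */
        vcpu_unload(v);
        vcpu_reload(v, cpu);
        cpu_unpin();
}
#endif
/*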
*/ static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int r; r = vcpu_load(vcpu); BUG_ON(r); vmx_switch_vmcs(vcpu, &vmx->vmcs01); free_nested(vmx); vcpu_put(vcpu); } static void vmx_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (enable_pml) vmx_destroy_pml_buffer(vmx); free_vpid(vmx->vpid); leave_guest_mode(vcpu); vmx_free_vcpu_nested(vcpu); free_loaded_vmcs(vmx->loaded_vmcs); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vmx); } static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) { int err; struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); int cpu; if (!vmx) return ERR_PTR(-ENOMEM); vmx->vpid = allocate_vpid(); err = kvm_vcpu_init(&vmx->vcpu, kvm, id); if (err) goto free_vcpu; err = -ENOMEM; /* * If PML is turned on, failure on enabling PML just results in failure * of creating the vcpu, therefore we can simplify PML logic (by * avoiding dealing with cases, such as enabling PML partially on vcpus * for the guest, etc. */ if (enable_pml) { vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!vmx->pml_pg) goto uninit_vcpu; } vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) > PAGE_SIZE); if (!vmx->guest_msrs) goto free_pml; vmx->loaded_vmcs = &vmx->vmcs01; vmx->loaded_vmcs->vmcs = alloc_vmcs(); vmx->loaded_vmcs->shadow_vmcs = NULL; if (!vmx->loaded_vmcs->vmcs) goto free_msrs; loaded_vmcs_init(vmx->loaded_vmcs); cpu = get_cpu(); vmx_vcpu_load(&vmx->vcpu, cpu); vmx->vcpu.cpu = cpu; err = vmx_vcpu_setup(vmx); vmx_vcpu_put(&vmx->vcpu); put_cpu(); if (err) goto free_vmcs; if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { err = alloc_apic_access_page(kvm); if (err) goto free_vmcs; } if (enable_ept) { if (!kvm->arch.ept_identity_map_addr) kvm->arch.ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; err = init_rmode_identity_map(kvm); if (err) goto free_vmcs; } if (nested) { nested_vmx_setup_ctls_msrs(vmx); vmx->nested.vpid02 = allocate_vpid(); } vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; return &vmx->vcpu; free_vmcs: free_vpid(vmx->nested.vpid02); free_loaded_vmcs(vmx->loaded_vmcs); free_msrs: kfree(vmx->guest_msrs); free_pml: vmx_destroy_pml_buffer(vmx); uninit_vcpu: kvm_vcpu_uninit(&vmx->vcpu); free_vcpu: free_vpid(vmx->vpid); kmem_cache_free(kvm_vcpu_cache, vmx); return ERR_PTR(err); } static void __init vmx_check_processor_compat(void *rtn) { struct vmcs_config vmcs_conf; *(int *)rtn = 0; if (setup_vmcs_config(&vmcs_conf) < 0) *(int *)rtn = -EIO; if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", smp_processor_id()); *(int *)rtn = -EIO; } } static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) { u8 cache; u64 ipat = 0; /* For VT-d and EPT combination * 1. MMIO: always map as UC * 2. EPT with VT-d: * a. VT-d without snooping control feature: can't guarantee the * result, try to trust guest. * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. Just set it * to WB to keep consistent with host. So the same as item 3. * 3. 
EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ if (is_mmio) { cache = MTRR_TYPE_UNCACHABLE; goto exit; } if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { ipat = VMX_EPT_IPAT_BIT; cache = MTRR_TYPE_WRBACK; goto exit; } if (kvm_read_cr0(vcpu) & X86_CR0_CD) { ipat = VMX_EPT_IPAT_BIT; if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) cache = MTRR_TYPE_WRBACK; else cache = MTRR_TYPE_UNCACHABLE; goto exit; } cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); exit: return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; } static int vmx_get_lpage_level(void) { if (enable_ept && !cpu_has_vmx_ept_1g_page()) return PT_DIRECTORY_LEVEL; else /* For shadow and EPT supported 1GB page */ return PT_PDPE_LEVEL; } static void vmcs_set_secondary_exec_control(u32 new_ctl) { /* * These bits in the secondary execution controls field * are dynamic, the others are mostly based on the hypervisor * architecture and the guest's CPUID. Do not touch the * dynamic bits. */ u32 mask = SECONDARY_EXEC_SHADOW_VMCS | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, (new_ctl & ~mask) | (cur_ctl & mask)); } /* * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits * (indicating "allowed-1") if they are supported in the guest's CPUID. */ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_cpuid_entry2 *entry; vmx->nested.nested_vmx_cr0_fixed1 = 0xffffffff; vmx->nested.nested_vmx_cr4_fixed1 = X86_CR4_PCE; #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ if (entry && (entry->_reg & (_cpuid_mask))) \ vmx->nested.nested_vmx_cr4_fixed1 |= (_cr4_mask); \ } while (0) entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME)); cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME)); cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC)); cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE)); cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE)); cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE)); cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE)); cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE)); cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR)); cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM)); cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX)); cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX)); cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID)); cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE)); entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE)); cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP)); cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP)); cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU)); /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */ cr4_fixed1_update(bit(11), ecx, bit(2)); #undef cr4_fixed1_update } static void vmx_cpuid_update(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (cpu_has_secondary_exec_ctrls()) { vmx_compute_secondary_exec_control(vmx); vmcs_set_secondary_exec_control(vmx->secondary_exec_control); } if (nested_vmx_allowed(vcpu)) to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; else to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 
~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; if (nested_vmx_allowed(vcpu)) nested_vmx_cr_fixed1_bits_update(vcpu); } static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { if (func == 1 && nested) entry->ecx |= bit(X86_FEATURE_VMX); } static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exit_reason; unsigned long exit_qualification = vcpu->arch.exit_qualification; if (vmx->nested.pml_full) { exit_reason = EXIT_REASON_PML_FULL; vmx->nested.pml_full = false; exit_qualification &= INTR_INFO_UNBLOCK_NMI; } else if (fault->error_code & PFERR_RSVD_MASK) exit_reason = EXIT_REASON_EPT_MISCONFIG; else exit_reason = EXIT_REASON_EPT_VIOLATION; nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification); vmcs12->guest_physical_address = fault->address; } static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu) { return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT; } /* Callbacks for nested_ept_init_mmu_context: */ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) { /* return the page table to be shadowed - in our case, EPT12 */ return get_vmcs12(vcpu)->ept_pointer; } static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu))) return 1; kvm_mmu_unload(vcpu); kvm_init_shadow_ept_mmu(vcpu, to_vmx(vcpu)->nested.nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT, nested_ept_ad_enabled(vcpu)); vcpu->arch.mmu.set_cr3 = vmx_set_cr3; vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; return 0; } static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) { vcpu->arch.walk_mmu = &vcpu->arch.mmu; } static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code) { bool inequality, bit; bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; inequality = (error_code & vmcs12->page_fault_error_code_mask) != vmcs12->page_fault_error_code_match; return inequality ^ bit; } static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); WARN_ON(!is_guest_mode(vcpu)); if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) { vmcs12->vm_exit_intr_error_code = fault->error_code; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK, fault->address); } else { kvm_inject_page_fault(vcpu, fault); } } static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12); static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct page *page; u64 hpa; if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { /* * Translate L1 physical address to host physical * address for vmcs02. Keep the page pinned, so this * physical address remains valid. We keep a reference * to it so we can release it later. 
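 *
 * A minimal, never-compiled sketch of the pin/replace pattern that follows,
 * reduced to one hypothetical helper: lookup_guest_page()/put_page_ref()
 * stand in for kvm_vcpu_gpa_to_page()/kvm_release_page_dirty().
 */
#if 0	/* illustrative sketch only, not built */
#include <stdint.h>

struct page_ref { uint64_t pfn; };

static struct page_ref *lookup_guest_page(uint64_t gpa);	/* may return NULL */
static void put_page_ref(struct page_ref *p);

/* Drop the page pinned on a previous entry, then pin the new one so the
 * host physical address written into vmcs02 stays valid across the run. */
static int repin_guest_page(struct page_ref **slot, uint64_t gpa, uint64_t *hpa)
{
        if (*slot) {
                put_page_ref(*slot);
                *slot = NULL;
        }
        *slot = lookup_guest_page(gpa);
        if (!*slot)
                return -1;	/* caller disables the feature instead */
        *hpa = (*slot)->pfn << 12;
        return 0;
}
#endif
/*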
*/ if (vmx->nested.apic_access_page) { /* shouldn't happen */ kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); /* * If translation failed, no matter: This feature asks * to exit when accessing the given address, and if it * can never be accessed, this feature won't do * anything anyway. */ if (!is_error_page(page)) { vmx->nested.apic_access_page = page; hpa = page_to_phys(vmx->nested.apic_access_page); vmcs_write64(APIC_ACCESS_ADDR, hpa); } else { vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); } } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) && cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); kvm_vcpu_reload_apic_access_page(vcpu); } if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { if (vmx->nested.virtual_apic_page) { /* shouldn't happen */ kvm_release_page_dirty(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr); /* * If translation failed, VM entry will fail because * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull. * Failing the vm entry is _not_ what the processor * does but it's basically the only possibility we * have. We could still enter the guest if CR8 load * exits are enabled, CR8 store exits are enabled, and * virtualize APIC access is disabled; in this case * the processor would never use the TPR shadow and we * could simply clear the bit from the execution * control. But such a configuration is useless, so * let's keep the code simple. */ if (!is_error_page(page)) { vmx->nested.virtual_apic_page = page; hpa = page_to_phys(vmx->nested.virtual_apic_page); vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa); } } if (nested_cpu_has_posted_intr(vmcs12)) { if (vmx->nested.pi_desc_page) { /* shouldn't happen */ kunmap(vmx->nested.pi_desc_page); kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; } page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr); if (is_error_page(page)) return; vmx->nested.pi_desc_page = page; vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page); vmx->nested.pi_desc = (struct pi_desc *)((void *)vmx->nested.pi_desc + (unsigned long)(vmcs12->posted_intr_desc_addr & (PAGE_SIZE - 1))); vmcs_write64(POSTED_INTR_DESC_ADDR, page_to_phys(vmx->nested.pi_desc_page) + (unsigned long)(vmcs12->posted_intr_desc_addr & (PAGE_SIZE - 1))); } if (cpu_has_vmx_msr_bitmap() && nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) && nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) ; else vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_USE_MSR_BITMAPS); } static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) { u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; struct vcpu_vmx *vmx = to_vmx(vcpu); if (vcpu->arch.virtual_tsc_khz == 0) return; /* Make sure short timeouts reliably trigger an immediate vmexit. * hrtimer_start does not guarantee this. 
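 *
 * A minimal, never-compiled sketch of the unit conversion performed right
 * below, as standalone arithmetic. The rate constant mirrors
 * VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; the helper name is ours.
 */
#if 0	/* illustrative sketch only, not built */
#include <stdint.h>

#define EMULATED_TIMER_RATE 5	/* timer ticks are TSC cycles >> rate */

/* vmcs12 timer value -> nanoseconds for an hrtimer:
 * cycles = value << rate;  ns = cycles * 1e9 / (tsc_khz * 1e3). */
static uint64_t preemption_timeout_ns(uint64_t value, uint64_t tsc_khz)
{
        uint64_t cycles = value << EMULATED_TIMER_RATE;

        return cycles * 1000000ULL / tsc_khz;
}
#endif
/*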
*/ if (preemption_timeout <= 1) { vmx_preemption_timer_fn(&vmx->nested.preemption_timer); return; } preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; preemption_timeout *= 1000000; do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); hrtimer_start(&vmx->nested.preemption_timer, ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); } static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) return 0; if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) || !page_address_valid(vcpu, vmcs12->io_bitmap_b)) return -EINVAL; return 0; } static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) return 0; if (!page_address_valid(vcpu, vmcs12->msr_bitmap)) return -EINVAL; return 0; } static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return 0; if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)) return -EINVAL; return 0; } /* * Merge L0's and L1's MSR bitmap, return false to indicate that * we do not use the hardware. */ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { int msr; struct page *page; unsigned long *msr_bitmap_l1; unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap; /* This shortcut is ok because we support only x2APIC MSRs so far. */ if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) return false; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); if (is_error_page(page)) return false; msr_bitmap_l1 = (unsigned long *)kmap(page); memset(msr_bitmap_l0, 0xff, PAGE_SIZE); if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { if (nested_cpu_has_apic_reg_virt(vmcs12)) for (msr = 0x800; msr <= 0x8ff; msr++) nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, msr, MSR_TYPE_R); nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, APIC_BASE_MSR + (APIC_TASKPRI >> 4), MSR_TYPE_R | MSR_TYPE_W); if (nested_cpu_has_vid(vmcs12)) { nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, APIC_BASE_MSR + (APIC_EOI >> 4), MSR_TYPE_W); nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, APIC_BASE_MSR + (APIC_SELF_IPI >> 4), MSR_TYPE_W); } } kunmap(page); kvm_release_page_clean(page); return true; } static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && !nested_cpu_has_apic_reg_virt(vmcs12) && !nested_cpu_has_vid(vmcs12) && !nested_cpu_has_posted_intr(vmcs12)) return 0; /* * If virtualize x2apic mode is enabled, * virtualize apic access must be disabled. */ if (nested_cpu_has_virt_x2apic_mode(vmcs12) && nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) return -EINVAL; /* * If virtual interrupt delivery is enabled, * we must exit on external interrupts. */ if (nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)) return -EINVAL; /* * bits 15:8 should be zero in posted_intr_nv, * the descriptor address has been already checked * in nested_get_vmcs12_pages. */ if (nested_cpu_has_posted_intr(vmcs12) && (!nested_cpu_has_vid(vmcs12) || !nested_exit_intr_ack_set(vcpu) || vmcs12->posted_intr_nv & 0xff00)) return -EINVAL; /* tpr shadow is needed by all apicv features. 
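 *
 * A minimal, never-compiled sketch of the shape of the page_address_valid()
 * checks used by the bitmap-control validators above. The fixed width passed
 * by the caller is an assumption; the real code derives it from the guest's
 * CPUID maxphyaddr.
 */
#if 0	/* illustrative sketch only, not built */
#include <stdbool.h>
#include <stdint.h>

static bool page_aligned_and_addressable(uint64_t gpa, int maxphyaddr)
{
        if (gpa & 0xfffULL)			/* must be 4KiB aligned */
                return false;
        return (gpa >> maxphyaddr) == 0;	/* must fit in maxphyaddr */
}

/* e.g. page_aligned_and_addressable(io_bitmap_a_gpa, 36) */
#endif
/*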
*/ if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return -EINVAL; return 0; } static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, unsigned long count_field, unsigned long addr_field) { int maxphyaddr; u64 count, addr; if (vmcs12_read_any(vcpu, count_field, &count) || vmcs12_read_any(vcpu, addr_field, &addr)) { WARN_ON(1); return -EINVAL; } if (count == 0) return 0; maxphyaddr = cpuid_maxphyaddr(vcpu); if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) { pr_debug_ratelimited( "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)", addr_field, maxphyaddr, count, addr); return -EINVAL; } return 0; } static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (vmcs12->vm_exit_msr_load_count == 0 && vmcs12->vm_exit_msr_store_count == 0 && vmcs12->vm_entry_msr_load_count == 0) return 0; /* Fast path */ if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT, VM_EXIT_MSR_LOAD_ADDR) || nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT, VM_EXIT_MSR_STORE_ADDR) || nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT, VM_ENTRY_MSR_LOAD_ADDR)) return -EINVAL; return 0; } static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { u64 address = vmcs12->pml_address; int maxphyaddr = cpuid_maxphyaddr(vcpu); if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) { if (!nested_cpu_has_ept(vmcs12) || !IS_ALIGNED(address, 4096) || address >> maxphyaddr) return -EINVAL; } return 0; } static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e) { /* x2APIC MSR accesses are not allowed */ if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8) return -EINVAL; if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */ e->index == MSR_IA32_UCODE_REV) return -EINVAL; if (e->reserved != 0) return -EINVAL; return 0; } static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e) { if (e->index == MSR_FS_BASE || e->index == MSR_GS_BASE || e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */ nested_vmx_msr_check_common(vcpu, e)) return -EINVAL; return 0; } static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e) { if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */ nested_vmx_msr_check_common(vcpu, e)) return -EINVAL; return 0; } /* * Load guest's/host's msr at nested entry/exit. * return 0 for success, entry index for failure. 
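 *
 * A minimal, never-compiled sketch of the bounds test from
 * nested_vmx_check_msr_switch() above: both the first and the last byte of
 * the area must be guest-physical-addressable and the base 16-byte aligned.
 * Helper and parameter names are ours.
 */
#if 0	/* illustrative sketch only, not built */
#include <stdbool.h>
#include <stdint.h>

struct msr_entry { uint32_t index; uint32_t reserved; uint64_t value; };

static bool msr_switch_area_ok(uint64_t addr, uint64_t count, int maxphyaddr)
{
        uint64_t last = addr + count * sizeof(struct msr_entry) - 1;

        if (count == 0)
                return true;		/* nothing to load/store */
        if (addr & 0xf)
                return false;		/* 16-byte alignment required */
        return (addr >> maxphyaddr) == 0 && (last >> maxphyaddr) == 0;
}
#endif
/*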
*/ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) { u32 i; struct vmx_msr_entry e; struct msr_data msr; msr.host_initiated = false; for (i = 0; i < count; i++) { if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), &e, sizeof(e))) { pr_debug_ratelimited( "%s cannot read MSR entry (%u, 0x%08llx)\n", __func__, i, gpa + i * sizeof(e)); goto fail; } if (nested_vmx_load_msr_check(vcpu, &e)) { pr_debug_ratelimited( "%s check failed (%u, 0x%x, 0x%x)\n", __func__, i, e.index, e.reserved); goto fail; } msr.index = e.index; msr.data = e.value; if (kvm_set_msr(vcpu, &msr)) { pr_debug_ratelimited( "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", __func__, i, e.index, e.value); goto fail; } } return 0; fail: return i + 1; } static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) { u32 i; struct vmx_msr_entry e; for (i = 0; i < count; i++) { struct msr_data msr_info; if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), &e, 2 * sizeof(u32))) { pr_debug_ratelimited( "%s cannot read MSR entry (%u, 0x%08llx)\n", __func__, i, gpa + i * sizeof(e)); return -EINVAL; } if (nested_vmx_store_msr_check(vcpu, &e)) { pr_debug_ratelimited( "%s check failed (%u, 0x%x, 0x%x)\n", __func__, i, e.index, e.reserved); return -EINVAL; } msr_info.host_initiated = false; msr_info.index = e.index; if (kvm_get_msr(vcpu, &msr_info)) { pr_debug_ratelimited( "%s cannot read MSR (%u, 0x%x)\n", __func__, i, e.index); return -EINVAL; } if (kvm_vcpu_write_guest(vcpu, gpa + i * sizeof(e) + offsetof(struct vmx_msr_entry, value), &msr_info.data, sizeof(msr_info.data))) { pr_debug_ratelimited( "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", __func__, i, e.index, msr_info.data); return -EINVAL; } } return 0; } static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val) { unsigned long invalid_mask; invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu); return (val & invalid_mask) == 0; } /* * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are * emulating VM entry into a guest with EPT enabled. * Returns 0 on success, 1 on failure. Invalid state exit qualification code * is assigned to entry_failure_code on failure. */ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, u32 *entry_failure_code) { if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) { if (!nested_cr3_valid(vcpu, cr3)) { *entry_failure_code = ENTRY_FAIL_DEFAULT; return 1; } /* * If PAE paging and EPT are both on, CR3 is not used by the CPU and * must not be dereferenced. */ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) && !nested_ept) { if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) { *entry_failure_code = ENTRY_FAIL_PDPTE; return 1; } } vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); } kvm_mmu_reset_context(vcpu); return 0; } /* * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 * guest in a way that will both be appropriate to L1's requests, and our * needs. In addition to modifying the active vmcs (which is vmcs02), this * function also has additional necessary side-effects, like setting various * vcpu->arch fields. * Returns 0 on success, 1 on failure. Invalid state exit qualification code * is assigned to entry_failure_code on failure. 
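 *
 * A minimal, never-compiled sketch of the 1-based failure-index convention
 * of nested_vmx_load_msr() above, with hypothetical check/write helpers. The
 * returned index is what the architecture reports in the exit qualification
 * of a VM-entry failure during MSR loading.
 */
#if 0	/* illustrative sketch only, not built */
#include <stdint.h>

struct msr_entry { uint32_t index; uint32_t reserved; uint64_t value; };

static int check_entry(const struct msr_entry *e);		/* hypothetical */
static int write_guest_msr(uint32_t index, uint64_t value);	/* hypothetical */

static uint32_t load_msr_list(const struct msr_entry *list, uint32_t count)
{
        uint32_t i;

        for (i = 0; i < count; i++)
                if (check_entry(&list[i]) ||
                    write_guest_msr(list[i].index, list[i].value))
                        return i + 1;	/* 1-based index of the bad entry */
        return 0;			/* all loaded */
}
#endif
/*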
*/ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, bool from_vmentry, u32 *entry_failure_code) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control, vmcs12_exec_ctrl; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); if (from_vmentry && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); } else { kvm_set_dr(vcpu, 7, vcpu->arch.dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); } if (from_vmentry) { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, vmcs12->vm_entry_intr_info_field); vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, vmcs12->vm_entry_exception_error_code); vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmcs12->vm_entry_instruction_len); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, vmcs12->guest_interruptibility_info); vmx->loaded_vmcs->nmi_known_unmasked = !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); } else { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); vmx_set_rflags(vcpu, vmcs12->guest_rflags); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, vmcs12->guest_pending_dbg_exceptions); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); if (nested_cpu_has_xsaves(vmcs12)) vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); vmcs_write64(VMCS_LINK_POINTER, -1ull); exec_control = vmcs12->pin_based_vm_exec_control; /* Preemption timer setting is only taken 
from vmcs01. */ exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; exec_control |= vmcs_config.pin_based_exec_ctrl; if (vmx->hv_deadline_tsc == -1) exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; /* Posted interrupts setting is only taken from vmcs12. */ if (nested_cpu_has_posted_intr(vmcs12)) { vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; vmx->nested.pi_pending = false; vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); } else { exec_control &= ~PIN_BASED_POSTED_INTR; } vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); vmx->nested.preemption_timer_expired = false; if (nested_cpu_has_preemption_timer(vmcs12)) vmx_start_preemption_timer(vcpu); /* * Whether page-faults are trapped is determined by a combination of * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. * If enable_ept, L0 doesn't care about page faults and we should * set all of these to L1's desires. However, if !enable_ept, L0 does * care about (at least some) page faults, and because it is not easy * (if at all possible?) to merge L0 and L1's desires, we simply ask * to exit on each and every L2 page fault. This is done by setting * MASK=MATCH=0 and (see below) EB.PF=1. * Note that below we don't need special code to set EB.PF beyond the * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when * !enable_ept, EB.PF is 1, so the "or" will always be 1. */ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, enable_ept ? vmcs12->page_fault_error_code_mask : 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, enable_ept ? vmcs12->page_fault_error_code_match : 0); if (cpu_has_secondary_exec_ctrls()) { exec_control = vmx->secondary_exec_control; /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_ENABLE_VMFUNC); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & ~SECONDARY_EXEC_ENABLE_PML; exec_control |= vmcs12_exec_ctrl; } /* All VMFUNCs are currently emulated through L0 vmexits. */ if (exec_control & SECONDARY_EXEC_ENABLE_VMFUNC) vmcs_write64(VM_FUNCTION_CONTROL, 0); if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); vmcs_write16(GUEST_INTR_STATUS, vmcs12->guest_intr_status); } /* * Write an illegal value to APIC_ACCESS_ADDR. Later, * nested_get_vmcs12_pages will either fix it up or * remove the VM execution control. */ if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) vmcs_write64(APIC_ACCESS_ADDR, -1ull); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } /* * Set host-state according to L0's settings (vmcs12 is irrelevant here) * Some constant fields are set here by vmx_set_constant_host_state(). * Other fields are different per CPU, and will be set later when * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. */ vmx_set_constant_host_state(vmx); /* * Set the MSR load/store lists to match L0's settings. 
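 *
 * A minimal, never-compiled sketch of the merge pattern used throughout
 * prepare_vmcs02() for control fields, with placeholder bit names: start
 * from L1's request in vmcs12, strip the bits that only vmcs01 may decide,
 * then OR in what L0 itself requires.
 */
#if 0	/* illustrative sketch only, not built */
#include <stdint.h>

#define CTL_L0_ONLY	(1u << 6)	/* e.g. the preemption timer bit */
#define CTL_L0_REQUIRED	0x0000001fu	/* placeholder required-1 bits */

static uint32_t merge_exec_control(uint32_t vmcs12_ctl, uint32_t vmcs01_ctl)
{
        uint32_t ctl = vmcs12_ctl;

        ctl &= ~CTL_L0_ONLY;		/* taken from vmcs01 only */
        ctl |= vmcs01_ctl & CTL_L0_REQUIRED;
        return ctl;
}
#endif
/*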
*/ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); /* * HOST_RSP is normally set correctly in vmx_vcpu_run() just before * entry, but only if the current (host) sp changed from the value * we wrote last (vmx->host_rsp). This cache is no longer relevant * if we switch vmcs, and rather than hold a separate cache per vmcs, * here we just force the write to happen on entry. */ vmx->host_rsp = 0; exec_control = vmx_exec_control(vmx); /* L0's desires */ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; exec_control &= ~CPU_BASED_TPR_SHADOW; exec_control |= vmcs12->cpu_based_vm_exec_control; /* * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if * nested_get_vmcs12_pages can't fix it up, the illegal value * will result in a VM entry failure. */ if (exec_control & CPU_BASED_TPR_SHADOW) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); } else { #ifdef CONFIG_X86_64 exec_control |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING; #endif } /* * Merging of IO bitmap not currently supported. * Rather, exit every time. */ exec_control &= ~CPU_BASED_USE_IO_BITMAPS; exec_control |= CPU_BASED_UNCOND_IO_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the * bitwise-or of what L1 wants to trap for L2, and what we want to * trap. Note that CR0.TS also needs updating - we do this later. */ update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); /* L2->L1 exit controls are emulated - the hardware exit is to L0 so * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits are further modified by vmx_set_efer() below. */ vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are * emulated by vmx_set_efer(), below. */ vm_entry_controls_init(vmx, (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & ~VM_ENTRY_IA32E_MODE) | (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); if (from_vmentry && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); vcpu->arch.pat = vmcs12->guest_ia32_pat; } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); } set_cr4_guest_host_mask(vmx); if (from_vmentry && vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset + vmcs12->tsc_offset); else vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); if (enable_vpid) { /* * There is no direct mapping between vpid02 and vpid12, the * vpid02 is per-vCPU for L0 and reused while the value of * vpid12 is changed w/ one invvpid during nested vmentry. * The vpid12 is allocated by L1 for L2, so it will not * influence global bitmap(for vpid01 and vpid02 allocation) * even if spawn a lot of nested vCPUs. 
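 *
 * A minimal, never-compiled sketch of the vpid02 reuse rule this comment
 * describes and the code below implements: L0 keeps one hardware vpid per
 * vcpu for L2 and only flushes it when L1 changes the purely virtual
 * vpid12, the point at which stale translations must appear to vanish.
 */
#if 0	/* illustrative sketch only, not built */
#include <stdbool.h>
#include <stdint.h>

struct nested_vpid {
        uint16_t vpid02;	/* hardware vpid backing L2 */
        uint16_t last_vpid12;	/* last value L1 put in vmcs12 */
};

/* Returns true when an invvpid on vpid02 is due on this nested entry. */
static bool need_flush_on_entry(struct nested_vpid *nv, uint16_t vpid12)
{
        if (vpid12 == nv->last_vpid12)
                return false;
        nv->last_vpid12 = vpid12;
        return true;
}
#endif
/*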
 */
		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
			if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
				vmx->nested.last_vpid = vmcs12->virtual_processor_id;
				__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
			}
		} else {
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
			vmx_flush_tlb(vcpu);
		}
	}

	if (enable_pml) {
		/*
		 * Conceptually we want to copy the PML address and index from
		 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
		 * since we always flush the log on each vmexit, this happens
		 * to be equivalent to simply resetting the fields in vmcs02.
		 */
		ASSERT(vmx->pml_pg);
		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
	}

	if (nested_cpu_has_ept(vmcs12)) {
		if (nested_ept_init_mmu_context(vcpu)) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return 1;
		}
	} else if (nested_cpu_has2(vmcs12,
				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		vmx_flush_tlb_ept_only(vcpu);
	}

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
	 * bits that we consider mandatory-enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we
	 * have more bits than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	if (from_vmentry &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		vcpu->arch.efer = vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/* Shadow L2's page tables, on either EPT or shadow page tables.
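 *
 * A minimal, never-compiled sketch of the rule inside nested_vmx_load_cr3(),
 * invoked below: PDPTEs are fetched from guest memory only for PAE paging
 * without nested EPT; with EPT on, the CPU does not consume CR3-referenced
 * PDPTEs. Helper and parameter names are ours.
 */
#if 0	/* illustrative sketch only, not built */
#include <stdbool.h>

static bool must_load_pdptrs(bool long_mode, bool pae, bool paging,
			     bool nested_ept)
{
        return !long_mode && pae && paging && !nested_ept;
}
#endif
/*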
*/ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), entry_failure_code)) return 1; if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; /* * L1 may access the L2's PDPTR, so save them to construct vmcs12 */ if (enable_ept) { vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); } kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); return 0; } static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_pml_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, vmx->nested.nested_vmx_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high) || (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && !vmx_control_verify(vmcs12->secondary_vm_exec_control, vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high)) || !vmx_control_verify(vmcs12->pin_based_vm_exec_control, vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high) || !vmx_control_verify(vmcs12->vm_exit_controls, vmx->nested.nested_vmx_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high) || !vmx_control_verify(vmcs12->vm_entry_controls, vmx->nested.nested_vmx_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_cpu_has_vmfunc(vmcs12)) { if (vmcs12->vm_function_control & ~vmx->nested.nested_vmx_vmfunc_controls) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_cpu_has_eptp_switching(vmcs12)) { if (!nested_cpu_has_ept(vmcs12) || !page_address_valid(vcpu, vmcs12->eptp_list_address)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; } } if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) || !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || !nested_cr3_valid(vcpu, vmcs12->host_cr3)) return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; return 0; } static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 *exit_qual) { bool ia32e; *exit_qual = ENTRY_FAIL_DEFAULT; if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) || !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) return 1; if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS) && vmcs12->vmcs_link_pointer != -1ull) { *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR; return 1; } /* * If the load IA32_EFER VM-entry control is 1, the following checks * are performed on the field for the IA32_EFER MSR: * - Bits reserved in the IA32_EFER MSR must be 0. 
* - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of * the IA-32e mode guest VM-exit control. It must also be identical * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to * CR0.PG) is 1. */ if (to_vmx(vcpu)->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || ((vmcs12->guest_cr0 & X86_CR0_PG) && ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) return 1; } /* * If the load IA32_EFER VM-exit control is 1, bits reserved in the * IA32_EFER MSR must be 0 in the field for that register. In addition, * the values of the LMA and LME bits in the field must each be that of * the host address-space size VM-exit control. */ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { ia32e = (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) return 1; } return 0; } static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct loaded_vmcs *vmcs02; u32 msr_entry_idx; u32 exit_qual; vmcs02 = nested_get_current_vmcs02(vmx); if (!vmcs02) return -ENOMEM; enter_guest_mode(vcpu); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); vmx_switch_vmcs(vcpu, vmcs02); vmx_segment_cache_clear(vmx); if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { leave_guest_mode(vcpu); vmx_switch_vmcs(vcpu, &vmx->vmcs01); nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, exit_qual); return 1; } nested_get_vmcs12_pages(vcpu, vmcs12); msr_entry_idx = nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr, vmcs12->vm_entry_msr_load_count); if (msr_entry_idx) { leave_guest_mode(vcpu); vmx_switch_vmcs(vcpu, &vmx->vmcs01); nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx); return 1; } /* * Note no nested_vmx_succeed or nested_vmx_fail here. At this point * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet * returned as far as L1 is concerned. It will only return (and set * the success flag) when L2 exits (see nested_vmx_vmexit()). */ return 0; } /* * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 * for running an L2 nested guest. */ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); u32 exit_qual; int ret; if (!nested_vmx_check_permission(vcpu)) return 1; if (!nested_vmx_check_vmcs12(vcpu)) goto out; vmcs12 = get_vmcs12(vcpu); if (enable_shadow_vmcs) copy_shadow_to_vmcs12(vmx); /* * The nested entry process starts with enforcing various prerequisites * on vmcs12 as required by the Intel SDM, and act appropriately when * they fail: As the SDM explains, some conditions should cause the * instruction to fail, while others will cause the instruction to seem * to succeed, but return an EXIT_REASON_INVALID_STATE. * To speed up the normal (success) code path, we should avoid checking * for misconfigurations which will anyway be caught by the processor * when using the merged vmcs02. 
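 *
 * A minimal, never-compiled sketch of the EFER consistency rule checked in
 * check_vmentry_postreqs() above, as one standalone predicate. The SK_*
 * masks restate the architectural bit positions; the helper name is ours.
 */
#if 0	/* illustrative sketch only, not built */
#include <stdbool.h>
#include <stdint.h>

#define SK_EFER_LME	(1ULL << 8)
#define SK_EFER_LMA	(1ULL << 10)
#define SK_CR0_PG	(1ULL << 31)

static bool guest_efer_consistent(uint64_t efer, uint64_t cr0, bool ia32e)
{
        if (ia32e != !!(efer & SK_EFER_LMA))
                return false;
        if ((cr0 & SK_CR0_PG) && ia32e != !!(efer & SK_EFER_LME))
                return false;
        return true;
}
#endif
/*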
*/ if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) { nested_vmx_failValid(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); goto out; } if (vmcs12->launch_state == launch) { nested_vmx_failValid(vcpu, launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS : VMXERR_VMRESUME_NONLAUNCHED_VMCS); goto out; } ret = check_vmentry_prereqs(vcpu, vmcs12); if (ret) { nested_vmx_failValid(vcpu, ret); goto out; } /* * After this point, the trap flag no longer triggers a singlestep trap * on the vm entry instructions; don't call kvm_skip_emulated_instruction. * This is not 100% correct; for performance reasons, we delegate most * of the checks on host state to the processor. If those fail, * the singlestep trap is missed. */ skip_emulated_instruction(vcpu); ret = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual); if (ret) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, exit_qual); return 1; } /* * We're finally done with prerequisite checking, and can start with * the nested entry. */ ret = enter_vmx_non_root_mode(vcpu, true); if (ret) return ret; if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) return kvm_vcpu_halt(vcpu); vmx->nested.nested_run_pending = 1; return 1; out: return kvm_skip_emulated_instruction(vcpu); } /* * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date * because L2 may have changed some cr0 bits directly (CRO_GUEST_HOST_MASK). * This function returns the new value we should put in vmcs12.guest_cr0. * It's not enough to just return the vmcs02 GUEST_CR0. Rather, * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 * didn't trap the bit, because if L1 did, so would L0). * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have * been modified by L2, and L1 knows it. So just leave the old value of * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 * isn't relevant, because if L0 traps this bit it can set it to anything. * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have * changed these bits, and therefore they need to be updated, but L0 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 
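 *
 * A minimal, never-compiled sketch of the launch-state rule enforced in
 * nested_vmx_run() above: VMLAUNCH demands a clear vmcs12 and VMRESUME a
 * previously launched one; a mismatch fails the instruction with the
 * corresponding VM-instruction error.
 */
#if 0	/* illustrative sketch only, not built */
#include <stdbool.h>

enum { VMCS12_CLEAR = 0, VMCS12_LAUNCHED = 1 };

static bool launch_state_ok(int launch_state, bool is_vmlaunch)
{
        return is_vmlaunch ? launch_state == VMCS12_CLEAR
                           : launch_state == VMCS12_LAUNCHED;
}
#endif
/*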
*/ static inline unsigned long vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | vcpu->arch.cr0_guest_owned_bits)); } static inline unsigned long vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | vcpu->arch.cr4_guest_owned_bits)); } static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { u32 idt_vectoring; unsigned int nr; if (vcpu->arch.exception.injected) { nr = vcpu->arch.exception.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (kvm_exception_is_soft(nr)) { vmcs12->vm_exit_instruction_len = vcpu->arch.event_exit_inst_len; idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; } else idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; if (vcpu->arch.exception.has_error_code) { idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; vmcs12->idt_vectoring_error_code = vcpu->arch.exception.error_code; } vmcs12->idt_vectoring_info_field = idt_vectoring; } else if (vcpu->arch.nmi_injected) { vmcs12->idt_vectoring_info_field = INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; } else if (vcpu->arch.interrupt.pending) { nr = vcpu->arch.interrupt.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { idt_vectoring |= INTR_TYPE_SOFT_INTR; vmcs12->vm_entry_instruction_len = vcpu->arch.event_exit_inst_len; } else idt_vectoring |= INTR_TYPE_EXT_INTR; vmcs12->idt_vectoring_info_field = idt_vectoring; } } static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qual; if (kvm_event_needs_reinjection(vcpu)) return -EBUSY; if (vcpu->arch.exception.pending && nested_vmx_check_exception(vcpu, &exit_qual)) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_inject_exception_vmexit(vcpu, exit_qual); vcpu->arch.exception.pending = false; return 0; } if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); return 0; } if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, NMI_VECTOR | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK, 0); /* * The NMI-triggered VM exit counts as injection: * clear this one and block further NMIs. */ vcpu->arch.nmi_pending = 0; vmx_set_nmi_mask(vcpu, true); return 0; } if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && nested_exit_on_intr(vcpu)) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); return 0; } vmx_complete_nested_posted_interrupt(vcpu); return 0; } static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) { ktime_t remaining = hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); u64 value; if (ktime_to_ns(remaining) <= 0) return 0; value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; do_div(value, 1000000); return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; } /* * Update the guest state fields of vmcs12 to reflect changes that * occurred while L2 was running. 
(The "IA-32e mode guest" bit of the * VM-entry controls is also updated, since this is really a guest * state bit.) */ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); vmcs12->guest_interruptibility_info = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); vmcs12->guest_pending_dbg_exceptions = vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; else vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; if (nested_cpu_has_preemption_timer(vmcs12)) { if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) vmcs12->vmx_preemption_timer_value = vmx_get_preemption_timer_value(vcpu); hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); } /* * In some cases (usually, nested EPT), L2 is allowed to change its * own CR3 without exiting. If it has changed it, we must keep it. * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. * * Additionally, restore L2's PDPTR to vmcs12. 
 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
		vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
		vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
		vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
		vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
	}

	vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);

	if (nested_cpu_has_vid(vmcs12))
		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
		vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	}

	/* TODO: These cannot have changed unless we have MSR bitmaps and
	 * the relevant bit asks not to trap the change */
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
		vmcs12->guest_ia32_efer = vcpu->arch.efer;
	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
	if (kvm_mpx_supported())
		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
}

/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have changed by the L2 guest or the exit - i.e., the guest-state and
 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
 * which already writes to vmcs12 directly.
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update guest state fields: */
	sync_vmcs12(vcpu, vmcs12);

	/* update exit information fields: */

	vmcs12->vm_exit_reason = exit_reason;
	vmcs12->exit_qualification = exit_qualification;

	vmcs12->vm_exit_intr_info = exit_intr_info;

	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		vmcs12->launch_state = 1;

		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}

/*
 * Part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
* This function should be called when the active VMCS is L1's (vmcs01). */ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; u32 entry_failure_code; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) vcpu->arch.efer |= (EFER_LMA | EFER_LME); else vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); vmx_set_efer(vcpu, vcpu->arch.efer); kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); /* * Note that calling vmx_set_cr0 is important, even if cr0 hasn't * actually changed, because vmx_set_cr0 refers to efer set above. * * CR0_GUEST_HOST_MASK is already set in the original vmcs01 * (KVM doesn't change it); */ vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; vmx_set_cr0(vcpu, vmcs12->host_cr0); /* Same as above - no reason to call set_cr4_guest_host_mask(). */ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); kvm_set_cr4(vcpu, vmcs12->host_cr4); nested_ept_uninit_mmu_context(vcpu); /* * Only PDPTE load can fail as the value of cr3 was checked on entry and * couldn't have changed. */ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; if (enable_vpid) { /* * Trivially support vpid by letting L2s share their parent * L1's vpid. TODO: move to a more elaborate solution, giving * each L2 its own vpid and exposing the vpid feature to L1. */ vmx_flush_tlb(vcpu); } /* Restore posted intr vector. */ if (nested_cpu_has_posted_intr(vmcs12)) vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. 
*/ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, 0); if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); vcpu->arch.pat = vmcs12->host_ia32_pat; } if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, vmcs12->host_ia32_perf_global_ctrl); /* Set L1 segment info according to Intel SDM 27.5.2 Loading Host Segment and Descriptor-Table Registers */ seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .selector = vmcs12->host_cs_selector, .type = 11, .present = 1, .s = 1, .g = 1 }; if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) seg.l = 1; else seg.db = 1; vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .type = 3, .present = 1, .s = 1, .db = 1, .g = 1 }; seg.selector = vmcs12->host_ds_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); seg.selector = vmcs12->host_es_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); seg.selector = vmcs12->host_ss_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); seg.selector = vmcs12->host_fs_selector; seg.base = vmcs12->host_fs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); seg.selector = vmcs12->host_gs_selector; seg.base = vmcs12->host_gs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); seg = (struct kvm_segment) { .base = vmcs12->host_tr_base, .limit = 0x67, .selector = vmcs12->host_tr_selector, .type = 11, .present = 1 }; vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); kvm_set_dr(vcpu, 7, 0x400); vmcs_write64(GUEST_IA32_DEBUGCTL, 0); if (cpu_has_vmx_msr_bitmap()) vmx_set_msr_bitmap(vcpu); if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, vmcs12->vm_exit_msr_load_count)) nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); } /* * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 * and modify vmcs12 to make it see what it would expect to see there if * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) */ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 vm_inst_error = 0; /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); leave_guest_mode(vcpu); prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, exit_qualification); if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, vmcs12->vm_exit_msr_store_count)) nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); if (unlikely(vmx->fail)) vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR); vmx_switch_vmcs(vcpu, &vmx->vmcs01); /* * TODO: SDM says that with acknowledge interrupt on exit, bit 31 of * the VM-exit interrupt information (valid interrupt) is always set to * 1 on EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't need * kvm_cpu_has_interrupt(). See the commit message for details. 
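 * (In other words: on real hardware with "acknowledge interrupt on exit" set,
 * the vector is latched into the exit-interruption field automatically; L0
 * reproduces that below by pulling the pending vector from the local APIC
 * via kvm_cpu_get_interrupt().)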
*/ if (nested_exit_intr_ack_set(vcpu) && exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && kvm_cpu_has_interrupt(vcpu)) { int irq = kvm_cpu_get_interrupt(vcpu); WARN_ON(irq < 0); vmcs12->vm_exit_intr_info = irq | INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; } trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, vmcs12->exit_qualification, vmcs12->idt_vectoring_info_field, vmcs12->vm_exit_intr_info, vmcs12->vm_exit_intr_error_code, KVM_ISA_VMX); vm_entry_controls_reset_shadow(vmx); vm_exit_controls_reset_shadow(vmx); vmx_segment_cache_clear(vmx); /* if no vmcs02 cache requested, remove the one we used */ if (VMCS02_POOL_SIZE == 0) nested_free_vmcs02(vmx, vmx->nested.current_vmptr); load_vmcs12_host_state(vcpu, vmcs12); /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); if (vmx->hv_deadline_tsc == -1) vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_VMX_PREEMPTION_TIMER); else vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_VMX_PREEMPTION_TIMER); if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); if (vmx->nested.change_vmcs01_virtual_x2apic_mode) { vmx->nested.change_vmcs01_virtual_x2apic_mode = false; vmx_set_virtual_x2apic_mode(vcpu, vcpu->arch.apic_base & X2APIC_ENABLE); } else if (!nested_cpu_has_ept(vmcs12) && nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { vmx_flush_tlb_ept_only(vcpu); } /* This is needed for same reason as it was needed in prepare_vmcs02 */ vmx->host_rsp = 0; /* Unpin physical memory we referred to in vmcs02 */ if (vmx->nested.apic_access_page) { kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { kvm_release_page_dirty(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } if (vmx->nested.pi_desc_page) { kunmap(vmx->nested.pi_desc_page); kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; vmx->nested.pi_desc = NULL; } /* * We are now running in L2, mmu_notifier will force to reload the * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1. */ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); /* * Exiting from L2 to L1, we're now back to L1 which thinks it just * finished a VMLAUNCH or VMRESUME instruction, so we need to set the * success or failure flag accordingly. */ if (unlikely(vmx->fail)) { vmx->fail = 0; nested_vmx_failValid(vcpu, vm_inst_error); } else nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) vmx->nested.sync_shadow_vmcs = true; /* in case we halted in L2 */ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } /* * Forcibly leave nested mode in order to be able to reset the VCPU later on. */ static void vmx_leave_nested(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) { to_vmx(vcpu)->nested.nested_run_pending = 0; nested_vmx_vmexit(vcpu, -1, 0, 0); } free_nested(to_vmx(vcpu)); } /* * L1's failure to enter L2 is a subset of a normal exit, as explained in * 23.7 "VM-entry failures during or after loading guest state" (this also * lists the acceptable exit-reason and exit-qualification parameters). * It should only be called before L2 actually succeeded to run, and when * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss). 
*/ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification) { load_vmcs12_host_state(vcpu, vmcs12); vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY; vmcs12->exit_qualification = qualification; nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) to_vmx(vcpu)->nested.sync_shadow_vmcs = true; } static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) { return X86EMUL_CONTINUE; } #ifdef CONFIG_X86_64 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */ static inline int u64_shl_div_u64(u64 a, unsigned int shift, u64 divisor, u64 *result) { u64 low = a << shift, high = a >> (64 - shift); /* To avoid the overflow on divq */ if (high >= divisor) return 1; /* Low hold the result, high hold rem which is discarded */ asm("divq %2\n\t" : "=a" (low), "=d" (high) : "rm" (divisor), "0" (low), "1" (high)); *result = low; return 0; } static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 tscl = rdtsc(); u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; /* Convert to host delta tsc if tsc scaling is enabled */ if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && u64_shl_div_u64(delta_tsc, kvm_tsc_scaling_ratio_frac_bits, vcpu->arch.tsc_scaling_ratio, &delta_tsc)) return -ERANGE; /* * If the delta tsc can't fit in the 32 bit after the multi shift, * we can't use the preemption timer. * It's possible that it fits on later vmentries, but checking * on every vmentry is costly so we just use an hrtimer. */ if (delta_tsc >> (cpu_preemption_timer_multi + 32)) return -ERANGE; vmx->hv_deadline_tsc = tscl + delta_tsc; vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_VMX_PREEMPTION_TIMER); return delta_tsc == 0; } static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); vmx->hv_deadline_tsc = -1; vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_VMX_PREEMPTION_TIMER); } #endif static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) { if (ple_gap) shrink_ple_window(vcpu); } static void vmx_slot_enable_log_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_slot_leaf_clear_dirty(kvm, slot); kvm_mmu_slot_largepage_remove_write_access(kvm, slot); } static void vmx_slot_disable_log_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_slot_set_dirty(kvm, slot); } static void vmx_flush_log_dirty(struct kvm *kvm) { kvm_flush_pml_buffers(kvm); } static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t gpa; struct page *page = NULL; u64 *pml_address; if (is_guest_mode(vcpu)) { WARN_ON_ONCE(vmx->nested.pml_full); /* * Check if PML is enabled for the nested guest. * Whether eptp bit 6 is set is already checked * as part of A/D emulation. 
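 * (EPTP bit 6 is the accessed/dirty-flags enable; PML is architecturally
 * valid only with A/D flags on, so by this point the bit is known good.)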
*/ vmcs12 = get_vmcs12(vcpu); if (!nested_cpu_has_pml(vmcs12)) return 0; if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { vmx->nested.pml_full = true; return 1; } gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address); if (is_error_page(page)) return 0; pml_address = kmap(page); pml_address[vmcs12->guest_pml_index--] = gpa; kunmap(page); kvm_release_page_clean(page); } return 0; } static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t offset, unsigned long mask) { kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); } /* * This routine does the following things for vCPU which is going * to be blocked if VT-d PI is enabled. * - Store the vCPU to the wakeup list, so when interrupts happen * we can find the right vCPU to wake up. * - Change the Posted-interrupt descriptor as below: * 'NDST' <-- vcpu->pre_pcpu * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR * - If 'ON' is set during this process, which means at least one * interrupt is posted for this vCPU, we cannot block it, in * this case, return 1, otherwise, return 0. * */ static int pi_pre_block(struct kvm_vcpu *vcpu) { unsigned long flags; unsigned int dest; struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || !kvm_vcpu_apicv_active(vcpu)) return 0; vcpu->pre_pcpu = vcpu->cpu; spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); list_add_tail(&vcpu->blocked_vcpu_list, &per_cpu(blocked_vcpu_on_cpu, vcpu->pre_pcpu)); spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); do { old.control = new.control = pi_desc->control; /* * We should not block the vCPU if * an interrupt is posted for it. */ if (pi_test_on(pi_desc) == 1) { spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); list_del(&vcpu->blocked_vcpu_list); spin_unlock_irqrestore( &per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); vcpu->pre_pcpu = -1; return 1; } WARN((pi_desc->sn == 1), "Warning: SN field of posted-interrupts " "is set before blocking\n"); /* * Since vCPU can be preempted during this process, * vcpu->cpu could be different with pre_pcpu, we * need to set pre_pcpu as the destination of wakeup * notification event, then we can find the right vCPU * to wakeup in wakeup handler if interrupts happen * when the vCPU is in blocked state. 
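 * (The encoding below mirrors the APIC destination formats: x2APIC uses the
 * full 32-bit ID directly, while xAPIC places the 8-bit ID in bits 15:8 of
 * the destination field.)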
*/ dest = cpu_physical_id(vcpu->pre_pcpu); if (x2apic_enabled()) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; /* set 'NV' to 'wakeup vector' */ new.nv = POSTED_INTR_WAKEUP_VECTOR; } while (cmpxchg(&pi_desc->control, old.control, new.control) != old.control); return 0; } static int vmx_pre_block(struct kvm_vcpu *vcpu) { if (pi_pre_block(vcpu)) return 1; if (kvm_lapic_hv_timer_in_use(vcpu)) kvm_lapic_switch_to_sw_timer(vcpu); return 0; } static void pi_post_block(struct kvm_vcpu *vcpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); struct pi_desc old, new; unsigned int dest; unsigned long flags; if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || !kvm_vcpu_apicv_active(vcpu)) return; do { old.control = new.control = pi_desc->control; dest = cpu_physical_id(vcpu->cpu); if (x2apic_enabled()) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; /* Allow posting non-urgent interrupts */ new.sn = 0; /* set 'NV' to 'notification vector' */ new.nv = POSTED_INTR_VECTOR; } while (cmpxchg(&pi_desc->control, old.control, new.control) != old.control); if(vcpu->pre_pcpu != -1) { spin_lock_irqsave( &per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); list_del(&vcpu->blocked_vcpu_list); spin_unlock_irqrestore( &per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); vcpu->pre_pcpu = -1; } } static void vmx_post_block(struct kvm_vcpu *vcpu) { if (kvm_x86_ops->set_hv_timer) kvm_lapic_switch_to_hv_timer(vcpu); pi_post_block(vcpu); } /* * vmx_update_pi_irte - set IRTE for Posted-Interrupts * * @kvm: kvm * @host_irq: host irq of the interrupt * @guest_irq: gsi of the interrupt * @set: set or unset PI * returns 0 on success, < 0 on failure */ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set) { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; struct kvm_lapic_irq irq; struct kvm_vcpu *vcpu; struct vcpu_data vcpu_info; int idx, ret = -EINVAL; if (!kvm_arch_has_assigned_device(kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || !kvm_vcpu_apicv_active(kvm->vcpus[0])) return 0; idx = srcu_read_lock(&kvm->irq_srcu); irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); BUG_ON(guest_irq >= irq_rt->nr_rt_entries); hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { if (e->type != KVM_IRQ_ROUTING_MSI) continue; /* * VT-d PI cannot support posting multicast/broadcast * interrupts to a vCPU, we still use interrupt remapping * for these kind of interrupts. * * For lowest-priority interrupts, we only support * those with single CPU as the destination, e.g. user * configures the interrupts via /proc/irq or uses * irqbalance to make the interrupts single-CPU. * * We will support full lowest-priority interrupt later. */ kvm_set_msi_irq(kvm, e, &irq); if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) { /* * Make sure the IRTE is in remapped mode if * we don't handle it in posted mode. 
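 * (Passing a NULL vcpu_info to irq_set_vcpu_affinity() below asks the IOMMU
 * driver to program a plain remapped IRTE instead of a posted one.)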
*/ ret = irq_set_vcpu_affinity(host_irq, NULL); if (ret < 0) { printk(KERN_INFO "failed to back to remapped mode, irq: %u\n", host_irq); goto out; } continue; } vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); if (set) ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); else { /* suppress notification event before unposting */ pi_set_sn(vcpu_to_pi_desc(vcpu)); ret = irq_set_vcpu_affinity(host_irq, NULL); pi_clear_sn(vcpu_to_pi_desc(vcpu)); } if (ret < 0) { printk(KERN_INFO "%s: failed to update PI IRTE\n", __func__); goto out; } } ret = 0; out: srcu_read_unlock(&kvm->irq_srcu, idx); return ret; } static void vmx_setup_mce(struct kvm_vcpu *vcpu) { if (vcpu->arch.mcg_cap & MCG_LMCE_P) to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= FEATURE_CONTROL_LMCE; else to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= ~FEATURE_CONTROL_LMCE; } static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, .hardware_setup = hardware_setup, .hardware_unsetup = hardware_unsetup, .check_processor_compatibility = vmx_check_processor_compat, .hardware_enable = hardware_enable, .hardware_disable = hardware_disable, .cpu_has_accelerated_tpr = report_flexpriority, .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase, .vcpu_create = vmx_create_vcpu, .vcpu_free = vmx_free_vcpu, .vcpu_reset = vmx_vcpu_reset, .prepare_guest_switch = vmx_save_host_state, .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put, .update_bp_intercept = update_exception_bitmap, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, .get_segment = vmx_get_segment, .set_segment = vmx_set_segment, .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, .decache_cr3 = vmx_decache_cr3, .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, .set_cr0 = vmx_set_cr0, .set_cr3 = vmx_set_cr3, .set_cr4 = vmx_set_cr4, .set_efer = vmx_set_efer, .get_idt = vmx_get_idt, .set_idt = vmx_set_idt, .get_gdt = vmx_get_gdt, .set_gdt = vmx_set_gdt, .get_dr6 = vmx_get_dr6, .set_dr6 = vmx_set_dr6, .set_dr7 = vmx_set_dr7, .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, .cache_reg = vmx_cache_reg, .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, .tlb_flush = vmx_flush_tlb, .run = vmx_vcpu_run, .handle_exit = vmx_handle_exit, .skip_emulated_instruction = skip_emulated_instruction, .set_interrupt_shadow = vmx_set_interrupt_shadow, .get_interrupt_shadow = vmx_get_interrupt_shadow, .patch_hypercall = vmx_patch_hypercall, .set_irq = vmx_inject_irq, .set_nmi = vmx_inject_nmi, .queue_exception = vmx_queue_exception, .cancel_injection = vmx_cancel_injection, .interrupt_allowed = vmx_interrupt_allowed, .nmi_allowed = vmx_nmi_allowed, .get_nmi_mask = vmx_get_nmi_mask, .set_nmi_mask = vmx_set_nmi_mask, .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .get_enable_apicv = vmx_get_enable_apicv, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, .load_eoi_exitmap = vmx_load_eoi_exitmap, .apicv_post_state_restore = vmx_apicv_post_state_restore, .hwapic_irr_update = vmx_hwapic_irr_update, .hwapic_isr_update = vmx_hwapic_isr_update, 
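	/* Posted-interrupt plumbing: sync the hardware PIR into the soft IRR
	 * and deliver interrupts directly to a running guest where possible. */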
.sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, .get_mt_mask = vmx_get_mt_mask, .get_exit_info = vmx_get_exit_info, .get_lpage_level = vmx_get_lpage_level, .cpuid_update = vmx_cpuid_update, .rdtscp_supported = vmx_rdtscp_supported, .invpcid_supported = vmx_invpcid_supported, .set_supported_cpuid = vmx_set_supported_cpuid, .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, .write_tsc_offset = vmx_write_tsc_offset, .set_tdp_cr3 = vmx_set_cr3, .check_intercept = vmx_check_intercept, .handle_external_intr = vmx_handle_external_intr, .mpx_supported = vmx_mpx_supported, .xsaves_supported = vmx_xsaves_supported, .check_nested_events = vmx_check_nested_events, .sched_in = vmx_sched_in, .slot_enable_log_dirty = vmx_slot_enable_log_dirty, .slot_disable_log_dirty = vmx_slot_disable_log_dirty, .flush_log_dirty = vmx_flush_log_dirty, .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, .write_log_dirty = vmx_write_pml_buffer, .pre_block = vmx_pre_block, .post_block = vmx_post_block, .pmu_ops = &intel_pmu_ops, .update_pi_irte = vmx_update_pi_irte, #ifdef CONFIG_X86_64 .set_hv_timer = vmx_set_hv_timer, .cancel_hv_timer = vmx_cancel_hv_timer, #endif .setup_mce = vmx_setup_mce, }; static int __init vmx_init(void) { int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx), THIS_MODULE); if (r) return r; #ifdef CONFIG_KEXEC_CORE rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); #endif return 0; } static void __exit vmx_exit(void) { #ifdef CONFIG_KEXEC_CORE RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); synchronize_rcu(); #endif kvm_exit(); } module_init(vmx_init) module_exit(vmx_exit)
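The preemption-timer path above relies on u64_shl_div_u64() to detect when (delta_tsc << frac_bits) / ratio would overflow 64 bits before issuing the divq. Below is a minimal portable sketch of the same overflow rule, using a compiler-provided 128-bit type instead of inline assembly; the function name and the use of unsigned __int128 are illustrative, not what the kernel ships.

/* Returns 1 on overflow, 0 on success -- same contract as the asm helper.
 * Assumes divisor != 0, as the caller guarantees in the original code. */
#include <stdint.h>

static inline int u64_shl_div_u64_sketch(uint64_t a, unsigned int shift,
                                         uint64_t divisor, uint64_t *result)
{
    unsigned __int128 dividend = (unsigned __int128)a << shift;

    /* divq faults if the quotient does not fit in 64 bits; that happens
     * exactly when the high half of the dividend is >= the divisor. */
    if ((uint64_t)(dividend >> 64) >= divisor)
        return 1;

    *result = (uint64_t)(dividend / divisor);
    return 0;
}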
./CrossVul/dataset_final_sorted/CWE-617/c/bad_2524_0
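The pi_pre_block()/pi_post_block() functions in the file above update the 64-bit posted-interrupt control word with a compare-and-swap retry loop, since the IOMMU may set the ON bit concurrently. A self-contained sketch of that pattern in C11 atomics follows; the struct and function names here are invented for illustration.

#include <stdatomic.h>
#include <stdint.h>

struct pi_ctl { _Atomic uint64_t control; };

/* Replace the bits selected by 'mask' with 'bits', retrying if another
 * CPU (or the IOMMU posting an interrupt) modified the word meanwhile. */
static void pi_splice_bits(struct pi_ctl *pi, uint64_t mask, uint64_t bits)
{
    uint64_t old = atomic_load(&pi->control);
    uint64_t next;

    do {
        next = (old & ~mask) | bits;
        /* on failure, 'old' is reloaded with the current value */
    } while (!atomic_compare_exchange_weak(&pi->control, &old, next));
}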
crossvul-cpp_data_good_219_0
/* * Error resilience / concealment * * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Error resilience / concealment. */ #include <limits.h> #include "libavutil/internal.h" #include "avcodec.h" #include "error_resilience.h" #include "me_cmp.h" #include "mpegutils.h" #include "mpegvideo.h" #include "rectangle.h" #include "thread.h" #include "version.h" /** * @param stride the number of MVs to get to the next row * @param mv_step the number of MVs per row or column in a macroblock */ static void set_mv_strides(ERContext *s, ptrdiff_t *mv_step, ptrdiff_t *stride) { if (s->avctx->codec_id == AV_CODEC_ID_H264) { av_assert0(s->quarter_sample); *mv_step = 4; *stride = s->mb_width * 4; } else { *mv_step = 2; *stride = s->b8_stride; } } /** * Replace the current MB with a flat dc-only version. */ static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y) { int *linesize = s->cur_pic.f->linesize; int dc, dcu, dcv, y, i; for (i = 0; i < 4; i++) { dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride]; if (dc < 0) dc = 0; else if (dc > 2040) dc = 2040; for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8; } } dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride]; dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride]; if (dcu < 0) dcu = 0; else if (dcu > 2040) dcu = 2040; if (dcv < 0) dcv = 0; else if (dcv > 2040) dcv = 2040; if (dest_cr) for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) { dest_cb[x + y * linesize[1]] = dcu / 8; dest_cr[x + y * linesize[2]] = dcv / 8; } } } static void filter181(int16_t *data, int width, int height, ptrdiff_t stride) { int x, y; /* horizontal filter */ for (y = 1; y < height - 1; y++) { int prev_dc = data[0 + y * stride]; for (x = 1; x < width - 1; x++) { int dc; dc = -prev_dc + data[x + y * stride] * 8 - data[x + 1 + y * stride]; dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) * 10923 + 32768) >> 16; prev_dc = data[x + y * stride]; data[x + y * stride] = dc; } } /* vertical filter */ for (x = 1; x < width - 1; x++) { int prev_dc = data[x]; for (y = 1; y < height - 1; y++) { int dc; dc = -prev_dc + data[x + y * stride] * 8 - data[x + (y + 1) * stride]; dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) * 10923 + 32768) >> 16; prev_dc = data[x + y * stride]; data[x + y * stride] = dc; } } } /** * guess the dc of blocks which do not have an undamaged dc * @param w width in 8 pixel blocks * @param h height in 8 pixel blocks */ static void guess_dc(ERContext *s, int16_t *dc, int w, int h, ptrdiff_t stride, int is_luma) { int b_x, b_y; int16_t (*col )[4] = av_malloc_array(stride, h*sizeof( int16_t)*4); uint32_t (*dist)[4] = av_malloc_array(stride, 
h*sizeof(uint32_t)*4); if(!col || !dist) { av_log(s->avctx, AV_LOG_ERROR, "guess_dc() is out of memory\n"); goto fail; } for(b_y=0; b_y<h; b_y++){ int color= 1024; int distance= -1; for(b_x=0; b_x<w; b_x++){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_x; } col [b_x + b_y*stride][1]= color; dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999; } color= 1024; distance= -1; for(b_x=w-1; b_x>=0; b_x--){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_x; } col [b_x + b_y*stride][0]= color; dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999; } } for(b_x=0; b_x<w; b_x++){ int color= 1024; int distance= -1; for(b_y=0; b_y<h; b_y++){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_y; } col [b_x + b_y*stride][3]= color; dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999; } color= 1024; distance= -1; for(b_y=h-1; b_y>=0; b_y--){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_y; } col [b_x + b_y*stride][2]= color; dist[b_x + b_y*stride][2]= distance >= 0 ? 
distance-b_y : 9999; } } for (b_y = 0; b_y < h; b_y++) { for (b_x = 0; b_x < w; b_x++) { int mb_index, error, j; int64_t guess, weight_sum; mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride; error = s->error_status_table[mb_index]; if (IS_INTER(s->cur_pic.mb_type[mb_index])) continue; // inter if (!(error & ER_DC_ERROR)) continue; // dc-ok weight_sum = 0; guess = 0; for (j = 0; j < 4; j++) { int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y*stride][j], 1); guess += weight*(int64_t)col[b_x + b_y*stride][j]; weight_sum += weight; } guess = (guess + weight_sum / 2) / weight_sum; dc[b_x + b_y * stride] = guess; } } fail: av_freep(&col); av_freep(&dist); } /** * simple horizontal deblocking filter used for error resilience * @param w width in 8 pixel blocks * @param h height in 8 pixel blocks */ static void h_block_filter(ERContext *s, uint8_t *dst, int w, int h, ptrdiff_t stride, int is_luma) { int b_x, b_y; ptrdiff_t mvx_stride, mvy_stride; const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; set_mv_strides(s, &mvx_stride, &mvy_stride); mvx_stride >>= is_luma; mvy_stride *= mvx_stride; for (b_y = 0; b_y < h; b_y++) { for (b_x = 0; b_x < w - 1; b_x++) { int y; int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]; int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]; int left_intra = IS_INTRA(s->cur_pic.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]); int right_intra = IS_INTRA(s->cur_pic.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]); int left_damage = left_status & ER_MB_ERROR; int right_damage = right_status & ER_MB_ERROR; int offset = b_x * 8 + b_y * stride * 8; int16_t *left_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x]; int16_t *right_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)]; if (!(left_damage || right_damage)) continue; // both undamaged if ((!left_intra) && (!right_intra) && FFABS(left_mv[0] - right_mv[0]) + FFABS(left_mv[1] + right_mv[1]) < 2) continue; for (y = 0; y < 8; y++) { int a, b, c, d; a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride]; b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride]; c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride]; d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1); d = FFMAX(d, 0); if (b < 0) d = -d; if (d == 0) continue; if (!(left_damage && right_damage)) d = d * 16 / 9; if (left_damage) { dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)]; dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)]; dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)]; dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)]; } if (right_damage) { dst[offset + 8 + y * stride] = cm[dst[offset + 8 + y * stride] - ((d * 7) >> 4)]; dst[offset + 9 + y * stride] = cm[dst[offset + 9 + y * stride] - ((d * 5) >> 4)]; dst[offset + 10+ y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)]; dst[offset + 11+ y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)]; } } } } } /** * simple vertical deblocking filter used for error resilience * @param w width in 8 pixel blocks * @param h height in 8 pixel blocks */ static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h, ptrdiff_t stride, int is_luma) { int b_x, b_y; ptrdiff_t mvx_stride, mvy_stride; const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; set_mv_strides(s, &mvx_stride, 
&mvy_stride); mvx_stride >>= is_luma; mvy_stride *= mvx_stride; for (b_y = 0; b_y < h - 1; b_y++) { for (b_x = 0; b_x < w; b_x++) { int x; int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]; int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]; int top_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]); int bottom_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]); int top_damage = top_status & ER_MB_ERROR; int bottom_damage = bottom_status & ER_MB_ERROR; int offset = b_x * 8 + b_y * stride * 8; int16_t *top_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x]; int16_t *bottom_mv = s->cur_pic.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x]; if (!(top_damage || bottom_damage)) continue; // both undamaged if ((!top_intra) && (!bottom_intra) && FFABS(top_mv[0] - bottom_mv[0]) + FFABS(top_mv[1] + bottom_mv[1]) < 2) continue; for (x = 0; x < 8; x++) { int a, b, c, d; a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride]; b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride]; c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride]; d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1); d = FFMAX(d, 0); if (b < 0) d = -d; if (d == 0) continue; if (!(top_damage && bottom_damage)) d = d * 16 / 9; if (top_damage) { dst[offset + x + 7 * stride] = cm[dst[offset + x + 7 * stride] + ((d * 7) >> 4)]; dst[offset + x + 6 * stride] = cm[dst[offset + x + 6 * stride] + ((d * 5) >> 4)]; dst[offset + x + 5 * stride] = cm[dst[offset + x + 5 * stride] + ((d * 3) >> 4)]; dst[offset + x + 4 * stride] = cm[dst[offset + x + 4 * stride] + ((d * 1) >> 4)]; } if (bottom_damage) { dst[offset + x + 8 * stride] = cm[dst[offset + x + 8 * stride] - ((d * 7) >> 4)]; dst[offset + x + 9 * stride] = cm[dst[offset + x + 9 * stride] - ((d * 5) >> 4)]; dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)]; dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)]; } } } } } #define MV_FROZEN 8 #define MV_CHANGED 4 #define MV_UNCHANGED 2 #define MV_LISTED 1 static av_always_inline void add_blocklist(int (*blocklist)[2], int *blocklist_length, uint8_t *fixed, int mb_x, int mb_y, int mb_xy) { if (fixed[mb_xy]) return; fixed[mb_xy] = MV_LISTED; blocklist[ *blocklist_length ][0] = mb_x; blocklist[(*blocklist_length)++][1] = mb_y; } static void guess_mv(ERContext *s) { int (*blocklist)[2], (*next_blocklist)[2]; uint8_t *fixed; const ptrdiff_t mb_stride = s->mb_stride; const int mb_width = s->mb_width; int mb_height = s->mb_height; int i, depth, num_avail; int mb_x, mb_y; ptrdiff_t mot_step, mot_stride; int blocklist_length, next_blocklist_length; if (s->last_pic.f && s->last_pic.f->data[0]) mb_height = FFMIN(mb_height, (s->last_pic.f->height+15)>>4); if (s->next_pic.f && s->next_pic.f->data[0]) mb_height = FFMIN(mb_height, (s->next_pic.f->height+15)>>4); blocklist = (int (*)[2])s->er_temp_buffer; next_blocklist = blocklist + s->mb_stride * s->mb_height; fixed = (uint8_t *)(next_blocklist + s->mb_stride * s->mb_height); set_mv_strides(s, &mot_step, &mot_stride); num_avail = 0; if (s->last_pic.motion_val[0]) ff_thread_await_progress(s->last_pic.tf, mb_height-1, 0); for (i = 0; i < mb_width * mb_height; i++) { const int mb_xy = s->mb_index2xy[i]; int f = 0; int error = s->error_status_table[mb_xy]; if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) f = MV_FROZEN; // intra // FIXME check 
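            /* fixed[] is a small per-MB state machine: MV_FROZEN marks a
             * trusted motion vector, MV_CHANGED / MV_UNCHANGED record the
             * outcome of the last guessing pass, and MV_LISTED means the
             * block is queued on the current blocklist. */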
if (!(error & ER_MV_ERROR)) f = MV_FROZEN; // inter with undamaged MV fixed[mb_xy] = f; if (f == MV_FROZEN) num_avail++; else if(s->last_pic.f->data[0] && s->last_pic.motion_val[0]){ const int mb_y= mb_xy / s->mb_stride; const int mb_x= mb_xy % s->mb_stride; const int mot_index= (mb_x + mb_y*mot_stride) * mot_step; s->cur_pic.motion_val[0][mot_index][0]= s->last_pic.motion_val[0][mot_index][0]; s->cur_pic.motion_val[0][mot_index][1]= s->last_pic.motion_val[0][mot_index][1]; s->cur_pic.ref_index[0][4*mb_xy] = s->last_pic.ref_index[0][4*mb_xy]; } } if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width / 2) { for (mb_y = 0; mb_y < mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * s->mb_stride; int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD; if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) continue; if (!(s->error_status_table[mb_xy] & ER_MV_ERROR)) continue; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); } } return; } blocklist_length = 0; for (mb_y = 0; mb_y < mb_height; mb_y++) { for (mb_x = 0; mb_x < mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * mb_stride; if (fixed[mb_xy] == MV_FROZEN) { if (mb_x) add_blocklist(blocklist, &blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1); if (mb_y) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride); if (mb_x+1 < mb_width) add_blocklist(blocklist, &blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1); if (mb_y+1 < mb_height) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride); } } } for (depth = 0; ; depth++) { int changed, pass, none_left; int blocklist_index; none_left = 1; changed = 1; for (pass = 0; (changed || pass < 2) && pass < 10; pass++) { int score_sum = 0; changed = 0; for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) { const int mb_x = blocklist[blocklist_index][0]; const int mb_y = blocklist[blocklist_index][1]; const int mb_xy = mb_x + mb_y * mb_stride; int mv_predictor[8][2]; int ref[8]; int pred_count; int j; int best_score; int best_pred; int mot_index; int prev_x, prev_y, prev_ref; if ((mb_x ^ mb_y ^ pass) & 1) continue; av_assert2(fixed[mb_xy] != MV_FROZEN); av_assert1(!IS_INTRA(s->cur_pic.mb_type[mb_xy])); av_assert1(s->last_pic.f && s->last_pic.f->data[0]); j = 0; if (mb_x > 0) j |= fixed[mb_xy - 1]; if (mb_x + 1 < mb_width) j |= fixed[mb_xy + 1]; if (mb_y > 0) j |= fixed[mb_xy - mb_stride]; if (mb_y + 1 < mb_height) j |= fixed[mb_xy + mb_stride]; av_assert2(j & MV_FROZEN); if (!(j & MV_CHANGED) && pass > 1) continue; none_left = 0; pred_count = 0; mot_index = (mb_x + mb_y * mot_stride) * mot_step; if (mb_x > 0 && fixed[mb_xy - 1] > 1) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index - mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index - mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy - 1)]; pred_count++; } if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index + mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index + mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy + 1)]; pred_count++; } if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0]; mv_predictor[pred_count][1] = 
s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)]; pred_count++; } if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride] > 1) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)]; pred_count++; } if (pred_count == 0) continue; if (pred_count > 1) { int sum_x = 0, sum_y = 0, sum_r = 0; int max_x, max_y, min_x, min_y, max_r, min_r; for (j = 0; j < pred_count; j++) { sum_x += mv_predictor[j][0]; sum_y += mv_predictor[j][1]; sum_r += ref[j]; if (j && ref[j] != ref[j - 1]) goto skip_mean_and_median; } /* mean */ mv_predictor[pred_count][0] = sum_x / j; mv_predictor[pred_count][1] = sum_y / j; ref[pred_count] = sum_r / j; /* median */ if (pred_count >= 3) { min_y = min_x = min_r = 99999; max_y = max_x = max_r = -99999; } else { min_x = min_y = max_x = max_y = min_r = max_r = 0; } for (j = 0; j < pred_count; j++) { max_x = FFMAX(max_x, mv_predictor[j][0]); max_y = FFMAX(max_y, mv_predictor[j][1]); max_r = FFMAX(max_r, ref[j]); min_x = FFMIN(min_x, mv_predictor[j][0]); min_y = FFMIN(min_y, mv_predictor[j][1]); min_r = FFMIN(min_r, ref[j]); } mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x; mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y; ref[pred_count + 1] = sum_r - max_r - min_r; if (pred_count == 4) { mv_predictor[pred_count + 1][0] /= 2; mv_predictor[pred_count + 1][1] /= 2; ref[pred_count + 1] /= 2; } pred_count += 2; } skip_mean_and_median: /* zero MV */ mv_predictor[pred_count][0] = mv_predictor[pred_count][1] = ref[pred_count] = 0; pred_count++; prev_x = s->cur_pic.motion_val[0][mot_index][0]; prev_y = s->cur_pic.motion_val[0][mot_index][1]; prev_ref = s->cur_pic.ref_index[0][4 * mb_xy]; /* last MV */ mv_predictor[pred_count][0] = prev_x; mv_predictor[pred_count][1] = prev_y; ref[pred_count] = prev_ref; pred_count++; best_pred = 0; best_score = 256 * 256 * 256 * 64; for (j = 0; j < pred_count; j++) { int *linesize = s->cur_pic.f->linesize; int score = 0; uint8_t *src = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; s->cur_pic.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0]; s->cur_pic.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1]; // predictor intra or otherwise not available if (ref[j] < 0) continue; s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); if (mb_x > 0 && fixed[mb_xy - 1] > 1) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k * linesize[0] - 1] - src[k * linesize[0]]); } if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k * linesize[0] + 15] - src[k * linesize[0] + 16]); } if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k - linesize[0]] - src[k]); } if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] > 1) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k + linesize[0] * 15] - src[k + linesize[0] * 16]); } if (score <= best_score) { // <= will favor the last MV best_score = score; best_pred = j; } } score_sum += best_score; s->mv[0][0][0] = mv_predictor[best_pred][0]; s->mv[0][0][1] = mv_predictor[best_pred][1]; for (i = 0; i < mot_step; i++) for (j = 0; j < mot_step; j++) { s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0]; 
s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1]; } s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) { fixed[mb_xy] = MV_CHANGED; changed++; } else fixed[mb_xy] = MV_UNCHANGED; } } if (none_left) return; next_blocklist_length = 0; for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) { const int mb_x = blocklist[blocklist_index][0]; const int mb_y = blocklist[blocklist_index][1]; const int mb_xy = mb_x + mb_y * mb_stride; if (fixed[mb_xy] & (MV_CHANGED|MV_UNCHANGED|MV_FROZEN)) { fixed[mb_xy] = MV_FROZEN; if (mb_x > 0) add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1); if (mb_y > 0) add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride); if (mb_x + 1 < mb_width) add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1); if (mb_y + 1 < mb_height) add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride); } } av_assert0(next_blocklist_length <= mb_height * mb_width); FFSWAP(int , blocklist_length, next_blocklist_length); FFSWAP(void*, blocklist, next_blocklist); } } static int is_intra_more_likely(ERContext *s) { int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y; if (!s->last_pic.f || !s->last_pic.f->data[0]) return 1; // no previous frame available -> use spatial prediction if (s->avctx->error_concealment & FF_EC_FAVOR_INTER) return 0; undamaged_count = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; const int error = s->error_status_table[mb_xy]; if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR))) undamaged_count++; } if (undamaged_count < 5) return 0; // almost all MBs damaged -> use temporal prediction // prevent dsp.sad() check, that requires access to the image if (CONFIG_XVMC && s->avctx->hwaccel && s->avctx->hwaccel->decode_mb && s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) return 1; skip_amount = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs is_intra_likely = 0; j = 0; for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int error; const int mb_xy = mb_x + mb_y * s->mb_stride; error = s->error_status_table[mb_xy]; if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR)) continue; // skip damaged j++; // skip a few to speed things up if ((j % skip_amount) != 0) continue; if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) { int *linesize = s->cur_pic.f->linesize; uint8_t *mb_ptr = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; uint8_t *last_mb_ptr = s->last_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; if (s->avctx->codec_id == AV_CODEC_ID_H264) { // FIXME } else { ff_thread_await_progress(s->last_pic.tf, mb_y, 0); } is_intra_likely += s->mecc.sad[0](NULL, last_mb_ptr, mb_ptr, linesize[0], 16); // FIXME need await_progress() here is_intra_likely -= s->mecc.sad[0](NULL, last_mb_ptr, last_mb_ptr + linesize[0] * 16, linesize[0], 16); } else { if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) is_intra_likely++; else is_intra_likely--; } } } // av_log(NULL, AV_LOG_ERROR, "is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type); return is_intra_likely > 0; } void ff_er_frame_start(ERContext *s) { if (!s->avctx->error_concealment) return; if (!s->mecc_inited) { ff_me_cmp_init(&s->mecc, s->avctx); s->mecc_inited = 1; } memset(s->error_status_table, ER_MB_ERROR | VP_START | 
ER_MB_END, s->mb_stride * s->mb_height * sizeof(uint8_t)); atomic_init(&s->error_count, 3 * s->mb_num); s->error_occurred = 0; } static int er_supported(ERContext *s) { if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice || !s->cur_pic.f || s->cur_pic.field_picture ) return 0; return 1; } /** * Add a slice. * @param endx x component of the last macroblock, can be -1 * for the last of the previous line * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is * assumed that no earlier end or error of the same type occurred */ void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status) { const int start_i = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1); const int end_i = av_clip(endx + endy * s->mb_width, 0, s->mb_num); const int start_xy = s->mb_index2xy[start_i]; const int end_xy = s->mb_index2xy[end_i]; int mask = -1; if (s->avctx->hwaccel && s->avctx->hwaccel->decode_slice) return; if (start_i > end_i || start_xy > end_xy) { av_log(s->avctx, AV_LOG_ERROR, "internal error, slice end before start\n"); return; } if (!s->avctx->error_concealment) return; mask &= ~VP_START; if (status & (ER_AC_ERROR | ER_AC_END)) { mask &= ~(ER_AC_ERROR | ER_AC_END); atomic_fetch_add(&s->error_count, start_i - end_i - 1); } if (status & (ER_DC_ERROR | ER_DC_END)) { mask &= ~(ER_DC_ERROR | ER_DC_END); atomic_fetch_add(&s->error_count, start_i - end_i - 1); } if (status & (ER_MV_ERROR | ER_MV_END)) { mask &= ~(ER_MV_ERROR | ER_MV_END); atomic_fetch_add(&s->error_count, start_i - end_i - 1); } if (status & ER_MB_ERROR) { s->error_occurred = 1; atomic_store(&s->error_count, INT_MAX); } if (mask == ~0x7F) { memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t)); } else { int i; for (i = start_xy; i < end_xy; i++) s->error_status_table[i] &= mask; } if (end_i == s->mb_num) atomic_store(&s->error_count, INT_MAX); else { s->error_status_table[end_xy] &= mask; s->error_status_table[end_xy] |= status; } s->error_status_table[start_xy] |= VP_START; if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) && er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) { int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]]; prev_status &= ~ VP_START; if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END)) { s->error_occurred = 1; atomic_store(&s->error_count, INT_MAX); } } } void ff_er_frame_end(ERContext *s) { int *linesize = NULL; int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error; int distance; int threshold_part[4] = { 100, 100, 100 }; int threshold = 50; int is_intra_likely; int size = s->b8_stride * 2 * s->mb_height; /* We do not support ER of field pictures yet, * though it should not crash if enabled. 
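 * (er_supported() above already refuses field pictures, so this note only
 * matters if that guard is ever relaxed.)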
*/ if (!s->avctx->error_concealment || !atomic_load(&s->error_count) || s->avctx->lowres || !er_supported(s) || atomic_load(&s->error_count) == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom)) { return; } linesize = s->cur_pic.f->linesize; for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int status = s->error_status_table[mb_x + (s->mb_height - 1) * s->mb_stride]; if (status != 0x7F) break; } if ( mb_x == s->mb_width && s->avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && (FFALIGN(s->avctx->height, 16)&16) && atomic_load(&s->error_count) == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom + 1) ) { av_log(s->avctx, AV_LOG_DEBUG, "ignoring last missing slice\n"); return; } if (s->last_pic.f) { if (s->last_pic.f->width != s->cur_pic.f->width || s->last_pic.f->height != s->cur_pic.f->height || s->last_pic.f->format != s->cur_pic.f->format) { av_log(s->avctx, AV_LOG_WARNING, "Cannot use previous picture in error concealment\n"); memset(&s->last_pic, 0, sizeof(s->last_pic)); } } if (s->next_pic.f) { if (s->next_pic.f->width != s->cur_pic.f->width || s->next_pic.f->height != s->cur_pic.f->height || s->next_pic.f->format != s->cur_pic.f->format) { av_log(s->avctx, AV_LOG_WARNING, "Cannot use next picture in error concealment\n"); memset(&s->next_pic, 0, sizeof(s->next_pic)); } } if (!s->cur_pic.motion_val[0] || !s->cur_pic.ref_index[0]) { av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n"); for (i = 0; i < 2; i++) { s->ref_index_buf[i] = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t)); s->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t)); if (!s->ref_index_buf[i] || !s->motion_val_buf[i]) break; s->cur_pic.ref_index[i] = s->ref_index_buf[i]->data; s->cur_pic.motion_val[i] = (int16_t (*)[2])s->motion_val_buf[i]->data + 4; } if (i < 2) { for (i = 0; i < 2; i++) { av_buffer_unref(&s->ref_index_buf[i]); av_buffer_unref(&s->motion_val_buf[i]); s->cur_pic.ref_index[i] = NULL; s->cur_pic.motion_val[i] = NULL; } return; } } if (s->avctx->debug & FF_DEBUG_ER) { for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int status = s->error_status_table[mb_x + mb_y * s->mb_stride]; av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status); } av_log(s->avctx, AV_LOG_DEBUG, "\n"); } } #if 1 /* handle overlapping slices */ for (error_type = 1; error_type <= 3; error_type++) { int end_ok = 0; for (i = s->mb_num - 1; i >= 0; i--) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & (1 << error_type)) end_ok = 1; if (error & (8 << error_type)) end_ok = 1; if (!end_ok) s->error_status_table[mb_xy] |= 1 << error_type; if (error & VP_START) end_ok = 0; } } #endif #if 1 /* handle slices with partitions of different length */ if (s->partitioned_frame) { int end_ok = 0; for (i = s->mb_num - 1; i >= 0; i--) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & ER_AC_END) end_ok = 0; if ((error & ER_MV_END) || (error & ER_DC_END) || (error & ER_AC_ERROR)) end_ok = 1; if (!end_ok) s->error_status_table[mb_xy]|= ER_AC_ERROR; if (error & VP_START) end_ok = 0; } } #endif /* handle missing slices */ if (s->avctx->err_recognition & AV_EF_EXPLODE) { int end_ok = 1; // FIXME + 100 hack for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) { const int mb_xy = s->mb_index2xy[i]; int error1 = s->error_status_table[mb_xy]; int error2 = s->error_status_table[s->mb_index2xy[i + 1]]; if (error1 & VP_START) end_ok = 1; if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) 
&& error1 != (VP_START | ER_MB_ERROR | ER_MB_END) && ((error1 & ER_AC_END) || (error1 & ER_DC_END) || (error1 & ER_MV_END))) { // end & uninit end_ok = 0; } if (!end_ok) s->error_status_table[mb_xy] |= ER_MB_ERROR; } } #if 1 /* backward mark errors */ distance = 9999999; for (error_type = 1; error_type <= 3; error_type++) { for (i = s->mb_num - 1; i >= 0; i--) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (!s->mbskip_table || !s->mbskip_table[mb_xy]) // FIXME partition specific distance++; if (error & (1 << error_type)) distance = 0; if (s->partitioned_frame) { if (distance < threshold_part[error_type - 1]) s->error_status_table[mb_xy] |= 1 << error_type; } else { if (distance < threshold) s->error_status_table[mb_xy] |= 1 << error_type; } if (error & VP_START) distance = 9999999; } } #endif /* forward mark errors */ error = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int old_error = s->error_status_table[mb_xy]; if (old_error & VP_START) { error = old_error & ER_MB_ERROR; } else { error |= old_error & ER_MB_ERROR; s->error_status_table[mb_xy] |= error; } } #if 1 /* handle not partitioned case */ if (!s->partitioned_frame) { for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & ER_MB_ERROR) error |= ER_MB_ERROR; s->error_status_table[mb_xy] = error; } } #endif dc_error = ac_error = mv_error = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & ER_DC_ERROR) dc_error++; if (error & ER_AC_ERROR) ac_error++; if (error & ER_MV_ERROR) mv_error++; } av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors in %c frame\n", dc_error, ac_error, mv_error, av_get_picture_type_char(s->cur_pic.f->pict_type)); is_intra_likely = is_intra_more_likely(s); /* set unknown mb-type to most likely */ for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR))) continue; if (is_intra_likely) s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4; else s->cur_pic.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0; } // change inter to intra blocks if no reference frames are available if (!(s->last_pic.f && s->last_pic.f->data[0]) && !(s->next_pic.f && s->next_pic.f->data[0])) for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; if (!IS_INTRA(s->cur_pic.mb_type[mb_xy])) s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4; } /* handle inter blocks with damaged AC */ for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic.mb_type[mb_xy]; const int dir = !(s->last_pic.f && s->last_pic.f->data[0]); const int mv_dir = dir ? 
MV_DIR_BACKWARD : MV_DIR_FORWARD; int mv_type; int error = s->error_status_table[mb_xy]; if (IS_INTRA(mb_type)) continue; // intra if (error & ER_MV_ERROR) continue; // inter with damaged MV if (!(error & ER_AC_ERROR)) continue; // undamaged inter if (IS_8X8(mb_type)) { int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride; int j; mv_type = MV_TYPE_8X8; for (j = 0; j < 4; j++) { s->mv[0][j][0] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0]; s->mv[0][j][1] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1]; } } else { mv_type = MV_TYPE_16X16; s->mv[0][0][0] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0]; s->mv[0][0][1] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1]; } s->decode_mb(s->opaque, 0 /* FIXME H.264 partitioned slices need this set */, mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0); } } /* guess MVs */ if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_B) { for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int xy = mb_x * 2 + mb_y * 2 * s->b8_stride; const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic.mb_type[mb_xy]; int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; int error = s->error_status_table[mb_xy]; if (IS_INTRA(mb_type)) continue; if (!(error & ER_MV_ERROR)) continue; // inter with undamaged MV if (!(error & ER_AC_ERROR)) continue; // undamaged inter if (!(s->last_pic.f && s->last_pic.f->data[0])) mv_dir &= ~MV_DIR_FORWARD; if (!(s->next_pic.f && s->next_pic.f->data[0])) mv_dir &= ~MV_DIR_BACKWARD; if (s->pp_time) { int time_pp = s->pp_time; int time_pb = s->pb_time; av_assert0(s->avctx->codec_id != AV_CODEC_ID_H264); ff_thread_await_progress(s->next_pic.tf, mb_y, 0); s->mv[0][0][0] = s->next_pic.motion_val[0][xy][0] * time_pb / time_pp; s->mv[0][0][1] = s->next_pic.motion_val[0][xy][1] * time_pb / time_pp; s->mv[1][0][0] = s->next_pic.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp; s->mv[1][0][1] = s->next_pic.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp; } else { s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mv[1][0][0] = 0; s->mv[1][0][1] = 0; } s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); } } } else guess_mv(s); /* the filters below manipulate raw image, skip them */ if (CONFIG_XVMC && s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) goto ec_clean; /* fill DC for inter blocks */ for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int dc, dcu, dcv, y, n; int16_t *dc_ptr; uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic.mb_type[mb_xy]; // error = s->error_status_table[mb_xy]; if (IS_INTRA(mb_type) && s->partitioned_frame) continue; // if (error & ER_MV_ERROR) // continue; // inter data damaged FIXME is this good? 
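            /* Average each reconstructed 8x8 block into dc_val[]; guess_dc()
             * and put_dc() later use these per-block means to paint flat
             * replacements for macroblocks whose DC was lost. */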
dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1]; dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2]; dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride]; for (n = 0; n < 4; n++) { dc = 0; for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) dc += dest_y[x + (n & 1) * 8 + (y + (n >> 1) * 8) * linesize[0]]; } dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3; } if (!s->cur_pic.f->data[2]) continue; dcu = dcv = 0; for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) { dcu += dest_cb[x + y * linesize[1]]; dcv += dest_cr[x + y * linesize[2]]; } } s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3; s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3; } } #if 1 /* guess DC for damaged blocks */ guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1); guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0); guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0); #endif /* filter luma DC */ filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride); #if 1 /* render DC only intra */ for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic.mb_type[mb_xy]; int error = s->error_status_table[mb_xy]; if (IS_INTER(mb_type)) continue; if (!(error & ER_AC_ERROR)) continue; // undamaged dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1]; dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2]; if (!s->cur_pic.f->data[2]) dest_cb = dest_cr = NULL; put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y); } } #endif if (s->avctx->error_concealment & FF_EC_DEBLOCK) { /* filter horizontal block boundaries */ h_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2, s->mb_height * 2, linesize[0], 1); /* filter vertical block boundaries */ v_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2, s->mb_height * 2, linesize[0], 1); if (s->cur_pic.f->data[2]) { h_block_filter(s, s->cur_pic.f->data[1], s->mb_width, s->mb_height, linesize[1], 0); h_block_filter(s, s->cur_pic.f->data[2], s->mb_width, s->mb_height, linesize[2], 0); v_block_filter(s, s->cur_pic.f->data[1], s->mb_width, s->mb_height, linesize[1], 0); v_block_filter(s, s->cur_pic.f->data[2], s->mb_width, s->mb_height, linesize[2], 0); } } ec_clean: /* clean a few tables */ for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (s->mbskip_table && s->cur_pic.f->pict_type != AV_PICTURE_TYPE_B && (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) { s->mbskip_table[mb_xy] = 0; } if (s->mbintra_table) s->mbintra_table[mb_xy] = 1; } for (i = 0; i < 2; i++) { av_buffer_unref(&s->ref_index_buf[i]); av_buffer_unref(&s->motion_val_buf[i]); s->cur_pic.ref_index[i] = NULL; s->cur_pic.motion_val[i] = NULL; } memset(&s->cur_pic, 0, sizeof(ERPicture)); memset(&s->last_pic, 0, sizeof(ERPicture)); memset(&s->next_pic, 0, sizeof(ERPicture)); }
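/*
 * A minimal standalone sketch (not part of the original decoder): the
 * fill-DC pass above stores, for each 8x8 block, the pixel sum rounded
 * at 1/8 scale via (dc + 4) >> 3, i.e. eight times the block average,
 * which matches the scale of the DC values kept in dc_val[] and later
 * consumed by guess_dc() and filter181(). block_dc8x8 and its
 * parameters are illustrative names, not FFmpeg API.
 */
#include <stdint.h>

static int block_dc8x8(const uint8_t *blk, int stride)
{
    int sum = 0;
    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 8; x++)
            sum += blk[x + y * stride];   /* sum of the 64 samples */
    return (sum + 4) >> 3;                /* rounded; 0..2040 for 8-bit input */
}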
./CrossVul/dataset_final_sorted/CWE-617/c/good_219_0
crossvul-cpp_data_good_1771_4
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #ifdef ENABLE_SONMP #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> static struct sonmp_chassis sonmp_chassis_types[] = { {1, "unknown (via SONMP)"}, {2, "Nortel 3000"}, {3, "Nortel 3030"}, {4, "Nortel 2310"}, {5, "Nortel 2810"}, {6, "Nortel 2912"}, {7, "Nortel 2914"}, {8, "Nortel 271x"}, {9, "Nortel 2813"}, {10, "Nortel 2814"}, {11, "Nortel 2915"}, {12, "Nortel 5000"}, {13, "Nortel 2813SA"}, {14, "Nortel 2814SA"}, {15, "Nortel 810M"}, {16, "Nortel EtherCell"}, {17, "Nortel 5005"}, {18, "Alcatel Ethernet workgroup conc."}, {20, "Nortel 2715SA"}, {21, "Nortel 2486"}, {22, "Nortel 28000 series"}, {23, "Nortel 23000 series"}, {24, "Nortel 5DN00x series"}, {25, "BayStack Ethernet"}, {26, "Nortel 23100 series"}, {27, "Nortel 100Base-T Hub"}, {28, "Nortel 3000 Fast Ethernet"}, {29, "Nortel Orion switch"}, {30, "unknown"}, {31, "Nortel DDS "}, {32, "Nortel Centillion"}, {33, "Nortel Centillion"}, {34, "Nortel Centillion"}, {35, "BayStack 301"}, {36, "BayStack TokenRing Hub"}, {37, "Nortel FVC Multimedia Switch"}, {38, "Nortel Switch Node"}, {39, "BayStack 302 Switch"}, {40, "BayStack 350 Switch"}, {41, "BayStack 150 Ethernet Hub"}, {42, "Nortel Centillion 50N switch"}, {43, "Nortel Centillion 50T switch"}, {44, "BayStack 303 and 304 Switches"}, {45, "BayStack 200 Ethernet Hub"}, {46, "BayStack 250 10/100 Ethernet Hub"}, {48, "BayStack 450 10/100/1000 Switches"}, {49, "BayStack 410 10/100 Switches"}, {50, "Nortel Ethernet Routing 1200 L3 Switch"}, {51, "Nortel Ethernet Routing 1250 L3 Switch"}, {52, "Nortel Ethernet Routing 1100 L3 Switch"}, {53, "Nortel Ethernet Routing 1150 L3 Switch"}, {54, "Nortel Ethernet Routing 1050 L3 Switch"}, {55, "Nortel Ethernet Routing 1051 L3 Switch"}, {56, "Nortel Ethernet Routing 8610 L3 Switch"}, {57, "Nortel Ethernet Routing 8606 L3 Switch"}, {58, "Nortel Ethernet Routing Switch 8010"}, {59, "Nortel Ethernet Routing Switch 8006"}, {60, "BayStack 670 wireless access point"}, {61, "Nortel Ethernet Routing Switch 740 "}, {62, "Nortel Ethernet Routing Switch 750 "}, {63, "Nortel Ethernet Routing Switch 790"}, {64, "Nortel Business Policy Switch 2000 10/100 Switches"}, {65, "Nortel Ethernet Routing 8110 L2 Switch"}, {66, "Nortel Ethernet Routing 8106 L2 Switch"}, {67, "BayStack 3580 Gig Switch"}, {68, "BayStack 10 Power Supply Unit"}, {69, "BayStack 420 10/100 Switch"}, {70, "OPTera Metro 1200 Ethernet Service Module"}, {71, "Nortel Ethernet Routing Switch 8010co"}, {72, "Nortel Ethernet Routing 8610co L3 switch"}, {73, "Nortel Ethernet Routing 8110co L2 switch"}, {74, "Nortel Ethernet Routing 8003"}, {75, "Nortel Ethernet Routing 8603 L3 switch"}, {76, "Nortel Ethernet Routing 8103 L2 
switch"}, {77, "BayStack 380 10/100/1000 Switch"}, {78, "Nortel Ethernet Switch 470-48T"}, {79, "OPTera Metro 1450 Ethernet Service Module"}, {80, "OPTera Metro 1400 Ethernet Service Module"}, {81, "Alteon Switch Family"}, {82, "Ethernet Switch 460-24T-PWR"}, {83, "OPTera Metro 8010 OPM L2 Switch"}, {84, "OPTera Metro 8010co OPM L2 Switch"}, {85, "OPTera Metro 8006 OPM L2 Switch"}, {86, "OPTera Metro 8003 OPM L2 Switch"}, {87, "Alteon 180e"}, {88, "Alteon AD3"}, {89, "Alteon 184"}, {90, "Alteon AD4"}, {91, "Nortel Ethernet Routing 1424 L3 switch"}, {92, "Nortel Ethernet Routing 1648 L3 switch"}, {93, "Nortel Ethernet Routing 1612 L3 switch"}, {94, "Nortel Ethernet Routing 1624 L3 switch "}, {95, "BayStack 380-24F Fiber 1000 Switch"}, {96, "Nortel Ethernet Routing Switch 5510-24T"}, {97, "Nortel Ethernet Routing Switch 5510-48T"}, {98, "Nortel Ethernet Switch 470-24T"}, {99, "Nortel Networks Wireless LAN Access Point 2220"}, {100, "Ethernet Routing RBS 2402 L3 switch"}, {101, "Alteon Application Switch 2424 "}, {102, "Alteon Application Switch 2224 "}, {103, "Alteon Application Switch 2208 "}, {104, "Alteon Application Switch 2216"}, {105, "Alteon Application Switch 3408"}, {106, "Alteon Application Switch 3416"}, {107, "Nortel Networks Wireless LAN SecuritySwitch 2250"}, {108, "Ethernet Switch 425-48T"}, {109, "Ethernet Switch 425-24T"}, {110, "Nortel Networks Wireless LAN Access Point 2221"}, {111, "Nortel Metro Ethernet Service Unit 24-T SPF switch"}, {112, "Nortel Metro Ethernet Service Unit 24-T LX DC switch"}, {113, "Nortel Ethernet Routing Switch 8300 10-slot chassis"}, {114, "Nortel Ethernet Routing Switch 8300 6-slot chassis"}, {115, "Nortel Ethernet Routing Switch 5520-24T-PWR"}, {116, "Nortel Ethernet Routing Switch 5520-48T-PWR"}, {117, "Nortel Networks VPN Gateway 3050"}, {118, "Alteon SSL 310 10/100"}, {119, "Alteon SSL 310 10/100 Fiber"}, {120, "Alteon SSL 310 10/100 FIPS"}, {121, "Alteon SSL 410 10/100/1000"}, {122, "Alteon SSL 410 10/100/1000 Fiber"}, {123, "Alteon Application Switch 2424-SSL"}, {124, "Nortel Ethernet Switch 325-24T"}, {125, "Nortel Ethernet Switch 325-24G"}, {126, "Nortel Networks Wireless LAN Access Point 2225"}, {127, "Nortel Networks Wireless LAN SecuritySwitch 2270"}, {128, "Nortel 24-port Ethernet Switch 470-24T-PWR"}, {129, "Nortel 48-port Ethernet Switch 470-48T-PWR"}, {130, "Nortel Ethernet Routing Switch 5530-24TFD"}, {131, "Nortel Ethernet Switch 3510-24T"}, {132, "Nortel Metro Ethernet Service Unit 12G AC L3 switch"}, {133, "Nortel Metro Ethernet Service Unit 12G DC L3 switch"}, {134, "Nortel Secure Access Switch"}, {135, "Networks VPN Gateway 3070"}, {136, "OPTera Metro 3500"}, {137, "SMB BES 1010 24T"}, {138, "SMB BES 1010 48T"}, {139, "SMB BES 1020 24T PWR"}, {140, "SMB BES 1020 48T PWR"}, {141, "SMB BES 2010 24T"}, {142, "SMB BES 2010 48T"}, {143, "SMB BES 2020 24T PWR"}, {144, "SMB BES 2020 48T PWR"}, {145, "SMB BES 110 24T"}, {146, "SMB BES 110 48T"}, {147, "SMB BES 120 24T PWR"}, {148, "SMB BES 120 48T PWR"}, {149, "SMB BES 210 24T"}, {150, "SMB BES 210 48T"}, {151, "SMB BES 220 24T PWR"}, {152, "SMB BES 220 48T PWR"}, {153, "OME 6500"}, {0, "unknown (via SONMP)"}, }; int sonmp_send(struct lldpd *global, struct lldpd_hardware *hardware) { const u_int8_t mcastaddr[] = SONMP_MULTICAST_ADDR; const u_int8_t llcorg[] = LLC_ORG_NORTEL; struct lldpd_chassis *chassis; struct lldpd_mgmt *mgmt; u_int8_t *packet, *pos, *pos_pid, *end; int length; struct in_addr address; log_debug("sonmp", "send SONMP PDU to %s", hardware->h_ifname); chassis = 
hardware->h_lport.p_chassis; length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( /* SONMP multicast address as target */ POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && /* Source MAC address */ POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && /* SONMP frame is of fixed size */ POKE_UINT16(SONMP_SIZE))) goto toobig; /* LLC header */ if (!( /* DSAP and SSAP */ POKE_UINT8(0xaa) && POKE_UINT8(0xaa) && /* Control field */ POKE_UINT8(0x03) && /* ORG */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_SAVE(pos_pid) && /* We will modify PID later to create a new frame */ POKE_UINT16(LLC_PID_SONMP_HELLO))) goto toobig; address.s_addr = htonl(INADDR_ANY); TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { if (mgmt->m_family == LLDPD_AF_IPV4) { address.s_addr = mgmt->m_addr.inet.s_addr; } break; } /* SONMP */ if (!( /* Our IP address */ POKE_BYTES(&address, sizeof(struct in_addr)) && /* Segment on three bytes, we don't have slots, so we skip the first two bytes */ POKE_UINT16(0) && POKE_UINT8(hardware->h_ifindex) && POKE_UINT8(1) && /* Chassis: Other */ POKE_UINT8(12) && /* Back: Ethernet, Fast Ethernet and Gigabit */ POKE_UINT8(SONMP_TOPOLOGY_NEW) && /* Should work. We have no state */ POKE_UINT8(1) && /* Links: Dunno what it is */ POKE_SAVE(end))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("sonmp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } POKE_RESTORE(pos_pid); /* Modify LLC PID */ (void)POKE_UINT16(LLC_PID_SONMP_FLATNET); POKE_RESTORE(packet); /* Go to the beginning */ PEEK_DISCARD(ETHER_ADDR_LEN - 1); /* Modify the last byte of the MAC address */ (void)POKE_UINT8(1); if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("sonmp", "unable to send second SONMP packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } free(packet); hardware->h_tx_cnt++; return 0; toobig: free(packet); return -1; } int sonmp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { const u_int8_t mcastaddr[] = SONMP_MULTICAST_ADDR; struct lldpd_chassis *chassis; struct lldpd_port *port; struct lldpd_mgmt *mgmt; int length, i; u_int8_t *pos; u_int8_t seg[3], rchassis; struct in_addr address; log_debug("sonmp", "decode SONMP PDU from %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("sonmp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("sonmp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < SONMP_SIZE) { log_warnx("sonmp", "too short SONMP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(mcastaddr, sizeof(mcastaddr)) != 0) /* There are two multicast addresses. We handle only one of * them.
*/ goto malformed; /* We skip to LLC PID */ PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; PEEK_DISCARD(6); if (PEEK_UINT16 != LLC_PID_SONMP_HELLO) { log_debug("sonmp", "incorrect LLC protocol ID received for SONMP on %s", hardware->h_ifname); goto malformed; } chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_ADDR; if ((chassis->c_id = calloc(1, sizeof(struct in_addr) + 1)) == NULL) { log_warn("sonmp", "unable to allocate memory for chassis id on %s", hardware->h_ifname); goto malformed; } chassis->c_id_len = sizeof(struct in_addr) + 1; chassis->c_id[0] = 1; PEEK_BYTES(&address, sizeof(struct in_addr)); memcpy(chassis->c_id + 1, &address, sizeof(struct in_addr)); if (asprintf(&chassis->c_name, "%s", inet_ntoa(address)) == -1) { log_warnx("sonmp", "unable to write chassis name for %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(seg, sizeof(seg)); rchassis = PEEK_UINT8; for (i=0; sonmp_chassis_types[i].type != 0; i++) { if (sonmp_chassis_types[i].type == rchassis) break; } if (asprintf(&chassis->c_descr, "%s", sonmp_chassis_types[i].description) == -1) { log_warnx("sonmp", "unable to write chassis description for %s", hardware->h_ifname); goto malformed; } mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &address, sizeof(struct in_addr), 0); if (mgmt == NULL) { if (errno == ENOMEM) log_warn("sonmp", "unable to allocate memory for management address"); else log_warn("sonmp", "too large management address received on %s", hardware->h_ifname); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); chassis->c_ttl = cfg?(cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold): LLDPD_TTL; port->p_id_subtype = LLDP_PORTID_SUBTYPE_LOCAL; if (asprintf(&port->p_id, "%02x-%02x-%02x", seg[0], seg[1], seg[2]) == -1) { log_warn("sonmp", "unable to allocate memory for port id on %s", hardware->h_ifname); goto malformed; } port->p_id_len = strlen(port->p_id); /* Port description depends on the number of segments */ if ((seg[0] == 0) && (seg[1] == 0)) { if (asprintf(&port->p_descr, "port %d", seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } else if (seg[0] == 0) { if (asprintf(&port->p_descr, "port %d/%d", seg[1], seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } else { if (asprintf(&port->p_descr, "port %x:%x:%x", seg[0], seg[1], seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } *newchassis = chassis; *newport = port; return 1; malformed: lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #endif /* ENABLE_SONMP */
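/*
 * A standalone sketch of the serialization pattern used by sonmp_send()
 * above: the POKE_* macros from frame.h advance a write cursor and
 * evaluate to false when the buffer would overflow, so the caller can
 * bail out to its "toobig" label. This is not lldpd code; buf_cursor,
 * poke_bytes and poke_uint16 are invented names, and the exact frame.h
 * semantics may differ in detail.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct buf_cursor { uint8_t *pos, *end; };

static int poke_bytes(struct buf_cursor *b, const void *src, size_t len)
{
	if ((size_t)(b->end - b->pos) < len)
		return 0;                 /* would overflow: report failure */
	memcpy(b->pos, src, len);
	b->pos += len;
	return 1;
}

static int poke_uint16(struct buf_cursor *b, uint16_t v)
{
	const uint8_t be[2] = { (uint8_t)(v >> 8), (uint8_t)v }; /* network order */
	return poke_bytes(b, be, sizeof(be));
}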
./CrossVul/dataset_final_sorted/CWE-617/c/good_1771_4
crossvul-cpp_data_good_1771_1
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* We also support FDP, which is very similar to CDPv1 */ #include "lldpd.h" #include "frame.h" #if defined (ENABLE_CDP) || defined (ENABLE_FDP) #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> static int cdp_send(struct lldpd *global, struct lldpd_hardware *hardware, int version) { const char *platform = "Unknown"; struct lldpd_chassis *chassis; struct lldpd_mgmt *mgmt; struct lldpd_port *port; u_int8_t mcastaddr[] = CDP_MULTICAST_ADDR; u_int8_t llcorg[] = LLC_ORG_CISCO; #ifdef ENABLE_FDP char *capstr; #endif u_int16_t checksum; int length, i; u_int32_t cap; u_int8_t *packet; u_int8_t *pos, *pos_len_eh, *pos_llc, *pos_cdp, *pos_checksum, *tlv, *end; log_debug("cdp", "send CDP frame on %s", hardware->h_ifname); port = &(hardware->h_lport); chassis = port->p_chassis; #ifdef ENABLE_FDP if (version == 0) { /* With FDP, change multicast address and LLC PID */ const u_int8_t fdpmcastaddr[] = FDP_MULTICAST_ADDR; const u_int8_t fdpllcorg[] = LLC_ORG_FOUNDRY; memcpy(mcastaddr, fdpmcastaddr, sizeof(mcastaddr)); memcpy(llcorg, fdpllcorg, sizeof(llcorg)); } #endif length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && POKE_SAVE(pos_len_eh) && /* We compute the len later */ POKE_UINT16(0))) goto toobig; /* LLC */ if (!( POKE_SAVE(pos_llc) && POKE_UINT8(0xaa) && /* DSAP */ POKE_UINT8(0xaa) && /* SSAP */ POKE_UINT8(0x03) && /* Control field */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_UINT16(LLC_PID_CDP))) goto toobig; /* CDP header */ if (!( POKE_SAVE(pos_cdp) && POKE_UINT8((version == 0)?1:version) && POKE_UINT8(chassis->c_ttl) && POKE_SAVE(pos_checksum) && /* Save checksum position */ POKE_UINT16(0))) goto toobig; /* Chassis ID */ if (!( POKE_START_CDP_TLV(CDP_TLV_CHASSIS) && (chassis->c_name? POKE_BYTES(chassis->c_name, strlen(chassis->c_name)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Addresses */ /* See: * http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#xtocid12 * * It seems that Cisco implies that CDP supports IPv6 using * 802.2 address format with 0xAAAA03 0x000000 0x0800, but * 0x0800 is the Ethernet protocol type for IPv4. Therefore, * we support only IPv4.
*/ i = 0; TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) if (mgmt->m_family == LLDPD_AF_IPV4) i++; if (i > 0) { if (!( POKE_START_CDP_TLV(CDP_TLV_ADDRESSES) && POKE_UINT32(i))) goto toobig; TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { switch (mgmt->m_family) { case LLDPD_AF_IPV4: if (!( POKE_UINT8(1) && /* Type: NLPID */ POKE_UINT8(1) && /* Length: 1 */ POKE_UINT8(CDP_ADDRESS_PROTO_IP) && /* IP */ POKE_UINT16(sizeof(struct in_addr)) && /* Address length */ POKE_BYTES(&mgmt->m_addr, sizeof(struct in_addr)))) goto toobig; break; } } if (!(POKE_END_CDP_TLV)) goto toobig; } /* Port ID */ if (!( POKE_START_CDP_TLV(CDP_TLV_PORT) && (hardware->h_lport.p_descr? POKE_BYTES(hardware->h_lport.p_descr, strlen(hardware->h_lport.p_descr)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Capabilities */ if (version != 0) { cap = 0; if (chassis->c_cap_enabled & LLDP_CAP_ROUTER) cap |= CDP_CAP_ROUTER; if (chassis->c_cap_enabled & LLDP_CAP_BRIDGE) cap |= CDP_CAP_SWITCH; cap |= CDP_CAP_HOST; if (!( POKE_START_CDP_TLV(CDP_TLV_CAPABILITIES) && POKE_UINT32(cap) && POKE_END_CDP_TLV)) goto toobig; #ifdef ENABLE_FDP } else { /* With FDP, it seems that a string is used in place of an int */ if (chassis->c_cap_enabled & LLDP_CAP_ROUTER) capstr = "Router"; else if (chassis->c_cap_enabled & LLDP_CAP_BRIDGE) capstr = "Switch"; else if (chassis->c_cap_enabled & LLDP_CAP_REPEATER) capstr = "Bridge"; else capstr = "Host"; if (!( POKE_START_CDP_TLV(CDP_TLV_CAPABILITIES) && POKE_BYTES(capstr, strlen(capstr)) && POKE_END_CDP_TLV)) goto toobig; #endif } /* Native VLAN */ #ifdef ENABLE_DOT1 if (version >=2 && hardware->h_lport.p_pvid != 0) { if (!( POKE_START_CDP_TLV(CDP_TLV_NATIVEVLAN) && POKE_UINT16(hardware->h_lport.p_pvid) && POKE_END_CDP_TLV)) goto toobig; } #endif /* Software version */ if (!( POKE_START_CDP_TLV(CDP_TLV_SOFTWARE) && (chassis->c_descr? POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Platform */ if (global && global->g_config.c_platform) platform = global->g_config.c_platform; if (!( POKE_START_CDP_TLV(CDP_TLV_PLATFORM) && POKE_BYTES(platform, strlen(platform)) && POKE_END_CDP_TLV)) goto toobig; #ifdef ENABLE_LLDPMED /* Power use */ if ((version >= 2) && port->p_med_cap_enabled && (port->p_med_power.source != LLDP_MED_POW_SOURCE_LOCAL) && (port->p_med_power.val > 0) && (port->p_med_power.val <= 655)) { if (!( POKE_START_CDP_TLV(CDP_TLV_POWER_CONSUMPTION) && POKE_UINT16(port->p_med_power.val * 100) && POKE_END_CDP_TLV)) goto toobig; } #endif (void)POKE_SAVE(end); /* Compute len and checksum */ POKE_RESTORE(pos_len_eh); if (!(POKE_UINT16(end - pos_llc))) goto toobig; checksum = frame_checksum(pos_cdp, end - pos_cdp, (version != 0) ? 
1 : 0); POKE_RESTORE(pos_checksum); if (!(POKE_UINT16(checksum))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("cdp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } hardware->h_tx_cnt++; free(packet); return 0; toobig: free(packet); return -1; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_len < (x)) { \ log_warnx("cdp", name " CDP/FDP TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) /* cdp_decode also decodes FDP */ int cdp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; struct lldpd_mgmt *mgmt; struct in_addr addr; #if 0 u_int16_t cksum; #endif u_int8_t *software = NULL, *platform = NULL; int software_len = 0, platform_len = 0, proto, version, nb, caps; const unsigned char cdpaddr[] = CDP_MULTICAST_ADDR; #ifdef ENABLE_FDP const unsigned char fdpaddr[] = FDP_MULTICAST_ADDR; int fdp = 0; #endif u_int8_t *pos, *tlv, *pos_address, *pos_next_address; int length, len_eth, tlv_type, tlv_len, addresses_len, address_len; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan; #endif log_debug("cdp", "decode CDP frame received on %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("cdp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("cdp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) /* Ethernet */ + 8 /* LLC */ + 4 /* CDP header */) { log_warn("cdp", "too short CDP/FDP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(cdpaddr, sizeof(cdpaddr)) != 0) { #ifdef ENABLE_FDP PEEK_RESTORE((u_int8_t*)frame); if (PEEK_CMP(fdpaddr, sizeof(fdpaddr)) == 0) fdp = 1; else { #endif log_info("cdp", "frame not targeted at CDP/FDP multicast address received on %s", hardware->h_ifname); goto malformed; #ifdef ENABLE_FDP } #endif } PEEK_DISCARD(ETHER_ADDR_LEN); /* Don't care about the source address */ len_eth = PEEK_UINT16; if (len_eth > length) { log_warnx("cdp", "incorrect 802.3 frame size reported on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(6); /* Skip beginning of LLC */ proto = PEEK_UINT16; if (proto != LLC_PID_CDP) { if ((proto != LLC_PID_DRIP) && (proto != LLC_PID_PAGP) && (proto != LLC_PID_PVSTP) && (proto != LLC_PID_UDLD) && (proto != LLC_PID_VTP) && (proto != LLC_PID_DTP) && (proto != LLC_PID_STP)) log_debug("cdp", "incorrect LLC protocol ID received on %s", hardware->h_ifname); goto malformed; } #if 0 /* Check checksum */ cksum = frame_checksum(pos, len_eth - 8, #ifdef ENABLE_FDP !fdp /* fdp = 0 -> cisco checksum */ #else 1 /* cisco checksum */ #endif ); if (cksum != 0) { log_info("cdp", "incorrect CDP/FDP checksum for frame received on %s (%d)", hardware->h_ifname, cksum); goto malformed; } #endif /* Check version */ version = PEEK_UINT8; if ((version != 1) && (version != 2)) { log_warnx("cdp", "incorrect CDP/FDP version (%d) for frame received on %s", version, hardware->h_ifname); goto malformed; } chassis->c_ttl = PEEK_UINT8; /* TTL */ PEEK_DISCARD_UINT16; /* Checksum, already checked */ while (length) { if (length < 4) { log_warnx("cdp", "CDP/FDP TLV header is too 
large for " "frame received on %s", hardware->h_ifname); goto malformed; } tlv_type = PEEK_UINT16; tlv_len = PEEK_UINT16 - 4; (void)PEEK_SAVE(tlv); if ((tlv_len < 0) || (length < tlv_len)) { log_warnx("cdp", "incorrect size in CDP/FDP TLV header for frame " "received on %s", hardware->h_ifname); goto malformed; } switch (tlv_type) { case CDP_TLV_CHASSIS: if ((chassis->c_name = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis name"); goto malformed; } PEEK_BYTES(chassis->c_name, tlv_len); chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LOCAL; if ((chassis->c_id = (char *)malloc(tlv_len)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis ID"); goto malformed; } memcpy(chassis->c_id, chassis->c_name, tlv_len); chassis->c_id_len = tlv_len; break; case CDP_TLV_ADDRESSES: CHECK_TLV_SIZE(4, "Address"); addresses_len = tlv_len - 4; for (nb = PEEK_UINT32; nb > 0; nb--) { (void)PEEK_SAVE(pos_address); /* We first try to get the real length of the packet */ if (addresses_len < 2) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD_UINT8; addresses_len--; address_len = PEEK_UINT8; addresses_len--; if (addresses_len < address_len + 2) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(address_len); addresses_len -= address_len; address_len = PEEK_UINT16; addresses_len -= 2; if (addresses_len < address_len) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(address_len); (void)PEEK_SAVE(pos_next_address); /* Next, we go back and try to extract IPv4 address */ PEEK_RESTORE(pos_address); if ((PEEK_UINT8 == 1) && (PEEK_UINT8 == 1) && (PEEK_UINT8 == CDP_ADDRESS_PROTO_IP) && (PEEK_UINT16 == sizeof(struct in_addr))) { PEEK_BYTES(&addr, sizeof(struct in_addr)); mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &addr, sizeof(struct in_addr), 0); if (mgmt == NULL) { if (errno == ENOMEM) log_warn("cdp", "unable to allocate memory for management address"); else log_warn("cdp", "too large management address received on %s", hardware->h_ifname); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); } /* Go to the end of the address */ PEEK_RESTORE(pos_next_address); } break; case CDP_TLV_PORT: if (tlv_len == 0) { log_warn("cdp", "too short port description received"); goto malformed; } if ((port->p_descr = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for port description"); goto malformed; } PEEK_BYTES(port->p_descr, tlv_len); port->p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME; if ((port->p_id = (char *)calloc(1, tlv_len)) == NULL) { log_warn("cdp", "unable to allocate memory for port ID"); goto malformed; } memcpy(port->p_id, port->p_descr, tlv_len); port->p_id_len = tlv_len; break; case CDP_TLV_CAPABILITIES: #ifdef ENABLE_FDP if (fdp) { /* Capabilities are string with FDP */ if (!strncmp("Router", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_ROUTER; else if (!strncmp("Switch", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_BRIDGE; else if (!strncmp("Bridge", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_REPEATER; else chassis->c_cap_enabled = LLDP_CAP_STATION; chassis->c_cap_available = chassis->c_cap_enabled; break; } #endif CHECK_TLV_SIZE(4, "Capabilities"); caps = PEEK_UINT32; if (caps & CDP_CAP_ROUTER) chassis->c_cap_enabled |= LLDP_CAP_ROUTER; if (caps & 0x0e) 
chassis->c_cap_enabled |= LLDP_CAP_BRIDGE; if (chassis->c_cap_enabled == 0) chassis->c_cap_enabled = LLDP_CAP_STATION; chassis->c_cap_available = chassis->c_cap_enabled; break; case CDP_TLV_SOFTWARE: software_len = tlv_len; (void)PEEK_SAVE(software); break; case CDP_TLV_PLATFORM: platform_len = tlv_len; (void)PEEK_SAVE(platform); break; #ifdef ENABLE_DOT1 case CDP_TLV_NATIVEVLAN: CHECK_TLV_SIZE(2, "Native VLAN"); if ((vlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("cdp", "unable to alloc vlan " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } vlan->v_vid = port->p_pvid = PEEK_UINT16; if (asprintf(&vlan->v_name, "VLAN #%d", vlan->v_vid) == -1) { log_warn("cdp", "unable to alloc VLAN name for " "TLV received on %s", hardware->h_ifname); free(vlan); goto malformed; } TAILQ_INSERT_TAIL(&port->p_vlans, vlan, v_entries); break; #endif default: log_debug("cdp", "unknown CDP/FDP TLV type (%d) received on %s", ntohs(tlv_type), hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } PEEK_DISCARD(tlv + tlv_len - pos); } if (!software && platform) { if ((chassis->c_descr = (char *)calloc(1, platform_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, platform, platform_len); } else if (software && !platform) { if ((chassis->c_descr = (char *)calloc(1, software_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, software, software_len); } else if (software && platform) { #define CONCAT_PLATFORM " running on\n" if ((chassis->c_descr = (char *)calloc(1, software_len + platform_len + strlen(CONCAT_PLATFORM) + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, platform, platform_len); memcpy(chassis->c_descr + platform_len, CONCAT_PLATFORM, strlen(CONCAT_PLATFORM)); memcpy(chassis->c_descr + platform_len + strlen(CONCAT_PLATFORM), software, software_len); } if ((chassis->c_id == NULL) || (port->p_id == NULL) || (chassis->c_name == NULL) || (chassis->c_descr == NULL) || (port->p_descr == NULL) || (chassis->c_ttl == 0) || (chassis->c_cap_enabled == 0)) { log_warnx("cdp", "some mandatory CDP/FDP tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #ifdef ENABLE_CDP int cdpv1_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 1); } int cdpv2_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 2); } #endif #ifdef ENABLE_FDP int fdp_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 0); } #endif #ifdef ENABLE_CDP static int cdp_guess(char *pos, int length, int version) { const u_int8_t mcastaddr[] = CDP_MULTICAST_ADDR; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) /* Ethernet */ + 8 /* LLC */ + 4 /* CDP header */) return 0; if (PEEK_CMP(mcastaddr, ETHER_ADDR_LEN) != 0) return 0; PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; /* Ethernet */ PEEK_DISCARD(8); /* LLC */ return (PEEK_UINT8 == version); } int cdpv1_guess(char *frame, int len) { return cdp_guess(frame, len, 1); } int cdpv2_guess(char *frame, int len) { return cdp_guess(frame, len, 2); } #endif #endif /* defined (ENABLE_CDP) || 
defined (ENABLE_FDP) */
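/*
 * A minimal standalone sketch (not lldpd code) of the TLV iteration that
 * cdp_decode() above performs: each TLV carries a 16-bit type followed by
 * a 16-bit total length that includes the 4-byte header itself, and the
 * frame is rejected whenever a length field would run past the remaining
 * bytes. walk_cdp_tlvs is an invented name for illustration.
 */
#include <stdint.h>
#include <stddef.h>

static int walk_cdp_tlvs(const uint8_t *p, size_t remaining)
{
	while (remaining >= 4) {
		uint16_t type = (uint16_t)((p[0] << 8) | p[1]);
		uint16_t len  = (uint16_t)((p[2] << 8) | p[3]); /* includes header */
		if (len < 4 || len > remaining)
			return -1;            /* malformed: length overruns the frame */
		(void)type;                   /* a real decoder dispatches on type */
		p += len;
		remaining -= len;
	}
	return remaining == 0 ? 0 : -1;       /* trailing bytes are malformed too */
}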
./CrossVul/dataset_final_sorted/CWE-617/c/good_1771_1
crossvul-cpp_data_good_3398_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/delegate.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/timer.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" #include "magick/xwindow-private.h" /* Constant declaration. 
*/ const char BackgroundColor[] = "#ffffff", /* white */ BorderColor[] = "#dfdfdf", /* gray */ DefaultTileFrame[] = "15x15+3+3", DefaultTileGeometry[] = "120x120+4+3>", DefaultTileLabel[] = "%f\n%G\n%b", ForegroundColor[] = "#000", /* black */ LoadImageTag[] = "Load/Image", LoadImagesTag[] = "Load/Images", MatteColor[] = "#bdbdbd", /* gray */ PSDensityGeometry[] = "72.0x72.0", PSPageGeometry[] = "612x792", SaveImageTag[] = "Save/Image", SaveImagesTag[] = "Save/Images", TransparentColor[] = "#00000000"; /* transparent black */ const double DefaultResolution = 72.0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. % % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireMagickMemory(sizeof(*image)); if (image == (Image *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(image,0,sizeof(*image)); /* Initialize Image structure. */ (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; image->blur=1.0; InitializeExceptionInfo(&image->exception); (void) QueryColorDatabase(BackgroundColor,&image->background_color, &image->exception); (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception); (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception); (void) QueryColorDatabase(TransparentColor,&image->transparent_color, &image->exception); GetTimerInfo(&image->timer); image->ping=MagickFalse; image->cache=AcquirePixelCache(0); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=time((time_t *) NULL); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AllocateSemaphoreInfo(); image->signature=MagickSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MaxTextExtent); (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->matte_color=image_info->matte_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); (void) SyncImageSettings(image_info,image); option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (image->delay > (size_t) floor(geometry_info.rho+0.5)) image->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (image->delay < (size_t) floor(geometry_info.rho+0.5)) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else image->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
% % The format of the AcquireImageInfo method is: % % ImageInfo *AcquireImageInfo(void) % */ MagickExport ImageInfo *AcquireImageInfo(void) { ImageInfo *image_info; image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info)); if (image_info == (ImageInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetImageInfo(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % The format of the AcquireNextImage method is: % % void AcquireNextImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % */ MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image) { /* Allocate image structure. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->next=AcquireImage(image_info); if (GetNextImageInList(image) == (Image *) NULL) return; (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename, MaxTextExtent); if (image_info != (ImageInfo *) NULL) (void) CopyMagickString(GetNextImageInList(image)->filename, image_info->filename,MaxTextExtent); DestroyBlob(GetNextImageInList(image)); image->next->blob=ReferenceBlob(image->blob); image->next->endian=image->endian; image->next->scene=image->scene+1; image->next->previous=image; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A p p e n d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AppendImages() takes all images from the current image pointer to the end % of the image list and appends them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting now effects how the image is justified in the % final image. % % The format of the AppendImages method is: % % Image *AppendImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AppendImages(const Image *images, const MagickBooleanType stack,ExceptionInfo *exception) { #define AppendImageTag "Append/Image" CacheView *append_view; Image *append_image; MagickBooleanType homogeneous_colorspace, matte, status; MagickOffsetType n; RectangleInfo geometry; register const Image *next; size_t depth, height, number_images, width; ssize_t x_offset, y, y_offset; /* Compute maximum area of appended area. 
*/ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); matte=images->matte; number_images=1; width=images->columns; height=images->rows; depth=images->depth; homogeneous_colorspace=MagickTrue; next=GetNextImageInList(images); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->depth > depth) depth=next->depth; if (next->colorspace != images->colorspace) homogeneous_colorspace=MagickFalse; if (next->matte != MagickFalse) matte=MagickTrue; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; continue; } width+=next->columns; if (next->rows > height) height=next->rows; } /* Append images. */ append_image=CloneImage(images,width,height,MagickTrue,exception); if (append_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(append_image,DirectClass) == MagickFalse) { InheritException(exception,&append_image->exception); append_image=DestroyImage(append_image); return((Image *) NULL); } if (homogeneous_colorspace == MagickFalse) (void) SetImageColorspace(append_image,sRGBColorspace); append_image->depth=depth; append_image->matte=matte; append_image->page=images->page; (void) SetImageBackgroundColor(append_image); status=MagickTrue; x_offset=0; y_offset=0; next=images; append_view=AcquireAuthenticCacheView(append_image,exception); for (n=0; n < (MagickOffsetType) number_images; n++) { CacheView *image_view; MagickBooleanType proceed; SetGeometry(append_image,&geometry); GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry); if (stack != MagickFalse) x_offset-=geometry.x; else y_offset-=geometry.y; image_view=AcquireVirtualCacheView(next,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(next,next,next->rows,1) #endif for (y=0; y < (ssize_t) next->rows; y++) { MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict append_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset, next->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); append_indexes=GetCacheViewAuthenticIndexQueue(append_view); for (x=0; x < (ssize_t) next->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (next->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if ((next->colorspace == CMYKColorspace) && (append_image->colorspace == CMYKColorspace)) SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x)); p++; q++; } sync=SyncCacheViewAuthenticPixels(append_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (stack == MagickFalse) { x_offset+=(ssize_t) next->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) next->rows; } proceed=SetImageProgress(append_image,AppendImageTag,n,number_images); if (proceed == MagickFalse) 
break; next=GetNextImageInList(next); } append_view=DestroyCacheView(append_view); if (status == MagickFalse) append_image=DestroyImage(append_image); return(append_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a t c h I m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CatchImageException() returns if no exceptions are found in the image % sequence, otherwise it determines the most severe exception and reports % it as a warning or error depending on the severity. % % The format of the CatchImageException method is: % % ExceptionType CatchImageException(Image *image) % % A description of each parameter follows: % % o image: An image sequence. % */ MagickExport ExceptionType CatchImageException(Image *image) { ExceptionInfo *exception; ExceptionType severity; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=AcquireExceptionInfo(); GetImageException(image,exception); CatchException(exception); severity=exception->severity; exception=DestroyExceptionInfo(exception); return(severity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipImagePath() sets the image clip mask based any clipping path information % if it exists. % % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. 
% */ MagickExport MagickBooleanType ClipImage(Image *image) { return(ClipImagePath(image,"#1",MagickTrue)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(&image->exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent); (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent); clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask); if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse); (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageClipMask(image,clip_mask); clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { double scale; Image *clone_image; size_t length; /* Clone the image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image)); if (clone_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickSignature; clone_image->storage_class=image->storage_class; clone_image->channels=image->channels; clone_image->colorspace=image->colorspace; clone_image->matte=image->matte; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; if (image->colormap != (PixelPacket *) NULL) { /* Allocate and copy the image colormap. */ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelPacket *) NULL) { clone_image=(Image *) RelinquishMagickMemory(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) CopyMagickMemory(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); InitializeExceptionInfo(&clone_image->exception); InheritException(&clone_image->exception,&image->exception); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MaxTextExtent); (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent); (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent); clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); clone_image->clip_mask=NewImageList(); clone_image->mask=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AllocateSemaphoreInfo(); if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); if (image->clip_mask != (Image *) NULL) clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue, exception); if (image->mask != (Image *) NULL) clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } if ((columns == image->columns) && (rows == image->rows)) { if (image->clip_mask != (Image *) NULL) 
clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue, exception); if (image->mask != (Image *) NULL) clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception); } scale=1.0; if (image->columns != 0) scale=(double) columns/(double) image->columns; clone_image->page.width=(size_t) floor(scale*image->page.width+0.5); clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5); clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5); scale=1.0; if (image->rows != 0) scale=(double) rows/(double) image->rows; clone_image->page.height=(size_t) floor(scale*image->page.height+0.5); clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5); clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5); clone_image->cache=ClonePixelCache(image->cache); if (SetImageExtent(clone_image,columns,rows) == MagickFalse) { InheritException(exception,&clone_image->exception); clone_image=DestroyImage(clone_image); } return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info) { ImageInfo *clone_info; clone_info=AcquireImageInfo(); if (image_info == (ImageInfo *) NULL) return(clone_info); clone_info->compression=image_info->compression; clone_info->temporary=image_info->temporary; clone_info->adjoin=image_info->adjoin; clone_info->antialias=image_info->antialias; clone_info->scene=image_info->scene; clone_info->number_scenes=image_info->number_scenes; clone_info->depth=image_info->depth; (void) CloneString(&clone_info->size,image_info->size); (void) CloneString(&clone_info->extract,image_info->extract); (void) CloneString(&clone_info->scenes,image_info->scenes); (void) CloneString(&clone_info->page,image_info->page); clone_info->interlace=image_info->interlace; clone_info->endian=image_info->endian; clone_info->units=image_info->units; clone_info->quality=image_info->quality; (void) CloneString(&clone_info->sampling_factor,image_info->sampling_factor); (void) CloneString(&clone_info->server_name,image_info->server_name); (void) CloneString(&clone_info->font,image_info->font); (void) CloneString(&clone_info->texture,image_info->texture); (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->pen=image_info->pen; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->matte_color=image_info->matte_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colors=image_info->colors; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->preview_type=image_info->preview_type; clone_info->group=image_info->group; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; (void) 
CloneString(&clone_info->view,image_info->view);
  (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene;  /* deprecated */
  clone_info->subrange=image_info->number_scenes;  /* deprecated */
  clone_info->channel=image_info->channel;
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o p y   I m a g e   P i x e l s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry to the destination image at the specified offset.
%
%  The format of the CopyImagePixels method is:
%
%      MagickBooleanType CopyImagePixels(Image *image,
%        const Image *source_image,const RectangleInfo *geometry,
%        const OffsetInfo *offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the destination image.
%
%    o source_image: the source image.
%
%    o geometry: define the dimensions of the source pixel rectangle.
%
%    o offset: define the offset in the destination image.
%
%    o exception: return the highest severity exception.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag  "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
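
    A hedged sketch of a typical call (the variable names and the 64x64
    rectangle are illustrative; the rectangle must fit inside both images):

      RectangleInfo
        geometry;

      OffsetInfo
        offset;

      SetGeometry(source_image,&geometry);
      geometry.width=64;
      geometry.height=64;
      offset.x=0;
      offset.y=0;
      (void) CopyImagePixels(image,source_image,&geometry,&offset,exception);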
*/ status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,image,geometry->height,1) #endif for (y=0; y < (ssize_t) geometry->height; y++) { register const IndexPacket *magick_restrict source_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y, geometry->width,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } source_indexes=GetCacheViewVirtualIndexQueue(source_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) geometry->width; x++) { *q=(*p); if (image->colorspace == CMYKColorspace) indexes[x]=source_indexes[x]; p++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CopyImagePixels) #endif proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); source_view=DestroyCacheView(source_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImage() dereferences an image, deallocating memory associated with % the image if the reference count becomes zero. % % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. 
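
    DestroyImage() only tears the structure down once the reference count
    reaches zero.  A sketch of the usual pairing (illustrative only):

      Image
        *shared;

      shared=ReferenceImage(image);   (reference count is now 2)
      shared=DestroyImage(shared);    (count drops to 1; returns NULL)
      image=DestroyImage(image);      (count reaches 0; memory is released)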
*/ DestroyImagePixels(image); if (image->clip_mask != (Image *) NULL) image->clip_mask=DestroyImage(image->clip_mask); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelPacket *) NULL) image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info*) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); DestroyBlob(image); (void) ClearExceptionInfo(&image->exception,MagickTrue); if (image->semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&image->semaphore); image->signature=(~MagickSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates memory associated with an ImageInfo % structure. % % The format of the DestroyImageInfo method is: % % ImageInfo *DestroyImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); if (image_info->scenes != (char *) NULL) image_info->scenes=DestroyString(image_info->scenes); if (image_info->page != (char *) NULL) image_info->page=DestroyString(image_info->page); if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); if (image_info->view != (char *) NULL) image_info->view=DestroyString(image_info->view); if (image_info->authenticate != (char *) NULL) image_info->authenticate=DestroyString( image_info->authenticate); DestroyImageOptions(image_info); if (image_info->cache != (void *) NULL) image_info->cache=DestroyPixelCache(image_info->cache); if (image_info->profile != (StringInfo *) NULL) image_info->profile=(void *) DestroyStringInfo((StringInfo *) image_info->profile); image_info->signature=(~MagickSignature); image_info=(ImageInfo *) RelinquishMagickMemory(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s a s s o c i a t e I m a g e S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
DisassociateImageStream() disassociates the image stream.  It checks if the
%  blob of the specified image is referenced by other images and, if the
%  reference count is higher than 1, assigns a new blob to the specified
%  image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   C l i p   M a s k                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageClipMask() returns the clip path associated with the image.
%
%  The format of the GetImageClipMask method is:
%
%      Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->clip_mask == (Image *) NULL)
    return((Image *) NULL);
  return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   E x c e p t i o n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageException() traverses an image sequence and returns any
%  error more severe than noted by the exception parameter.
%
%  The format of the GetImageException method is:
%
%      void GetImageException(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to a list of one or more images.
%
%    o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *next;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->exception.severity == UndefinedException)
      continue;
    if (next->exception.severity > exception->severity)
      InheritException(exception,&next->exception);
    next->exception.severity=UndefinedException;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   I n f o                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
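
    GetImageInfo() is typically applied to a stack allocation; a minimal
    sketch (hedged; heap users would normally call AcquireImageInfo()
    instead):

      ImageInfo
        image_info;

      GetImageInfo(&image_info);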
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   I n f o   F i l e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  return(image_info->file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   M a s k                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMask() returns the mask associated with the image.
%
%  The format of the GetImageMask method is:
%
%      Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->mask == (Image *) NULL)
    return((Image *) NULL);
  return(CloneImage(image->mask,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   C h a n n e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannels() returns the number of pixel channels associated with the
%  specified image.
%
%  The format of the GetImageChannels method is:
%
%      size_t GetImageChannels(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(image->channels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  G e t   I m a g e   R e f e r e n c e   C o u n t                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetImageReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   I m a g e   V i r t u a l   P i x e l   M e t h o d               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the boundaries
%  of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p r e t   I m a g e   F i l e n a m e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image
%  filename.  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o format:  A filename describing the format to use to write the numeric
%      argument.  Only the first numeric format identifier is replaced.
%
%    o value:  Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  size_t
    length;

  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          field_width;

        /*
          Skip over an explicit field width; strtol() advances q past it
          (renamed from "value" to avoid shadowing the value parameter).
        */
        field_width=(ssize_t) strtol(q,&q,10);
        (void) field_width;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t) (MaxTextExtent-
          (p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *option;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
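
          e.g. "image-%[filename:label].png" substitutes the value of the
          "filename:label" property (an illustrative property name, set by
          the caller) into the output filename.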
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        option=(const char *) NULL;
#if 0
  /*
    FUTURE: remove this code. -- Anthony, 29 April 2012
    Removed as GetMagickProperty() will never match a "filename:" string
    as this is not a 'known' image property.
  */
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          option=GetMagickProperty(image_info,image,pattern);
        else
#endif
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),option,(size_t)
          (MaxTextExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s   H i g h   D y n a m i c   R a n g e   I m a g e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. 0..65535
%  for Q16).
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
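%
%  A brief sketch of typical use (illustrative; a non-HDRI build always
%  returns MagickFalse):
%
%      if (IsHighDynamicRangeImage(image,exception) != MagickFalse)
%        (void) printf("image has out-of-range or non-integer pixels\n");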
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s   I m a g e   O b j e c t                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->signature != MagickSignature)
      return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s   T a i n t   I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been
%  altered since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);  /* leave *image intact if the clone fails */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w   M a g i c k   I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
%        const size_t height,const MagickPixelPacket *background)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
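%
%  A minimal sketch (hedged; the color lookup shown is one of several ways
%  to fill in the background, and "canvas" is an illustrative name):
%
%      Image
%        *canvas;
%
%      MagickPixelPacket
%        background;
%
%      GetMagickPixelPacket((Image *) NULL,&background);
%      (void) QueryMagickColor("white",&background,exception);
%      canvas=NewMagickImage(image_info,640,480,&background);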
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const MagickPixelPacket *background) { CacheView *image_view; ExceptionInfo *exception; Image *image; ssize_t y; MagickBooleanType status; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickSignature); assert(background != (const MagickPixelPacket *) NULL); image=AcquireImage(image_info); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->matte=background->matte; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,background,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. % % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. 
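%
%  For example, a common "repage" idiom that clears the canvas geometry
%  (illustrative):
%
%      (void) ResetImagePage(image,"0x0+0+0");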
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   B a c k g r o u n d   C o l o r                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageBackgroundColor() initializes the image pixels to the image
%  background color.  The background color is defined by the background_color
%  member of the image structure.
%
%  The format of the SetImageBackgroundColor method is:
%
%      MagickBooleanType SetImageBackgroundColor(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  pixel.opacity=OpaqueOpacity;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *magick_restrict indexes;

        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   C h a n n e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannels() sets the number of pixel channels associated with the
%  image.
%
%  The format of the SetImageChannels method is:
%
%      MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channels:  The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  image->channels=channels;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   C o l o r                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() sets the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,
%        const MagickPixelPacket *color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color: the image color.
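%
%  A short sketch (illustrative; the exception handling is abbreviated):
%
%      MagickPixelPacket
%        color;
%
%      GetMagickPixelPacket(image,&color);
%      (void) QueryMagickColor("red",&color,&image->exception);
%      (void) SetImageColor(image,&color);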
% */ MagickExport MagickBooleanType SetImageColor(Image *image, const MagickPixelPacket *color) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); assert(color != (const MagickPixelPacket *) NULL); image->colorspace=color->colorspace; image->matte=color->matte; image->fuzz=color->fuzz; image->depth=color->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,color,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageStorageClass() sets the image class: DirectClass for true color % images or PseudoClass for colormapped images. % % The format of the SetImageStorageClass method is: % % MagickBooleanType SetImageStorageClass(Image *image, % const ClassType storage_class) % % A description of each parameter follows: % % o image: the image. % % o storage_class: The image class. % */ MagickExport MagickBooleanType SetImageStorageClass(Image *image, const ClassType storage_class) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->storage_class=storage_class; return(SyncImagePixelCache(image,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C l i p M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageClipMask() associates a clip path with the image. The clip path % must be the same dimensions as the image. Set any pixel component of % the clip path to TransparentOpacity to prevent that corresponding image % pixel component from being updated when SyncAuthenticPixels() is applied. % % The format of the SetImageClipMask method is: % % MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask) % % A description of each parameter follows: % % o image: the image. % % o clip_mask: the image clip path. 
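%
%  For example (sketch; mask_image is a caller-supplied image with the same
%  dimensions as image).  Note that the mask is cloned internally, so the
%  caller retains ownership of mask_image:
%
%      (void) SetImageClipMask(image,mask_image);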
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (clip_mask != (const Image *) NULL)
    if ((clip_mask->columns != image->columns) ||
        (clip_mask->rows != image->rows))
      ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   E x t e n t                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns:  The image width in pixels.
%
%    o rows:  The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,
  const size_t columns,const size_t rows)
{
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  image->columns=columns;
  image->rows=rows;
  if (image->depth > (8*sizeof(MagickSizeType)))
    ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  S e t   I m a g e   I n f o                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the `magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, `ps:image' returns PS indicating a Postscript
%  image.  JPEG is returned for this filename: `image.jpg'.  The filename
%  prefix has precedence over the suffix.  Use an optional index enclosed in
%  brackets after a file name to specify a desired scene of a
%  multi-resolution image format like Photo CD (e.g. img0001.pcd[4]).  A True
%  (non-zero) return value indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  unsigned char
    magick[2*MaxTextExtent];

  /*
    Look for 'image.format' in filename.
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); *subimage='\0'; GetPathComponent(image_info->filename,SubimagePath,subimage); if (*subimage != '\0') { /* Look for scene specification (e.g. img0001.pcd[4]). */ if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse) { if (IsGeometry(subimage) != MagickFalse) (void) CloneString(&image_info->extract,subimage); } else { size_t first, last; (void) CloneString(&image_info->scenes,subimage); image_info->scene=StringToUnsignedLong(image_info->scenes); image_info->number_scenes=image_info->scene; p=image_info->scenes; for (q=(char *) image_info->scenes; *q != '\0'; p++) { while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; first=(size_t) strtol(p,&q,10); last=first; while (isspace((int) ((unsigned char) *q)) != 0) q++; if (*q == '-') last=(size_t) strtol(q+1,&q,10); if (first > last) Swap(first,last); if (first < image_info->scene) image_info->scene=first; if (last > image_info->number_scenes) image_info->number_scenes=last; p=q; } image_info->number_scenes-=image_info->scene-1; image_info->subimage=image_info->scene; image_info->subrange=image_info->number_scenes; } } *extension='\0'; if (*image_info->magick == '\0') GetPathComponent(image_info->filename,ExtensionPath,extension); #if defined(MAGICKCORE_ZLIB_DELEGATE) if (*extension != '\0') if ((LocaleCompare(extension,"gz") == 0) || (LocaleCompare(extension,"Z") == 0) || (LocaleCompare(extension,"svgz") == 0) || (LocaleCompare(extension,"wmz") == 0)) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif #if defined(MAGICKCORE_BZLIB_DELEGATE) if (*extension != '\0') if (LocaleCompare(extension,"bz2") == 0) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif image_info->affirm=MagickFalse; sans_exception=AcquireExceptionInfo(); if (*extension != '\0') { MagickFormatType format_type; register ssize_t i; static const char *format_type_formats[] = { "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "LAUNCH", "MPEG:DECODE", "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY", "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL }; /* User specified image format. */ (void) CopyMagickString(magic,extension,MaxTextExtent); LocaleUpper(magic); /* Look for explicit image formats. */ format_type=UndefinedFormatType; i=0; while ((format_type == UndefinedFormatType) && (format_type_formats[i] != (char *) NULL)) { if ((*magic == *format_type_formats[i]) && (LocaleCompare(magic,format_type_formats[i]) == 0)) format_type=ExplicitFormatType; i++; } magick_info=GetMagickInfo(magic,sans_exception); if ((magick_info != (const MagickInfo *) NULL) && (magick_info->format_type != UndefinedFormatType)) format_type=magick_info->format_type; if (format_type == UndefinedFormatType) (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); else if (format_type == ExplicitFormatType) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); } if (LocaleCompare(magic,"RGB") == 0) image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */ } /* Look for explicit 'format:image' in filename. 
*/ *magic='\0'; GetPathComponent(image_info->filename,MagickPath,magic); if (*magic == '\0') { (void) CopyMagickString(magic,image_info->magick,MaxTextExtent); magick_info=GetMagickInfo(magic,sans_exception); GetPathComponent(image_info->filename,CanonicalPath,filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); } else { const DelegateInfo *delegate_info; /* User specified image format. */ LocaleUpper(magic); magick_info=GetMagickInfo(magic,sans_exception); delegate_info=GetDelegateInfo(magic,"*",sans_exception); if (delegate_info == (const DelegateInfo *) NULL) delegate_info=GetDelegateInfo("*",magic,sans_exception); if (((magick_info != (const MagickInfo *) NULL) || (delegate_info != (const DelegateInfo *) NULL)) && (IsMagickConflict(magic) == MagickFalse)) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); GetPathComponent(image_info->filename,CanonicalPath,filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); } } sans_exception=DestroyExceptionInfo(sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; if ((image_info->adjoin != MagickFalse) && (frames > 1)) { /* Test for multiple image support (e.g. image%02d.png). */ (void) InterpretImageFilename(image_info,(Image *) NULL, image_info->filename,(int) image_info->scene,filename); if ((LocaleCompare(filename,image_info->filename) != 0) && (strchr(filename,'%') == (char *) NULL)) image_info->adjoin=MagickFalse; } if ((image_info->adjoin != MagickFalse) && (frames > 0)) { /* Some image formats do not support multiple frames per file. */ magick_info=GetMagickInfo(magic,exception); if (magick_info != (const MagickInfo *) NULL) if (GetMagickAdjoin(magick_info) == MagickFalse) image_info->adjoin=MagickFalse; } if (image_info->affirm != MagickFalse) return(MagickTrue); if (frames == 0) { /* Determine the image format from the first few bytes of the file. */ image=AcquireImage(image_info); (void) CopyMagickString(image->filename,image_info->filename, MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } if ((IsBlobSeekable(image) == MagickFalse) || (IsBlobExempt(image) != MagickFalse)) { /* Copy standard input or pipe to temporary file. */ *filename='\0'; status=ImageToFile(image,filename,exception); (void) CloseBlob(image); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } SetImageInfoFile(image_info,(FILE *) NULL); (void) CopyMagickString(image->filename,filename,MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); image_info->temporary=MagickTrue; } (void) ResetMagickMemory(magick,0,sizeof(magick)); count=ReadBlob(image,2*MaxTextExtent,magick); (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR); (void) CloseBlob(image); image=DestroyImage(image); /* Check magic.xml configuration file. 
*/ sans_exception=AcquireExceptionInfo(); magic_info=GetMagicInfo(magick,(size_t) count,sans_exception); if ((magic_info != (const MagicInfo *) NULL) && (GetMagicName(magic_info) != (char *) NULL)) { (void) CopyMagickString(image_info->magick,GetMagicName(magic_info), MaxTextExtent); magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); return(MagickTrue); } magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoBlob() sets the image info blob member. % % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. % */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const Image *mask) % % A description of each parameter follows: % % o image: the image. % % o mask: the image mask. 
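%
%  For example (sketch; mask_image is a caller-supplied image that must
%  match the image dimensions and is cloned internally, so the caller
%  retains ownership of it):
%
%      (void) SetImageMask(image,mask_image);
%
%  and a NULL mask clears it again:
%
%      (void) SetImageMask(image,(const Image *) NULL);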
% */ MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (mask != (const Image *) NULL) if ((mask->columns != image->columns) || (mask->rows != image->rows)) ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); image->mask=NewImageList(); if (mask == (Image *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception); if (image->mask == (Image *) NULL) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e O p a c i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageOpacity() sets the opacity levels of the image. % % The format of the SetImageOpacity method is: % % MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o opacity: the level of transparency: 0 is fully opaque and QuantumRange is % fully transparent. % */ MagickExport MagickBooleanType SetImageOpacity(Image *image, const Quantum opacity) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); image->matte=MagickTrue; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelOpacity(q,opacity); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageVirtualPixelMethod() sets the "virtual pixels" method for the % image and returns the previous setting. A virtual pixel is any pixel access % that is outside the boundaries of the image cache. % % The format of the SetImageVirtualPixelMethod() method is: % % VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image, % const VirtualPixelMethod virtual_pixel_method) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. 
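%
%  A common save-and-restore pattern (sketch; BlackVirtualPixelMethod is one
%  of several possible choices):
%
%      VirtualPixelMethod
%        former;
%
%      former=SetImageVirtualPixelMethod(image,BlackVirtualPixelMethod);
%      ... access pixels beyond the image boundaries ...
%      (void) SetImageVirtualPixelMethod(image,former);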
% */ MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image, const VirtualPixelMethod virtual_pixel_method) { assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(SetPixelCacheVirtualMethod(image,virtual_pixel_method)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S m u s h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SmushImages() takes all images from the current image pointer to the end % of the image list and smushes them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting affects how the image is justified in the % final image. % % The format of the SmushImages method is: % % Image *SmushImages(const Image *images,const MagickBooleanType stack, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o offset: minimum distance in pixels between images. % % o exception: return any errors or warnings in this structure. % */ static ssize_t SmushXGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *left_view, *right_view; const Image *left_image, *right_image; RectangleInfo left_geometry, right_geometry; register const PixelPacket *p; register ssize_t i, y; size_t gap; ssize_t x; if (images->previous == (Image *) NULL) return(0); right_image=images; SetGeometry(smush_image,&right_geometry); GravityAdjustGeometry(right_image->columns,right_image->rows, right_image->gravity,&right_geometry); left_image=images->previous; SetGeometry(smush_image,&left_geometry); GravityAdjustGeometry(left_image->columns,left_image->rows, left_image->gravity,&left_geometry); gap=right_image->columns; left_view=AcquireVirtualCacheView(left_image,exception); right_view=AcquireVirtualCacheView(right_image,exception); for (y=0; y < (ssize_t) smush_image->rows; y++) { for (x=(ssize_t) left_image->columns-1; x > 0; x--) { p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((left_image->columns-x-1) >= gap)) break; } i=(ssize_t) left_image->columns-x-1; for (x=0; x < (ssize_t) right_image->columns; x++) { p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1, exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((x+i) >= (ssize_t) gap)) break; } if ((x+i) < (ssize_t) gap) gap=(size_t) (x+i); } right_view=DestroyCacheView(right_view); left_view=DestroyCacheView(left_view); if (y < (ssize_t) smush_image->rows) return(offset); return((ssize_t) gap-offset); } static ssize_t SmushYGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *bottom_view, *top_view; const Image *bottom_image, *top_image; RectangleInfo bottom_geometry, top_geometry; register const PixelPacket *p; register ssize_t i, x; size_t gap; ssize_t y; if (images->previous == (Image *) NULL) return(0); bottom_image=images; SetGeometry(smush_image,&bottom_geometry); GravityAdjustGeometry(bottom_image->columns,bottom_image->rows, bottom_image->gravity,&bottom_geometry); top_image=images->previous;
SetGeometry(smush_image,&top_geometry); GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity, &top_geometry); gap=bottom_image->rows; top_view=AcquireVirtualCacheView(top_image,exception); bottom_view=AcquireVirtualCacheView(bottom_image,exception); for (x=0; x < (ssize_t) smush_image->columns; x++) { for (y=(ssize_t) top_image->rows-1; y > 0; y--) { p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((top_image->rows-y-1) >= gap)) break; } i=(ssize_t) top_image->rows-y-1; for (y=0; y < (ssize_t) bottom_image->rows; y++) { p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1, exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((y+i) >= (ssize_t) gap)) break; } if ((y+i) < (ssize_t) gap) gap=(size_t) (y+i); } bottom_view=DestroyCacheView(bottom_view); top_view=DestroyCacheView(top_view); if (x < (ssize_t) smush_image->columns) return(offset); return((ssize_t) gap-offset); } MagickExport Image *SmushImages(const Image *images, const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception) { #define SmushImageTag "Smush/Image" CacheView *smush_view; const Image *image; Image *smush_image; MagickBooleanType matte, proceed, status; MagickOffsetType n; RectangleInfo geometry; register const Image *next; size_t height, number_images, width; ssize_t x_offset, y_offset; /* Compute maximum area of smushed area. */ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=images; matte=image->matte; number_images=1; width=image->columns; height=image->rows; next=GetNextImageInList(image); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->matte != MagickFalse) matte=MagickTrue; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; if (next->previous != (Image *) NULL) height+=offset; continue; } width+=next->columns; if (next->previous != (Image *) NULL) width+=offset; if (next->rows > height) height=next->rows; } /* Smush images. 
*/ smush_image=CloneImage(image,width,height,MagickTrue,exception); if (smush_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse) { InheritException(exception,&smush_image->exception); smush_image=DestroyImage(smush_image); return((Image *) NULL); } smush_image->matte=matte; (void) SetImageBackgroundColor(smush_image); status=MagickTrue; x_offset=0; y_offset=0; smush_view=AcquireVirtualCacheView(smush_image,exception); for (n=0; n < (MagickOffsetType) number_images; n++) { SetGeometry(smush_image,&geometry); GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry); if (stack != MagickFalse) { x_offset-=geometry.x; y_offset-=SmushYGap(smush_image,image,offset,exception); } else { x_offset-=SmushXGap(smush_image,image,offset,exception); y_offset-=geometry.y; } status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset); proceed=SetImageProgress(image,SmushImageTag,n,number_images); if (proceed == MagickFalse) break; if (stack == MagickFalse) { x_offset+=(ssize_t) image->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) image->rows; } image=GetNextImageInList(image); } if (stack == MagickFalse) smush_image->columns=(size_t) x_offset; else smush_image->rows=(size_t) y_offset; smush_view=DestroyCacheView(smush_view); if (status == MagickFalse) smush_image=DestroyImage(smush_image); return(smush_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t r i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StripImage() strips an image of all profiles and comments. % % The format of the StripImage method is: % % MagickBooleanType StripImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType StripImage(Image *image) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); DestroyImageProfiles(image); (void) DeleteImageProperty(image,"comment"); (void) DeleteImageProperty(image,"date:create"); (void) DeleteImageProperty(image,"date:modify"); status=SetImageArtifact(image,"png:exclude-chunk", "bKGD,cHRM,EXIF,gAMA,iCCP,iTXt,sRGB,tEXt,zCCP,zTXt,date"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImage() initializes the red, green, and blue intensities of each pixel % as defined by the colormap index. % % The format of the SyncImage method is: % % MagickBooleanType SyncImage(Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static inline IndexPacket PushColormapIndex(Image *image, const size_t index,MagickBooleanType *range_exception) { if (index < image->colors) return((IndexPacket) index); *range_exception=MagickTrue; return((IndexPacket) 0); } MagickExport MagickBooleanType SyncImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType range_exception, status, taint; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (image->ping != MagickFalse) return(MagickTrue); if (image->storage_class != PseudoClass) return(MagickFalse); assert(image->colormap != (PixelPacket *) NULL); range_exception=MagickFalse; status=MagickTrue; taint=image->taint; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(range_exception,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket index; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x), &range_exception); if (image->matte == MagickFalse) SetPixelRgb(q,image->colormap+(ssize_t) index) else SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->taint=taint; if ((image->ping == MagickFalse) && (range_exception != MagickFalse)) (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e S e t t i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageSettings() syncs image_info options into per-image attributes. % % The format of the SyncImageSettings method is: % % MagickBooleanType SyncImageSettings(const ImageInfo *image_info, % Image *image) % MagickBooleanType SyncImagesSettings(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. 
% */ MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info, Image *images) { Image *image; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) (void) SyncImageSettings(image_info,image); (void) DeleteImageOption(image_info,"page"); return(MagickTrue); } MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info, Image *image) { char property[MaxTextExtent]; const char *option, *value; GeometryInfo geometry_info; MagickStatusType flags; ResolutionType units; /* Sync image options. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); option=GetImageOption(image_info,"background"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->background_color, &image->exception); option=GetImageOption(image_info,"bias"); if (option != (const char *) NULL) image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"black-point-compensation"); if (option != (const char *) NULL) image->black_point_compensation=(MagickBooleanType) ParseCommandOption( MagickBooleanOptions,MagickFalse,option); option=GetImageOption(image_info,"blue-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x; } option=GetImageOption(image_info,"bordercolor"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->border_color,&image->exception); option=GetImageOption(image_info,"colors"); if (option != (const char *) NULL) image->colors=StringToUnsignedLong(option); option=GetImageOption(image_info,"compose"); if (option != (const char *) NULL) image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,option); option=GetImageOption(image_info,"compress"); if (option != (const char *) NULL) image->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,option); option=GetImageOption(image_info,"debug"); if (option != (const char *) NULL) image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"density"); if (option != (const char *) NULL) { GeometryInfo geometry_info; /* Set image density. 
*/ flags=ParseGeometry(option,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } option=GetImageOption(image_info,"depth"); if (option != (const char *) NULL) image->depth=StringToUnsignedLong(option); option=GetImageOption(image_info,"endian"); if (option != (const char *) NULL) image->endian=(EndianType) ParseCommandOption(MagickEndianOptions, MagickFalse,option); option=GetImageOption(image_info,"filter"); if (option != (const char *) NULL) image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions, MagickFalse,option); option=GetImageOption(image_info,"fuzz"); if (option != (const char *) NULL) image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"gravity"); if (option != (const char *) NULL) image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(image_info,"green-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y=image->chromaticity.green_primary.x; } option=GetImageOption(image_info,"intensity"); if (option != (const char *) NULL) image->intensity=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,option); option=GetImageOption(image_info,"intent"); if (option != (const char *) NULL) image->rendering_intent=(RenderingIntent) ParseCommandOption( MagickIntentOptions,MagickFalse,option); option=GetImageOption(image_info,"interlace"); if (option != (const char *) NULL) image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions, MagickFalse,option); option=GetImageOption(image_info,"interpolate"); if (option != (const char *) NULL) image->interpolate=(InterpolatePixelMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,option); option=GetImageOption(image_info,"loop"); if (option != (const char *) NULL) image->iterations=StringToUnsignedLong(option); option=GetImageOption(image_info,"mattecolor"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->matte_color,&image->exception); option=GetImageOption(image_info,"orient"); if (option != (const char *) NULL) image->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,option); option=GetImageOption(image_info,"page"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) image->quality=StringToUnsignedLong(option); option=GetImageOption(image_info,"red-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; image->chromaticity.red_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.red_primary.y=image->chromaticity.red_primary.x; } if (image_info->quality != UndefinedCompressionQuality) image->quality=image_info->quality; option=GetImageOption(image_info,"scene"); if (option != (const char *) NULL) image->scene=StringToUnsignedLong(option); option=GetImageOption(image_info,"taint"); if (option != (const char *) NULL) image->taint=(MagickBooleanType) 
ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"tile-offset"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"transparent-color"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->transparent_color, &image->exception); option=GetImageOption(image_info,"type"); if (option != (const char *) NULL) image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, option); option=GetImageOption(image_info,"units"); if (option != (const char *) NULL) units=(ResolutionType) ParseCommandOption(MagickResolutionOptions, MagickFalse,option); else units = image_info->units; if (units != UndefinedResolution) { if (image->units != units) switch (image->units) { case PixelsPerInchResolution: { if (units == PixelsPerCentimeterResolution) { image->x_resolution/=2.54; image->y_resolution/=2.54; } break; } case PixelsPerCentimeterResolution: { if (units == PixelsPerInchResolution) { image->x_resolution=(double) ((size_t) (100.0*2.54* image->x_resolution+0.5))/100.0; image->y_resolution=(double) ((size_t) (100.0*2.54* image->y_resolution+0.5))/100.0; } break; } default: break; } image->units=units; } option=GetImageOption(image_info,"white-point"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y=image->chromaticity.white_point.x; } ResetImageOptionIterator(image_info); for (option=GetNextImageOption(image_info); option != (const char *) NULL; ) { value=GetImageOption(image_info,option); if (value != (const char *) NULL) { (void) FormatLocaleString(property,MaxTextExtent,"%s",option); (void) SetImageArtifact(image,property,value); } option=GetNextImageOption(image_info); } return(MagickTrue); }
./CrossVul/dataset_final_sorted/CWE-617/c/good_3398_0
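/*
 * Editor's sketch, not part of the dataset file above: a minimal MagickCore
 * (ImageMagick 6) program exercising SmushImages() and StripImage() as
 * documented in that file. Linkage against MagickCore is assumed, and the
 * file names "frames.gif" and "smushed.png" are placeholders.
 */
#include <magick/MagickCore.h>

int main(void)
{
  ExceptionInfo *exception;
  Image *images, *smushed;
  ImageInfo *info;

  MagickCoreGenesis("smush-example", MagickFalse);
  exception=AcquireExceptionInfo();
  info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename, "frames.gif", MaxTextExtent);
  images=ReadImage(info, exception);
  if (images != (Image *) NULL)
    {
      /* Stack the frames top-to-bottom with a 5 pixel minimum gap. */
      smushed=SmushImages(images, MagickTrue, 5, exception);
      images=DestroyImageList(images);
      if (smushed != (Image *) NULL)
        {
          (void) StripImage(smushed);  /* drop profiles and comments */
          (void) CopyMagickString(smushed->filename, "smushed.png",
            MaxTextExtent);
          (void) WriteImage(info, smushed);
          smushed=DestroyImage(smushed);
        }
    }
  info=DestroyImageInfo(info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}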
crossvul-cpp_data_bad_1770_3
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #include <unistd.h> #include <errno.h> #include <assert.h> #include <time.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/ioctl.h> inline static int lldpd_af_to_lldp_proto(int af) { switch (af) { case LLDPD_AF_IPV4: return LLDP_MGMT_ADDR_IP4; case LLDPD_AF_IPV6: return LLDP_MGMT_ADDR_IP6; default: return LLDP_MGMT_ADDR_NONE; } } inline static int lldpd_af_from_lldp_proto(int proto) { switch (proto) { case LLDP_MGMT_ADDR_IP4: return LLDPD_AF_IPV4; case LLDP_MGMT_ADDR_IP6: return LLDPD_AF_IPV6; default: return LLDPD_AF_UNSPEC; } } static int _lldp_send(struct lldpd *global, struct lldpd_hardware *hardware, u_int8_t c_id_subtype, char *c_id, int c_id_len, u_int8_t p_id_subtype, char *p_id, int p_id_len, int shutdown) { struct lldpd_port *port; struct lldpd_chassis *chassis; struct lldpd_frame *frame; int length; u_int8_t *packet, *pos, *tlv; struct lldpd_mgmt *mgmt; int proto; u_int8_t mcastaddr[] = LLDP_MULTICAST_ADDR; #ifdef ENABLE_DOT1 const u_int8_t dot1[] = LLDP_TLV_ORG_DOT1; struct lldpd_vlan *vlan; struct lldpd_ppvid *ppvid; struct lldpd_pi *pi; #endif #ifdef ENABLE_DOT3 const u_int8_t dot3[] = LLDP_TLV_ORG_DOT3; #endif #ifdef ENABLE_LLDPMED int i; const u_int8_t med[] = LLDP_TLV_ORG_MED; #endif #ifdef ENABLE_CUSTOM struct lldpd_custom *custom; #endif port = &hardware->h_lport; chassis = port->p_chassis; length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( /* LLDP multicast address */ POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && /* Source MAC address */ POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && /* LLDP frame */ POKE_UINT16(ETHERTYPE_LLDP))) goto toobig; /* Chassis ID */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_CHASSIS_ID) && POKE_UINT8(c_id_subtype) && POKE_BYTES(c_id, c_id_len) && POKE_END_LLDP_TLV)) goto toobig; /* Port ID */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_PORT_ID) && POKE_UINT8(p_id_subtype) && POKE_BYTES(p_id, p_id_len) && POKE_END_LLDP_TLV)) goto toobig; /* Time to live */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_TTL) && POKE_UINT16(shutdown?0:chassis->c_ttl) && POKE_END_LLDP_TLV)) goto toobig; if (shutdown) goto end; /* System name */ if (chassis->c_name && *chassis->c_name != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_NAME) && POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) && POKE_END_LLDP_TLV)) goto toobig; } /* System description (skip it if empty) */ if (chassis->c_descr && *chassis->c_descr != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_DESCR) && POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)) && POKE_END_LLDP_TLV)) goto toobig; } /* System capabilities */ if (global->g_config.c_cap_advertise && 
chassis->c_cap_available) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_CAP) && POKE_UINT16(chassis->c_cap_available) && POKE_UINT16(chassis->c_cap_enabled) && POKE_END_LLDP_TLV)) goto toobig; } /* Management addresses */ TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { proto = lldpd_af_to_lldp_proto(mgmt->m_family); assert(proto != LLDP_MGMT_ADDR_NONE); if (!( POKE_START_LLDP_TLV(LLDP_TLV_MGMT_ADDR) && /* Size of the address, including its type */ POKE_UINT8(mgmt->m_addrsize + 1) && POKE_UINT8(proto) && POKE_BYTES(&mgmt->m_addr, mgmt->m_addrsize))) goto toobig; /* Interface port type, OID */ if (mgmt->m_iface == 0) { if (!( /* We don't know the management interface */ POKE_UINT8(LLDP_MGMT_IFACE_UNKNOWN) && POKE_UINT32(0))) goto toobig; } else { if (!( /* We have the index of the management interface */ POKE_UINT8(LLDP_MGMT_IFACE_IFINDEX) && POKE_UINT32(mgmt->m_iface))) goto toobig; } if (!( /* We don't provide an OID for management */ POKE_UINT8(0) && POKE_END_LLDP_TLV)) goto toobig; } /* Port description */ if (port->p_descr && *port->p_descr != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_PORT_DESCR) && POKE_BYTES(port->p_descr, strlen(port->p_descr)) && POKE_END_LLDP_TLV)) goto toobig; } #ifdef ENABLE_DOT1 /* Port VLAN ID */ if(port->p_pvid != 0) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PVID) && POKE_UINT16(port->p_pvid) && POKE_END_LLDP_TLV)) { goto toobig; } } /* Port and Protocol VLAN IDs */ TAILQ_FOREACH(ppvid, &port->p_ppvids, p_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PPVID) && POKE_UINT8(ppvid->p_cap_status) && POKE_UINT16(ppvid->p_ppvid) && POKE_END_LLDP_TLV)) { goto toobig; } } /* VLANs */ TAILQ_FOREACH(vlan, &port->p_vlans, v_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_VLANNAME) && POKE_UINT16(vlan->v_vid) && POKE_UINT8(strlen(vlan->v_name)) && POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) && POKE_END_LLDP_TLV)) goto toobig; } /* Protocol Identities */ TAILQ_FOREACH(pi, &port->p_pids, p_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PI) && POKE_UINT8(pi->p_pi_len) && POKE_BYTES(pi->p_pi, pi->p_pi_len) && POKE_END_LLDP_TLV)) goto toobig; } #endif #ifdef ENABLE_DOT3 /* Aggregation status */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_LA) && /* Bit 0 = capability ; Bit 1 = status */ POKE_UINT8((port->p_aggregid) ? 
3:1) && POKE_UINT32(port->p_aggregid) && POKE_END_LLDP_TLV)) goto toobig; /* MAC/PHY */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_MAC) && POKE_UINT8(port->p_macphy.autoneg_support | (port->p_macphy.autoneg_enabled << 1)) && POKE_UINT16(port->p_macphy.autoneg_advertised) && POKE_UINT16(port->p_macphy.mau_type) && POKE_END_LLDP_TLV)) goto toobig; /* MFS */ if (port->p_mfs) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_MFS) && POKE_UINT16(port->p_mfs) && POKE_END_LLDP_TLV)) goto toobig; } /* Power */ if (port->p_power.devicetype) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_POWER) && POKE_UINT8(( (((2 - port->p_power.devicetype) %(1<< 1))<<0) | (( port->p_power.supported %(1<< 1))<<1) | (( port->p_power.enabled %(1<< 1))<<2) | (( port->p_power.paircontrol %(1<< 1))<<3))) && POKE_UINT8(port->p_power.pairs) && POKE_UINT8(port->p_power.class))) goto toobig; /* 802.3at */ if (port->p_power.powertype != LLDP_DOT3_POWER_8023AT_OFF) { if (!( POKE_UINT8(( (((port->p_power.powertype == LLDP_DOT3_POWER_8023AT_TYPE1)?1:0) << 7) | (((port->p_power.devicetype == LLDP_DOT3_POWER_PSE)?0:1) << 6) | ((port->p_power.source %(1<< 2))<<4) | ((port->p_power.priority %(1<< 2))<<0))) && POKE_UINT16(port->p_power.requested) && POKE_UINT16(port->p_power.allocated))) goto toobig; } if (!(POKE_END_LLDP_TLV)) goto toobig; } #endif #ifdef ENABLE_LLDPMED if (port->p_med_cap_enabled) { /* LLDP-MED cap */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_CAP) && POKE_UINT16(chassis->c_med_cap_available) && POKE_UINT8(chassis->c_med_type) && POKE_END_LLDP_TLV)) goto toobig; /* LLDP-MED inventory */ #define LLDP_INVENTORY(value, subtype) \ if (value) { \ if (!( \ POKE_START_LLDP_TLV(LLDP_TLV_ORG) && \ POKE_BYTES(med, sizeof(med)) && \ POKE_UINT8(subtype) && \ POKE_BYTES(value, \ (strlen(value)>32)?32:strlen(value)) && \ POKE_END_LLDP_TLV)) \ goto toobig; \ } if (port->p_med_cap_enabled & LLDP_MED_CAP_IV) { LLDP_INVENTORY(chassis->c_med_hw, LLDP_TLV_MED_IV_HW); LLDP_INVENTORY(chassis->c_med_fw, LLDP_TLV_MED_IV_FW); LLDP_INVENTORY(chassis->c_med_sw, LLDP_TLV_MED_IV_SW); LLDP_INVENTORY(chassis->c_med_sn, LLDP_TLV_MED_IV_SN); LLDP_INVENTORY(chassis->c_med_manuf, LLDP_TLV_MED_IV_MANUF); LLDP_INVENTORY(chassis->c_med_model, LLDP_TLV_MED_IV_MODEL); LLDP_INVENTORY(chassis->c_med_asset, LLDP_TLV_MED_IV_ASSET); } /* LLDP-MED location */ for (i = 0; i < LLDP_MED_LOCFORMAT_LAST; i++) { if (port->p_med_location[i].format == i + 1) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_LOCATION) && POKE_UINT8(port->p_med_location[i].format) && POKE_BYTES(port->p_med_location[i].data, port->p_med_location[i].data_len) && POKE_END_LLDP_TLV)) goto toobig; } } /* LLDP-MED network policy */ for (i = 0; i < LLDP_MED_APPTYPE_LAST; i++) { if (port->p_med_policy[i].type == i + 1) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_POLICY) && POKE_UINT32(( ((port->p_med_policy[i].type %(1<< 8))<<24) | ((port->p_med_policy[i].unknown %(1<< 1))<<23) | ((port->p_med_policy[i].tagged %(1<< 1))<<22) | /*((0 %(1<< 1))<<21) |*/ ((port->p_med_policy[i].vid %(1<<12))<< 9) | ((port->p_med_policy[i].priority %(1<< 3))<< 6) | ((port->p_med_policy[i].dscp %(1<< 6))<< 0) )) && POKE_END_LLDP_TLV)) goto toobig; } } /* LLDP-MED POE-MDI */ if 
((port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PSE) || (port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PD)) { int devicetype = 0, source = 0; if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_MDI))) goto toobig; switch (port->p_med_power.devicetype) { case LLDP_MED_POW_TYPE_PSE: devicetype = 0; switch (port->p_med_power.source) { case LLDP_MED_POW_SOURCE_PRIMARY: source = 1; break; case LLDP_MED_POW_SOURCE_BACKUP: source = 2; break; case LLDP_MED_POW_SOURCE_RESERVED: source = 3; break; default: source = 0; break; } break; case LLDP_MED_POW_TYPE_PD: devicetype = 1; switch (port->p_med_power.source) { case LLDP_MED_POW_SOURCE_PSE: source = 1; break; case LLDP_MED_POW_SOURCE_LOCAL: source = 2; break; case LLDP_MED_POW_SOURCE_BOTH: source = 3; break; default: source = 0; break; } break; } if (!( POKE_UINT8(( ((devicetype %(1<< 2))<<6) | ((source %(1<< 2))<<4) | ((port->p_med_power.priority %(1<< 4))<<0) )) && POKE_UINT16(port->p_med_power.val) && POKE_END_LLDP_TLV)) goto toobig; } } #endif #ifdef ENABLE_CUSTOM TAILQ_FOREACH(custom, &port->p_custom_list, next) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(custom->oui, sizeof(custom->oui)) && POKE_UINT8(custom->subtype) && POKE_BYTES(custom->oui_info, custom->oui_info_len) && POKE_END_LLDP_TLV)) goto toobig; } #endif end: /* END */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_END) && POKE_END_LLDP_TLV)) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, pos - packet) == -1) { log_warn("lldp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } hardware->h_tx_cnt++; /* We assume that LLDP frame is the reference */ if (!shutdown && (frame = (struct lldpd_frame*)malloc( sizeof(int) + pos - packet)) != NULL) { frame->size = pos - packet; memcpy(&frame->frame, packet, frame->size); if ((hardware->h_lport.p_lastframe == NULL) || (hardware->h_lport.p_lastframe->size != frame->size) || (memcmp(hardware->h_lport.p_lastframe->frame, frame->frame, frame->size) != 0)) { free(hardware->h_lport.p_lastframe); hardware->h_lport.p_lastframe = frame; hardware->h_lport.p_lastchange = time(NULL); } else free(frame); } free(packet); return 0; toobig: free(packet); return E2BIG; } /* Send a shutdown LLDPDU. */ int lldp_send_shutdown(struct lldpd *global, struct lldpd_hardware *hardware) { if (hardware->h_lchassis_previous_id == NULL || hardware->h_lport_previous_id == NULL) return 0; return _lldp_send(global, hardware, hardware->h_lchassis_previous_id_subtype, hardware->h_lchassis_previous_id, hardware->h_lchassis_previous_id_len, hardware->h_lport_previous_id_subtype, hardware->h_lport_previous_id, hardware->h_lport_previous_id_len, 1); } int lldp_send(struct lldpd *global, struct lldpd_hardware *hardware) { struct lldpd_port *port = &hardware->h_lport; struct lldpd_chassis *chassis = port->p_chassis; int ret; /* Check if we have a change. 
*/ if (hardware->h_lchassis_previous_id != NULL && hardware->h_lport_previous_id != NULL && (hardware->h_lchassis_previous_id_subtype != chassis->c_id_subtype || hardware->h_lchassis_previous_id_len != chassis->c_id_len || hardware->h_lport_previous_id_subtype != port->p_id_subtype || hardware->h_lport_previous_id_len != port->p_id_len || memcmp(hardware->h_lchassis_previous_id, chassis->c_id, chassis->c_id_len) || memcmp(hardware->h_lport_previous_id, port->p_id, port->p_id_len))) { log_info("lldp", "MSAP has changed for port %s, sending a shutdown LLDPDU", hardware->h_ifname); if ((ret = lldp_send_shutdown(global, hardware)) != 0) return ret; } log_debug("lldp", "send LLDP PDU to %s", hardware->h_ifname); if ((ret = _lldp_send(global, hardware, chassis->c_id_subtype, chassis->c_id, chassis->c_id_len, port->p_id_subtype, port->p_id, port->p_id_len, 0)) != 0) return ret; /* Record current chassis and port ID */ free(hardware->h_lchassis_previous_id); hardware->h_lchassis_previous_id_subtype = chassis->c_id_subtype; hardware->h_lchassis_previous_id_len = chassis->c_id_len; if ((hardware->h_lchassis_previous_id = malloc(chassis->c_id_len)) != NULL) memcpy(hardware->h_lchassis_previous_id, chassis->c_id, chassis->c_id_len); free(hardware->h_lport_previous_id); hardware->h_lport_previous_id_subtype = port->p_id_subtype; hardware->h_lport_previous_id_len = port->p_id_len; if ((hardware->h_lport_previous_id = malloc(port->p_id_len)) != NULL) memcpy(hardware->h_lport_previous_id, port->p_id, port->p_id_len); return 0; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_size < (x)) { \ log_warnx("lldp", name " TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) int lldp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; const char lldpaddr[] = LLDP_MULTICAST_ADDR; const char dot1[] = LLDP_TLV_ORG_DOT1; const char dot3[] = LLDP_TLV_ORG_DOT3; const char med[] = LLDP_TLV_ORG_MED; const char dcbx[] = LLDP_TLV_ORG_DCBX; unsigned char orgid[3]; int length, gotend = 0, ttl_received = 0; int tlv_size, tlv_type, tlv_subtype; u_int8_t *pos, *tlv; char *b; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan = NULL; int vlan_len; struct lldpd_ppvid *ppvid; struct lldpd_pi *pi = NULL; #endif struct lldpd_mgmt *mgmt; int af; u_int8_t addr_str_length, addr_str_buffer[32]; u_int8_t addr_family, addr_length, *addr_ptr, iface_subtype; u_int32_t iface_number, iface; #ifdef ENABLE_CUSTOM struct lldpd_custom *custom = NULL; #endif log_debug("lldp", "receive LLDP PDU on %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("lldp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("lldp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); TAILQ_INIT(&port->p_ppvids); TAILQ_INIT(&port->p_pids); #endif #ifdef ENABLE_CUSTOM TAILQ_INIT(&port->p_custom_list); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t)) { log_warnx("lldp", "too short frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(lldpaddr, ETHER_ADDR_LEN) != 0) { log_info("lldp", "frame not targeted at LLDP multicast address received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(ETHER_ADDR_LEN); /* Skip source 
address */ if (PEEK_UINT16 != ETHERTYPE_LLDP) { log_info("lldp", "non LLDP frame received on %s", hardware->h_ifname); goto malformed; } while (length && (!gotend)) { if (length < 2) { log_warnx("lldp", "tlv header too short received on %s", hardware->h_ifname); goto malformed; } tlv_size = PEEK_UINT16; tlv_type = tlv_size >> 9; tlv_size = tlv_size & 0x1ff; (void)PEEK_SAVE(tlv); if (length < tlv_size) { log_warnx("lldp", "frame too short for tlv received on %s", hardware->h_ifname); goto malformed; } switch (tlv_type) { case LLDP_TLV_END: if (tlv_size != 0) { log_warnx("lldp", "lldp end received with size not null on %s", hardware->h_ifname); goto malformed; } if (length) log_debug("lldp", "extra data after lldp end on %s", hardware->h_ifname); gotend = 1; break; case LLDP_TLV_CHASSIS_ID: case LLDP_TLV_PORT_ID: CHECK_TLV_SIZE(2, "Port Id"); tlv_subtype = PEEK_UINT8; if ((tlv_subtype == 0) || (tlv_subtype > 7)) { log_warnx("lldp", "unknown subtype for tlv id received on %s", hardware->h_ifname); goto malformed; } if ((b = (char *)calloc(1, tlv_size - 1)) == NULL) { log_warn("lldp", "unable to allocate memory for id tlv " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size - 1); if (tlv_type == LLDP_TLV_PORT_ID) { port->p_id_subtype = tlv_subtype; port->p_id = b; port->p_id_len = tlv_size - 1; } else { chassis->c_id_subtype = tlv_subtype; chassis->c_id = b; chassis->c_id_len = tlv_size - 1; } break; case LLDP_TLV_TTL: CHECK_TLV_SIZE(2, "TTL"); chassis->c_ttl = PEEK_UINT16; ttl_received = 1; break; case LLDP_TLV_PORT_DESCR: case LLDP_TLV_SYSTEM_NAME: case LLDP_TLV_SYSTEM_DESCR: if (tlv_size < 1) { log_debug("lldp", "empty tlv received on %s", hardware->h_ifname); break; } if ((b = (char *)calloc(1, tlv_size + 1)) == NULL) { log_warn("lldp", "unable to allocate memory for string tlv " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size); if (tlv_type == LLDP_TLV_PORT_DESCR) port->p_descr = b; else if (tlv_type == LLDP_TLV_SYSTEM_NAME) chassis->c_name = b; else chassis->c_descr = b; break; case LLDP_TLV_SYSTEM_CAP: CHECK_TLV_SIZE(4, "System capabilities"); chassis->c_cap_available = PEEK_UINT16; chassis->c_cap_enabled = PEEK_UINT16; break; case LLDP_TLV_MGMT_ADDR: CHECK_TLV_SIZE(1, "Management address"); addr_str_length = PEEK_UINT8; if (addr_str_length > sizeof(addr_str_buffer)) { log_warnx("lldp", "too large management address on %s", hardware->h_ifname); goto malformed; } CHECK_TLV_SIZE(1 + addr_str_length, "Management address"); PEEK_BYTES(addr_str_buffer, addr_str_length); addr_length = addr_str_length - 1; addr_family = addr_str_buffer[0]; addr_ptr = &addr_str_buffer[1]; CHECK_TLV_SIZE(1 + addr_str_length + 5, "Management address"); iface_subtype = PEEK_UINT8; iface_number = PEEK_UINT32; af = lldpd_af_from_lldp_proto(addr_family); if (af == LLDPD_AF_UNSPEC) break; if (iface_subtype == LLDP_MGMT_IFACE_IFINDEX) iface = iface_number; else iface = 0; mgmt = lldpd_alloc_mgmt(af, addr_ptr, addr_length, iface); if (mgmt == NULL) { assert(errno == ENOMEM); log_warn("lldp", "unable to allocate memory " "for management address"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); break; case LLDP_TLV_ORG: CHECK_TLV_SIZE(1 + (int)sizeof(orgid), "Organisational"); PEEK_BYTES(orgid, sizeof(orgid)); tlv_subtype = PEEK_UINT8; if (memcmp(dot1, orgid, sizeof(orgid)) == 0) { #ifndef ENABLE_DOT1 hardware->h_rx_unrecognized_cnt++; #else /* Dot1 */ switch (tlv_subtype) { case LLDP_TLV_DOT1_VLANNAME: CHECK_TLV_SIZE(7, "VLAN"); if 
((vlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("lldp", "unable to alloc vlan " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } vlan->v_vid = PEEK_UINT16; vlan_len = PEEK_UINT8; CHECK_TLV_SIZE(7 + vlan_len, "VLAN"); if ((vlan->v_name = (char *)calloc(1, vlan_len + 1)) == NULL) { log_warn("lldp", "unable to alloc vlan name for " "tlv received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(vlan->v_name, vlan_len); TAILQ_INSERT_TAIL(&port->p_vlans, vlan, v_entries); vlan = NULL; break; case LLDP_TLV_DOT1_PVID: CHECK_TLV_SIZE(6, "PVID"); port->p_pvid = PEEK_UINT16; break; case LLDP_TLV_DOT1_PPVID: CHECK_TLV_SIZE(7, "PPVID"); /* validation needed */ /* PPVID has to be unique if more than one PPVID TLVs are received - discard if duplicate */ /* if support bit is not set and enabled bit is set - PPVID TLV is considered error and discarded */ /* if PPVID > 4096 - bad and discard */ if ((ppvid = (struct lldpd_ppvid *)calloc(1, sizeof(struct lldpd_ppvid))) == NULL) { log_warn("lldp", "unable to alloc ppvid " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } ppvid->p_cap_status = PEEK_UINT8; ppvid->p_ppvid = PEEK_UINT16; TAILQ_INSERT_TAIL(&port->p_ppvids, ppvid, p_entries); break; case LLDP_TLV_DOT1_PI: /* validation needed */ /* PI has to be unique if more than one PI TLVs are received - discard if duplicate ?? */ CHECK_TLV_SIZE(5, "PI"); if ((pi = (struct lldpd_pi *)calloc(1, sizeof(struct lldpd_pi))) == NULL) { log_warn("lldp", "unable to alloc PI " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } pi->p_pi_len = PEEK_UINT8; CHECK_TLV_SIZE(5 + pi->p_pi_len, "PI"); if ((pi->p_pi = (char *)calloc(1, pi->p_pi_len)) == NULL) { log_warn("lldp", "unable to alloc pid name for " "tlv received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(pi->p_pi, pi->p_pi_len); TAILQ_INSERT_TAIL(&port->p_pids, pi, p_entries); pi = NULL; break; default: /* Unknown Dot1 TLV, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif } else if (memcmp(dot3, orgid, sizeof(orgid)) == 0) { #ifndef ENABLE_DOT3 hardware->h_rx_unrecognized_cnt++; #else /* Dot3 */ switch (tlv_subtype) { case LLDP_TLV_DOT3_MAC: CHECK_TLV_SIZE(9, "MAC/PHY"); port->p_macphy.autoneg_support = PEEK_UINT8; port->p_macphy.autoneg_enabled = (port->p_macphy.autoneg_support & 0x2) >> 1; port->p_macphy.autoneg_support = port->p_macphy.autoneg_support & 0x1; port->p_macphy.autoneg_advertised = PEEK_UINT16; port->p_macphy.mau_type = PEEK_UINT16; break; case LLDP_TLV_DOT3_LA: CHECK_TLV_SIZE(9, "Link aggregation"); PEEK_DISCARD_UINT8; port->p_aggregid = PEEK_UINT32; break; case LLDP_TLV_DOT3_MFS: CHECK_TLV_SIZE(6, "MFS"); port->p_mfs = PEEK_UINT16; break; case LLDP_TLV_DOT3_POWER: CHECK_TLV_SIZE(7, "Power"); port->p_power.devicetype = PEEK_UINT8; port->p_power.supported = (port->p_power.devicetype & 0x2) >> 1; port->p_power.enabled = (port->p_power.devicetype & 0x4) >> 2; port->p_power.paircontrol = (port->p_power.devicetype & 0x8) >> 3; port->p_power.devicetype = (port->p_power.devicetype & 0x1)? LLDP_DOT3_POWER_PSE:LLDP_DOT3_POWER_PD; port->p_power.pairs = PEEK_UINT8; port->p_power.class = PEEK_UINT8; /* 802.3at? */ if (tlv_size >= 12) { port->p_power.powertype = PEEK_UINT8; port->p_power.source = (port->p_power.powertype & (1<<5 | 1<<4)) >> 4; port->p_power.priority = (port->p_power.powertype & (1<<1 | 1<<0)); port->p_power.powertype = (port->p_power.powertype & (1<<7))? 
LLDP_DOT3_POWER_8023AT_TYPE1: LLDP_DOT3_POWER_8023AT_TYPE2; port->p_power.requested = PEEK_UINT16; port->p_power.allocated = PEEK_UINT16; } else port->p_power.powertype = LLDP_DOT3_POWER_8023AT_OFF; break; default: /* Unknown Dot3 TLV, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif } else if (memcmp(med, orgid, sizeof(orgid)) == 0) { /* LLDP-MED */ #ifndef ENABLE_LLDPMED hardware->h_rx_unrecognized_cnt++; #else u_int32_t policy; unsigned loctype; unsigned power; switch (tlv_subtype) { case LLDP_TLV_MED_CAP: CHECK_TLV_SIZE(7, "LLDP-MED capabilities"); chassis->c_med_cap_available = PEEK_UINT16; chassis->c_med_type = PEEK_UINT8; port->p_med_cap_enabled |= LLDP_MED_CAP_CAP; break; case LLDP_TLV_MED_POLICY: CHECK_TLV_SIZE(8, "LLDP-MED policy"); policy = PEEK_UINT32; if (((policy >> 24) < 1) || ((policy >> 24) > LLDP_MED_APPTYPE_LAST)) { log_info("lldp", "unknown policy field %d " "received on %s", policy, hardware->h_ifname); break; } port->p_med_policy[(policy >> 24) - 1].type = (policy >> 24); port->p_med_policy[(policy >> 24) - 1].unknown = ((policy & 0x800000) != 0); port->p_med_policy[(policy >> 24) - 1].tagged = ((policy & 0x400000) != 0); port->p_med_policy[(policy >> 24) - 1].vid = (policy & 0x001FFE00) >> 9; port->p_med_policy[(policy >> 24) - 1].priority = (policy & 0x1C0) >> 6; port->p_med_policy[(policy >> 24) - 1].dscp = policy & 0x3F; port->p_med_cap_enabled |= LLDP_MED_CAP_POLICY; break; case LLDP_TLV_MED_LOCATION: CHECK_TLV_SIZE(5, "LLDP-MED Location"); loctype = PEEK_UINT8; if ((loctype < 1) || (loctype > LLDP_MED_LOCFORMAT_LAST)) { log_info("lldp", "unknown location type " "received on %s", hardware->h_ifname); break; } if ((port->p_med_location[loctype - 1].data = (char*)malloc(tlv_size - 5)) == NULL) { log_warn("lldp", "unable to allocate memory " "for LLDP-MED location for " "frame received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(port->p_med_location[loctype - 1].data, tlv_size - 5); port->p_med_location[loctype - 1].data_len = tlv_size - 5; port->p_med_location[loctype - 1].format = loctype; port->p_med_cap_enabled |= LLDP_MED_CAP_LOCATION; break; case LLDP_TLV_MED_MDI: CHECK_TLV_SIZE(7, "LLDP-MED PoE-MDI"); power = PEEK_UINT8; switch (power & 0xC0) { case 0x0: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PSE; port->p_med_cap_enabled |= LLDP_MED_CAP_MDI_PSE; switch (power & 0x30) { case 0x0: port->p_med_power.source = LLDP_MED_POW_SOURCE_UNKNOWN; break; case 0x10: port->p_med_power.source = LLDP_MED_POW_SOURCE_PRIMARY; break; case 0x20: port->p_med_power.source = LLDP_MED_POW_SOURCE_BACKUP; break; default: port->p_med_power.source = LLDP_MED_POW_SOURCE_RESERVED; } break; case 0x40: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PD; port->p_med_cap_enabled |= LLDP_MED_CAP_MDI_PD; switch (power & 0x30) { case 0x0: port->p_med_power.source = LLDP_MED_POW_SOURCE_UNKNOWN; break; case 0x10: port->p_med_power.source = LLDP_MED_POW_SOURCE_PSE; break; case 0x20: port->p_med_power.source = LLDP_MED_POW_SOURCE_LOCAL; break; default: port->p_med_power.source = LLDP_MED_POW_SOURCE_BOTH; } break; default: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_RESERVED; } if ((power & 0x0F) > LLDP_MED_POW_PRIO_LOW) port->p_med_power.priority = LLDP_MED_POW_PRIO_UNKNOWN; else port->p_med_power.priority = power & 0x0F; port->p_med_power.val = PEEK_UINT16; break; case LLDP_TLV_MED_IV_HW: case LLDP_TLV_MED_IV_SW: case LLDP_TLV_MED_IV_FW: case LLDP_TLV_MED_IV_SN: case LLDP_TLV_MED_IV_MANUF: case LLDP_TLV_MED_IV_MODEL: case LLDP_TLV_MED_IV_ASSET: if (tlv_size <= 4) 
b = NULL; else { if ((b = (char*)malloc(tlv_size - 3)) == NULL) { log_warn("lldp", "unable to allocate " "memory for LLDP-MED " "inventory for frame " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size - 4); b[tlv_size - 4] = '\0'; } switch (tlv_subtype) { case LLDP_TLV_MED_IV_HW: chassis->c_med_hw = b; break; case LLDP_TLV_MED_IV_FW: chassis->c_med_fw = b; break; case LLDP_TLV_MED_IV_SW: chassis->c_med_sw = b; break; case LLDP_TLV_MED_IV_SN: chassis->c_med_sn = b; break; case LLDP_TLV_MED_IV_MANUF: chassis->c_med_manuf = b; break; case LLDP_TLV_MED_IV_MODEL: chassis->c_med_model = b; break; case LLDP_TLV_MED_IV_ASSET: chassis->c_med_asset = b; break; } port->p_med_cap_enabled |= LLDP_MED_CAP_IV; break; default: /* Unknown LLDP MED, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif /* ENABLE_LLDPMED */ } else if (memcmp(dcbx, orgid, sizeof(orgid)) == 0) { log_debug("lldp", "unsupported DCBX tlv received on %s - ignore", hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } else { log_debug("lldp", "unknown org tlv [%02x:%02x:%02x] received on %s", orgid[0], orgid[1], orgid[2], hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; #ifdef ENABLE_CUSTOM custom = (struct lldpd_custom*)calloc(1, sizeof(struct lldpd_custom)); if (!custom) { log_warn("lldp", "unable to allocate memory for custom TLV"); goto malformed; } custom->oui_info_len = tlv_size > 4 ? tlv_size - 4 : 0; memcpy(custom->oui, orgid, sizeof(custom->oui)); custom->subtype = tlv_subtype; if (custom->oui_info_len > 0) { custom->oui_info = malloc(custom->oui_info_len); if (!custom->oui_info) { log_warn("lldp", "unable to allocate memory for custom TLV data"); goto malformed; } PEEK_BYTES(custom->oui_info, custom->oui_info_len); } TAILQ_INSERT_TAIL(&port->p_custom_list, custom, next); custom = NULL; #endif } break; default: log_warnx("lldp", "unknown tlv (%d) received on %s", tlv_type, hardware->h_ifname); goto malformed; } if (pos > tlv + tlv_size) { log_warnx("lldp", "BUG: already past TLV!"); goto malformed; } PEEK_DISCARD(tlv + tlv_size - pos); } /* Some random check */ if ((chassis->c_id == NULL) || (port->p_id == NULL) || (!ttl_received) || (gotend == 0)) { log_warnx("lldp", "some mandatory tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: #ifdef ENABLE_CUSTOM free(custom); #endif #ifdef ENABLE_DOT1 free(vlan); free(pi); #endif lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; }
./CrossVul/dataset_final_sorted/CWE-617/c/bad_1770_3
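/*
 * Editor's note with an illustrative sketch, not part of the dataset files:
 * the "bad" variant above contains a reachable assertion (CWE-617) --
 * assert(proto != LLDP_MGMT_ADDR_NONE) in _lldp_send() aborts the whole
 * daemon if a management address with an unsupported address family ever
 * reaches the TLV writer. The "good" variant that follows replaces the
 * assert with a runtime check that skips the entry. The standalone program
 * below mirrors that hardening pattern; the enum values and the function
 * emit_mgmt_addr() are invented for illustration only.
 */
#include <stdio.h>

enum { ADDR_NONE = 0, ADDR_IP4 = 1, ADDR_IP6 = 2 };

/* Defensive variant: unknown address families are skipped, not assert()ed. */
static int emit_mgmt_addr(int proto)
{
    if (proto == ADDR_NONE)   /* instead of: assert(proto != ADDR_NONE); */
        return -1;            /* skip this entry; keep the daemon alive */
    printf("emitting management address, proto=%d\n", proto);
    return 0;
}

int main(void)
{
    size_t i;
    int families[] = { ADDR_IP4, ADDR_NONE, ADDR_IP6 };

    for (i = 0; i < sizeof(families) / sizeof(families[0]); i++)
        (void) emit_mgmt_addr(families[i]);
    return 0;
}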
crossvul-cpp_data_good_1770_3
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #include <unistd.h> #include <errno.h> #include <time.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/ioctl.h> inline static int lldpd_af_to_lldp_proto(int af) { switch (af) { case LLDPD_AF_IPV4: return LLDP_MGMT_ADDR_IP4; case LLDPD_AF_IPV6: return LLDP_MGMT_ADDR_IP6; default: return LLDP_MGMT_ADDR_NONE; } } inline static int lldpd_af_from_lldp_proto(int proto) { switch (proto) { case LLDP_MGMT_ADDR_IP4: return LLDPD_AF_IPV4; case LLDP_MGMT_ADDR_IP6: return LLDPD_AF_IPV6; default: return LLDPD_AF_UNSPEC; } } static int _lldp_send(struct lldpd *global, struct lldpd_hardware *hardware, u_int8_t c_id_subtype, char *c_id, int c_id_len, u_int8_t p_id_subtype, char *p_id, int p_id_len, int shutdown) { struct lldpd_port *port; struct lldpd_chassis *chassis; struct lldpd_frame *frame; int length; u_int8_t *packet, *pos, *tlv; struct lldpd_mgmt *mgmt; int proto; u_int8_t mcastaddr[] = LLDP_MULTICAST_ADDR; #ifdef ENABLE_DOT1 const u_int8_t dot1[] = LLDP_TLV_ORG_DOT1; struct lldpd_vlan *vlan; struct lldpd_ppvid *ppvid; struct lldpd_pi *pi; #endif #ifdef ENABLE_DOT3 const u_int8_t dot3[] = LLDP_TLV_ORG_DOT3; #endif #ifdef ENABLE_LLDPMED int i; const u_int8_t med[] = LLDP_TLV_ORG_MED; #endif #ifdef ENABLE_CUSTOM struct lldpd_custom *custom; #endif port = &hardware->h_lport; chassis = port->p_chassis; length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( /* LLDP multicast address */ POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && /* Source MAC address */ POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && /* LLDP frame */ POKE_UINT16(ETHERTYPE_LLDP))) goto toobig; /* Chassis ID */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_CHASSIS_ID) && POKE_UINT8(c_id_subtype) && POKE_BYTES(c_id, c_id_len) && POKE_END_LLDP_TLV)) goto toobig; /* Port ID */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_PORT_ID) && POKE_UINT8(p_id_subtype) && POKE_BYTES(p_id, p_id_len) && POKE_END_LLDP_TLV)) goto toobig; /* Time to live */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_TTL) && POKE_UINT16(shutdown?0:chassis->c_ttl) && POKE_END_LLDP_TLV)) goto toobig; if (shutdown) goto end; /* System name */ if (chassis->c_name && *chassis->c_name != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_NAME) && POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) && POKE_END_LLDP_TLV)) goto toobig; } /* System description (skip it if empty) */ if (chassis->c_descr && *chassis->c_descr != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_DESCR) && POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)) && POKE_END_LLDP_TLV)) goto toobig; } /* System capabilities */ if (global->g_config.c_cap_advertise && 
chassis->c_cap_available) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_CAP) && POKE_UINT16(chassis->c_cap_available) && POKE_UINT16(chassis->c_cap_enabled) && POKE_END_LLDP_TLV)) goto toobig; } /* Management addresses */ TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { proto = lldpd_af_to_lldp_proto(mgmt->m_family); if (proto == LLDP_MGMT_ADDR_NONE) continue; if (!( POKE_START_LLDP_TLV(LLDP_TLV_MGMT_ADDR) && /* Size of the address, including its type */ POKE_UINT8(mgmt->m_addrsize + 1) && POKE_UINT8(proto) && POKE_BYTES(&mgmt->m_addr, mgmt->m_addrsize))) goto toobig; /* Interface port type, OID */ if (mgmt->m_iface == 0) { if (!( /* We don't know the management interface */ POKE_UINT8(LLDP_MGMT_IFACE_UNKNOWN) && POKE_UINT32(0))) goto toobig; } else { if (!( /* We have the index of the management interface */ POKE_UINT8(LLDP_MGMT_IFACE_IFINDEX) && POKE_UINT32(mgmt->m_iface))) goto toobig; } if (!( /* We don't provide an OID for management */ POKE_UINT8(0) && POKE_END_LLDP_TLV)) goto toobig; } /* Port description */ if (port->p_descr && *port->p_descr != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_PORT_DESCR) && POKE_BYTES(port->p_descr, strlen(port->p_descr)) && POKE_END_LLDP_TLV)) goto toobig; } #ifdef ENABLE_DOT1 /* Port VLAN ID */ if(port->p_pvid != 0) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PVID) && POKE_UINT16(port->p_pvid) && POKE_END_LLDP_TLV)) { goto toobig; } } /* Port and Protocol VLAN IDs */ TAILQ_FOREACH(ppvid, &port->p_ppvids, p_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PPVID) && POKE_UINT8(ppvid->p_cap_status) && POKE_UINT16(ppvid->p_ppvid) && POKE_END_LLDP_TLV)) { goto toobig; } } /* VLANs */ TAILQ_FOREACH(vlan, &port->p_vlans, v_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_VLANNAME) && POKE_UINT16(vlan->v_vid) && POKE_UINT8(strlen(vlan->v_name)) && POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) && POKE_END_LLDP_TLV)) goto toobig; } /* Protocol Identities */ TAILQ_FOREACH(pi, &port->p_pids, p_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PI) && POKE_UINT8(pi->p_pi_len) && POKE_BYTES(pi->p_pi, pi->p_pi_len) && POKE_END_LLDP_TLV)) goto toobig; } #endif #ifdef ENABLE_DOT3 /* Aggregation status */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_LA) && /* Bit 0 = capability ; Bit 1 = status */ POKE_UINT8((port->p_aggregid) ? 
3:1) && POKE_UINT32(port->p_aggregid) && POKE_END_LLDP_TLV)) goto toobig; /* MAC/PHY */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_MAC) && POKE_UINT8(port->p_macphy.autoneg_support | (port->p_macphy.autoneg_enabled << 1)) && POKE_UINT16(port->p_macphy.autoneg_advertised) && POKE_UINT16(port->p_macphy.mau_type) && POKE_END_LLDP_TLV)) goto toobig; /* MFS */ if (port->p_mfs) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_MFS) && POKE_UINT16(port->p_mfs) && POKE_END_LLDP_TLV)) goto toobig; } /* Power */ if (port->p_power.devicetype) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_POWER) && POKE_UINT8(( (((2 - port->p_power.devicetype) %(1<< 1))<<0) | (( port->p_power.supported %(1<< 1))<<1) | (( port->p_power.enabled %(1<< 1))<<2) | (( port->p_power.paircontrol %(1<< 1))<<3))) && POKE_UINT8(port->p_power.pairs) && POKE_UINT8(port->p_power.class))) goto toobig; /* 802.3at */ if (port->p_power.powertype != LLDP_DOT3_POWER_8023AT_OFF) { if (!( POKE_UINT8(( (((port->p_power.powertype == LLDP_DOT3_POWER_8023AT_TYPE1)?1:0) << 7) | (((port->p_power.devicetype == LLDP_DOT3_POWER_PSE)?0:1) << 6) | ((port->p_power.source %(1<< 2))<<4) | ((port->p_power.priority %(1<< 2))<<0))) && POKE_UINT16(port->p_power.requested) && POKE_UINT16(port->p_power.allocated))) goto toobig; } if (!(POKE_END_LLDP_TLV)) goto toobig; } #endif #ifdef ENABLE_LLDPMED if (port->p_med_cap_enabled) { /* LLDP-MED cap */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_CAP) && POKE_UINT16(chassis->c_med_cap_available) && POKE_UINT8(chassis->c_med_type) && POKE_END_LLDP_TLV)) goto toobig; /* LLDP-MED inventory */ #define LLDP_INVENTORY(value, subtype) \ if (value) { \ if (!( \ POKE_START_LLDP_TLV(LLDP_TLV_ORG) && \ POKE_BYTES(med, sizeof(med)) && \ POKE_UINT8(subtype) && \ POKE_BYTES(value, \ (strlen(value)>32)?32:strlen(value)) && \ POKE_END_LLDP_TLV)) \ goto toobig; \ } if (port->p_med_cap_enabled & LLDP_MED_CAP_IV) { LLDP_INVENTORY(chassis->c_med_hw, LLDP_TLV_MED_IV_HW); LLDP_INVENTORY(chassis->c_med_fw, LLDP_TLV_MED_IV_FW); LLDP_INVENTORY(chassis->c_med_sw, LLDP_TLV_MED_IV_SW); LLDP_INVENTORY(chassis->c_med_sn, LLDP_TLV_MED_IV_SN); LLDP_INVENTORY(chassis->c_med_manuf, LLDP_TLV_MED_IV_MANUF); LLDP_INVENTORY(chassis->c_med_model, LLDP_TLV_MED_IV_MODEL); LLDP_INVENTORY(chassis->c_med_asset, LLDP_TLV_MED_IV_ASSET); } /* LLDP-MED location */ for (i = 0; i < LLDP_MED_LOCFORMAT_LAST; i++) { if (port->p_med_location[i].format == i + 1) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_LOCATION) && POKE_UINT8(port->p_med_location[i].format) && POKE_BYTES(port->p_med_location[i].data, port->p_med_location[i].data_len) && POKE_END_LLDP_TLV)) goto toobig; } } /* LLDP-MED network policy */ for (i = 0; i < LLDP_MED_APPTYPE_LAST; i++) { if (port->p_med_policy[i].type == i + 1) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_POLICY) && POKE_UINT32(( ((port->p_med_policy[i].type %(1<< 8))<<24) | ((port->p_med_policy[i].unknown %(1<< 1))<<23) | ((port->p_med_policy[i].tagged %(1<< 1))<<22) | /*((0 %(1<< 1))<<21) |*/ ((port->p_med_policy[i].vid %(1<<12))<< 9) | ((port->p_med_policy[i].priority %(1<< 3))<< 6) | ((port->p_med_policy[i].dscp %(1<< 6))<< 0) )) && POKE_END_LLDP_TLV)) goto toobig; } } /* LLDP-MED POE-MDI */ if 
((port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PSE) || (port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PD)) { int devicetype = 0, source = 0; if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_MDI))) goto toobig; switch (port->p_med_power.devicetype) { case LLDP_MED_POW_TYPE_PSE: devicetype = 0; switch (port->p_med_power.source) { case LLDP_MED_POW_SOURCE_PRIMARY: source = 1; break; case LLDP_MED_POW_SOURCE_BACKUP: source = 2; break; case LLDP_MED_POW_SOURCE_RESERVED: source = 3; break; default: source = 0; break; } break; case LLDP_MED_POW_TYPE_PD: devicetype = 1; switch (port->p_med_power.source) { case LLDP_MED_POW_SOURCE_PSE: source = 1; break; case LLDP_MED_POW_SOURCE_LOCAL: source = 2; break; case LLDP_MED_POW_SOURCE_BOTH: source = 3; break; default: source = 0; break; } break; } if (!( POKE_UINT8(( ((devicetype %(1<< 2))<<6) | ((source %(1<< 2))<<4) | ((port->p_med_power.priority %(1<< 4))<<0) )) && POKE_UINT16(port->p_med_power.val) && POKE_END_LLDP_TLV)) goto toobig; } } #endif #ifdef ENABLE_CUSTOM TAILQ_FOREACH(custom, &port->p_custom_list, next) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(custom->oui, sizeof(custom->oui)) && POKE_UINT8(custom->subtype) && POKE_BYTES(custom->oui_info, custom->oui_info_len) && POKE_END_LLDP_TLV)) goto toobig; } #endif end: /* END */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_END) && POKE_END_LLDP_TLV)) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, pos - packet) == -1) { log_warn("lldp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } hardware->h_tx_cnt++; /* We assume that LLDP frame is the reference */ if (!shutdown && (frame = (struct lldpd_frame*)malloc( sizeof(int) + pos - packet)) != NULL) { frame->size = pos - packet; memcpy(&frame->frame, packet, frame->size); if ((hardware->h_lport.p_lastframe == NULL) || (hardware->h_lport.p_lastframe->size != frame->size) || (memcmp(hardware->h_lport.p_lastframe->frame, frame->frame, frame->size) != 0)) { free(hardware->h_lport.p_lastframe); hardware->h_lport.p_lastframe = frame; hardware->h_lport.p_lastchange = time(NULL); } else free(frame); } free(packet); return 0; toobig: free(packet); return E2BIG; } /* Send a shutdown LLDPDU. */ int lldp_send_shutdown(struct lldpd *global, struct lldpd_hardware *hardware) { if (hardware->h_lchassis_previous_id == NULL || hardware->h_lport_previous_id == NULL) return 0; return _lldp_send(global, hardware, hardware->h_lchassis_previous_id_subtype, hardware->h_lchassis_previous_id, hardware->h_lchassis_previous_id_len, hardware->h_lport_previous_id_subtype, hardware->h_lport_previous_id, hardware->h_lport_previous_id_len, 1); } int lldp_send(struct lldpd *global, struct lldpd_hardware *hardware) { struct lldpd_port *port = &hardware->h_lport; struct lldpd_chassis *chassis = port->p_chassis; int ret; /* Check if we have a change. 
*/ if (hardware->h_lchassis_previous_id != NULL && hardware->h_lport_previous_id != NULL && (hardware->h_lchassis_previous_id_subtype != chassis->c_id_subtype || hardware->h_lchassis_previous_id_len != chassis->c_id_len || hardware->h_lport_previous_id_subtype != port->p_id_subtype || hardware->h_lport_previous_id_len != port->p_id_len || memcmp(hardware->h_lchassis_previous_id, chassis->c_id, chassis->c_id_len) || memcmp(hardware->h_lport_previous_id, port->p_id, port->p_id_len))) { log_info("lldp", "MSAP has changed for port %s, sending a shutdown LLDPDU", hardware->h_ifname); if ((ret = lldp_send_shutdown(global, hardware)) != 0) return ret; } log_debug("lldp", "send LLDP PDU to %s", hardware->h_ifname); if ((ret = _lldp_send(global, hardware, chassis->c_id_subtype, chassis->c_id, chassis->c_id_len, port->p_id_subtype, port->p_id, port->p_id_len, 0)) != 0) return ret; /* Record current chassis and port ID */ free(hardware->h_lchassis_previous_id); hardware->h_lchassis_previous_id_subtype = chassis->c_id_subtype; hardware->h_lchassis_previous_id_len = chassis->c_id_len; if ((hardware->h_lchassis_previous_id = malloc(chassis->c_id_len)) != NULL) memcpy(hardware->h_lchassis_previous_id, chassis->c_id, chassis->c_id_len); free(hardware->h_lport_previous_id); hardware->h_lport_previous_id_subtype = port->p_id_subtype; hardware->h_lport_previous_id_len = port->p_id_len; if ((hardware->h_lport_previous_id = malloc(port->p_id_len)) != NULL) memcpy(hardware->h_lport_previous_id, port->p_id, port->p_id_len); return 0; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_size < (x)) { \ log_warnx("lldp", name " TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) int lldp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; const char lldpaddr[] = LLDP_MULTICAST_ADDR; const char dot1[] = LLDP_TLV_ORG_DOT1; const char dot3[] = LLDP_TLV_ORG_DOT3; const char med[] = LLDP_TLV_ORG_MED; const char dcbx[] = LLDP_TLV_ORG_DCBX; unsigned char orgid[3]; int length, gotend = 0, ttl_received = 0; int tlv_size, tlv_type, tlv_subtype; u_int8_t *pos, *tlv; char *b; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan = NULL; int vlan_len; struct lldpd_ppvid *ppvid; struct lldpd_pi *pi = NULL; #endif struct lldpd_mgmt *mgmt; int af; u_int8_t addr_str_length, addr_str_buffer[32]; u_int8_t addr_family, addr_length, *addr_ptr, iface_subtype; u_int32_t iface_number, iface; #ifdef ENABLE_CUSTOM struct lldpd_custom *custom = NULL; #endif log_debug("lldp", "receive LLDP PDU on %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("lldp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("lldp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); TAILQ_INIT(&port->p_ppvids); TAILQ_INIT(&port->p_pids); #endif #ifdef ENABLE_CUSTOM TAILQ_INIT(&port->p_custom_list); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t)) { log_warnx("lldp", "too short frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(lldpaddr, ETHER_ADDR_LEN) != 0) { log_info("lldp", "frame not targeted at LLDP multicast address received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(ETHER_ADDR_LEN); /* Skip source 
address */ if (PEEK_UINT16 != ETHERTYPE_LLDP) { log_info("lldp", "non LLDP frame received on %s", hardware->h_ifname); goto malformed; } while (length && (!gotend)) { if (length < 2) { log_warnx("lldp", "tlv header too short received on %s", hardware->h_ifname); goto malformed; } tlv_size = PEEK_UINT16; tlv_type = tlv_size >> 9; tlv_size = tlv_size & 0x1ff; (void)PEEK_SAVE(tlv); if (length < tlv_size) { log_warnx("lldp", "frame too short for tlv received on %s", hardware->h_ifname); goto malformed; } switch (tlv_type) { case LLDP_TLV_END: if (tlv_size != 0) { log_warnx("lldp", "lldp end received with size not null on %s", hardware->h_ifname); goto malformed; } if (length) log_debug("lldp", "extra data after lldp end on %s", hardware->h_ifname); gotend = 1; break; case LLDP_TLV_CHASSIS_ID: case LLDP_TLV_PORT_ID: CHECK_TLV_SIZE(2, "Port Id"); tlv_subtype = PEEK_UINT8; if ((tlv_subtype == 0) || (tlv_subtype > 7)) { log_warnx("lldp", "unknown subtype for tlv id received on %s", hardware->h_ifname); goto malformed; } if ((b = (char *)calloc(1, tlv_size - 1)) == NULL) { log_warn("lldp", "unable to allocate memory for id tlv " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size - 1); if (tlv_type == LLDP_TLV_PORT_ID) { port->p_id_subtype = tlv_subtype; port->p_id = b; port->p_id_len = tlv_size - 1; } else { chassis->c_id_subtype = tlv_subtype; chassis->c_id = b; chassis->c_id_len = tlv_size - 1; } break; case LLDP_TLV_TTL: CHECK_TLV_SIZE(2, "TTL"); chassis->c_ttl = PEEK_UINT16; ttl_received = 1; break; case LLDP_TLV_PORT_DESCR: case LLDP_TLV_SYSTEM_NAME: case LLDP_TLV_SYSTEM_DESCR: if (tlv_size < 1) { log_debug("lldp", "empty tlv received on %s", hardware->h_ifname); break; } if ((b = (char *)calloc(1, tlv_size + 1)) == NULL) { log_warn("lldp", "unable to allocate memory for string tlv " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size); if (tlv_type == LLDP_TLV_PORT_DESCR) port->p_descr = b; else if (tlv_type == LLDP_TLV_SYSTEM_NAME) chassis->c_name = b; else chassis->c_descr = b; break; case LLDP_TLV_SYSTEM_CAP: CHECK_TLV_SIZE(4, "System capabilities"); chassis->c_cap_available = PEEK_UINT16; chassis->c_cap_enabled = PEEK_UINT16; break; case LLDP_TLV_MGMT_ADDR: CHECK_TLV_SIZE(1, "Management address"); addr_str_length = PEEK_UINT8; if (addr_str_length > sizeof(addr_str_buffer)) { log_warnx("lldp", "too large management address on %s", hardware->h_ifname); goto malformed; } CHECK_TLV_SIZE(1 + addr_str_length, "Management address"); PEEK_BYTES(addr_str_buffer, addr_str_length); addr_length = addr_str_length - 1; addr_family = addr_str_buffer[0]; addr_ptr = &addr_str_buffer[1]; CHECK_TLV_SIZE(1 + addr_str_length + 5, "Management address"); iface_subtype = PEEK_UINT8; iface_number = PEEK_UINT32; af = lldpd_af_from_lldp_proto(addr_family); if (af == LLDPD_AF_UNSPEC) break; if (iface_subtype == LLDP_MGMT_IFACE_IFINDEX) iface = iface_number; else iface = 0; mgmt = lldpd_alloc_mgmt(af, addr_ptr, addr_length, iface); if (mgmt == NULL) { if (errno == ENOMEM) log_warn("lldp", "unable to allocate memory " "for management address"); else log_warn("lldp", "too large management address " "received on %s", hardware->h_ifname); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); break; case LLDP_TLV_ORG: CHECK_TLV_SIZE(1 + (int)sizeof(orgid), "Organisational"); PEEK_BYTES(orgid, sizeof(orgid)); tlv_subtype = PEEK_UINT8; if (memcmp(dot1, orgid, sizeof(orgid)) == 0) { #ifndef ENABLE_DOT1 hardware->h_rx_unrecognized_cnt++; #else /* Dot1 
*/ switch (tlv_subtype) { case LLDP_TLV_DOT1_VLANNAME: CHECK_TLV_SIZE(7, "VLAN"); if ((vlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("lldp", "unable to alloc vlan " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } vlan->v_vid = PEEK_UINT16; vlan_len = PEEK_UINT8; CHECK_TLV_SIZE(7 + vlan_len, "VLAN"); if ((vlan->v_name = (char *)calloc(1, vlan_len + 1)) == NULL) { log_warn("lldp", "unable to alloc vlan name for " "tlv received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(vlan->v_name, vlan_len); TAILQ_INSERT_TAIL(&port->p_vlans, vlan, v_entries); vlan = NULL; break; case LLDP_TLV_DOT1_PVID: CHECK_TLV_SIZE(6, "PVID"); port->p_pvid = PEEK_UINT16; break; case LLDP_TLV_DOT1_PPVID: CHECK_TLV_SIZE(7, "PPVID"); /* validation needed */ /* PPVID has to be unique if more than one PPVID TLVs are received - discard if duplicate */ /* if support bit is not set and enabled bit is set - PPVID TLV is considered error and discarded */ /* if PPVID > 4096 - bad and discard */ if ((ppvid = (struct lldpd_ppvid *)calloc(1, sizeof(struct lldpd_ppvid))) == NULL) { log_warn("lldp", "unable to alloc ppvid " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } ppvid->p_cap_status = PEEK_UINT8; ppvid->p_ppvid = PEEK_UINT16; TAILQ_INSERT_TAIL(&port->p_ppvids, ppvid, p_entries); break; case LLDP_TLV_DOT1_PI: /* validation needed */ /* PI has to be unique if more than one PI TLVs are received - discard if duplicate ?? */ CHECK_TLV_SIZE(5, "PI"); if ((pi = (struct lldpd_pi *)calloc(1, sizeof(struct lldpd_pi))) == NULL) { log_warn("lldp", "unable to alloc PI " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } pi->p_pi_len = PEEK_UINT8; CHECK_TLV_SIZE(5 + pi->p_pi_len, "PI"); if ((pi->p_pi = (char *)calloc(1, pi->p_pi_len)) == NULL) { log_warn("lldp", "unable to alloc pid name for " "tlv received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(pi->p_pi, pi->p_pi_len); TAILQ_INSERT_TAIL(&port->p_pids, pi, p_entries); pi = NULL; break; default: /* Unknown Dot1 TLV, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif } else if (memcmp(dot3, orgid, sizeof(orgid)) == 0) { #ifndef ENABLE_DOT3 hardware->h_rx_unrecognized_cnt++; #else /* Dot3 */ switch (tlv_subtype) { case LLDP_TLV_DOT3_MAC: CHECK_TLV_SIZE(9, "MAC/PHY"); port->p_macphy.autoneg_support = PEEK_UINT8; port->p_macphy.autoneg_enabled = (port->p_macphy.autoneg_support & 0x2) >> 1; port->p_macphy.autoneg_support = port->p_macphy.autoneg_support & 0x1; port->p_macphy.autoneg_advertised = PEEK_UINT16; port->p_macphy.mau_type = PEEK_UINT16; break; case LLDP_TLV_DOT3_LA: CHECK_TLV_SIZE(9, "Link aggregation"); PEEK_DISCARD_UINT8; port->p_aggregid = PEEK_UINT32; break; case LLDP_TLV_DOT3_MFS: CHECK_TLV_SIZE(6, "MFS"); port->p_mfs = PEEK_UINT16; break; case LLDP_TLV_DOT3_POWER: CHECK_TLV_SIZE(7, "Power"); port->p_power.devicetype = PEEK_UINT8; port->p_power.supported = (port->p_power.devicetype & 0x2) >> 1; port->p_power.enabled = (port->p_power.devicetype & 0x4) >> 2; port->p_power.paircontrol = (port->p_power.devicetype & 0x8) >> 3; port->p_power.devicetype = (port->p_power.devicetype & 0x1)? LLDP_DOT3_POWER_PSE:LLDP_DOT3_POWER_PD; port->p_power.pairs = PEEK_UINT8; port->p_power.class = PEEK_UINT8; /* 802.3at? 
*/ if (tlv_size >= 12) { port->p_power.powertype = PEEK_UINT8; port->p_power.source = (port->p_power.powertype & (1<<5 | 1<<4)) >> 4; port->p_power.priority = (port->p_power.powertype & (1<<1 | 1<<0)); port->p_power.powertype = (port->p_power.powertype & (1<<7))? LLDP_DOT3_POWER_8023AT_TYPE1: LLDP_DOT3_POWER_8023AT_TYPE2; port->p_power.requested = PEEK_UINT16; port->p_power.allocated = PEEK_UINT16; } else port->p_power.powertype = LLDP_DOT3_POWER_8023AT_OFF; break; default: /* Unknown Dot3 TLV, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif } else if (memcmp(med, orgid, sizeof(orgid)) == 0) { /* LLDP-MED */ #ifndef ENABLE_LLDPMED hardware->h_rx_unrecognized_cnt++; #else u_int32_t policy; unsigned loctype; unsigned power; switch (tlv_subtype) { case LLDP_TLV_MED_CAP: CHECK_TLV_SIZE(7, "LLDP-MED capabilities"); chassis->c_med_cap_available = PEEK_UINT16; chassis->c_med_type = PEEK_UINT8; port->p_med_cap_enabled |= LLDP_MED_CAP_CAP; break; case LLDP_TLV_MED_POLICY: CHECK_TLV_SIZE(8, "LLDP-MED policy"); policy = PEEK_UINT32; if (((policy >> 24) < 1) || ((policy >> 24) > LLDP_MED_APPTYPE_LAST)) { log_info("lldp", "unknown policy field %d " "received on %s", policy, hardware->h_ifname); break; } port->p_med_policy[(policy >> 24) - 1].type = (policy >> 24); port->p_med_policy[(policy >> 24) - 1].unknown = ((policy & 0x800000) != 0); port->p_med_policy[(policy >> 24) - 1].tagged = ((policy & 0x400000) != 0); port->p_med_policy[(policy >> 24) - 1].vid = (policy & 0x001FFE00) >> 9; port->p_med_policy[(policy >> 24) - 1].priority = (policy & 0x1C0) >> 6; port->p_med_policy[(policy >> 24) - 1].dscp = policy & 0x3F; port->p_med_cap_enabled |= LLDP_MED_CAP_POLICY; break; case LLDP_TLV_MED_LOCATION: CHECK_TLV_SIZE(5, "LLDP-MED Location"); loctype = PEEK_UINT8; if ((loctype < 1) || (loctype > LLDP_MED_LOCFORMAT_LAST)) { log_info("lldp", "unknown location type " "received on %s", hardware->h_ifname); break; } if ((port->p_med_location[loctype - 1].data = (char*)malloc(tlv_size - 5)) == NULL) { log_warn("lldp", "unable to allocate memory " "for LLDP-MED location for " "frame received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(port->p_med_location[loctype - 1].data, tlv_size - 5); port->p_med_location[loctype - 1].data_len = tlv_size - 5; port->p_med_location[loctype - 1].format = loctype; port->p_med_cap_enabled |= LLDP_MED_CAP_LOCATION; break; case LLDP_TLV_MED_MDI: CHECK_TLV_SIZE(7, "LLDP-MED PoE-MDI"); power = PEEK_UINT8; switch (power & 0xC0) { case 0x0: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PSE; port->p_med_cap_enabled |= LLDP_MED_CAP_MDI_PSE; switch (power & 0x30) { case 0x0: port->p_med_power.source = LLDP_MED_POW_SOURCE_UNKNOWN; break; case 0x10: port->p_med_power.source = LLDP_MED_POW_SOURCE_PRIMARY; break; case 0x20: port->p_med_power.source = LLDP_MED_POW_SOURCE_BACKUP; break; default: port->p_med_power.source = LLDP_MED_POW_SOURCE_RESERVED; } break; case 0x40: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PD; port->p_med_cap_enabled |= LLDP_MED_CAP_MDI_PD; switch (power & 0x30) { case 0x0: port->p_med_power.source = LLDP_MED_POW_SOURCE_UNKNOWN; break; case 0x10: port->p_med_power.source = LLDP_MED_POW_SOURCE_PSE; break; case 0x20: port->p_med_power.source = LLDP_MED_POW_SOURCE_LOCAL; break; default: port->p_med_power.source = LLDP_MED_POW_SOURCE_BOTH; } break; default: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_RESERVED; } if ((power & 0x0F) > LLDP_MED_POW_PRIO_LOW) port->p_med_power.priority = LLDP_MED_POW_PRIO_UNKNOWN; else 
port->p_med_power.priority = power & 0x0F; port->p_med_power.val = PEEK_UINT16; break; case LLDP_TLV_MED_IV_HW: case LLDP_TLV_MED_IV_SW: case LLDP_TLV_MED_IV_FW: case LLDP_TLV_MED_IV_SN: case LLDP_TLV_MED_IV_MANUF: case LLDP_TLV_MED_IV_MODEL: case LLDP_TLV_MED_IV_ASSET: if (tlv_size <= 4) b = NULL; else { if ((b = (char*)malloc(tlv_size - 3)) == NULL) { log_warn("lldp", "unable to allocate " "memory for LLDP-MED " "inventory for frame " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size - 4); b[tlv_size - 4] = '\0'; } switch (tlv_subtype) { case LLDP_TLV_MED_IV_HW: chassis->c_med_hw = b; break; case LLDP_TLV_MED_IV_FW: chassis->c_med_fw = b; break; case LLDP_TLV_MED_IV_SW: chassis->c_med_sw = b; break; case LLDP_TLV_MED_IV_SN: chassis->c_med_sn = b; break; case LLDP_TLV_MED_IV_MANUF: chassis->c_med_manuf = b; break; case LLDP_TLV_MED_IV_MODEL: chassis->c_med_model = b; break; case LLDP_TLV_MED_IV_ASSET: chassis->c_med_asset = b; break; } port->p_med_cap_enabled |= LLDP_MED_CAP_IV; break; default: /* Unknown LLDP MED, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif /* ENABLE_LLDPMED */ } else if (memcmp(dcbx, orgid, sizeof(orgid)) == 0) { log_debug("lldp", "unsupported DCBX tlv received on %s - ignore", hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } else { log_debug("lldp", "unknown org tlv [%02x:%02x:%02x] received on %s", orgid[0], orgid[1], orgid[2], hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; #ifdef ENABLE_CUSTOM custom = (struct lldpd_custom*)calloc(1, sizeof(struct lldpd_custom)); if (!custom) { log_warn("lldp", "unable to allocate memory for custom TLV"); goto malformed; } custom->oui_info_len = tlv_size > 4 ? tlv_size - 4 : 0; memcpy(custom->oui, orgid, sizeof(custom->oui)); custom->subtype = tlv_subtype; if (custom->oui_info_len > 0) { custom->oui_info = malloc(custom->oui_info_len); if (!custom->oui_info) { log_warn("lldp", "unable to allocate memory for custom TLV data"); goto malformed; } PEEK_BYTES(custom->oui_info, custom->oui_info_len); } TAILQ_INSERT_TAIL(&port->p_custom_list, custom, next); custom = NULL; #endif } break; default: log_warnx("lldp", "unknown tlv (%d) received on %s", tlv_type, hardware->h_ifname); goto malformed; } if (pos > tlv + tlv_size) { log_warnx("lldp", "BUG: already past TLV!"); goto malformed; } PEEK_DISCARD(tlv + tlv_size - pos); } /* Some random check */ if ((chassis->c_id == NULL) || (port->p_id == NULL) || (!ttl_received) || (gotend == 0)) { log_warnx("lldp", "some mandatory tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: #ifdef ENABLE_CUSTOM free(custom); #endif #ifdef ENABLE_DOT1 free(vlan); free(pi); #endif lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; }
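/*
 * Illustrative sketch, not part of lldpd: lldp_decode() above splits each
 * 16-bit TLV header into a 7-bit type and a 9-bit length ("tlv_type =
 * tlv_size >> 9; tlv_size = tlv_size & 0x1ff;").  The standalone demo below
 * reproduces that IEEE 802.1AB layout on a raw buffer; the names
 * lldp_tlv_hdr and parse_tlv_header are hypothetical and exist only for
 * illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct lldp_tlv_hdr {
	uint8_t  type;    /* 0..127 */
	uint16_t length;  /* 0..511 bytes of value following the header */
};

static int
parse_tlv_header(const uint8_t *buf, size_t avail, struct lldp_tlv_hdr *hdr)
{
	uint16_t raw;
	if (avail < 2)
		return -1;  /* mirrors the "tlv header too short" check above */
	raw = (uint16_t)((uint16_t)buf[0] << 8 | buf[1]);  /* big-endian, as PEEK_UINT16 */
	hdr->type = raw >> 9;        /* top 7 bits */
	hdr->length = raw & 0x1ff;   /* low 9 bits */
	return 0;
}

int
main(void)
{
	/* A Chassis ID TLV header (type 1) announcing 7 bytes of value. */
	const uint8_t frame[] = { 0x02, 0x07 };
	struct lldp_tlv_hdr hdr;
	if (parse_tlv_header(frame, sizeof frame, &hdr) == 0)
		printf("type=%d length=%d\n", hdr.type, hdr.length);  /* type=1 length=7 */
	return 0;
}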
./CrossVul/dataset_final_sorted/CWE-617/c/good_1770_3
crossvul-cpp_data_bad_389_0
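/*
 * Illustrative sketch, not part of Open vSwitch: the ofp-util.c code below
 * converts between OpenFlow 1.0 wildcard bit counts and IP netmasks
 * (ofputil_wcbits_to_netmask()).  Note the inverted convention: 'wcbits'
 * counts the *wildcarded* low-order bits, so wcbits == 8 yields a /24
 * netmask, not /8.  The demo reproduces that arithmetic in host byte order;
 * the name wcbits_to_mask_host is hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
wcbits_to_mask_host(int wcbits)
{
    wcbits &= 0x3f;                              /* only 6 bits are significant */
    return wcbits < 32 ? ~((1u << wcbits) - 1) : 0;
}

int
main(void)
{
    printf("%08" PRIx32 "\n", wcbits_to_mask_host(0));   /* ffffffff: exact match */
    printf("%08" PRIx32 "\n", wcbits_to_mask_host(8));   /* ffffff00: a /24 mask */
    printf("%08" PRIx32 "\n", wcbits_to_mask_host(32));  /* 00000000: fully wildcarded */
    return 0;
}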
/* * Copyright (c) 2008-2017 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <config.h> #include <ctype.h> #include <errno.h> #include <inttypes.h> #include <sys/types.h> #include <netinet/in.h> #include <netinet/icmp6.h> #include <stdlib.h> #include "bitmap.h" #include "bundle.h" #include "byte-order.h" #include "classifier.h" #include "learn.h" #include "multipath.h" #include "netdev.h" #include "nx-match.h" #include "id-pool.h" #include "openflow/netronome-ext.h" #include "openvswitch/dynamic-string.h" #include "openvswitch/meta-flow.h" #include "openvswitch/ofp-actions.h" #include "openvswitch/ofp-errors.h" #include "openvswitch/ofp-msgs.h" #include "openvswitch/ofp-print.h" #include "openvswitch/ofp-prop.h" #include "openvswitch/ofp-util.h" #include "openvswitch/ofpbuf.h" #include "openvswitch/type-props.h" #include "openvswitch/vlog.h" #include "openflow/intel-ext.h" #include "packets.h" #include "random.h" #include "tun-metadata.h" #include "unaligned.h" #include "util.h" #include "uuid.h" VLOG_DEFINE_THIS_MODULE(ofp_util); /* Rate limit for OpenFlow message parse errors. These always indicate a bug * in the peer and so there's not much point in showing a lot of them. */ static struct vlog_rate_limit bad_ofmsg_rl = VLOG_RATE_LIMIT_INIT(1, 5); static enum ofputil_table_vacancy ofputil_decode_table_vacancy( ovs_be32 config, enum ofp_version); static enum ofputil_table_eviction ofputil_decode_table_eviction( ovs_be32 config, enum ofp_version); static ovs_be32 ofputil_encode_table_config(enum ofputil_table_miss, enum ofputil_table_eviction, enum ofputil_table_vacancy, enum ofp_version); /* Given the wildcard bit count in the least-significant 6 bits of 'wcbits', returns * an IP netmask with a 1 in each bit that must match and a 0 in each bit that * is wildcarded. * * The bits in 'wcbits' are in the format used in enum ofp_flow_wildcards: 0 * is exact match, 1 ignores the LSB, 2 ignores the 2 least-significant bits, * ..., 32 and higher wildcard the entire field. This is the *opposite* of the * usual convention where e.g. /24 indicates that 8 bits (not 24 bits) are * wildcarded. */ ovs_be32 ofputil_wcbits_to_netmask(int wcbits) { wcbits &= 0x3f; return wcbits < 32 ? htonl(~((1u << wcbits) - 1)) : 0; } /* Given the IP netmask 'netmask', returns the number of bits of the IP address * that it wildcards, that is, the number of 0-bits in 'netmask', a number * between 0 and 32 inclusive. * * If 'netmask' is not a CIDR netmask (see ip_is_cidr()), the return value will * still be in the valid range but isn't otherwise meaningful. */ int ofputil_netmask_to_wcbits(ovs_be32 netmask) { return 32 - ip_count_cidr_bits(netmask); } /* Converts the OpenFlow 1.0 wildcards in 'ofpfw' (OFPFW10_*) into a * flow_wildcards in 'wc' for use in struct match. It is the caller's * responsibility to handle the special case where the flow match's dl_vlan is * set to OFP_VLAN_NONE.
*/ void ofputil_wildcard_from_ofpfw10(uint32_t ofpfw, struct flow_wildcards *wc) { BUILD_ASSERT_DECL(FLOW_WC_SEQ == 36); /* Initialize most of wc. */ flow_wildcards_init_catchall(wc); if (!(ofpfw & OFPFW10_IN_PORT)) { wc->masks.in_port.ofp_port = u16_to_ofp(UINT16_MAX); } if (!(ofpfw & OFPFW10_NW_TOS)) { wc->masks.nw_tos |= IP_DSCP_MASK; } if (!(ofpfw & OFPFW10_NW_PROTO)) { wc->masks.nw_proto = UINT8_MAX; } wc->masks.nw_src = ofputil_wcbits_to_netmask(ofpfw >> OFPFW10_NW_SRC_SHIFT); wc->masks.nw_dst = ofputil_wcbits_to_netmask(ofpfw >> OFPFW10_NW_DST_SHIFT); if (!(ofpfw & OFPFW10_TP_SRC)) { wc->masks.tp_src = OVS_BE16_MAX; } if (!(ofpfw & OFPFW10_TP_DST)) { wc->masks.tp_dst = OVS_BE16_MAX; } if (!(ofpfw & OFPFW10_DL_SRC)) { WC_MASK_FIELD(wc, dl_src); } if (!(ofpfw & OFPFW10_DL_DST)) { WC_MASK_FIELD(wc, dl_dst); } if (!(ofpfw & OFPFW10_DL_TYPE)) { wc->masks.dl_type = OVS_BE16_MAX; } /* VLAN TCI mask. */ if (!(ofpfw & OFPFW10_DL_VLAN_PCP)) { wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI); } if (!(ofpfw & OFPFW10_DL_VLAN)) { wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI); } } /* Converts the ofp10_match in 'ofmatch' into a struct match in 'match'. */ void ofputil_match_from_ofp10_match(const struct ofp10_match *ofmatch, struct match *match) { uint32_t ofpfw = ntohl(ofmatch->wildcards) & OFPFW10_ALL; /* Initialize match->wc. */ memset(&match->flow, 0, sizeof match->flow); ofputil_wildcard_from_ofpfw10(ofpfw, &match->wc); memset(&match->tun_md, 0, sizeof match->tun_md); /* Initialize most of match->flow. */ match->flow.nw_src = ofmatch->nw_src; match->flow.nw_dst = ofmatch->nw_dst; match->flow.in_port.ofp_port = u16_to_ofp(ntohs(ofmatch->in_port)); match->flow.dl_type = ofputil_dl_type_from_openflow(ofmatch->dl_type); match->flow.tp_src = ofmatch->tp_src; match->flow.tp_dst = ofmatch->tp_dst; match->flow.dl_src = ofmatch->dl_src; match->flow.dl_dst = ofmatch->dl_dst; match->flow.nw_tos = ofmatch->nw_tos & IP_DSCP_MASK; match->flow.nw_proto = ofmatch->nw_proto; /* Translate VLANs. */ if (!(ofpfw & OFPFW10_DL_VLAN) && ofmatch->dl_vlan == htons(OFP10_VLAN_NONE)) { /* Match only packets without 802.1Q header. * * When OFPFW10_DL_VLAN_PCP is wildcarded, this is obviously correct. * * If OFPFW10_DL_VLAN_PCP is matched, the flow match is contradictory, * because we can't have a specific PCP without an 802.1Q header. * However, older versions of OVS treated this as matching packets * without an 802.1Q header, so we do here too. */ match->flow.vlan_tci = htons(0); match->wc.masks.vlan_tci = htons(0xffff); } else { ovs_be16 vid, pcp, tci; uint16_t hpcp; vid = ofmatch->dl_vlan & htons(VLAN_VID_MASK); hpcp = (ofmatch->dl_vlan_pcp << VLAN_PCP_SHIFT) & VLAN_PCP_MASK; pcp = htons(hpcp); tci = vid | pcp | htons(VLAN_CFI); match->flow.vlan_tci = tci & match->wc.masks.vlan_tci; } /* Clean up. */ match_zero_wildcarded_fields(match); } /* Convert 'match' into the OpenFlow 1.0 match structure 'ofmatch'. */ void ofputil_match_to_ofp10_match(const struct match *match, struct ofp10_match *ofmatch) { const struct flow_wildcards *wc = &match->wc; uint32_t ofpfw; /* Figure out most OpenFlow wildcards.
*/ ofpfw = 0; if (!wc->masks.in_port.ofp_port) { ofpfw |= OFPFW10_IN_PORT; } if (!wc->masks.dl_type) { ofpfw |= OFPFW10_DL_TYPE; } if (!wc->masks.nw_proto) { ofpfw |= OFPFW10_NW_PROTO; } ofpfw |= (ofputil_netmask_to_wcbits(wc->masks.nw_src) << OFPFW10_NW_SRC_SHIFT); ofpfw |= (ofputil_netmask_to_wcbits(wc->masks.nw_dst) << OFPFW10_NW_DST_SHIFT); if (!(wc->masks.nw_tos & IP_DSCP_MASK)) { ofpfw |= OFPFW10_NW_TOS; } if (!wc->masks.tp_src) { ofpfw |= OFPFW10_TP_SRC; } if (!wc->masks.tp_dst) { ofpfw |= OFPFW10_TP_DST; } if (eth_addr_is_zero(wc->masks.dl_src)) { ofpfw |= OFPFW10_DL_SRC; } if (eth_addr_is_zero(wc->masks.dl_dst)) { ofpfw |= OFPFW10_DL_DST; } /* Translate VLANs. */ ofmatch->dl_vlan = htons(0); ofmatch->dl_vlan_pcp = 0; if (match->wc.masks.vlan_tci == htons(0)) { ofpfw |= OFPFW10_DL_VLAN | OFPFW10_DL_VLAN_PCP; } else if (match->wc.masks.vlan_tci & htons(VLAN_CFI) && !(match->flow.vlan_tci & htons(VLAN_CFI))) { ofmatch->dl_vlan = htons(OFP10_VLAN_NONE); } else { if (!(match->wc.masks.vlan_tci & htons(VLAN_VID_MASK))) { ofpfw |= OFPFW10_DL_VLAN; } else { ofmatch->dl_vlan = htons(vlan_tci_to_vid(match->flow.vlan_tci)); } if (!(match->wc.masks.vlan_tci & htons(VLAN_PCP_MASK))) { ofpfw |= OFPFW10_DL_VLAN_PCP; } else { ofmatch->dl_vlan_pcp = vlan_tci_to_pcp(match->flow.vlan_tci); } } /* Compose most of the match structure. */ ofmatch->wildcards = htonl(ofpfw); ofmatch->in_port = htons(ofp_to_u16(match->flow.in_port.ofp_port)); ofmatch->dl_src = match->flow.dl_src; ofmatch->dl_dst = match->flow.dl_dst; ofmatch->dl_type = ofputil_dl_type_to_openflow(match->flow.dl_type); ofmatch->nw_src = match->flow.nw_src; ofmatch->nw_dst = match->flow.nw_dst; ofmatch->nw_tos = match->flow.nw_tos & IP_DSCP_MASK; ofmatch->nw_proto = match->flow.nw_proto; ofmatch->tp_src = match->flow.tp_src; ofmatch->tp_dst = match->flow.tp_dst; memset(ofmatch->pad1, '\0', sizeof ofmatch->pad1); memset(ofmatch->pad2, '\0', sizeof ofmatch->pad2); } enum ofperr ofputil_pull_ofp11_match(struct ofpbuf *buf, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct match *match, uint16_t *padded_match_len) { struct ofp11_match_header *omh = buf->data; uint16_t match_len; if (buf->size < sizeof *omh) { return OFPERR_OFPBMC_BAD_LEN; } match_len = ntohs(omh->length); switch (ntohs(omh->type)) { case OFPMT_STANDARD: { struct ofp11_match *om; if (match_len != sizeof *om || buf->size < sizeof *om) { return OFPERR_OFPBMC_BAD_LEN; } om = ofpbuf_pull(buf, sizeof *om); if (padded_match_len) { *padded_match_len = match_len; } return ofputil_match_from_ofp11_match(om, match); } case OFPMT_OXM: if (padded_match_len) { *padded_match_len = ROUND_UP(match_len, 8); } return oxm_pull_match(buf, tun_table, vl_mff_map, match); default: return OFPERR_OFPBMC_BAD_TYPE; } } /* Converts the ofp11_match in 'ofmatch' into a struct match in 'match'. * Returns 0 if successful, otherwise an OFPERR_* value. 
*/ enum ofperr ofputil_match_from_ofp11_match(const struct ofp11_match *ofmatch, struct match *match) { uint16_t wc = ntohl(ofmatch->wildcards); bool ipv4, arp, rarp; match_init_catchall(match); if (!(wc & OFPFW11_IN_PORT)) { ofp_port_t ofp_port; enum ofperr error; error = ofputil_port_from_ofp11(ofmatch->in_port, &ofp_port); if (error) { return OFPERR_OFPBMC_BAD_VALUE; } match_set_in_port(match, ofp_port); } match_set_dl_src_masked(match, ofmatch->dl_src, eth_addr_invert(ofmatch->dl_src_mask)); match_set_dl_dst_masked(match, ofmatch->dl_dst, eth_addr_invert(ofmatch->dl_dst_mask)); if (!(wc & OFPFW11_DL_VLAN)) { if (ofmatch->dl_vlan == htons(OFPVID11_NONE)) { /* Match only packets without a VLAN tag. */ match->flow.vlan_tci = htons(0); match->wc.masks.vlan_tci = OVS_BE16_MAX; } else { if (ofmatch->dl_vlan == htons(OFPVID11_ANY)) { /* Match any packet with a VLAN tag regardless of VID. */ match->flow.vlan_tci = htons(VLAN_CFI); match->wc.masks.vlan_tci = htons(VLAN_CFI); } else if (ntohs(ofmatch->dl_vlan) < 4096) { /* Match only packets with the specified VLAN VID. */ match->flow.vlan_tci = htons(VLAN_CFI) | ofmatch->dl_vlan; match->wc.masks.vlan_tci = htons(VLAN_CFI | VLAN_VID_MASK); } else { /* Invalid VID. */ return OFPERR_OFPBMC_BAD_VALUE; } if (!(wc & OFPFW11_DL_VLAN_PCP)) { if (ofmatch->dl_vlan_pcp <= 7) { match->flow.vlan_tci |= htons(ofmatch->dl_vlan_pcp << VLAN_PCP_SHIFT); match->wc.masks.vlan_tci |= htons(VLAN_PCP_MASK); } else { /* Invalid PCP. */ return OFPERR_OFPBMC_BAD_VALUE; } } } } if (!(wc & OFPFW11_DL_TYPE)) { match_set_dl_type(match, ofputil_dl_type_from_openflow(ofmatch->dl_type)); } ipv4 = match->flow.dl_type == htons(ETH_TYPE_IP); arp = match->flow.dl_type == htons(ETH_TYPE_ARP); rarp = match->flow.dl_type == htons(ETH_TYPE_RARP); if (ipv4 && !(wc & OFPFW11_NW_TOS)) { if (ofmatch->nw_tos & ~IP_DSCP_MASK) { /* Invalid TOS. */ return OFPERR_OFPBMC_BAD_VALUE; } match_set_nw_dscp(match, ofmatch->nw_tos); } if (ipv4 || arp || rarp) { if (!(wc & OFPFW11_NW_PROTO)) { match_set_nw_proto(match, ofmatch->nw_proto); } match_set_nw_src_masked(match, ofmatch->nw_src, ~ofmatch->nw_src_mask); match_set_nw_dst_masked(match, ofmatch->nw_dst, ~ofmatch->nw_dst_mask); } #define OFPFW11_TP_ALL (OFPFW11_TP_SRC | OFPFW11_TP_DST) if (ipv4 && (wc & OFPFW11_TP_ALL) != OFPFW11_TP_ALL) { switch (match->flow.nw_proto) { case IPPROTO_ICMP: /* "A.2.3 Flow Match Structures" in OF1.1 says: * * The tp_src and tp_dst fields will be ignored unless the * network protocol specified is as TCP, UDP or SCTP. * * but I'm pretty sure we should support ICMP too, otherwise * that's a regression from OF1.0. */ if (!(wc & OFPFW11_TP_SRC)) { uint16_t icmp_type = ntohs(ofmatch->tp_src); if (icmp_type < 0x100) { match_set_icmp_type(match, icmp_type); } else { return OFPERR_OFPBMC_BAD_FIELD; } } if (!(wc & OFPFW11_TP_DST)) { uint16_t icmp_code = ntohs(ofmatch->tp_dst); if (icmp_code < 0x100) { match_set_icmp_code(match, icmp_code); } else { return OFPERR_OFPBMC_BAD_FIELD; } } break; case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_SCTP: if (!(wc & (OFPFW11_TP_SRC))) { match_set_tp_src(match, ofmatch->tp_src); } if (!(wc & (OFPFW11_TP_DST))) { match_set_tp_dst(match, ofmatch->tp_dst); } break; default: /* OF1.1 says explicitly to ignore this. 
*/ break; } } if (eth_type_mpls(match->flow.dl_type)) { if (!(wc & OFPFW11_MPLS_LABEL)) { match_set_mpls_label(match, 0, ofmatch->mpls_label); } if (!(wc & OFPFW11_MPLS_TC)) { match_set_mpls_tc(match, 0, ofmatch->mpls_tc); } } match_set_metadata_masked(match, ofmatch->metadata, ~ofmatch->metadata_mask); return 0; } /* Convert 'match' into the OpenFlow 1.1 match structure 'ofmatch'. */ void ofputil_match_to_ofp11_match(const struct match *match, struct ofp11_match *ofmatch) { uint32_t wc = 0; memset(ofmatch, 0, sizeof *ofmatch); ofmatch->omh.type = htons(OFPMT_STANDARD); ofmatch->omh.length = htons(OFPMT11_STANDARD_LENGTH); if (!match->wc.masks.in_port.ofp_port) { wc |= OFPFW11_IN_PORT; } else { ofmatch->in_port = ofputil_port_to_ofp11(match->flow.in_port.ofp_port); } ofmatch->dl_src = match->flow.dl_src; ofmatch->dl_src_mask = eth_addr_invert(match->wc.masks.dl_src); ofmatch->dl_dst = match->flow.dl_dst; ofmatch->dl_dst_mask = eth_addr_invert(match->wc.masks.dl_dst); if (match->wc.masks.vlan_tci == htons(0)) { wc |= OFPFW11_DL_VLAN | OFPFW11_DL_VLAN_PCP; } else if (match->wc.masks.vlan_tci & htons(VLAN_CFI) && !(match->flow.vlan_tci & htons(VLAN_CFI))) { ofmatch->dl_vlan = htons(OFPVID11_NONE); wc |= OFPFW11_DL_VLAN_PCP; } else { if (!(match->wc.masks.vlan_tci & htons(VLAN_VID_MASK))) { ofmatch->dl_vlan = htons(OFPVID11_ANY); } else { ofmatch->dl_vlan = htons(vlan_tci_to_vid(match->flow.vlan_tci)); } if (!(match->wc.masks.vlan_tci & htons(VLAN_PCP_MASK))) { wc |= OFPFW11_DL_VLAN_PCP; } else { ofmatch->dl_vlan_pcp = vlan_tci_to_pcp(match->flow.vlan_tci); } } if (!match->wc.masks.dl_type) { wc |= OFPFW11_DL_TYPE; } else { ofmatch->dl_type = ofputil_dl_type_to_openflow(match->flow.dl_type); } if (!(match->wc.masks.nw_tos & IP_DSCP_MASK)) { wc |= OFPFW11_NW_TOS; } else { ofmatch->nw_tos = match->flow.nw_tos & IP_DSCP_MASK; } if (!match->wc.masks.nw_proto) { wc |= OFPFW11_NW_PROTO; } else { ofmatch->nw_proto = match->flow.nw_proto; } ofmatch->nw_src = match->flow.nw_src; ofmatch->nw_src_mask = ~match->wc.masks.nw_src; ofmatch->nw_dst = match->flow.nw_dst; ofmatch->nw_dst_mask = ~match->wc.masks.nw_dst; if (!match->wc.masks.tp_src) { wc |= OFPFW11_TP_SRC; } else { ofmatch->tp_src = match->flow.tp_src; } if (!match->wc.masks.tp_dst) { wc |= OFPFW11_TP_DST; } else { ofmatch->tp_dst = match->flow.tp_dst; } if (!(match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK))) { wc |= OFPFW11_MPLS_LABEL; } else { ofmatch->mpls_label = htonl(mpls_lse_to_label( match->flow.mpls_lse[0])); } if (!(match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK))) { wc |= OFPFW11_MPLS_TC; } else { ofmatch->mpls_tc = mpls_lse_to_tc(match->flow.mpls_lse[0]); } ofmatch->metadata = match->flow.metadata; ofmatch->metadata_mask = ~match->wc.masks.metadata; ofmatch->wildcards = htonl(wc); } /* Returns the "typical" length of a match for 'protocol', for use in * estimating space to preallocate. 
*/ int ofputil_match_typical_len(enum ofputil_protocol protocol) { switch (protocol) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: return sizeof(struct ofp10_match); case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: return NXM_TYPICAL_LEN; case OFPUTIL_P_OF11_STD: return sizeof(struct ofp11_match); case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: return NXM_TYPICAL_LEN; default: OVS_NOT_REACHED(); } } /* Appends to 'b' a struct ofp11_match_header followed by a match that * expresses 'match' properly for 'protocol', plus enough zero bytes to pad the * data appended out to a multiple of 8. 'protocol' must be one that is usable * in OpenFlow 1.1 or later. * * This function can cause 'b''s data to be reallocated. * * Returns the number of bytes appended to 'b', excluding the padding. Never * returns zero. */ int ofputil_put_ofp11_match(struct ofpbuf *b, const struct match *match, enum ofputil_protocol protocol) { switch (protocol) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: OVS_NOT_REACHED(); case OFPUTIL_P_OF11_STD: { struct ofp11_match *om; /* Make sure that no padding is needed. */ BUILD_ASSERT_DECL(sizeof *om % 8 == 0); om = ofpbuf_put_uninit(b, sizeof *om); ofputil_match_to_ofp11_match(match, om); return sizeof *om; } case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: return oxm_put_match(b, match, ofputil_protocol_to_ofp_version(protocol)); } OVS_NOT_REACHED(); } /* Given a 'dl_type' value in the format used in struct flow, returns the * corresponding 'dl_type' value for use in an ofp10_match or ofp11_match * structure. */ ovs_be16 ofputil_dl_type_to_openflow(ovs_be16 flow_dl_type) { return (flow_dl_type == htons(FLOW_DL_TYPE_NONE) ? htons(OFP_DL_TYPE_NOT_ETH_TYPE) : flow_dl_type); } /* Given a 'dl_type' value in the format used in an ofp10_match or ofp11_match * structure, returns the corresponding 'dl_type' value for use in struct * flow. */ ovs_be16 ofputil_dl_type_from_openflow(ovs_be16 ofp_dl_type) { return (ofp_dl_type == htons(OFP_DL_TYPE_NOT_ETH_TYPE) ? htons(FLOW_DL_TYPE_NONE) : ofp_dl_type); } /* Protocols. */ struct proto_abbrev { enum ofputil_protocol protocol; const char *name; }; /* Most users really don't care about some of the differences between * protocols. These abbreviations help with that. */ static const struct proto_abbrev proto_abbrevs[] = { { OFPUTIL_P_ANY, "any" }, { OFPUTIL_P_OF10_STD_ANY, "OpenFlow10" }, { OFPUTIL_P_OF10_NXM_ANY, "NXM" }, { OFPUTIL_P_ANY_OXM, "OXM" }, }; #define N_PROTO_ABBREVS ARRAY_SIZE(proto_abbrevs) enum ofputil_protocol ofputil_flow_dump_protocols[] = { OFPUTIL_P_OF16_OXM, OFPUTIL_P_OF15_OXM, OFPUTIL_P_OF14_OXM, OFPUTIL_P_OF13_OXM, OFPUTIL_P_OF12_OXM, OFPUTIL_P_OF11_STD, OFPUTIL_P_OF10_NXM, OFPUTIL_P_OF10_STD, }; size_t ofputil_n_flow_dump_protocols = ARRAY_SIZE(ofputil_flow_dump_protocols); /* Returns the set of ofputil_protocols that are supported with the given * OpenFlow 'version'. 'version' should normally be an 8-bit OpenFlow version * identifier (e.g. 0x01 for OpenFlow 1.0, 0x02 for OpenFlow 1.1). Returns 0 * if 'version' is not supported or outside the valid range.
*/ enum ofputil_protocol ofputil_protocols_from_ofp_version(enum ofp_version version) { switch (version) { case OFP10_VERSION: return OFPUTIL_P_OF10_STD_ANY | OFPUTIL_P_OF10_NXM_ANY; case OFP11_VERSION: return OFPUTIL_P_OF11_STD; case OFP12_VERSION: return OFPUTIL_P_OF12_OXM; case OFP13_VERSION: return OFPUTIL_P_OF13_OXM; case OFP14_VERSION: return OFPUTIL_P_OF14_OXM; case OFP15_VERSION: return OFPUTIL_P_OF15_OXM; case OFP16_VERSION: return OFPUTIL_P_OF16_OXM; default: return 0; } } /* Returns the ofputil_protocol that is initially in effect on an OpenFlow * connection that has negotiated the given 'version'. 'version' should * normally be an 8-bit OpenFlow version identifier (e.g. 0x01 for OpenFlow * 1.0, 0x02 for OpenFlow 1.1). Returns 0 if 'version' is not supported or * outside the valid range. */ enum ofputil_protocol ofputil_protocol_from_ofp_version(enum ofp_version version) { return rightmost_1bit(ofputil_protocols_from_ofp_version(version)); } /* Returns the OpenFlow protocol version number (e.g. OFP10_VERSION, * etc.) that corresponds to 'protocol'. */ enum ofp_version ofputil_protocol_to_ofp_version(enum ofputil_protocol protocol) { switch (protocol) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: return OFP10_VERSION; case OFPUTIL_P_OF11_STD: return OFP11_VERSION; case OFPUTIL_P_OF12_OXM: return OFP12_VERSION; case OFPUTIL_P_OF13_OXM: return OFP13_VERSION; case OFPUTIL_P_OF14_OXM: return OFP14_VERSION; case OFPUTIL_P_OF15_OXM: return OFP15_VERSION; case OFPUTIL_P_OF16_OXM: return OFP16_VERSION; } OVS_NOT_REACHED(); } /* Returns a bitmap of OpenFlow versions that are supported by at * least one of the 'protocols'. */ uint32_t ofputil_protocols_to_version_bitmap(enum ofputil_protocol protocols) { uint32_t bitmap = 0; for (; protocols; protocols = zero_rightmost_1bit(protocols)) { enum ofputil_protocol protocol = rightmost_1bit(protocols); bitmap |= 1u << ofputil_protocol_to_ofp_version(protocol); } return bitmap; } /* Returns the set of protocols that are supported on top of the * OpenFlow versions included in 'bitmap'. */ enum ofputil_protocol ofputil_protocols_from_version_bitmap(uint32_t bitmap) { enum ofputil_protocol protocols = 0; for (; bitmap; bitmap = zero_rightmost_1bit(bitmap)) { enum ofp_version version = rightmost_1bit_idx(bitmap); protocols |= ofputil_protocols_from_ofp_version(version); } return protocols; } /* Returns true if 'protocol' is a single OFPUTIL_P_* value, false * otherwise. */ bool ofputil_protocol_is_valid(enum ofputil_protocol protocol) { return protocol & OFPUTIL_P_ANY && is_pow2(protocol); } /* Returns the equivalent of 'protocol' with the Nicira flow_mod_table_id * extension turned on or off if 'enable' is true or false, respectively. * * This extension is only useful for protocols whose "standard" version does * not allow specific tables to be modified. In particular, this is true of * OpenFlow 1.0. In later versions of OpenFlow, a flow_mod request always * specifies a table ID and so there is no need for such an extension. When * 'protocol' is such a protocol that doesn't need a flow_mod_table_id * extension, this function just returns its 'protocol' argument unchanged * regardless of the value of 'enable'. */ enum ofputil_protocol ofputil_protocol_set_tid(enum ofputil_protocol protocol, bool enable) { switch (protocol) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: return enable ? 
OFPUTIL_P_OF10_STD_TID : OFPUTIL_P_OF10_STD; case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: return enable ? OFPUTIL_P_OF10_NXM_TID : OFPUTIL_P_OF10_NXM; case OFPUTIL_P_OF11_STD: return OFPUTIL_P_OF11_STD; case OFPUTIL_P_OF12_OXM: return OFPUTIL_P_OF12_OXM; case OFPUTIL_P_OF13_OXM: return OFPUTIL_P_OF13_OXM; case OFPUTIL_P_OF14_OXM: return OFPUTIL_P_OF14_OXM; case OFPUTIL_P_OF15_OXM: return OFPUTIL_P_OF15_OXM; case OFPUTIL_P_OF16_OXM: return OFPUTIL_P_OF16_OXM; default: OVS_NOT_REACHED(); } } /* Returns the "base" version of 'protocol'. That is, if 'protocol' includes * some extension to a standard protocol version, the return value is the * standard version of that protocol without any extension. If 'protocol' is a * standard protocol version, returns 'protocol' unchanged. */ enum ofputil_protocol ofputil_protocol_to_base(enum ofputil_protocol protocol) { return ofputil_protocol_set_tid(protocol, false); } /* Returns 'new_base' with any extensions taken from 'cur'. */ enum ofputil_protocol ofputil_protocol_set_base(enum ofputil_protocol cur, enum ofputil_protocol new_base) { bool tid = (cur & OFPUTIL_P_TID) != 0; switch (new_base) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: return ofputil_protocol_set_tid(OFPUTIL_P_OF10_STD, tid); case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: return ofputil_protocol_set_tid(OFPUTIL_P_OF10_NXM, tid); case OFPUTIL_P_OF11_STD: return ofputil_protocol_set_tid(OFPUTIL_P_OF11_STD, tid); case OFPUTIL_P_OF12_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF12_OXM, tid); case OFPUTIL_P_OF13_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF13_OXM, tid); case OFPUTIL_P_OF14_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF14_OXM, tid); case OFPUTIL_P_OF15_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF15_OXM, tid); case OFPUTIL_P_OF16_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF16_OXM, tid); default: OVS_NOT_REACHED(); } } /* Returns a string form of 'protocol', if a simple form exists (that is, if * 'protocol' is either a single protocol or it is a combination of protocols * that have a single abbreviation). Otherwise, returns NULL. */ const char * ofputil_protocol_to_string(enum ofputil_protocol protocol) { const struct proto_abbrev *p; /* Use a "switch" statement for single-bit names so that we get a compiler * warning if we forget any. */ switch (protocol) { case OFPUTIL_P_OF10_NXM: return "NXM-table_id"; case OFPUTIL_P_OF10_NXM_TID: return "NXM+table_id"; case OFPUTIL_P_OF10_STD: return "OpenFlow10-table_id"; case OFPUTIL_P_OF10_STD_TID: return "OpenFlow10+table_id"; case OFPUTIL_P_OF11_STD: return "OpenFlow11"; case OFPUTIL_P_OF12_OXM: return "OXM-OpenFlow12"; case OFPUTIL_P_OF13_OXM: return "OXM-OpenFlow13"; case OFPUTIL_P_OF14_OXM: return "OXM-OpenFlow14"; case OFPUTIL_P_OF15_OXM: return "OXM-OpenFlow15"; case OFPUTIL_P_OF16_OXM: return "OXM-OpenFlow16"; } /* Check abbreviations. */ for (p = proto_abbrevs; p < &proto_abbrevs[N_PROTO_ABBREVS]; p++) { if (protocol == p->protocol) { return p->name; } } return NULL; } /* Returns a string that represents 'protocols'. The return value might be a * comma-separated list if 'protocols' doesn't have a simple name. The return * value is "none" if 'protocols' is 0. * * The caller must free the returned string (with free()). 
*/ char * ofputil_protocols_to_string(enum ofputil_protocol protocols) { struct ds s; ovs_assert(!(protocols & ~OFPUTIL_P_ANY)); if (protocols == 0) { return xstrdup("none"); } ds_init(&s); while (protocols) { const struct proto_abbrev *p; int i; if (s.length) { ds_put_char(&s, ','); } for (p = proto_abbrevs; p < &proto_abbrevs[N_PROTO_ABBREVS]; p++) { if ((protocols & p->protocol) == p->protocol) { ds_put_cstr(&s, p->name); protocols &= ~p->protocol; goto match; } } for (i = 0; i < CHAR_BIT * sizeof(enum ofputil_protocol); i++) { enum ofputil_protocol bit = 1u << i; if (protocols & bit) { ds_put_cstr(&s, ofputil_protocol_to_string(bit)); protocols &= ~bit; goto match; } } OVS_NOT_REACHED(); match: ; } return ds_steal_cstr(&s); } static enum ofputil_protocol ofputil_protocol_from_string__(const char *s, size_t n) { const struct proto_abbrev *p; int i; for (i = 0; i < CHAR_BIT * sizeof(enum ofputil_protocol); i++) { enum ofputil_protocol bit = 1u << i; const char *name = ofputil_protocol_to_string(bit); if (name && n == strlen(name) && !strncasecmp(s, name, n)) { return bit; } } for (p = proto_abbrevs; p < &proto_abbrevs[N_PROTO_ABBREVS]; p++) { if (n == strlen(p->name) && !strncasecmp(s, p->name, n)) { return p->protocol; } } return 0; } /* Returns the nonempty set of protocols represented by 's', which can be a * single protocol name or abbreviation or a comma-separated list of them. * * Aborts the program with an error message if 's' is invalid. */ enum ofputil_protocol ofputil_protocols_from_string(const char *s) { const char *orig_s = s; enum ofputil_protocol protocols; protocols = 0; while (*s) { enum ofputil_protocol p; size_t n; n = strcspn(s, ","); if (n == 0) { s++; continue; } p = ofputil_protocol_from_string__(s, n); if (!p) { ovs_fatal(0, "%.*s: unknown flow protocol", (int) n, s); } protocols |= p; s += n; } if (!protocols) { ovs_fatal(0, "%s: no flow protocol specified", orig_s); } return protocols; } enum ofp_version ofputil_version_from_string(const char *s) { if (!strcasecmp(s, "OpenFlow10")) { return OFP10_VERSION; } if (!strcasecmp(s, "OpenFlow11")) { return OFP11_VERSION; } if (!strcasecmp(s, "OpenFlow12")) { return OFP12_VERSION; } if (!strcasecmp(s, "OpenFlow13")) { return OFP13_VERSION; } if (!strcasecmp(s, "OpenFlow14")) { return OFP14_VERSION; } if (!strcasecmp(s, "OpenFlow15")) { return OFP15_VERSION; } if (!strcasecmp(s, "OpenFlow16")) { return OFP16_VERSION; } return 0; } static bool is_delimiter(unsigned char c) { return isspace(c) || c == ','; } uint32_t ofputil_versions_from_string(const char *s) { size_t i = 0; uint32_t bitmap = 0; while (s[i]) { size_t j; int version; char *key; if (is_delimiter(s[i])) { i++; continue; } j = 0; while (s[i + j] && !is_delimiter(s[i + j])) { j++; } key = xmemdup0(s + i, j); version = ofputil_version_from_string(key); if (!version) { VLOG_FATAL("Unknown OpenFlow version: \"%s\"", key); } free(key); bitmap |= 1u << version; i += j; } return bitmap; } uint32_t ofputil_versions_from_strings(char ** const s, size_t count) { uint32_t bitmap = 0; while (count--) { int version = ofputil_version_from_string(s[count]); if (!version) { VLOG_WARN("Unknown OpenFlow version: \"%s\"", s[count]); } else { bitmap |= 1u << version; } } return bitmap; } const char * ofputil_version_to_string(enum ofp_version ofp_version) { switch (ofp_version) { case OFP10_VERSION: return "OpenFlow10"; case OFP11_VERSION: return "OpenFlow11"; case OFP12_VERSION: return "OpenFlow12"; case OFP13_VERSION: return "OpenFlow13"; case OFP14_VERSION: return 
"OpenFlow14"; case OFP15_VERSION: return "OpenFlow15"; case OFP16_VERSION: return "OpenFlow16"; default: OVS_NOT_REACHED(); } } bool ofputil_packet_in_format_is_valid(enum nx_packet_in_format packet_in_format) { switch (packet_in_format) { case NXPIF_STANDARD: case NXPIF_NXT_PACKET_IN: case NXPIF_NXT_PACKET_IN2: return true; } return false; } const char * ofputil_packet_in_format_to_string(enum nx_packet_in_format packet_in_format) { switch (packet_in_format) { case NXPIF_STANDARD: return "standard"; case NXPIF_NXT_PACKET_IN: return "nxt_packet_in"; case NXPIF_NXT_PACKET_IN2: return "nxt_packet_in2"; default: OVS_NOT_REACHED(); } } int ofputil_packet_in_format_from_string(const char *s) { return (!strcmp(s, "standard") || !strcmp(s, "openflow10") ? NXPIF_STANDARD : !strcmp(s, "nxt_packet_in") || !strcmp(s, "nxm") ? NXPIF_NXT_PACKET_IN : !strcmp(s, "nxt_packet_in2") ? NXPIF_NXT_PACKET_IN2 : -1); } void ofputil_format_version(struct ds *msg, enum ofp_version version) { ds_put_format(msg, "0x%02x", version); } void ofputil_format_version_name(struct ds *msg, enum ofp_version version) { ds_put_cstr(msg, ofputil_version_to_string(version)); } static void ofputil_format_version_bitmap__(struct ds *msg, uint32_t bitmap, void (*format_version)(struct ds *msg, enum ofp_version)) { while (bitmap) { format_version(msg, raw_ctz(bitmap)); bitmap = zero_rightmost_1bit(bitmap); if (bitmap) { ds_put_cstr(msg, ", "); } } } void ofputil_format_version_bitmap(struct ds *msg, uint32_t bitmap) { ofputil_format_version_bitmap__(msg, bitmap, ofputil_format_version); } void ofputil_format_version_bitmap_names(struct ds *msg, uint32_t bitmap) { ofputil_format_version_bitmap__(msg, bitmap, ofputil_format_version_name); } static bool ofputil_decode_hello_bitmap(const struct ofp_hello_elem_header *oheh, uint32_t *allowed_versionsp) { uint16_t bitmap_len = ntohs(oheh->length) - sizeof *oheh; const ovs_be32 *bitmap = ALIGNED_CAST(const ovs_be32 *, oheh + 1); uint32_t allowed_versions; if (!bitmap_len || bitmap_len % sizeof *bitmap) { return false; } /* Only use the first 32-bit element of the bitmap as that is all the * current implementation supports. Subsequent elements are ignored which * should have no effect on session negotiation until Open vSwitch supports * wire-protocol versions greater than 31. */ allowed_versions = ntohl(bitmap[0]); if (allowed_versions & 1) { /* There's no OpenFlow version 0. */ VLOG_WARN_RL(&bad_ofmsg_rl, "peer claims to support invalid OpenFlow " "version 0x00"); allowed_versions &= ~1u; } if (!allowed_versions) { VLOG_WARN_RL(&bad_ofmsg_rl, "peer does not support any OpenFlow " "version (between 0x01 and 0x1f)"); return false; } *allowed_versionsp = allowed_versions; return true; } static uint32_t version_bitmap_from_version(uint8_t ofp_version) { return ((ofp_version < 32 ? 1u << ofp_version : 0) - 1) << 1; } /* Decodes OpenFlow OFPT_HELLO message 'oh', storing into '*allowed_versions' * the set of OpenFlow versions for which 'oh' announces support. * * Because of how OpenFlow defines OFPT_HELLO messages, this function is always * successful, and thus '*allowed_versions' is always initialized. However, it * returns false if 'oh' contains some data that could not be fully understood, * true if 'oh' was completely parsed. 
*/ bool ofputil_decode_hello(const struct ofp_header *oh, uint32_t *allowed_versions) { struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpbuf_pull(&msg, sizeof *oh); *allowed_versions = version_bitmap_from_version(oh->version); bool ok = true; while (msg.size) { const struct ofp_hello_elem_header *oheh; unsigned int len; if (msg.size < sizeof *oheh) { return false; } oheh = msg.data; len = ntohs(oheh->length); if (len < sizeof *oheh || !ofpbuf_try_pull(&msg, ROUND_UP(len, 8))) { return false; } if (oheh->type != htons(OFPHET_VERSIONBITMAP) || !ofputil_decode_hello_bitmap(oheh, allowed_versions)) { ok = false; } } return ok; } /* Returns true if 'allowed_versions' needs to be accompanied by a version * bitmap to be correctly expressed in an OFPT_HELLO message. */ static bool should_send_version_bitmap(uint32_t allowed_versions) { return !is_pow2((allowed_versions >> 1) + 1); } /* Create an OFPT_HELLO message that expresses support for the OpenFlow * versions in the 'allowed_versions' bitmaps and returns the message. */ struct ofpbuf * ofputil_encode_hello(uint32_t allowed_versions) { enum ofp_version ofp_version; struct ofpbuf *msg; ofp_version = leftmost_1bit_idx(allowed_versions); msg = ofpraw_alloc(OFPRAW_OFPT_HELLO, ofp_version, 0); if (should_send_version_bitmap(allowed_versions)) { struct ofp_hello_elem_header *oheh; uint16_t map_len; map_len = sizeof allowed_versions; oheh = ofpbuf_put_zeros(msg, ROUND_UP(map_len + sizeof *oheh, 8)); oheh->type = htons(OFPHET_VERSIONBITMAP); oheh->length = htons(map_len + sizeof *oheh); *ALIGNED_CAST(ovs_be32 *, oheh + 1) = htonl(allowed_versions); ofpmsg_update_length(msg); } return msg; } /* Returns an OpenFlow message that, sent on an OpenFlow connection whose * protocol is 'current', at least partly transitions the protocol to 'want'. * Stores in '*next' the protocol that will be in effect on the OpenFlow * connection if the switch processes the returned message correctly. (If * '*next != want' then the caller will have to iterate.) * * If 'current == want', or if it is not possible to transition from 'current' * to 'want' (because, for example, 'current' and 'want' use different OpenFlow * protocol versions), returns NULL and stores 'current' in '*next'. */ struct ofpbuf * ofputil_encode_set_protocol(enum ofputil_protocol current, enum ofputil_protocol want, enum ofputil_protocol *next) { enum ofp_version cur_version, want_version; enum ofputil_protocol cur_base, want_base; bool cur_tid, want_tid; cur_version = ofputil_protocol_to_ofp_version(current); want_version = ofputil_protocol_to_ofp_version(want); if (cur_version != want_version) { *next = current; return NULL; } cur_base = ofputil_protocol_to_base(current); want_base = ofputil_protocol_to_base(want); if (cur_base != want_base) { *next = ofputil_protocol_set_base(current, want_base); switch (want_base) { case OFPUTIL_P_OF10_NXM: return ofputil_encode_nx_set_flow_format(NXFF_NXM); case OFPUTIL_P_OF10_STD: return ofputil_encode_nx_set_flow_format(NXFF_OPENFLOW10); case OFPUTIL_P_OF11_STD: case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: /* There is only one variant of each OpenFlow 1.1+ protocol, and we * verified above that we're not trying to change versions. 
*/ OVS_NOT_REACHED(); case OFPUTIL_P_OF10_STD_TID: case OFPUTIL_P_OF10_NXM_TID: OVS_NOT_REACHED(); } } cur_tid = (current & OFPUTIL_P_TID) != 0; want_tid = (want & OFPUTIL_P_TID) != 0; if (cur_tid != want_tid) { *next = ofputil_protocol_set_tid(current, want_tid); return ofputil_make_flow_mod_table_id(want_tid); } ovs_assert(current == want); *next = current; return NULL; } /* Returns an NXT_SET_FLOW_FORMAT message that can be used to set the flow * format to 'nxff'. */ struct ofpbuf * ofputil_encode_nx_set_flow_format(enum nx_flow_format nxff) { struct nx_set_flow_format *sff; struct ofpbuf *msg; ovs_assert(ofputil_nx_flow_format_is_valid(nxff)); msg = ofpraw_alloc(OFPRAW_NXT_SET_FLOW_FORMAT, OFP10_VERSION, 0); sff = ofpbuf_put_zeros(msg, sizeof *sff); sff->format = htonl(nxff); return msg; } /* Returns the base protocol if 'flow_format' is a valid NXFF_* value, 0 * otherwise. */ enum ofputil_protocol ofputil_nx_flow_format_to_protocol(enum nx_flow_format flow_format) { switch (flow_format) { case NXFF_OPENFLOW10: return OFPUTIL_P_OF10_STD; case NXFF_NXM: return OFPUTIL_P_OF10_NXM; default: return 0; } } /* Returns true if 'flow_format' is a valid NXFF_* value, false otherwise. */ bool ofputil_nx_flow_format_is_valid(enum nx_flow_format flow_format) { return ofputil_nx_flow_format_to_protocol(flow_format) != 0; } /* Returns a string version of 'flow_format', which must be a valid NXFF_* * value. */ const char * ofputil_nx_flow_format_to_string(enum nx_flow_format flow_format) { switch (flow_format) { case NXFF_OPENFLOW10: return "openflow10"; case NXFF_NXM: return "nxm"; default: OVS_NOT_REACHED(); } } struct ofpbuf * ofputil_make_set_packet_in_format(enum ofp_version ofp_version, enum nx_packet_in_format packet_in_format) { struct nx_set_packet_in_format *spif; struct ofpbuf *msg; msg = ofpraw_alloc(OFPRAW_NXT_SET_PACKET_IN_FORMAT, ofp_version, 0); spif = ofpbuf_put_zeros(msg, sizeof *spif); spif->format = htonl(packet_in_format); return msg; } /* Returns an OpenFlow message that can be used to turn the flow_mod_table_id * extension on or off (according to 'flow_mod_table_id').
*/ struct ofpbuf * ofputil_make_flow_mod_table_id(bool flow_mod_table_id) { struct nx_flow_mod_table_id *nfmti; struct ofpbuf *msg; msg = ofpraw_alloc(OFPRAW_NXT_FLOW_MOD_TABLE_ID, OFP10_VERSION, 0); nfmti = ofpbuf_put_zeros(msg, sizeof *nfmti); nfmti->set = flow_mod_table_id; return msg; } struct ofputil_flow_mod_flag { uint16_t raw_flag; enum ofp_version min_version, max_version; enum ofputil_flow_mod_flags flag; }; static const struct ofputil_flow_mod_flag ofputil_flow_mod_flags[] = { { OFPFF_SEND_FLOW_REM, OFP10_VERSION, 0, OFPUTIL_FF_SEND_FLOW_REM }, { OFPFF_CHECK_OVERLAP, OFP10_VERSION, 0, OFPUTIL_FF_CHECK_OVERLAP }, { OFPFF10_EMERG, OFP10_VERSION, OFP10_VERSION, OFPUTIL_FF_EMERG }, { OFPFF12_RESET_COUNTS, OFP12_VERSION, 0, OFPUTIL_FF_RESET_COUNTS }, { OFPFF13_NO_PKT_COUNTS, OFP13_VERSION, 0, OFPUTIL_FF_NO_PKT_COUNTS }, { OFPFF13_NO_BYT_COUNTS, OFP13_VERSION, 0, OFPUTIL_FF_NO_BYT_COUNTS }, { 0, 0, 0, 0 }, }; static enum ofperr ofputil_decode_flow_mod_flags(ovs_be16 raw_flags_, enum ofp_flow_mod_command command, enum ofp_version version, enum ofputil_flow_mod_flags *flagsp) { uint16_t raw_flags = ntohs(raw_flags_); const struct ofputil_flow_mod_flag *f; *flagsp = 0; for (f = ofputil_flow_mod_flags; f->raw_flag; f++) { if (raw_flags & f->raw_flag && version >= f->min_version && (!f->max_version || version <= f->max_version)) { raw_flags &= ~f->raw_flag; *flagsp |= f->flag; } } /* In OF1.0 and OF1.1, "add" always resets counters, and other commands * never do. * * In OF1.2 and later, OFPFF12_RESET_COUNTS controls whether each command * resets counters. */ if ((version == OFP10_VERSION || version == OFP11_VERSION) && command == OFPFC_ADD) { *flagsp |= OFPUTIL_FF_RESET_COUNTS; } return raw_flags ? OFPERR_OFPFMFC_BAD_FLAGS : 0; } static ovs_be16 ofputil_encode_flow_mod_flags(enum ofputil_flow_mod_flags flags, enum ofp_version version) { const struct ofputil_flow_mod_flag *f; uint16_t raw_flags; raw_flags = 0; for (f = ofputil_flow_mod_flags; f->raw_flag; f++) { if (f->flag & flags && version >= f->min_version && (!f->max_version || version <= f->max_version)) { raw_flags |= f->raw_flag; } } return htons(raw_flags); } /* Converts an OFPT_FLOW_MOD or NXT_FLOW_MOD message 'oh' into an abstract * flow_mod in 'fm'. Returns 0 if successful, otherwise an OpenFlow error * code. * * Uses 'ofpacts' to store the abstract OFPACT_* version of 'oh''s actions. * The caller must initialize 'ofpacts' and retains ownership of it. * 'fm->ofpacts' will point into the 'ofpacts' buffer. * * Does not validate the flow_mod actions. The caller should do that, with * ofpacts_check(). */ enum ofperr ofputil_decode_flow_mod(struct ofputil_flow_mod *fm, const struct ofp_header *oh, enum ofputil_protocol protocol, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct ofpbuf *ofpacts, ofp_port_t max_port, uint8_t max_table) { ovs_be16 raw_flags; enum ofperr error; struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT11_FLOW_MOD) { /* Standard OpenFlow 1.1+ flow_mod. */ const struct ofp11_flow_mod *ofm; ofm = ofpbuf_pull(&b, sizeof *ofm); error = ofputil_pull_ofp11_match(&b, tun_table, vl_mff_map, &fm->match, NULL); if (error) { return error; } /* Translate the message. 
*/ fm->priority = ntohs(ofm->priority); if (ofm->command == OFPFC_ADD || (oh->version == OFP11_VERSION && (ofm->command == OFPFC_MODIFY || ofm->command == OFPFC_MODIFY_STRICT) && ofm->cookie_mask == htonll(0))) { /* In OpenFlow 1.1 only, a "modify" or "modify-strict" that does * not match on the cookie is treated as an "add" if there is no * match. */ fm->cookie = htonll(0); fm->cookie_mask = htonll(0); fm->new_cookie = ofm->cookie; } else { fm->cookie = ofm->cookie; fm->cookie_mask = ofm->cookie_mask; fm->new_cookie = OVS_BE64_MAX; } fm->modify_cookie = false; fm->command = ofm->command; /* Get table ID. * * OF1.1 entirely forbids table_id == OFPTT_ALL. * OF1.2+ allows table_id == OFPTT_ALL only for deletes. */ fm->table_id = ofm->table_id; if (fm->table_id == OFPTT_ALL && (oh->version == OFP11_VERSION || (ofm->command != OFPFC_DELETE && ofm->command != OFPFC_DELETE_STRICT))) { return OFPERR_OFPFMFC_BAD_TABLE_ID; } fm->idle_timeout = ntohs(ofm->idle_timeout); fm->hard_timeout = ntohs(ofm->hard_timeout); if (oh->version >= OFP14_VERSION && ofm->command == OFPFC_ADD) { fm->importance = ntohs(ofm->importance); } else { fm->importance = 0; } fm->buffer_id = ntohl(ofm->buffer_id); error = ofputil_port_from_ofp11(ofm->out_port, &fm->out_port); if (error) { return error; } fm->out_group = (ofm->command == OFPFC_DELETE || ofm->command == OFPFC_DELETE_STRICT ? ntohl(ofm->out_group) : OFPG_ANY); raw_flags = ofm->flags; } else { uint16_t command; if (raw == OFPRAW_OFPT10_FLOW_MOD) { /* Standard OpenFlow 1.0 flow_mod. */ const struct ofp10_flow_mod *ofm; /* Get the ofp10_flow_mod. */ ofm = ofpbuf_pull(&b, sizeof *ofm); /* Translate the rule. */ ofputil_match_from_ofp10_match(&ofm->match, &fm->match); ofputil_normalize_match(&fm->match); /* OpenFlow 1.0 says that exact-match rules have to have the * highest possible priority. */ fm->priority = (ofm->match.wildcards & htonl(OFPFW10_ALL) ? ntohs(ofm->priority) : UINT16_MAX); /* Translate the message. */ command = ntohs(ofm->command); fm->cookie = htonll(0); fm->cookie_mask = htonll(0); fm->new_cookie = ofm->cookie; fm->idle_timeout = ntohs(ofm->idle_timeout); fm->hard_timeout = ntohs(ofm->hard_timeout); fm->importance = 0; fm->buffer_id = ntohl(ofm->buffer_id); fm->out_port = u16_to_ofp(ntohs(ofm->out_port)); fm->out_group = OFPG_ANY; raw_flags = ofm->flags; } else if (raw == OFPRAW_NXT_FLOW_MOD) { /* Nicira extended flow_mod. */ const struct nx_flow_mod *nfm; /* Dissect the message. */ nfm = ofpbuf_pull(&b, sizeof *nfm); error = nx_pull_match(&b, ntohs(nfm->match_len), &fm->match, &fm->cookie, &fm->cookie_mask, tun_table, vl_mff_map); if (error) { return error; } /* Translate the message. */ command = ntohs(nfm->command); if ((command & 0xff) == OFPFC_ADD && fm->cookie_mask) { /* Flow additions may only set a new cookie, not match an * existing cookie. 
*/ return OFPERR_NXBRC_NXM_INVALID; } fm->priority = ntohs(nfm->priority); fm->new_cookie = nfm->cookie; fm->idle_timeout = ntohs(nfm->idle_timeout); fm->hard_timeout = ntohs(nfm->hard_timeout); fm->importance = 0; fm->buffer_id = ntohl(nfm->buffer_id); fm->out_port = u16_to_ofp(ntohs(nfm->out_port)); fm->out_group = OFPG_ANY; raw_flags = nfm->flags; } else { OVS_NOT_REACHED(); } fm->modify_cookie = fm->new_cookie != OVS_BE64_MAX; if (protocol & OFPUTIL_P_TID) { fm->command = command & 0xff; fm->table_id = command >> 8; } else { if (command > 0xff) { VLOG_WARN_RL(&bad_ofmsg_rl, "flow_mod has explicit table_id " "but flow_mod_table_id extension is not enabled"); } fm->command = command; fm->table_id = 0xff; } } if (fm->command > OFPFC_DELETE_STRICT) { return OFPERR_OFPFMFC_BAD_COMMAND; } fm->ofpacts_tlv_bitmap = 0; error = ofpacts_pull_openflow_instructions(&b, b.size, oh->version, vl_mff_map, &fm->ofpacts_tlv_bitmap, ofpacts); if (error) { return error; } fm->ofpacts = ofpacts->data; fm->ofpacts_len = ofpacts->size; error = ofputil_decode_flow_mod_flags(raw_flags, fm->command, oh->version, &fm->flags); if (error) { return error; } if (fm->flags & OFPUTIL_FF_EMERG) { /* We do not support the OpenFlow 1.0 emergency flow cache, which * is not required in OpenFlow 1.0.1 and removed from OpenFlow 1.1. * * OpenFlow 1.0 specifies the error code to use when idle_timeout * or hard_timeout is nonzero. Otherwise, there is no good error * code, so just state that the flow table is full. */ return (fm->hard_timeout || fm->idle_timeout ? OFPERR_OFPFMFC_BAD_EMERG_TIMEOUT : OFPERR_OFPFMFC_TABLE_FULL); } return ofpacts_check_consistency(fm->ofpacts, fm->ofpacts_len, &fm->match.flow, max_port, fm->table_id, max_table, protocol); } static enum ofperr ofputil_pull_bands(struct ofpbuf *msg, size_t len, uint16_t *n_bands, struct ofpbuf *bands) { const struct ofp13_meter_band_header *ombh; struct ofputil_meter_band *mb; uint16_t n = 0; ombh = ofpbuf_try_pull(msg, len); if (!ombh) { return OFPERR_OFPBRC_BAD_LEN; } while (len >= sizeof (struct ofp13_meter_band_drop)) { size_t ombh_len = ntohs(ombh->len); /* All supported band types have the same length. */ if (ombh_len != sizeof (struct ofp13_meter_band_drop)) { return OFPERR_OFPBRC_BAD_LEN; } mb = ofpbuf_put_uninit(bands, sizeof *mb); mb->type = ntohs(ombh->type); if (mb->type != OFPMBT13_DROP && mb->type != OFPMBT13_DSCP_REMARK) { return OFPERR_OFPMMFC_BAD_BAND; } mb->rate = ntohl(ombh->rate); mb->burst_size = ntohl(ombh->burst_size); mb->prec_level = (mb->type == OFPMBT13_DSCP_REMARK) ? ((struct ofp13_meter_band_dscp_remark *)ombh)->prec_level : 0; n++; len -= ombh_len; ombh = ALIGNED_CAST(struct ofp13_meter_band_header *, (char *) ombh + ombh_len); } if (len) { return OFPERR_OFPBRC_BAD_LEN; } *n_bands = n; return 0; } enum ofperr ofputil_decode_meter_mod(const struct ofp_header *oh, struct ofputil_meter_mod *mm, struct ofpbuf *bands) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); const struct ofp13_meter_mod *omm = ofpbuf_pull(&b, sizeof *omm); /* Translate the message. 
mm->command = ntohs(omm->command); if (mm->command != OFPMC13_ADD && mm->command != OFPMC13_MODIFY && mm->command != OFPMC13_DELETE) { return OFPERR_OFPMMFC_BAD_COMMAND; } mm->meter.meter_id = ntohl(omm->meter_id); if (mm->command == OFPMC13_DELETE) { mm->meter.flags = 0; mm->meter.n_bands = 0; mm->meter.bands = NULL; } else { enum ofperr error; mm->meter.flags = ntohs(omm->flags); if (mm->meter.flags & OFPMF13_KBPS && mm->meter.flags & OFPMF13_PKTPS) { return OFPERR_OFPMMFC_BAD_FLAGS; } error = ofputil_pull_bands(&b, b.size, &mm->meter.n_bands, bands); if (error) { return error; } mm->meter.bands = bands->data; } return 0; } void ofputil_decode_meter_request(const struct ofp_header *oh, uint32_t *meter_id) { const struct ofp13_meter_multipart_request *omr = ofpmsg_body(oh); *meter_id = ntohl(omr->meter_id); } struct ofpbuf * ofputil_encode_meter_request(enum ofp_version ofp_version, enum ofputil_meter_request_type type, uint32_t meter_id) { struct ofpbuf *msg; enum ofpraw raw; switch (type) { case OFPUTIL_METER_CONFIG: raw = OFPRAW_OFPST13_METER_CONFIG_REQUEST; break; case OFPUTIL_METER_STATS: raw = OFPRAW_OFPST13_METER_REQUEST; break; default: case OFPUTIL_METER_FEATURES: raw = OFPRAW_OFPST13_METER_FEATURES_REQUEST; break; } msg = ofpraw_alloc(raw, ofp_version, 0); if (type != OFPUTIL_METER_FEATURES) { struct ofp13_meter_multipart_request *omr; omr = ofpbuf_put_zeros(msg, sizeof *omr); omr->meter_id = htonl(meter_id); } return msg; } static void ofputil_put_bands(uint16_t n_bands, const struct ofputil_meter_band *mb, struct ofpbuf *msg) { uint16_t n = 0; for (n = 0; n < n_bands; ++n) { /* Currently all band types have the same size. */ struct ofp13_meter_band_dscp_remark *ombh; size_t ombh_len = sizeof *ombh; ombh = ofpbuf_put_zeros(msg, ombh_len); ombh->type = htons(mb->type); ombh->len = htons(ombh_len); ombh->rate = htonl(mb->rate); ombh->burst_size = htonl(mb->burst_size); ombh->prec_level = mb->prec_level; mb++; } } /* Encode a meter config for 'mc' and append it to 'replies'. */ void ofputil_append_meter_config(struct ovs_list *replies, const struct ofputil_meter_config *mc) { struct ofpbuf *msg = ofpbuf_from_list(ovs_list_back(replies)); size_t start_ofs = msg->size; struct ofp13_meter_config *reply; ofpbuf_put_uninit(msg, sizeof *reply); ofputil_put_bands(mc->n_bands, mc->bands, msg); reply = ofpbuf_at_assert(msg, start_ofs, sizeof *reply); reply->flags = htons(mc->flags); reply->meter_id = htonl(mc->meter_id); reply->length = htons(msg->size - start_ofs); ofpmp_postappend(replies, start_ofs); } /* Encode a meter stat for 'ms' and append it to 'replies'.
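A minimal sketch of a handler, assuming 'request' is the received OFPMP_METER request header and that meter 1 exists with no bands:

    struct ovs_list replies;
    ofpmp_init(&replies, request);
    struct ofputil_meter_stats ms = { .meter_id = 1 };
    ofputil_append_meter_stats(&replies, &ms);
    ... then send every ofpbuf queued on 'replies' ...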
*/ void ofputil_append_meter_stats(struct ovs_list *replies, const struct ofputil_meter_stats *ms) { struct ofp13_meter_stats *reply; uint16_t n = 0; uint16_t len; len = sizeof *reply + ms->n_bands * sizeof(struct ofp13_meter_band_stats); reply = ofpmp_append(replies, len); reply->meter_id = htonl(ms->meter_id); reply->len = htons(len); memset(reply->pad, 0, sizeof reply->pad); reply->flow_count = htonl(ms->flow_count); reply->packet_in_count = htonll(ms->packet_in_count); reply->byte_in_count = htonll(ms->byte_in_count); reply->duration_sec = htonl(ms->duration_sec); reply->duration_nsec = htonl(ms->duration_nsec); for (n = 0; n < ms->n_bands; ++n) { const struct ofputil_meter_band_stats *src = &ms->bands[n]; struct ofp13_meter_band_stats *dst = &reply->band_stats[n]; dst->packet_band_count = htonll(src->packet_count); dst->byte_band_count = htonll(src->byte_count); } } /* Converts an OFPMP_METER_CONFIG reply in 'msg' into an abstract * ofputil_meter_config in 'mc', with mc->bands pointing to bands decoded into * 'bands'. The caller must have initialized 'bands' and retains ownership of * it across the call. * * Multiple OFPST13_METER_CONFIG replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. 'bands' is cleared for each reply. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_meter_config(struct ofpbuf *msg, struct ofputil_meter_config *mc, struct ofpbuf *bands) { const struct ofp13_meter_config *omc; enum ofperr err; /* Pull OpenFlow headers for the first call. */ if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } omc = ofpbuf_try_pull(msg, sizeof *omc); if (!omc) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPMP_METER_CONFIG reply has %"PRIu32" leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } ofpbuf_clear(bands); err = ofputil_pull_bands(msg, ntohs(omc->length) - sizeof *omc, &mc->n_bands, bands); if (err) { return err; } mc->meter_id = ntohl(omc->meter_id); mc->flags = ntohs(omc->flags); mc->bands = bands->data; return 0; } static enum ofperr ofputil_pull_band_stats(struct ofpbuf *msg, size_t len, uint16_t *n_bands, struct ofpbuf *bands) { const struct ofp13_meter_band_stats *ombs; struct ofputil_meter_band_stats *mbs; uint16_t n, i; ombs = ofpbuf_try_pull(msg, len); if (!ombs) { return OFPERR_OFPBRC_BAD_LEN; } n = len / sizeof *ombs; if (len != n * sizeof *ombs) { return OFPERR_OFPBRC_BAD_LEN; } mbs = ofpbuf_put_uninit(bands, len); for (i = 0; i < n; ++i) { mbs[i].packet_count = ntohll(ombs[i].packet_band_count); mbs[i].byte_count = ntohll(ombs[i].byte_band_count); } *n_bands = n; return 0; } /* Converts an OFPMP_METER reply in 'msg' into an abstract * ofputil_meter_stats in 'ms', with ms->bands pointing to band stats * decoded into 'bands'. * * Multiple OFPMP_METER replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. 'bands' is cleared for each reply. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_meter_stats(struct ofpbuf *msg, struct ofputil_meter_stats *ms, struct ofpbuf *bands) { const struct ofp13_meter_stats *oms; enum ofperr err; /* Pull OpenFlow headers for the first call. 
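Subsequent calls resume where the previous one left off. An illustrative consumer loop (a sketch only; EOF and decode errors both end this loop, so real code should distinguish the two return values):

    struct ofpbuf bands;
    ofpbuf_init(&bands, 64);
    struct ofputil_meter_stats ms;
    while (!ofputil_decode_meter_stats(&reply, &ms, &bands)) {
        ... consume 'ms' ...
    }
    ofpbuf_uninit(&bands);

where 'reply' is the received multipart reply as a struct ofpbuf.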
*/ if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } oms = ofpbuf_try_pull(msg, sizeof *oms); if (!oms) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPMP_METER reply has %"PRIu32" leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } ofpbuf_clear(bands); err = ofputil_pull_band_stats(msg, ntohs(oms->len) - sizeof *oms, &ms->n_bands, bands); if (err) { return err; } ms->meter_id = ntohl(oms->meter_id); ms->flow_count = ntohl(oms->flow_count); ms->packet_in_count = ntohll(oms->packet_in_count); ms->byte_in_count = ntohll(oms->byte_in_count); ms->duration_sec = ntohl(oms->duration_sec); ms->duration_nsec = ntohl(oms->duration_nsec); ms->bands = bands->data; return 0; } void ofputil_decode_meter_features(const struct ofp_header *oh, struct ofputil_meter_features *mf) { const struct ofp13_meter_features *omf = ofpmsg_body(oh); mf->max_meters = ntohl(omf->max_meter); mf->band_types = ntohl(omf->band_types); mf->capabilities = ntohl(omf->capabilities); mf->max_bands = omf->max_bands; mf->max_color = omf->max_color; } struct ofpbuf * ofputil_encode_meter_features_reply(const struct ofputil_meter_features *mf, const struct ofp_header *request) { struct ofpbuf *reply; struct ofp13_meter_features *omf; reply = ofpraw_alloc_stats_reply(request, 0); omf = ofpbuf_put_zeros(reply, sizeof *omf); omf->max_meter = htonl(mf->max_meters); omf->band_types = htonl(mf->band_types); omf->capabilities = htonl(mf->capabilities); omf->max_bands = mf->max_bands; omf->max_color = mf->max_color; return reply; } struct ofpbuf * ofputil_encode_meter_mod(enum ofp_version ofp_version, const struct ofputil_meter_mod *mm) { struct ofpbuf *msg; struct ofp13_meter_mod *omm; msg = ofpraw_alloc(OFPRAW_OFPT13_METER_MOD, ofp_version, NXM_TYPICAL_LEN + mm->meter.n_bands * 16); omm = ofpbuf_put_zeros(msg, sizeof *omm); omm->command = htons(mm->command); if (mm->command != OFPMC13_DELETE) { omm->flags = htons(mm->meter.flags); } omm->meter_id = htonl(mm->meter.meter_id); ofputil_put_bands(mm->meter.n_bands, mm->meter.bands, msg); ofpmsg_update_length(msg); return msg; } static ovs_be16 ofputil_tid_command(const struct ofputil_flow_mod *fm, enum ofputil_protocol protocol) { return htons(protocol & OFPUTIL_P_TID ? (fm->command & 0xff) | (fm->table_id << 8) : fm->command); } /* Converts 'fm' into an OFPT_FLOW_MOD or NXT_FLOW_MOD message according to * 'protocol' and returns the message. 
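An illustrative sketch, encoding a minimal priority-0, table-0 "match everything, no actions" (i.e. drop) flow for OpenFlow 1.3:

    struct ofputil_flow_mod fm;
    memset(&fm, 0, sizeof fm);
    match_init_catchall(&fm.match);
    fm.command = OFPFC_ADD;
    fm.buffer_id = UINT32_MAX;
    fm.out_port = OFPP_ANY;
    fm.out_group = OFPG_ANY;
    struct ofpbuf *msg = ofputil_encode_flow_mod(&fm, OFPUTIL_P_OF13_OXM);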
*/ struct ofpbuf * ofputil_encode_flow_mod(const struct ofputil_flow_mod *fm, enum ofputil_protocol protocol) { enum ofp_version version = ofputil_protocol_to_ofp_version(protocol); ovs_be16 raw_flags = ofputil_encode_flow_mod_flags(fm->flags, version); struct ofpbuf *msg; switch (protocol) { case OFPUTIL_P_OF11_STD: case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: { struct ofp11_flow_mod *ofm; int tailroom; tailroom = ofputil_match_typical_len(protocol) + fm->ofpacts_len; msg = ofpraw_alloc(OFPRAW_OFPT11_FLOW_MOD, version, tailroom); ofm = ofpbuf_put_zeros(msg, sizeof *ofm); if ((protocol == OFPUTIL_P_OF11_STD && (fm->command == OFPFC_MODIFY || fm->command == OFPFC_MODIFY_STRICT) && fm->cookie_mask == htonll(0)) || fm->command == OFPFC_ADD) { ofm->cookie = fm->new_cookie; } else { ofm->cookie = fm->cookie & fm->cookie_mask; } ofm->cookie_mask = fm->cookie_mask; if (fm->table_id != OFPTT_ALL || (protocol != OFPUTIL_P_OF11_STD && (fm->command == OFPFC_DELETE || fm->command == OFPFC_DELETE_STRICT))) { ofm->table_id = fm->table_id; } else { ofm->table_id = 0; } ofm->command = fm->command; ofm->idle_timeout = htons(fm->idle_timeout); ofm->hard_timeout = htons(fm->hard_timeout); ofm->priority = htons(fm->priority); ofm->buffer_id = htonl(fm->buffer_id); ofm->out_port = ofputil_port_to_ofp11(fm->out_port); ofm->out_group = htonl(fm->out_group); ofm->flags = raw_flags; if (version >= OFP14_VERSION && fm->command == OFPFC_ADD) { ofm->importance = htons(fm->importance); } else { ofm->importance = 0; } ofputil_put_ofp11_match(msg, &fm->match, protocol); ofpacts_put_openflow_instructions(fm->ofpacts, fm->ofpacts_len, msg, version); break; } case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: { struct ofp10_flow_mod *ofm; msg = ofpraw_alloc(OFPRAW_OFPT10_FLOW_MOD, OFP10_VERSION, fm->ofpacts_len); ofm = ofpbuf_put_zeros(msg, sizeof *ofm); ofputil_match_to_ofp10_match(&fm->match, &ofm->match); ofm->cookie = fm->new_cookie; ofm->command = ofputil_tid_command(fm, protocol); ofm->idle_timeout = htons(fm->idle_timeout); ofm->hard_timeout = htons(fm->hard_timeout); ofm->priority = htons(fm->priority); ofm->buffer_id = htonl(fm->buffer_id); ofm->out_port = htons(ofp_to_u16(fm->out_port)); ofm->flags = raw_flags; ofpacts_put_openflow_actions(fm->ofpacts, fm->ofpacts_len, msg, version); break; } case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: { struct nx_flow_mod *nfm; int match_len; msg = ofpraw_alloc(OFPRAW_NXT_FLOW_MOD, OFP10_VERSION, NXM_TYPICAL_LEN + fm->ofpacts_len); nfm = ofpbuf_put_zeros(msg, sizeof *nfm); nfm->command = ofputil_tid_command(fm, protocol); nfm->cookie = fm->new_cookie; match_len = nx_put_match(msg, &fm->match, fm->cookie, fm->cookie_mask); nfm = msg->msg; nfm->idle_timeout = htons(fm->idle_timeout); nfm->hard_timeout = htons(fm->hard_timeout); nfm->priority = htons(fm->priority); nfm->buffer_id = htonl(fm->buffer_id); nfm->out_port = htons(ofp_to_u16(fm->out_port)); nfm->flags = raw_flags; nfm->match_len = htons(match_len); ofpacts_put_openflow_actions(fm->ofpacts, fm->ofpacts_len, msg, version); break; } default: OVS_NOT_REACHED(); } ofpmsg_update_length(msg); return msg; } static enum ofperr ofputil_decode_ofpst10_flow_request(struct ofputil_flow_stats_request *fsr, const struct ofp10_flow_stats_request *ofsr, bool aggregate) { fsr->aggregate = aggregate; ofputil_match_from_ofp10_match(&ofsr->match, &fsr->match); fsr->out_port = u16_to_ofp(ntohs(ofsr->out_port)); fsr->out_group = OFPG_ANY; fsr->table_id 
= ofsr->table_id; fsr->cookie = fsr->cookie_mask = htonll(0); return 0; } static enum ofperr ofputil_decode_ofpst11_flow_request(struct ofputil_flow_stats_request *fsr, struct ofpbuf *b, bool aggregate, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map) { const struct ofp11_flow_stats_request *ofsr; enum ofperr error; ofsr = ofpbuf_pull(b, sizeof *ofsr); fsr->aggregate = aggregate; fsr->table_id = ofsr->table_id; error = ofputil_port_from_ofp11(ofsr->out_port, &fsr->out_port); if (error) { return error; } fsr->out_group = ntohl(ofsr->out_group); fsr->cookie = ofsr->cookie; fsr->cookie_mask = ofsr->cookie_mask; error = ofputil_pull_ofp11_match(b, tun_table, vl_mff_map, &fsr->match, NULL); if (error) { return error; } return 0; } static enum ofperr ofputil_decode_nxst_flow_request(struct ofputil_flow_stats_request *fsr, struct ofpbuf *b, bool aggregate, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map) { const struct nx_flow_stats_request *nfsr; enum ofperr error; nfsr = ofpbuf_pull(b, sizeof *nfsr); error = nx_pull_match(b, ntohs(nfsr->match_len), &fsr->match, &fsr->cookie, &fsr->cookie_mask, tun_table, vl_mff_map); if (error) { return error; } if (b->size) { return OFPERR_OFPBRC_BAD_LEN; } fsr->aggregate = aggregate; fsr->out_port = u16_to_ofp(ntohs(nfsr->out_port)); fsr->out_group = OFPG_ANY; fsr->table_id = nfsr->table_id; return 0; } /* Constructs and returns an OFPT_QUEUE_GET_CONFIG request for the specified * 'port' and 'queue', suitable for OpenFlow version 'version'. * * 'queue' is honored only for OpenFlow 1.4 and later; older versions always * request all queues. */ struct ofpbuf * ofputil_encode_queue_get_config_request(enum ofp_version version, ofp_port_t port, uint32_t queue) { struct ofpbuf *request; if (version == OFP10_VERSION) { struct ofp10_queue_get_config_request *qgcr10; request = ofpraw_alloc(OFPRAW_OFPT10_QUEUE_GET_CONFIG_REQUEST, version, 0); qgcr10 = ofpbuf_put_zeros(request, sizeof *qgcr10); qgcr10->port = htons(ofp_to_u16(port)); } else if (version < OFP14_VERSION) { struct ofp11_queue_get_config_request *qgcr11; request = ofpraw_alloc(OFPRAW_OFPT11_QUEUE_GET_CONFIG_REQUEST, version, 0); qgcr11 = ofpbuf_put_zeros(request, sizeof *qgcr11); qgcr11->port = ofputil_port_to_ofp11(port); } else { struct ofp14_queue_desc_request *qdr14; request = ofpraw_alloc(OFPRAW_OFPST14_QUEUE_DESC_REQUEST, version, 0); qdr14 = ofpbuf_put_zeros(request, sizeof *qdr14); qdr14->port = ofputil_port_to_ofp11(port); qdr14->queue = htonl(queue); } return request; } /* Parses OFPT_QUEUE_GET_CONFIG request 'oh', storing the port specified by the * request into '*port'. Returns 0 if successful, otherwise an OpenFlow error * code. 
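For requests earlier than OpenFlow 1.4, which cannot name a single queue, '*queue' is set to OFPQ_ALL. A typical handler begins like this sketch:

    ofp_port_t port;
    uint32_t queue;
    enum ofperr error = ofputil_decode_queue_get_config_request(oh, &port, &queue);
    if (error) {
        ... reply to the sender with 'error' ...
    }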
*/ enum ofperr ofputil_decode_queue_get_config_request(const struct ofp_header *oh, ofp_port_t *port, uint32_t *queue) { const struct ofp10_queue_get_config_request *qgcr10; const struct ofp11_queue_get_config_request *qgcr11; const struct ofp14_queue_desc_request *qdr14; struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); switch ((int) raw) { case OFPRAW_OFPT10_QUEUE_GET_CONFIG_REQUEST: qgcr10 = b.data; *port = u16_to_ofp(ntohs(qgcr10->port)); *queue = OFPQ_ALL; break; case OFPRAW_OFPT11_QUEUE_GET_CONFIG_REQUEST: qgcr11 = b.data; *queue = OFPQ_ALL; enum ofperr error = ofputil_port_from_ofp11(qgcr11->port, port); if (error || *port == OFPP_ANY) { return error; } break; case OFPRAW_OFPST14_QUEUE_DESC_REQUEST: qdr14 = b.data; *queue = ntohl(qdr14->queue); return ofputil_port_from_ofp11(qdr14->port, port); default: OVS_NOT_REACHED(); } return (ofp_to_u16(*port) < ofp_to_u16(OFPP_MAX) ? 0 : OFPERR_OFPQOFC_BAD_PORT); } /* Constructs and returns the beginning of a reply to * OFPT_QUEUE_GET_CONFIG_REQUEST or OFPMP_QUEUE_DESC request 'oh'. The caller * may append information about individual queues with * ofputil_append_queue_get_config_reply(). */ void ofputil_start_queue_get_config_reply(const struct ofp_header *request, struct ovs_list *replies) { struct ofpbuf *reply; enum ofperr error; ofp_port_t port; uint32_t queue; error = ofputil_decode_queue_get_config_request(request, &port, &queue); ovs_assert(!error); enum ofpraw raw = ofpraw_decode_assert(request); switch ((int) raw) { case OFPRAW_OFPT10_QUEUE_GET_CONFIG_REQUEST: reply = ofpraw_alloc_reply(OFPRAW_OFPT10_QUEUE_GET_CONFIG_REPLY, request, 0); struct ofp10_queue_get_config_reply *qgcr10 = ofpbuf_put_zeros(reply, sizeof *qgcr10); qgcr10->port = htons(ofp_to_u16(port)); break; case OFPRAW_OFPT11_QUEUE_GET_CONFIG_REQUEST: reply = ofpraw_alloc_reply(OFPRAW_OFPT11_QUEUE_GET_CONFIG_REPLY, request, 0); struct ofp11_queue_get_config_reply *qgcr11 = ofpbuf_put_zeros(reply, sizeof *qgcr11); qgcr11->port = ofputil_port_to_ofp11(port); break; case OFPRAW_OFPST14_QUEUE_DESC_REQUEST: reply = ofpraw_alloc_stats_reply(request, 0); break; default: OVS_NOT_REACHED(); } ovs_list_init(replies); ovs_list_push_back(replies, &reply->list_node); } static void put_ofp10_queue_rate(struct ofpbuf *reply, enum ofp10_queue_properties property, uint16_t rate) { if (rate != UINT16_MAX) { struct ofp10_queue_prop_rate *oqpr; oqpr = ofpbuf_put_zeros(reply, sizeof *oqpr); oqpr->prop_header.property = htons(property); oqpr->prop_header.len = htons(sizeof *oqpr); oqpr->rate = htons(rate); } } static void put_ofp14_queue_rate(struct ofpbuf *reply, enum ofp14_queue_desc_prop_type type, uint16_t rate) { if (rate != UINT16_MAX) { ofpprop_put_u16(reply, type, rate); } } void ofputil_append_queue_get_config_reply(const struct ofputil_queue_config *qc, struct ovs_list *replies) { enum ofp_version ofp_version = ofpmp_version(replies); struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); size_t start_ofs = reply->size; size_t len_ofs; ovs_be16 *len; if (ofp_version < OFP14_VERSION) { if (ofp_version < OFP12_VERSION) { struct ofp10_packet_queue *opq10; opq10 = ofpbuf_put_zeros(reply, sizeof *opq10); opq10->queue_id = htonl(qc->queue); len_ofs = (char *) &opq10->len - (char *) reply->data; } else { struct ofp12_packet_queue *opq12; opq12 = ofpbuf_put_zeros(reply, sizeof *opq12); opq12->port = ofputil_port_to_ofp11(qc->port); opq12->queue_id = htonl(qc->queue); len_ofs = (char *) &opq12->len - (char *) reply->data; } 
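/* Both pre-OF1.4 queue headers above record the offset of their 16-bit length field in 'len_ofs'; the length is back-filled once the rate properties are in place. A rate of UINT16_MAX means "unconfigured", so the corresponding property is simply omitted. */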
put_ofp10_queue_rate(reply, OFPQT10_MIN_RATE, qc->min_rate); put_ofp10_queue_rate(reply, OFPQT11_MAX_RATE, qc->max_rate); } else { struct ofp14_queue_desc *oqd = ofpbuf_put_zeros(reply, sizeof *oqd); oqd->port_no = ofputil_port_to_ofp11(qc->port); oqd->queue_id = htonl(qc->queue); len_ofs = (char *) &oqd->len - (char *) reply->data; put_ofp14_queue_rate(reply, OFPQDPT14_MIN_RATE, qc->min_rate); put_ofp14_queue_rate(reply, OFPQDPT14_MAX_RATE, qc->max_rate); } len = ofpbuf_at(reply, len_ofs, sizeof *len); *len = htons(reply->size - start_ofs); if (ofp_version >= OFP14_VERSION) { ofpmp_postappend(replies, start_ofs); } } static enum ofperr parse_ofp10_queue_rate(const struct ofp10_queue_prop_header *hdr, uint16_t *rate) { const struct ofp10_queue_prop_rate *oqpr; if (hdr->len == htons(sizeof *oqpr)) { oqpr = (const struct ofp10_queue_prop_rate *) hdr; *rate = ntohs(oqpr->rate); return 0; } else { return OFPERR_OFPBRC_BAD_LEN; } } static int ofputil_pull_queue_get_config_reply10(struct ofpbuf *msg, struct ofputil_queue_config *queue) { const struct ofp_header *oh = msg->header; unsigned int opq_len; /* Length of protocol-specific queue header. */ unsigned int len; /* Total length of queue + properties. */ /* Obtain the port number from the message header. */ if (oh->version == OFP10_VERSION) { const struct ofp10_queue_get_config_reply *oqgcr10 = msg->msg; queue->port = u16_to_ofp(ntohs(oqgcr10->port)); } else { const struct ofp11_queue_get_config_reply *oqgcr11 = msg->msg; enum ofperr error = ofputil_port_from_ofp11(oqgcr11->port, &queue->port); if (error) { return error; } } /* Pull off the queue header and get the queue number and length. */ if (oh->version < OFP12_VERSION) { const struct ofp10_packet_queue *opq10; opq10 = ofpbuf_try_pull(msg, sizeof *opq10); if (!opq10) { return OFPERR_OFPBRC_BAD_LEN; } queue->queue = ntohl(opq10->queue_id); len = ntohs(opq10->len); opq_len = sizeof *opq10; } else { const struct ofp12_packet_queue *opq12; opq12 = ofpbuf_try_pull(msg, sizeof *opq12); if (!opq12) { return OFPERR_OFPBRC_BAD_LEN; } queue->queue = ntohl(opq12->queue_id); len = ntohs(opq12->len); opq_len = sizeof *opq12; } /* Length check. */ if (len < opq_len || len > msg->size + opq_len || len % 8) { return OFPERR_OFPBRC_BAD_LEN; } len -= opq_len; /* Pull properties. The format of these properties differs from that used in * OF1.4+, so we can't use the common property functions. */ while (len > 0) { const struct ofp10_queue_prop_header *hdr; unsigned int property; unsigned int prop_len; enum ofperr error = 0; hdr = ofpbuf_at_assert(msg, 0, sizeof *hdr); prop_len = ntohs(hdr->len); if (prop_len < sizeof *hdr || prop_len > len || prop_len % 8) { return OFPERR_OFPBRC_BAD_LEN; } property = ntohs(hdr->property); switch (property) { case OFPQT10_MIN_RATE: error = parse_ofp10_queue_rate(hdr, &queue->min_rate); break; case OFPQT11_MAX_RATE: error = parse_ofp10_queue_rate(hdr, &queue->max_rate); break; default: VLOG_INFO_RL(&bad_ofmsg_rl, "unknown queue property %u", property); break; } if (error) { return error; } ofpbuf_pull(msg, prop_len); len -= prop_len; } return 0; } static int ofputil_pull_queue_get_config_reply14(struct ofpbuf *msg, struct ofputil_queue_config *queue) { struct ofp14_queue_desc *oqd14 = ofpbuf_try_pull(msg, sizeof *oqd14); if (!oqd14) { return OFPERR_OFPBRC_BAD_LEN; } enum ofperr error = ofputil_port_from_ofp11(oqd14->port_no, &queue->port); if (error) { return error; } queue->queue = ntohl(oqd14->queue_id); /* Length check.
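The wire 'len' includes the ofp14_queue_desc header itself; after subtracting the header, the remaining property bytes must fit within 'msg' and be a multiple of 8, the property alignment unit.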
*/ unsigned int len = ntohs(oqd14->len); if (len < sizeof *oqd14 || len > msg->size + sizeof *oqd14 || len % 8) { return OFPERR_OFPBRC_BAD_LEN; } len -= sizeof *oqd14; struct ofpbuf properties = ofpbuf_const_initializer(ofpbuf_pull(msg, len), len); while (properties.size > 0) { struct ofpbuf payload; uint64_t type; error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case OFPQDPT14_MIN_RATE: error = ofpprop_parse_u16(&payload, &queue->min_rate); break; case OFPQDPT14_MAX_RATE: error = ofpprop_parse_u16(&payload, &queue->max_rate); break; default: error = OFPPROP_UNKNOWN(true, "queue desc", type); break; } if (error) { return error; } } return 0; } /* Decodes information about a queue from the OFPT_QUEUE_GET_CONFIG_REPLY in * 'reply' and stores it in '*queue'. ofputil_decode_queue_get_config_reply() * must already have pulled off the main header. * * This function returns EOF if the last queue has already been decoded, 0 if a * queue was successfully decoded into '*queue', or an ofperr if there was a * problem decoding 'reply'. */ int ofputil_pull_queue_get_config_reply(struct ofpbuf *msg, struct ofputil_queue_config *queue) { enum ofpraw raw; if (!msg->header) { /* Pull OpenFlow header. */ raw = ofpraw_pull_assert(msg); /* Pull protocol-specific ofp_queue_get_config_reply header (OF1.4 * doesn't have one at all). */ if (raw == OFPRAW_OFPT10_QUEUE_GET_CONFIG_REPLY) { ofpbuf_pull(msg, sizeof(struct ofp10_queue_get_config_reply)); } else if (raw == OFPRAW_OFPT11_QUEUE_GET_CONFIG_REPLY) { ofpbuf_pull(msg, sizeof(struct ofp11_queue_get_config_reply)); } else { ovs_assert(raw == OFPRAW_OFPST14_QUEUE_DESC_REPLY); } } else { raw = ofpraw_decode_assert(msg->header); } queue->min_rate = UINT16_MAX; queue->max_rate = UINT16_MAX; if (!msg->size) { return EOF; } else if (raw == OFPRAW_OFPST14_QUEUE_DESC_REPLY) { return ofputil_pull_queue_get_config_reply14(msg, queue); } else { return ofputil_pull_queue_get_config_reply10(msg, queue); } } /* Converts an OFPST_FLOW, OFPST_AGGREGATE, NXST_FLOW, or NXST_AGGREGATE * request 'oh', into an abstract flow_stats_request in 'fsr'. Returns 0 if * successful, otherwise an OpenFlow error code. * * 'vl_mff_map' is an optional parameter that is used to validate the length * of variable length mf_fields in 'match'. If it is not provided, the * default mf_fields with maximum length will be used. */ enum ofperr ofputil_decode_flow_stats_request(struct ofputil_flow_stats_request *fsr, const struct ofp_header *oh, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); switch ((int) raw) { case OFPRAW_OFPST10_FLOW_REQUEST: return ofputil_decode_ofpst10_flow_request(fsr, b.data, false); case OFPRAW_OFPST10_AGGREGATE_REQUEST: return ofputil_decode_ofpst10_flow_request(fsr, b.data, true); case OFPRAW_OFPST11_FLOW_REQUEST: return ofputil_decode_ofpst11_flow_request(fsr, &b, false, tun_table, vl_mff_map); case OFPRAW_OFPST11_AGGREGATE_REQUEST: return ofputil_decode_ofpst11_flow_request(fsr, &b, true, tun_table, vl_mff_map); case OFPRAW_NXST_FLOW_REQUEST: return ofputil_decode_nxst_flow_request(fsr, &b, false, tun_table, vl_mff_map); case OFPRAW_NXST_AGGREGATE_REQUEST: return ofputil_decode_nxst_flow_request(fsr, &b, true, tun_table, vl_mff_map); default: /* Hey, the caller lied. 
*/ OVS_NOT_REACHED(); } } /* Converts abstract flow_stats_request 'fsr' into an OFPST_FLOW, * OFPST_AGGREGATE, NXST_FLOW, or NXST_AGGREGATE request 'oh' according to * 'protocol', and returns the message. */ struct ofpbuf * ofputil_encode_flow_stats_request(const struct ofputil_flow_stats_request *fsr, enum ofputil_protocol protocol) { struct ofpbuf *msg; enum ofpraw raw; switch (protocol) { case OFPUTIL_P_OF11_STD: case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: { struct ofp11_flow_stats_request *ofsr; raw = (fsr->aggregate ? OFPRAW_OFPST11_AGGREGATE_REQUEST : OFPRAW_OFPST11_FLOW_REQUEST); msg = ofpraw_alloc(raw, ofputil_protocol_to_ofp_version(protocol), ofputil_match_typical_len(protocol)); ofsr = ofpbuf_put_zeros(msg, sizeof *ofsr); ofsr->table_id = fsr->table_id; ofsr->out_port = ofputil_port_to_ofp11(fsr->out_port); ofsr->out_group = htonl(fsr->out_group); ofsr->cookie = fsr->cookie; ofsr->cookie_mask = fsr->cookie_mask; ofputil_put_ofp11_match(msg, &fsr->match, protocol); break; } case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: { struct ofp10_flow_stats_request *ofsr; raw = (fsr->aggregate ? OFPRAW_OFPST10_AGGREGATE_REQUEST : OFPRAW_OFPST10_FLOW_REQUEST); msg = ofpraw_alloc(raw, OFP10_VERSION, 0); ofsr = ofpbuf_put_zeros(msg, sizeof *ofsr); ofputil_match_to_ofp10_match(&fsr->match, &ofsr->match); ofsr->table_id = fsr->table_id; ofsr->out_port = htons(ofp_to_u16(fsr->out_port)); break; } case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: { struct nx_flow_stats_request *nfsr; int match_len; raw = (fsr->aggregate ? OFPRAW_NXST_AGGREGATE_REQUEST : OFPRAW_NXST_FLOW_REQUEST); msg = ofpraw_alloc(raw, OFP10_VERSION, NXM_TYPICAL_LEN); ofpbuf_put_zeros(msg, sizeof *nfsr); match_len = nx_put_match(msg, &fsr->match, fsr->cookie, fsr->cookie_mask); nfsr = msg->msg; nfsr->out_port = htons(ofp_to_u16(fsr->out_port)); nfsr->match_len = htons(match_len); nfsr->table_id = fsr->table_id; break; } default: OVS_NOT_REACHED(); } return msg; } /* Converts an OFPST_FLOW or NXST_FLOW reply in 'msg' into an abstract * ofputil_flow_stats in 'fs'. * * Multiple OFPST_FLOW or NXST_FLOW replies can be packed into a single * OpenFlow message. Calling this function multiple times for a single 'msg' * iterates through the replies. The caller must initially leave 'msg''s layer * pointers null and not modify them between calls. * * Most switches don't send the values needed to populate fs->idle_age and * fs->hard_age, so those members will usually be set to 0. If the switch from * which 'msg' originated is known to implement NXT_FLOW_AGE, then pass * 'flow_age_extension' as true so that the contents of 'msg' determine the * 'idle_age' and 'hard_age' members in 'fs'. * * Uses 'ofpacts' to store the abstract OFPACT_* version of the flow stats * reply's actions. The caller must initialize 'ofpacts' and retains ownership * of it. 'fs->ofpacts' will point into the 'ofpacts' buffer. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_flow_stats_reply(struct ofputil_flow_stats *fs, struct ofpbuf *msg, bool flow_age_extension, struct ofpbuf *ofpacts) { const struct ofp_header *oh; size_t instructions_len; enum ofperr error; enum ofpraw raw; error = (msg->header ? 
ofpraw_decode(&raw, msg->header) : ofpraw_pull(&raw, msg)); if (error) { return error; } oh = msg->header; if (!msg->size) { return EOF; } else if (raw == OFPRAW_OFPST11_FLOW_REPLY || raw == OFPRAW_OFPST13_FLOW_REPLY) { const struct ofp11_flow_stats *ofs; size_t length; uint16_t padded_match_len; ofs = ofpbuf_try_pull(msg, sizeof *ofs); if (!ofs) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply has %"PRIu32" leftover " "bytes at end", msg->size); return EINVAL; } length = ntohs(ofs->length); if (length < sizeof *ofs) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply claims invalid " "length %"PRIuSIZE, length); return EINVAL; } if (ofputil_pull_ofp11_match(msg, NULL, NULL, &fs->match, &padded_match_len)) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply bad match"); return EINVAL; } instructions_len = length - sizeof *ofs - padded_match_len; fs->priority = ntohs(ofs->priority); fs->table_id = ofs->table_id; fs->duration_sec = ntohl(ofs->duration_sec); fs->duration_nsec = ntohl(ofs->duration_nsec); fs->idle_timeout = ntohs(ofs->idle_timeout); fs->hard_timeout = ntohs(ofs->hard_timeout); if (oh->version >= OFP14_VERSION) { fs->importance = ntohs(ofs->importance); } else { fs->importance = 0; } if (raw == OFPRAW_OFPST13_FLOW_REPLY) { error = ofputil_decode_flow_mod_flags(ofs->flags, -1, oh->version, &fs->flags); if (error) { return error; } } else { fs->flags = 0; } fs->idle_age = -1; fs->hard_age = -1; fs->cookie = ofs->cookie; fs->packet_count = ntohll(ofs->packet_count); fs->byte_count = ntohll(ofs->byte_count); } else if (raw == OFPRAW_OFPST10_FLOW_REPLY) { const struct ofp10_flow_stats *ofs; size_t length; ofs = ofpbuf_try_pull(msg, sizeof *ofs); if (!ofs) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply has %"PRIu32" leftover " "bytes at end", msg->size); return EINVAL; } length = ntohs(ofs->length); if (length < sizeof *ofs) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply claims invalid " "length %"PRIuSIZE, length); return EINVAL; } instructions_len = length - sizeof *ofs; fs->cookie = get_32aligned_be64(&ofs->cookie); ofputil_match_from_ofp10_match(&ofs->match, &fs->match); fs->priority = ntohs(ofs->priority); fs->table_id = ofs->table_id; fs->duration_sec = ntohl(ofs->duration_sec); fs->duration_nsec = ntohl(ofs->duration_nsec); fs->idle_timeout = ntohs(ofs->idle_timeout); fs->hard_timeout = ntohs(ofs->hard_timeout); fs->importance = 0; fs->idle_age = -1; fs->hard_age = -1; fs->packet_count = ntohll(get_32aligned_be64(&ofs->packet_count)); fs->byte_count = ntohll(get_32aligned_be64(&ofs->byte_count)); fs->flags = 0; } else if (raw == OFPRAW_NXST_FLOW_REPLY) { const struct nx_flow_stats *nfs; size_t match_len, length; nfs = ofpbuf_try_pull(msg, sizeof *nfs); if (!nfs) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW reply has %"PRIu32" leftover " "bytes at end", msg->size); return EINVAL; } length = ntohs(nfs->length); match_len = ntohs(nfs->match_len); if (length < sizeof *nfs + ROUND_UP(match_len, 8)) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW reply with match_len=%"PRIuSIZE" " "claims invalid length %"PRIuSIZE, match_len, length); return EINVAL; } if (nx_pull_match(msg, match_len, &fs->match, NULL, NULL, NULL, NULL)) { return EINVAL; } instructions_len = length - sizeof *nfs - ROUND_UP(match_len, 8); fs->cookie = nfs->cookie; fs->table_id = nfs->table_id; fs->duration_sec = ntohl(nfs->duration_sec); fs->duration_nsec = ntohl(nfs->duration_nsec); fs->priority = ntohs(nfs->priority); fs->idle_timeout = ntohs(nfs->idle_timeout); fs->hard_timeout = ntohs(nfs->hard_timeout); fs->importance = 0; 
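/* NXST_FLOW reports flow ages off by one so that zero can mean "not reported": a nonzero wire value of N encodes an age of N - 1 seconds. -1 below means the switch did not report an age. */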
fs->idle_age = -1; fs->hard_age = -1; if (flow_age_extension) { if (nfs->idle_age) { fs->idle_age = ntohs(nfs->idle_age) - 1; } if (nfs->hard_age) { fs->hard_age = ntohs(nfs->hard_age) - 1; } } fs->packet_count = ntohll(nfs->packet_count); fs->byte_count = ntohll(nfs->byte_count); fs->flags = 0; } else { OVS_NOT_REACHED(); } if (ofpacts_pull_openflow_instructions(msg, instructions_len, oh->version, NULL, NULL, ofpacts)) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply bad instructions"); return EINVAL; } fs->ofpacts = ofpacts->data; fs->ofpacts_len = ofpacts->size; return 0; } /* Returns 'count' unchanged except that UINT64_MAX becomes 0. * * We use this in situations where OVS internally uses UINT64_MAX to mean * "value unknown" but OpenFlow 1.0 does not define any unknown value. */ static uint64_t unknown_to_zero(uint64_t count) { return count != UINT64_MAX ? count : 0; } /* Appends an OFPST_FLOW or NXST_FLOW reply that contains the data in 'fs' to * those already present in the list of ofpbufs in 'replies'. 'replies' should * have been initialized with ofpmp_init(). */ void ofputil_append_flow_stats_reply(const struct ofputil_flow_stats *fs, struct ovs_list *replies, const struct tun_table *tun_table) { struct ofputil_flow_stats *fs_ = CONST_CAST(struct ofputil_flow_stats *, fs); const struct tun_table *orig_tun_table; struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); size_t start_ofs = reply->size; enum ofp_version version = ofpmp_version(replies); enum ofpraw raw = ofpmp_decode_raw(replies); orig_tun_table = fs->match.flow.tunnel.metadata.tab; fs_->match.flow.tunnel.metadata.tab = tun_table; if (raw == OFPRAW_OFPST11_FLOW_REPLY || raw == OFPRAW_OFPST13_FLOW_REPLY) { struct ofp11_flow_stats *ofs; ofpbuf_put_uninit(reply, sizeof *ofs); oxm_put_match(reply, &fs->match, version); ofpacts_put_openflow_instructions(fs->ofpacts, fs->ofpacts_len, reply, version); ofs = ofpbuf_at_assert(reply, start_ofs, sizeof *ofs); ofs->length = htons(reply->size - start_ofs); ofs->table_id = fs->table_id; ofs->pad = 0; ofs->duration_sec = htonl(fs->duration_sec); ofs->duration_nsec = htonl(fs->duration_nsec); ofs->priority = htons(fs->priority); ofs->idle_timeout = htons(fs->idle_timeout); ofs->hard_timeout = htons(fs->hard_timeout); if (version >= OFP14_VERSION) { ofs->importance = htons(fs->importance); } else { ofs->importance = 0; } if (raw == OFPRAW_OFPST13_FLOW_REPLY) { ofs->flags = ofputil_encode_flow_mod_flags(fs->flags, version); } else { ofs->flags = 0; } memset(ofs->pad2, 0, sizeof ofs->pad2); ofs->cookie = fs->cookie; ofs->packet_count = htonll(unknown_to_zero(fs->packet_count)); ofs->byte_count = htonll(unknown_to_zero(fs->byte_count)); } else if (raw == OFPRAW_OFPST10_FLOW_REPLY) { struct ofp10_flow_stats *ofs; ofpbuf_put_uninit(reply, sizeof *ofs); ofpacts_put_openflow_actions(fs->ofpacts, fs->ofpacts_len, reply, version); ofs = ofpbuf_at_assert(reply, start_ofs, sizeof *ofs); ofs->length = htons(reply->size - start_ofs); ofs->table_id = fs->table_id; ofs->pad = 0; ofputil_match_to_ofp10_match(&fs->match, &ofs->match); ofs->duration_sec = htonl(fs->duration_sec); ofs->duration_nsec = htonl(fs->duration_nsec); ofs->priority = htons(fs->priority); ofs->idle_timeout = htons(fs->idle_timeout); ofs->hard_timeout = htons(fs->hard_timeout); memset(ofs->pad2, 0, sizeof ofs->pad2); put_32aligned_be64(&ofs->cookie, fs->cookie); put_32aligned_be64(&ofs->packet_count, htonll(unknown_to_zero(fs->packet_count))); put_32aligned_be64(&ofs->byte_count, htonll(unknown_to_zero(fs->byte_count))); 
} else if (raw == OFPRAW_NXST_FLOW_REPLY) { struct nx_flow_stats *nfs; int match_len; ofpbuf_put_uninit(reply, sizeof *nfs); match_len = nx_put_match(reply, &fs->match, 0, 0); ofpacts_put_openflow_actions(fs->ofpacts, fs->ofpacts_len, reply, version); nfs = ofpbuf_at_assert(reply, start_ofs, sizeof *nfs); nfs->length = htons(reply->size - start_ofs); nfs->table_id = fs->table_id; nfs->pad = 0; nfs->duration_sec = htonl(fs->duration_sec); nfs->duration_nsec = htonl(fs->duration_nsec); nfs->priority = htons(fs->priority); nfs->idle_timeout = htons(fs->idle_timeout); nfs->hard_timeout = htons(fs->hard_timeout); nfs->idle_age = htons(fs->idle_age < 0 ? 0 : fs->idle_age < UINT16_MAX ? fs->idle_age + 1 : UINT16_MAX); nfs->hard_age = htons(fs->hard_age < 0 ? 0 : fs->hard_age < UINT16_MAX ? fs->hard_age + 1 : UINT16_MAX); nfs->match_len = htons(match_len); nfs->cookie = fs->cookie; nfs->packet_count = htonll(fs->packet_count); nfs->byte_count = htonll(fs->byte_count); } else { OVS_NOT_REACHED(); } ofpmp_postappend(replies, start_ofs); fs_->match.flow.tunnel.metadata.tab = orig_tun_table; } /* Converts abstract ofputil_aggregate_stats 'stats' into an OFPST_AGGREGATE or * NXST_AGGREGATE reply matching 'request', and returns the message. */ struct ofpbuf * ofputil_encode_aggregate_stats_reply( const struct ofputil_aggregate_stats *stats, const struct ofp_header *request) { struct ofp_aggregate_stats_reply *asr; uint64_t packet_count; uint64_t byte_count; struct ofpbuf *msg; enum ofpraw raw; ofpraw_decode(&raw, request); if (raw == OFPRAW_OFPST10_AGGREGATE_REQUEST) { packet_count = unknown_to_zero(stats->packet_count); byte_count = unknown_to_zero(stats->byte_count); } else { packet_count = stats->packet_count; byte_count = stats->byte_count; } msg = ofpraw_alloc_stats_reply(request, 0); asr = ofpbuf_put_zeros(msg, sizeof *asr); put_32aligned_be64(&asr->packet_count, htonll(packet_count)); put_32aligned_be64(&asr->byte_count, htonll(byte_count)); asr->flow_count = htonl(stats->flow_count); return msg; } enum ofperr ofputil_decode_aggregate_stats_reply(struct ofputil_aggregate_stats *stats, const struct ofp_header *reply) { struct ofpbuf msg = ofpbuf_const_initializer(reply, ntohs(reply->length)); ofpraw_pull_assert(&msg); struct ofp_aggregate_stats_reply *asr = msg.msg; stats->packet_count = ntohll(get_32aligned_be64(&asr->packet_count)); stats->byte_count = ntohll(get_32aligned_be64(&asr->byte_count)); stats->flow_count = ntohl(asr->flow_count); return 0; } /* Converts an OFPT_FLOW_REMOVED or NXT_FLOW_REMOVED message 'oh' into an * abstract ofputil_flow_removed in 'fr'. Returns 0 if successful, otherwise * an OpenFlow error code. 
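An illustrative caller:

    struct ofputil_flow_removed fr;
    enum ofperr error = ofputil_decode_flow_removed(&fr, oh);
    if (!error) {
        ... e.g. account fr.packet_count and fr.byte_count,
        inspect fr.reason ...
    }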
*/ enum ofperr ofputil_decode_flow_removed(struct ofputil_flow_removed *fr, const struct ofp_header *oh) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT11_FLOW_REMOVED) { const struct ofp12_flow_removed *ofr; enum ofperr error; ofr = ofpbuf_pull(&b, sizeof *ofr); error = ofputil_pull_ofp11_match(&b, NULL, NULL, &fr->match, NULL); if (error) { return error; } fr->priority = ntohs(ofr->priority); fr->cookie = ofr->cookie; fr->reason = ofr->reason; fr->table_id = ofr->table_id; fr->duration_sec = ntohl(ofr->duration_sec); fr->duration_nsec = ntohl(ofr->duration_nsec); fr->idle_timeout = ntohs(ofr->idle_timeout); fr->hard_timeout = ntohs(ofr->hard_timeout); fr->packet_count = ntohll(ofr->packet_count); fr->byte_count = ntohll(ofr->byte_count); } else if (raw == OFPRAW_OFPT10_FLOW_REMOVED) { const struct ofp10_flow_removed *ofr; ofr = ofpbuf_pull(&b, sizeof *ofr); ofputil_match_from_ofp10_match(&ofr->match, &fr->match); fr->priority = ntohs(ofr->priority); fr->cookie = ofr->cookie; fr->reason = ofr->reason; fr->table_id = 255; fr->duration_sec = ntohl(ofr->duration_sec); fr->duration_nsec = ntohl(ofr->duration_nsec); fr->idle_timeout = ntohs(ofr->idle_timeout); fr->hard_timeout = 0; fr->packet_count = ntohll(ofr->packet_count); fr->byte_count = ntohll(ofr->byte_count); } else if (raw == OFPRAW_NXT_FLOW_REMOVED) { struct nx_flow_removed *nfr; enum ofperr error; nfr = ofpbuf_pull(&b, sizeof *nfr); error = nx_pull_match(&b, ntohs(nfr->match_len), &fr->match, NULL, NULL, NULL, NULL); if (error) { return error; } if (b.size) { return OFPERR_OFPBRC_BAD_LEN; } fr->priority = ntohs(nfr->priority); fr->cookie = nfr->cookie; fr->reason = nfr->reason; fr->table_id = nfr->table_id ? nfr->table_id - 1 : 255; fr->duration_sec = ntohl(nfr->duration_sec); fr->duration_nsec = ntohl(nfr->duration_nsec); fr->idle_timeout = ntohs(nfr->idle_timeout); fr->hard_timeout = 0; fr->packet_count = ntohll(nfr->packet_count); fr->byte_count = ntohll(nfr->byte_count); } else { OVS_NOT_REACHED(); } return 0; } /* Converts abstract ofputil_flow_removed 'fr' into an OFPT_FLOW_REMOVED or * NXT_FLOW_REMOVED message 'oh' according to 'protocol', and returns the * message. 
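Note that OFPRR_METER_DELETE exists only in OpenFlow 1.4 and later, so for earlier protocols this function downgrades that reason to plain OFPRR_DELETE.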
*/ struct ofpbuf * ofputil_encode_flow_removed(const struct ofputil_flow_removed *fr, enum ofputil_protocol protocol) { struct ofpbuf *msg; enum ofp_flow_removed_reason reason = fr->reason; if (reason == OFPRR_METER_DELETE && !(protocol & OFPUTIL_P_OF14_UP)) { reason = OFPRR_DELETE; } switch (protocol) { case OFPUTIL_P_OF11_STD: case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: { struct ofp12_flow_removed *ofr; msg = ofpraw_alloc_xid(OFPRAW_OFPT11_FLOW_REMOVED, ofputil_protocol_to_ofp_version(protocol), htonl(0), ofputil_match_typical_len(protocol)); ofr = ofpbuf_put_zeros(msg, sizeof *ofr); ofr->cookie = fr->cookie; ofr->priority = htons(fr->priority); ofr->reason = reason; ofr->table_id = fr->table_id; ofr->duration_sec = htonl(fr->duration_sec); ofr->duration_nsec = htonl(fr->duration_nsec); ofr->idle_timeout = htons(fr->idle_timeout); ofr->hard_timeout = htons(fr->hard_timeout); ofr->packet_count = htonll(fr->packet_count); ofr->byte_count = htonll(fr->byte_count); ofputil_put_ofp11_match(msg, &fr->match, protocol); break; } case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: { struct ofp10_flow_removed *ofr; msg = ofpraw_alloc_xid(OFPRAW_OFPT10_FLOW_REMOVED, OFP10_VERSION, htonl(0), 0); ofr = ofpbuf_put_zeros(msg, sizeof *ofr); ofputil_match_to_ofp10_match(&fr->match, &ofr->match); ofr->cookie = fr->cookie; ofr->priority = htons(fr->priority); ofr->reason = reason; ofr->duration_sec = htonl(fr->duration_sec); ofr->duration_nsec = htonl(fr->duration_nsec); ofr->idle_timeout = htons(fr->idle_timeout); ofr->packet_count = htonll(unknown_to_zero(fr->packet_count)); ofr->byte_count = htonll(unknown_to_zero(fr->byte_count)); break; } case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: { struct nx_flow_removed *nfr; int match_len; msg = ofpraw_alloc_xid(OFPRAW_NXT_FLOW_REMOVED, OFP10_VERSION, htonl(0), NXM_TYPICAL_LEN); ofpbuf_put_zeros(msg, sizeof *nfr); match_len = nx_put_match(msg, &fr->match, 0, 0); nfr = msg->msg; nfr->cookie = fr->cookie; nfr->priority = htons(fr->priority); nfr->reason = reason; nfr->table_id = fr->table_id + 1; nfr->duration_sec = htonl(fr->duration_sec); nfr->duration_nsec = htonl(fr->duration_nsec); nfr->idle_timeout = htons(fr->idle_timeout); nfr->match_len = htons(match_len); nfr->packet_count = htonll(fr->packet_count); nfr->byte_count = htonll(fr->byte_count); break; } default: OVS_NOT_REACHED(); } return msg; } /* The caller has done basic initialization of '*pin'; the other output * arguments need to be initialized.
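When the corresponding properties are absent, decode_nx_packet_in2() defaults '*total_len' to the length of the included packet and '*buffer_id' to UINT32_MAX.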
*/ static enum ofperr decode_nx_packet_in2(const struct ofp_header *oh, bool loose, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct ofputil_packet_in *pin, size_t *total_len, uint32_t *buffer_id, struct ofpbuf *continuation) { *total_len = 0; *buffer_id = UINT32_MAX; struct ofpbuf properties; ofpbuf_use_const(&properties, oh, ntohs(oh->length)); ofpraw_pull_assert(&properties); while (properties.size > 0) { struct ofpbuf payload; uint64_t type; enum ofperr error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case NXPINT_PACKET: pin->packet = payload.msg; pin->packet_len = ofpbuf_msgsize(&payload); break; case NXPINT_FULL_LEN: { uint32_t u32; error = ofpprop_parse_u32(&payload, &u32); *total_len = u32; break; } case NXPINT_BUFFER_ID: error = ofpprop_parse_u32(&payload, buffer_id); break; case NXPINT_TABLE_ID: error = ofpprop_parse_u8(&payload, &pin->table_id); break; case NXPINT_COOKIE: error = ofpprop_parse_be64(&payload, &pin->cookie); break; case NXPINT_REASON: { uint8_t reason; error = ofpprop_parse_u8(&payload, &reason); pin->reason = reason; break; } case NXPINT_METADATA: error = oxm_decode_match(payload.msg, ofpbuf_msgsize(&payload), loose, tun_table, vl_mff_map, &pin->flow_metadata); break; case NXPINT_USERDATA: pin->userdata = payload.msg; pin->userdata_len = ofpbuf_msgsize(&payload); break; case NXPINT_CONTINUATION: if (continuation) { error = ofpprop_parse_nested(&payload, continuation); } break; default: error = OFPPROP_UNKNOWN(loose, "NX_PACKET_IN2", type); break; } if (error) { return error; } } if (!pin->packet_len) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXT_PACKET_IN2 lacks packet"); return OFPERR_OFPBRC_BAD_LEN; } else if (!*total_len) { *total_len = pin->packet_len; } else if (*total_len < pin->packet_len) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXT_PACKET_IN2 claimed full_len < len"); return OFPERR_OFPBRC_BAD_LEN; } return 0; } /* Decodes the packet-in message starting at 'oh' into '*pin'. Populates * 'pin->packet' and 'pin->packet_len' with the part of the packet actually * included in the message. If 'total_lenp' is nonnull, populates * '*total_lenp' with the original length of the packet (which is larger than * 'packet->len' if only part of the packet was included). If 'buffer_idp' is * nonnull, stores the packet's buffer ID in '*buffer_idp' (UINT32_MAX if it * was not buffered). * * Populates 'continuation', if nonnull, with the continuation data from the * packet-in (an empty buffer, if 'oh' did not contain continuation data). The * format of this data is supposed to be opaque to anything other than * ovs-vswitchd, so that in any other process the only reasonable use of this * data is to be copied into an NXT_RESUME message via ofputil_encode_resume(). * * This function points 'pin->packet' into 'oh', so the caller should not free * it separately from the original OpenFlow message. This is also true for * 'pin->userdata' (which could also end up NULL if there is no userdata). * * 'vl_mff_map' is an optional parameter that is used to validate the length * of variable length mf_fields in 'match'. If it is not provided, the * default mf_fields with maximum length will be used. * * Returns 0 if successful, otherwise an OpenFlow error code. 
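An illustrative caller that ignores continuations (a sketch; 'loose' decoding is chosen here and no tunnel or variable-length field maps are supplied):

    struct ofputil_packet_in pin;
    size_t total_len;
    uint32_t buffer_id;
    enum ofperr error = ofputil_decode_packet_in(oh, true, NULL, NULL, &pin,
                                                 &total_len, &buffer_id, NULL);
    if (!error) {
        ... pin.packet and pin.packet_len reference data inside 'oh' ...
    }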
*/ enum ofperr ofputil_decode_packet_in(const struct ofp_header *oh, bool loose, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct ofputil_packet_in *pin, size_t *total_lenp, uint32_t *buffer_idp, struct ofpbuf *continuation) { uint32_t buffer_id; size_t total_len; memset(pin, 0, sizeof *pin); pin->cookie = OVS_BE64_MAX; if (continuation) { ofpbuf_use_const(continuation, NULL, 0); } struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT13_PACKET_IN || raw == OFPRAW_OFPT12_PACKET_IN) { const struct ofp12_packet_in *opi = ofpbuf_pull(&b, sizeof *opi); const ovs_be64 *cookie = (raw == OFPRAW_OFPT13_PACKET_IN ? ofpbuf_pull(&b, sizeof *cookie) : NULL); enum ofperr error = oxm_pull_match_loose(&b, tun_table, &pin->flow_metadata); if (error) { return error; } if (!ofpbuf_try_pull(&b, 2)) { return OFPERR_OFPBRC_BAD_LEN; } pin->reason = opi->reason; pin->table_id = opi->table_id; buffer_id = ntohl(opi->buffer_id); total_len = ntohs(opi->total_len); if (cookie) { pin->cookie = *cookie; } pin->packet = b.data; pin->packet_len = b.size; } else if (raw == OFPRAW_OFPT10_PACKET_IN) { const struct ofp10_packet_in *opi; opi = ofpbuf_pull(&b, offsetof(struct ofp10_packet_in, data)); pin->packet = CONST_CAST(uint8_t *, opi->data); pin->packet_len = b.size; match_init_catchall(&pin->flow_metadata); match_set_in_port(&pin->flow_metadata, u16_to_ofp(ntohs(opi->in_port))); pin->reason = opi->reason; buffer_id = ntohl(opi->buffer_id); total_len = ntohs(opi->total_len); } else if (raw == OFPRAW_OFPT11_PACKET_IN) { const struct ofp11_packet_in *opi; ofp_port_t in_port; enum ofperr error; opi = ofpbuf_pull(&b, sizeof *opi); pin->packet = b.data; pin->packet_len = b.size; buffer_id = ntohl(opi->buffer_id); error = ofputil_port_from_ofp11(opi->in_port, &in_port); if (error) { return error; } match_init_catchall(&pin->flow_metadata); match_set_in_port(&pin->flow_metadata, in_port); total_len = ntohs(opi->total_len); pin->reason = opi->reason; pin->table_id = opi->table_id; } else if (raw == OFPRAW_NXT_PACKET_IN) { const struct nx_packet_in *npi; int error; npi = ofpbuf_pull(&b, sizeof *npi); error = nx_pull_match_loose(&b, ntohs(npi->match_len), &pin->flow_metadata, NULL, NULL, NULL); if (error) { return error; } if (!ofpbuf_try_pull(&b, 2)) { return OFPERR_OFPBRC_BAD_LEN; } pin->reason = npi->reason; pin->table_id = npi->table_id; pin->cookie = npi->cookie; buffer_id = ntohl(npi->buffer_id); total_len = ntohs(npi->total_len); pin->packet = b.data; pin->packet_len = b.size; } else if (raw == OFPRAW_NXT_PACKET_IN2 || raw == OFPRAW_NXT_RESUME) { enum ofperr error = decode_nx_packet_in2(oh, loose, tun_table, vl_mff_map, pin, &total_len, &buffer_id, continuation); if (error) { return error; } } else { OVS_NOT_REACHED(); } if (total_lenp) { *total_lenp = total_len; } if (buffer_idp) { *buffer_idp = buffer_id; } return 0; } static int encode_packet_in_reason(enum ofp_packet_in_reason reason, enum ofp_version version) { switch (reason) { case OFPR_NO_MATCH: case OFPR_ACTION: case OFPR_INVALID_TTL: return reason; case OFPR_ACTION_SET: case OFPR_GROUP: case OFPR_PACKET_OUT: return version < OFP14_VERSION ? OFPR_ACTION : reason; case OFPR_EXPLICIT_MISS: return version < OFP13_VERSION ? OFPR_ACTION : OFPR_NO_MATCH; case OFPR_IMPLICIT_MISS: return OFPR_NO_MATCH; case OFPR_N_REASONS: default: OVS_NOT_REACHED(); } } /* Only NXT_PACKET_IN2 (not NXT_RESUME) should include NXCPT_USERDATA, so this * function omits it. 
The caller can add it itself if desired. */
static void
ofputil_put_packet_in(const struct ofputil_packet_in *pin,
                      enum ofp_version version, size_t include_bytes,
                      struct ofpbuf *msg)
{
    /* Add packet properties. */
    ofpprop_put(msg, NXPINT_PACKET, pin->packet, include_bytes);
    if (include_bytes != pin->packet_len) {
        ofpprop_put_u32(msg, NXPINT_FULL_LEN, pin->packet_len);
    }

    /* Add flow properties. */
    ofpprop_put_u8(msg, NXPINT_TABLE_ID, pin->table_id);
    if (pin->cookie != OVS_BE64_MAX) {
        ofpprop_put_be64(msg, NXPINT_COOKIE, pin->cookie);
    }

    /* Add other properties. */
    ofpprop_put_u8(msg, NXPINT_REASON,
                   encode_packet_in_reason(pin->reason, version));

    size_t start = ofpprop_start(msg, NXPINT_METADATA);
    oxm_put_raw(msg, &pin->flow_metadata, version);
    ofpprop_end(msg, start);
}

static void
put_actions_property(struct ofpbuf *msg, uint64_t prop_type,
                     enum ofp_version version, const struct ofpact *actions,
                     size_t actions_len)
{
    if (actions_len) {
        size_t start = ofpprop_start_nested(msg, prop_type);
        ofpacts_put_openflow_actions(actions, actions_len, msg, version);
        ofpprop_end(msg, start);
    }
}

enum nx_continuation_prop_type {
    NXCPT_BRIDGE = 0x8000,
    NXCPT_STACK,
    NXCPT_MIRRORS,
    NXCPT_CONNTRACKED,
    NXCPT_TABLE_ID,
    NXCPT_COOKIE,
    NXCPT_ACTIONS,
    NXCPT_ACTION_SET,
};

/* Only NXT_PACKET_IN2 (not NXT_RESUME) should include NXCPT_USERDATA, so this
 * function omits it.  The caller can add it itself if desired. */
static void
ofputil_put_packet_in_private(const struct ofputil_packet_in_private *pin,
                              enum ofp_version version, size_t include_bytes,
                              struct ofpbuf *msg)
{
    ofputil_put_packet_in(&pin->public, version, include_bytes, msg);

    size_t continuation_ofs = ofpprop_start_nested(msg, NXPINT_CONTINUATION);
    size_t inner_ofs = msg->size;

    if (!uuid_is_zero(&pin->bridge)) {
        ofpprop_put_uuid(msg, NXCPT_BRIDGE, &pin->bridge);
    }

    struct ofpbuf pin_stack;
    ofpbuf_use_const(&pin_stack, pin->stack, pin->stack_size);

    while (pin_stack.size) {
        uint8_t len;
        uint8_t *val = nx_stack_pop(&pin_stack, &len);
        ofpprop_put(msg, NXCPT_STACK, val, len);
    }

    if (pin->mirrors) {
        ofpprop_put_u32(msg, NXCPT_MIRRORS, pin->mirrors);
    }

    if (pin->conntracked) {
        ofpprop_put_flag(msg, NXCPT_CONNTRACKED);
    }

    if (pin->actions_len) {
        /* Divide 'pin->actions' into groups that begin with an
         * unroll_xlate action.  For each group, emit a NXCPT_TABLE_ID and
         * NXCPT_COOKIE property (if either has changed; each is initially
         * assumed 0), then a NXCPT_ACTIONS property with the grouped
         * actions.
         *
         * The alternative is to make OFPACT_UNROLL_XLATE public.  We can
         * always do that later, since this is a private property. */
        const struct ofpact *const end = ofpact_end(pin->actions,
                                                    pin->actions_len);
        const struct ofpact_unroll_xlate *unroll = NULL;
        uint8_t table_id = 0;
        ovs_be64 cookie = 0;

        const struct ofpact *a;
        for (a = pin->actions; ; a = ofpact_next(a)) {
            if (a == end || a->type == OFPACT_UNROLL_XLATE) {
                if (unroll) {
                    if (table_id != unroll->rule_table_id) {
                        ofpprop_put_u8(msg, NXCPT_TABLE_ID,
                                       unroll->rule_table_id);
                        table_id = unroll->rule_table_id;
                    }
                    if (cookie != unroll->rule_cookie) {
                        ofpprop_put_be64(msg, NXCPT_COOKIE,
                                         unroll->rule_cookie);
                        cookie = unroll->rule_cookie;
                    }
                }

                const struct ofpact *start = unroll ?
ofpact_next(&unroll->ofpact) : pin->actions; put_actions_property(msg, NXCPT_ACTIONS, version, start, (a - start) * sizeof *a); if (a == end) { break; } unroll = ofpact_get_UNROLL_XLATE(a); } } } if (pin->action_set_len) { size_t start = ofpprop_start_nested(msg, NXCPT_ACTION_SET); ofpacts_put_openflow_actions(pin->action_set, pin->action_set_len, msg, version); ofpprop_end(msg, start); } if (msg->size > inner_ofs) { ofpprop_end(msg, continuation_ofs); } else { msg->size = continuation_ofs; } } static struct ofpbuf * ofputil_encode_ofp10_packet_in(const struct ofputil_packet_in *pin) { struct ofp10_packet_in *opi; struct ofpbuf *msg; msg = ofpraw_alloc_xid(OFPRAW_OFPT10_PACKET_IN, OFP10_VERSION, htonl(0), pin->packet_len); opi = ofpbuf_put_zeros(msg, offsetof(struct ofp10_packet_in, data)); opi->total_len = htons(pin->packet_len); opi->in_port = htons(ofp_to_u16(pin->flow_metadata.flow.in_port.ofp_port)); opi->reason = encode_packet_in_reason(pin->reason, OFP10_VERSION); opi->buffer_id = htonl(UINT32_MAX); return msg; } static struct ofpbuf * ofputil_encode_nx_packet_in(const struct ofputil_packet_in *pin, enum ofp_version version) { struct nx_packet_in *npi; struct ofpbuf *msg; size_t match_len; /* The final argument is just an estimate of the space required. */ msg = ofpraw_alloc_xid(OFPRAW_NXT_PACKET_IN, version, htonl(0), NXM_TYPICAL_LEN + 2 + pin->packet_len); ofpbuf_put_zeros(msg, sizeof *npi); match_len = nx_put_match(msg, &pin->flow_metadata, 0, 0); ofpbuf_put_zeros(msg, 2); npi = msg->msg; npi->buffer_id = htonl(UINT32_MAX); npi->total_len = htons(pin->packet_len); npi->reason = encode_packet_in_reason(pin->reason, version); npi->table_id = pin->table_id; npi->cookie = pin->cookie; npi->match_len = htons(match_len); return msg; } static struct ofpbuf * ofputil_encode_nx_packet_in2(const struct ofputil_packet_in_private *pin, enum ofp_version version, size_t include_bytes) { /* 'extra' is just an estimate of the space required. */ size_t extra = (pin->public.packet_len + NXM_TYPICAL_LEN /* flow_metadata */ + pin->stack_size * 4 + pin->actions_len + pin->action_set_len + 256); /* fudge factor */ struct ofpbuf *msg = ofpraw_alloc_xid(OFPRAW_NXT_PACKET_IN2, version, htonl(0), extra); ofputil_put_packet_in_private(pin, version, include_bytes, msg); if (pin->public.userdata_len) { ofpprop_put(msg, NXPINT_USERDATA, pin->public.userdata, pin->public.userdata_len); } ofpmsg_update_length(msg); return msg; } static struct ofpbuf * ofputil_encode_ofp11_packet_in(const struct ofputil_packet_in *pin) { struct ofp11_packet_in *opi; struct ofpbuf *msg; msg = ofpraw_alloc_xid(OFPRAW_OFPT11_PACKET_IN, OFP11_VERSION, htonl(0), pin->packet_len); opi = ofpbuf_put_zeros(msg, sizeof *opi); opi->buffer_id = htonl(UINT32_MAX); opi->in_port = ofputil_port_to_ofp11( pin->flow_metadata.flow.in_port.ofp_port); opi->in_phy_port = opi->in_port; opi->total_len = htons(pin->packet_len); opi->reason = encode_packet_in_reason(pin->reason, OFP11_VERSION); opi->table_id = pin->table_id; return msg; } static struct ofpbuf * ofputil_encode_ofp12_packet_in(const struct ofputil_packet_in *pin, enum ofp_version version) { enum ofpraw raw = (version >= OFP13_VERSION ? OFPRAW_OFPT13_PACKET_IN : OFPRAW_OFPT12_PACKET_IN); struct ofpbuf *msg; /* The final argument is just an estimate of the space required. 
*/ msg = ofpraw_alloc_xid(raw, version, htonl(0), NXM_TYPICAL_LEN + 2 + pin->packet_len); struct ofp12_packet_in *opi = ofpbuf_put_zeros(msg, sizeof *opi); opi->buffer_id = htonl(UINT32_MAX); opi->total_len = htons(pin->packet_len); opi->reason = encode_packet_in_reason(pin->reason, version); opi->table_id = pin->table_id; if (version >= OFP13_VERSION) { ovs_be64 cookie = pin->cookie; ofpbuf_put(msg, &cookie, sizeof cookie); } oxm_put_match(msg, &pin->flow_metadata, version); ofpbuf_put_zeros(msg, 2); return msg; } /* Converts abstract ofputil_packet_in_private 'pin' into a PACKET_IN message * for 'protocol', using the packet-in format specified by 'packet_in_format'. * * This function is really meant only for use by ovs-vswitchd. To any other * code, the "continuation" data, i.e. the data that is in struct * ofputil_packet_in_private but not in struct ofputil_packet_in, is supposed * to be opaque (and it might change from one OVS version to another). Thus, * if any other code wants to encode a packet-in, it should use a non-"private" * version of this function. (Such a version doesn't currently exist because * only ovs-vswitchd currently wants to encode packet-ins. If you need one, * write it...) */ struct ofpbuf * ofputil_encode_packet_in_private(const struct ofputil_packet_in_private *pin, enum ofputil_protocol protocol, enum nx_packet_in_format packet_in_format) { enum ofp_version version = ofputil_protocol_to_ofp_version(protocol); struct ofpbuf *msg; switch (packet_in_format) { case NXPIF_STANDARD: switch (protocol) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: msg = ofputil_encode_ofp10_packet_in(&pin->public); break; case OFPUTIL_P_OF11_STD: msg = ofputil_encode_ofp11_packet_in(&pin->public); break; case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: msg = ofputil_encode_ofp12_packet_in(&pin->public, version); break; default: OVS_NOT_REACHED(); } break; case NXPIF_NXT_PACKET_IN: msg = ofputil_encode_nx_packet_in(&pin->public, version); break; case NXPIF_NXT_PACKET_IN2: return ofputil_encode_nx_packet_in2(pin, version, pin->public.packet_len); default: OVS_NOT_REACHED(); } ofpbuf_put(msg, pin->public.packet, pin->public.packet_len); ofpmsg_update_length(msg); return msg; } /* Returns a string form of 'reason'. The return value is either a statically * allocated constant string or the 'bufsize'-byte buffer 'reasonbuf'. * 'bufsize' should be at least OFPUTIL_PACKET_IN_REASON_BUFSIZE. 
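 *
 * A minimal sketch of the intended calling convention:
 *
 *     char reasonbuf[OFPUTIL_PACKET_IN_REASON_BUFSIZE];
 *     const char *s = ofputil_packet_in_reason_to_string(OFPR_ACTION,
 *                                                        reasonbuf,
 *                                                        sizeof reasonbuf);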
*/
const char *
ofputil_packet_in_reason_to_string(enum ofp_packet_in_reason reason,
                                   char *reasonbuf, size_t bufsize)
{
    switch (reason) {
    case OFPR_NO_MATCH:
        return "no_match";
    case OFPR_ACTION:
        return "action";
    case OFPR_INVALID_TTL:
        return "invalid_ttl";
    case OFPR_ACTION_SET:
        return "action_set";
    case OFPR_GROUP:
        return "group";
    case OFPR_PACKET_OUT:
        return "packet_out";
    case OFPR_EXPLICIT_MISS:
    case OFPR_IMPLICIT_MISS:
        return "";
    case OFPR_N_REASONS:
    default:
        snprintf(reasonbuf, bufsize, "%d", (int) reason);
        return reasonbuf;
    }
}

bool
ofputil_packet_in_reason_from_string(const char *s,
                                     enum ofp_packet_in_reason *reason)
{
    int i;

    for (i = 0; i < OFPR_N_REASONS; i++) {
        char reasonbuf[OFPUTIL_PACKET_IN_REASON_BUFSIZE];
        const char *reason_s;

        reason_s = ofputil_packet_in_reason_to_string(i, reasonbuf,
                                                      sizeof reasonbuf);
        if (!strcasecmp(s, reason_s)) {
            *reason = i;
            return true;
        }
    }
    return false;
}

/* Returns a newly allocated NXT_RESUME message for 'pin', with the given
 * 'continuation', for 'protocol'.  This message is suitable for resuming the
 * pipeline traversal of the packet represented by 'pin', if sent to the
 * switch from which 'pin' was received. */
struct ofpbuf *
ofputil_encode_resume(const struct ofputil_packet_in *pin,
                      const struct ofpbuf *continuation,
                      enum ofputil_protocol protocol)
{
    enum ofp_version version = ofputil_protocol_to_ofp_version(protocol);
    size_t extra = pin->packet_len + NXM_TYPICAL_LEN + continuation->size;
    struct ofpbuf *msg = ofpraw_alloc_xid(OFPRAW_NXT_RESUME, version,
                                          0, extra);
    ofputil_put_packet_in(pin, version, pin->packet_len, msg);
    ofpprop_put_nested(msg, NXPINT_CONTINUATION, continuation);
    ofpmsg_update_length(msg);
    return msg;
}

static enum ofperr
parse_stack_prop(const struct ofpbuf *property, struct ofpbuf *stack)
{
    unsigned int len = ofpbuf_msgsize(property);
    if (len > sizeof(union mf_subvalue)) {
        VLOG_WARN_RL(&bad_ofmsg_rl, "NXCPT_STACK property has bad length %u",
                     len);
        return OFPERR_OFPBPC_BAD_LEN;
    }
    nx_stack_push_bottom(stack, property->msg, len);
    return 0;
}

static enum ofperr
parse_actions_property(struct ofpbuf *property, enum ofp_version version,
                       struct ofpbuf *ofpacts)
{
    if (!ofpbuf_try_pull(property, ROUND_UP(ofpbuf_headersize(property), 8))) {
        VLOG_WARN_RL(&bad_ofmsg_rl,
                     "actions property has bad length %"PRIu32,
                     property->size);
        return OFPERR_OFPBPC_BAD_LEN;
    }

    return ofpacts_pull_openflow_actions(property, property->size,
                                         version, NULL, NULL, ofpacts);
}

/* This is like ofputil_decode_packet_in(), except that it decodes the
 * continuation data into 'pin'.  The format of this data is supposed to be
 * opaque to any process other than ovs-vswitchd, so this function should not
 * be used outside ovs-vswitchd.
 *
 * 'vl_mff_map' is an optional parameter that is used to validate the length
 * of variable length mf_fields in 'match'.  If it is not provided, the
 * default mf_fields with maximum length will be used.
 *
 * When successful, 'pin' contains some dynamically allocated data.  Call
 * ofputil_packet_in_private_destroy() to free this data.
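 *
 * A sketch of the decode/use/destroy lifecycle ('oh' is assumed to hold an
 * NXT_PACKET_IN2 or NXT_RESUME message; error handling is abbreviated):
 *
 *     struct ofputil_packet_in_private pin;
 *     size_t total_len;
 *     uint32_t buffer_id;
 *     enum ofperr err = ofputil_decode_packet_in_private(oh, true, NULL,
 *                                                        NULL, &pin,
 *                                                        &total_len,
 *                                                        &buffer_id);
 *     if (!err) {
 *         ...use pin.public and the continuation fields...
 *         ofputil_packet_in_private_destroy(&pin);
 *     }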
*/ enum ofperr ofputil_decode_packet_in_private(const struct ofp_header *oh, bool loose, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct ofputil_packet_in_private *pin, size_t *total_len, uint32_t *buffer_id) { memset(pin, 0, sizeof *pin); struct ofpbuf continuation; enum ofperr error; error = ofputil_decode_packet_in(oh, loose, tun_table, vl_mff_map, &pin->public, total_len, buffer_id, &continuation); if (error) { return error; } struct ofpbuf actions, action_set; ofpbuf_init(&actions, 0); ofpbuf_init(&action_set, 0); uint8_t table_id = 0; ovs_be64 cookie = 0; struct ofpbuf stack; ofpbuf_init(&stack, 0); while (continuation.size > 0) { struct ofpbuf payload; uint64_t type; error = ofpprop_pull(&continuation, &payload, &type); if (error) { break; } switch (type) { case NXCPT_BRIDGE: error = ofpprop_parse_uuid(&payload, &pin->bridge); break; case NXCPT_STACK: error = parse_stack_prop(&payload, &stack); break; case NXCPT_MIRRORS: error = ofpprop_parse_u32(&payload, &pin->mirrors); break; case NXCPT_CONNTRACKED: pin->conntracked = true; break; case NXCPT_TABLE_ID: error = ofpprop_parse_u8(&payload, &table_id); break; case NXCPT_COOKIE: error = ofpprop_parse_be64(&payload, &cookie); break; case NXCPT_ACTIONS: { struct ofpact_unroll_xlate *unroll = ofpact_put_UNROLL_XLATE(&actions); unroll->rule_table_id = table_id; unroll->rule_cookie = cookie; error = parse_actions_property(&payload, oh->version, &actions); break; } case NXCPT_ACTION_SET: error = parse_actions_property(&payload, oh->version, &action_set); break; default: error = OFPPROP_UNKNOWN(loose, "continuation", type); break; } if (error) { break; } } pin->actions_len = actions.size; pin->actions = ofpbuf_steal_data(&actions); pin->action_set_len = action_set.size; pin->action_set = ofpbuf_steal_data(&action_set); pin->stack_size = stack.size; pin->stack = ofpbuf_steal_data(&stack); if (error) { ofputil_packet_in_private_destroy(pin); } return error; } /* Frees data in 'pin' that is dynamically allocated by * ofputil_decode_packet_in_private(). * * 'pin->public' contains some pointer members that * ofputil_decode_packet_in_private() doesn't initialize to newly allocated * data, so this function doesn't free those. */ void ofputil_packet_in_private_destroy(struct ofputil_packet_in_private *pin) { if (pin) { free(pin->stack); free(pin->actions); free(pin->action_set); } } /* Converts an OFPT_PACKET_OUT in 'opo' into an abstract ofputil_packet_out in * 'po'. * * Uses 'ofpacts' to store the abstract OFPACT_* version of the packet out * message's actions. The caller must initialize 'ofpacts' and retains * ownership of it. 'po->ofpacts' will point into the 'ofpacts' buffer. * * 'po->packet' refers to the packet data in 'oh', so the buffer containing * 'oh' must not be destroyed while 'po' is being used. * * Returns 0 if successful, otherwise an OFPERR_* value. 
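 *
 * A sketch of typical use ('oh' is assumed to hold an OFPT_PACKET_OUT):
 *
 *     struct ofputil_packet_out po;
 *     struct ofpbuf ofpacts;
 *     ofpbuf_init(&ofpacts, 0);
 *     enum ofperr err = ofputil_decode_packet_out(&po, oh, &ofpacts);
 *     if (!err) {
 *         ...po.ofpacts points into 'ofpacts', po.packet into 'oh'...
 *     }
 *     ofpbuf_uninit(&ofpacts);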
*/ enum ofperr ofputil_decode_packet_out(struct ofputil_packet_out *po, const struct ofp_header *oh, struct ofpbuf *ofpacts) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); ofpbuf_clear(ofpacts); if (raw == OFPRAW_OFPT11_PACKET_OUT) { enum ofperr error; const struct ofp11_packet_out *opo = ofpbuf_pull(&b, sizeof *opo); po->buffer_id = ntohl(opo->buffer_id); error = ofputil_port_from_ofp11(opo->in_port, &po->in_port); if (error) { return error; } error = ofpacts_pull_openflow_actions(&b, ntohs(opo->actions_len), oh->version, NULL, NULL, ofpacts); if (error) { return error; } } else if (raw == OFPRAW_OFPT10_PACKET_OUT) { enum ofperr error; const struct ofp10_packet_out *opo = ofpbuf_pull(&b, sizeof *opo); po->buffer_id = ntohl(opo->buffer_id); po->in_port = u16_to_ofp(ntohs(opo->in_port)); error = ofpacts_pull_openflow_actions(&b, ntohs(opo->actions_len), oh->version, NULL, NULL, ofpacts); if (error) { return error; } } else { OVS_NOT_REACHED(); } if (ofp_to_u16(po->in_port) >= ofp_to_u16(OFPP_MAX) && po->in_port != OFPP_LOCAL && po->in_port != OFPP_NONE && po->in_port != OFPP_CONTROLLER) { VLOG_WARN_RL(&bad_ofmsg_rl, "packet-out has bad input port %#"PRIx32, po->in_port); return OFPERR_OFPBRC_BAD_PORT; } po->ofpacts = ofpacts->data; po->ofpacts_len = ofpacts->size; if (po->buffer_id == UINT32_MAX) { po->packet = b.data; po->packet_len = b.size; } else { po->packet = NULL; po->packet_len = 0; } return 0; } /* ofputil_phy_port */ /* NETDEV_F_* to and from OFPPF_* and OFPPF10_*. */ BUILD_ASSERT_DECL((int) NETDEV_F_10MB_HD == OFPPF_10MB_HD); /* bit 0 */ BUILD_ASSERT_DECL((int) NETDEV_F_10MB_FD == OFPPF_10MB_FD); /* bit 1 */ BUILD_ASSERT_DECL((int) NETDEV_F_100MB_HD == OFPPF_100MB_HD); /* bit 2 */ BUILD_ASSERT_DECL((int) NETDEV_F_100MB_FD == OFPPF_100MB_FD); /* bit 3 */ BUILD_ASSERT_DECL((int) NETDEV_F_1GB_HD == OFPPF_1GB_HD); /* bit 4 */ BUILD_ASSERT_DECL((int) NETDEV_F_1GB_FD == OFPPF_1GB_FD); /* bit 5 */ BUILD_ASSERT_DECL((int) NETDEV_F_10GB_FD == OFPPF_10GB_FD); /* bit 6 */ /* NETDEV_F_ bits 11...15 are OFPPF10_ bits 7...11: */ BUILD_ASSERT_DECL((int) NETDEV_F_COPPER == (OFPPF10_COPPER << 4)); BUILD_ASSERT_DECL((int) NETDEV_F_FIBER == (OFPPF10_FIBER << 4)); BUILD_ASSERT_DECL((int) NETDEV_F_AUTONEG == (OFPPF10_AUTONEG << 4)); BUILD_ASSERT_DECL((int) NETDEV_F_PAUSE == (OFPPF10_PAUSE << 4)); BUILD_ASSERT_DECL((int) NETDEV_F_PAUSE_ASYM == (OFPPF10_PAUSE_ASYM << 4)); static enum netdev_features netdev_port_features_from_ofp10(ovs_be32 ofp10_) { uint32_t ofp10 = ntohl(ofp10_); return (ofp10 & 0x7f) | ((ofp10 & 0xf80) << 4); } static ovs_be32 netdev_port_features_to_ofp10(enum netdev_features features) { return htonl((features & 0x7f) | ((features & 0xf800) >> 4)); } BUILD_ASSERT_DECL((int) NETDEV_F_10MB_HD == OFPPF_10MB_HD); /* bit 0 */ BUILD_ASSERT_DECL((int) NETDEV_F_10MB_FD == OFPPF_10MB_FD); /* bit 1 */ BUILD_ASSERT_DECL((int) NETDEV_F_100MB_HD == OFPPF_100MB_HD); /* bit 2 */ BUILD_ASSERT_DECL((int) NETDEV_F_100MB_FD == OFPPF_100MB_FD); /* bit 3 */ BUILD_ASSERT_DECL((int) NETDEV_F_1GB_HD == OFPPF_1GB_HD); /* bit 4 */ BUILD_ASSERT_DECL((int) NETDEV_F_1GB_FD == OFPPF_1GB_FD); /* bit 5 */ BUILD_ASSERT_DECL((int) NETDEV_F_10GB_FD == OFPPF_10GB_FD); /* bit 6 */ BUILD_ASSERT_DECL((int) NETDEV_F_40GB_FD == OFPPF11_40GB_FD); /* bit 7 */ BUILD_ASSERT_DECL((int) NETDEV_F_100GB_FD == OFPPF11_100GB_FD); /* bit 8 */ BUILD_ASSERT_DECL((int) NETDEV_F_1TB_FD == OFPPF11_1TB_FD); /* bit 9 */ BUILD_ASSERT_DECL((int) NETDEV_F_OTHER == OFPPF11_OTHER); 
/* bit 10 */ BUILD_ASSERT_DECL((int) NETDEV_F_COPPER == OFPPF11_COPPER); /* bit 11 */ BUILD_ASSERT_DECL((int) NETDEV_F_FIBER == OFPPF11_FIBER); /* bit 12 */ BUILD_ASSERT_DECL((int) NETDEV_F_AUTONEG == OFPPF11_AUTONEG); /* bit 13 */ BUILD_ASSERT_DECL((int) NETDEV_F_PAUSE == OFPPF11_PAUSE); /* bit 14 */ BUILD_ASSERT_DECL((int) NETDEV_F_PAUSE_ASYM == OFPPF11_PAUSE_ASYM);/* bit 15 */ static enum netdev_features netdev_port_features_from_ofp11(ovs_be32 ofp11) { return ntohl(ofp11) & 0xffff; } static ovs_be32 netdev_port_features_to_ofp11(enum netdev_features features) { return htonl(features & 0xffff); } static enum ofperr ofputil_decode_ofp10_phy_port(struct ofputil_phy_port *pp, const struct ofp10_phy_port *opp) { pp->port_no = u16_to_ofp(ntohs(opp->port_no)); pp->hw_addr = opp->hw_addr; ovs_strlcpy(pp->name, opp->name, OFP_MAX_PORT_NAME_LEN); pp->config = ntohl(opp->config) & OFPPC10_ALL; pp->state = ntohl(opp->state) & OFPPS10_ALL; pp->curr = netdev_port_features_from_ofp10(opp->curr); pp->advertised = netdev_port_features_from_ofp10(opp->advertised); pp->supported = netdev_port_features_from_ofp10(opp->supported); pp->peer = netdev_port_features_from_ofp10(opp->peer); pp->curr_speed = netdev_features_to_bps(pp->curr, 0) / 1000; pp->max_speed = netdev_features_to_bps(pp->supported, 0) / 1000; return 0; } static enum ofperr ofputil_decode_ofp11_port(struct ofputil_phy_port *pp, const struct ofp11_port *op) { enum ofperr error; error = ofputil_port_from_ofp11(op->port_no, &pp->port_no); if (error) { return error; } pp->hw_addr = op->hw_addr; ovs_strlcpy(pp->name, op->name, OFP_MAX_PORT_NAME_LEN); pp->config = ntohl(op->config) & OFPPC11_ALL; pp->state = ntohl(op->state) & OFPPS11_ALL; pp->curr = netdev_port_features_from_ofp11(op->curr); pp->advertised = netdev_port_features_from_ofp11(op->advertised); pp->supported = netdev_port_features_from_ofp11(op->supported); pp->peer = netdev_port_features_from_ofp11(op->peer); pp->curr_speed = ntohl(op->curr_speed); pp->max_speed = ntohl(op->max_speed); return 0; } static enum ofperr parse_ofp14_port_ethernet_property(const struct ofpbuf *payload, struct ofputil_phy_port *pp) { struct ofp14_port_desc_prop_ethernet *eth = payload->data; if (payload->size != sizeof *eth) { return OFPERR_OFPBPC_BAD_LEN; } pp->curr = netdev_port_features_from_ofp11(eth->curr); pp->advertised = netdev_port_features_from_ofp11(eth->advertised); pp->supported = netdev_port_features_from_ofp11(eth->supported); pp->peer = netdev_port_features_from_ofp11(eth->peer); pp->curr_speed = ntohl(eth->curr_speed); pp->max_speed = ntohl(eth->max_speed); return 0; } static enum ofperr ofputil_pull_ofp14_port(struct ofputil_phy_port *pp, struct ofpbuf *msg) { struct ofp14_port *op = ofpbuf_try_pull(msg, sizeof *op); if (!op) { return OFPERR_OFPBRC_BAD_LEN; } size_t len = ntohs(op->length); if (len < sizeof *op || len - sizeof *op > msg->size) { return OFPERR_OFPBRC_BAD_LEN; } len -= sizeof *op; enum ofperr error = ofputil_port_from_ofp11(op->port_no, &pp->port_no); if (error) { return error; } pp->hw_addr = op->hw_addr; ovs_strlcpy(pp->name, op->name, OFP_MAX_PORT_NAME_LEN); pp->config = ntohl(op->config) & OFPPC11_ALL; pp->state = ntohl(op->state) & OFPPS11_ALL; struct ofpbuf properties = ofpbuf_const_initializer(ofpbuf_pull(msg, len), len); while (properties.size > 0) { struct ofpbuf payload; enum ofperr error; uint64_t type; error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case OFPPDPT14_ETHERNET: error = 
parse_ofp14_port_ethernet_property(&payload, pp); break; default: error = OFPPROP_UNKNOWN(true, "port", type); break; } if (error) { return error; } } return 0; } static void ofputil_encode_ofp10_phy_port(const struct ofputil_phy_port *pp, struct ofp10_phy_port *opp) { memset(opp, 0, sizeof *opp); opp->port_no = htons(ofp_to_u16(pp->port_no)); opp->hw_addr = pp->hw_addr; ovs_strlcpy(opp->name, pp->name, OFP_MAX_PORT_NAME_LEN); opp->config = htonl(pp->config & OFPPC10_ALL); opp->state = htonl(pp->state & OFPPS10_ALL); opp->curr = netdev_port_features_to_ofp10(pp->curr); opp->advertised = netdev_port_features_to_ofp10(pp->advertised); opp->supported = netdev_port_features_to_ofp10(pp->supported); opp->peer = netdev_port_features_to_ofp10(pp->peer); } static void ofputil_encode_ofp11_port(const struct ofputil_phy_port *pp, struct ofp11_port *op) { memset(op, 0, sizeof *op); op->port_no = ofputil_port_to_ofp11(pp->port_no); op->hw_addr = pp->hw_addr; ovs_strlcpy(op->name, pp->name, OFP_MAX_PORT_NAME_LEN); op->config = htonl(pp->config & OFPPC11_ALL); op->state = htonl(pp->state & OFPPS11_ALL); op->curr = netdev_port_features_to_ofp11(pp->curr); op->advertised = netdev_port_features_to_ofp11(pp->advertised); op->supported = netdev_port_features_to_ofp11(pp->supported); op->peer = netdev_port_features_to_ofp11(pp->peer); op->curr_speed = htonl(pp->curr_speed); op->max_speed = htonl(pp->max_speed); } static void ofputil_put_ofp14_port(const struct ofputil_phy_port *pp, struct ofpbuf *b) { struct ofp14_port *op; struct ofp14_port_desc_prop_ethernet *eth; ofpbuf_prealloc_tailroom(b, sizeof *op + sizeof *eth); op = ofpbuf_put_zeros(b, sizeof *op); op->port_no = ofputil_port_to_ofp11(pp->port_no); op->length = htons(sizeof *op + sizeof *eth); op->hw_addr = pp->hw_addr; ovs_strlcpy(op->name, pp->name, sizeof op->name); op->config = htonl(pp->config & OFPPC11_ALL); op->state = htonl(pp->state & OFPPS11_ALL); eth = ofpprop_put_zeros(b, OFPPDPT14_ETHERNET, sizeof *eth); eth->curr = netdev_port_features_to_ofp11(pp->curr); eth->advertised = netdev_port_features_to_ofp11(pp->advertised); eth->supported = netdev_port_features_to_ofp11(pp->supported); eth->peer = netdev_port_features_to_ofp11(pp->peer); eth->curr_speed = htonl(pp->curr_speed); eth->max_speed = htonl(pp->max_speed); } static void ofputil_put_phy_port(enum ofp_version ofp_version, const struct ofputil_phy_port *pp, struct ofpbuf *b) { switch (ofp_version) { case OFP10_VERSION: { struct ofp10_phy_port *opp = ofpbuf_put_uninit(b, sizeof *opp); ofputil_encode_ofp10_phy_port(pp, opp); break; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: { struct ofp11_port *op = ofpbuf_put_uninit(b, sizeof *op); ofputil_encode_ofp11_port(pp, op); break; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: ofputil_put_ofp14_port(pp, b); break; default: OVS_NOT_REACHED(); } } enum ofperr ofputil_decode_port_desc_stats_request(const struct ofp_header *request, ofp_port_t *port) { struct ofpbuf b = ofpbuf_const_initializer(request, ntohs(request->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPST10_PORT_DESC_REQUEST) { *port = OFPP_ANY; return 0; } else if (raw == OFPRAW_OFPST15_PORT_DESC_REQUEST) { ovs_be32 *ofp11_port; ofp11_port = ofpbuf_pull(&b, sizeof *ofp11_port); return ofputil_port_from_ofp11(*ofp11_port, port); } else { OVS_NOT_REACHED(); } } struct ofpbuf * ofputil_encode_port_desc_stats_request(enum ofp_version ofp_version, ofp_port_t port) { struct ofpbuf *request; switch (ofp_version) { case OFP10_VERSION: 
case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: request = ofpraw_alloc(OFPRAW_OFPST10_PORT_DESC_REQUEST, ofp_version, 0); break; case OFP15_VERSION: case OFP16_VERSION:{ struct ofp15_port_desc_request *req; request = ofpraw_alloc(OFPRAW_OFPST15_PORT_DESC_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->port_no = ofputil_port_to_ofp11(port); break; } default: OVS_NOT_REACHED(); } return request; } void ofputil_append_port_desc_stats_reply(const struct ofputil_phy_port *pp, struct ovs_list *replies) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); size_t start_ofs = reply->size; ofputil_put_phy_port(ofpmp_version(replies), pp, reply); ofpmp_postappend(replies, start_ofs); } /* ofputil_switch_config */ /* Decodes 'oh', which must be an OFPT_GET_CONFIG_REPLY or OFPT_SET_CONFIG * message, into 'config'. Returns false if 'oh' contained any flags that * aren't specified in its version of OpenFlow, true otherwise. */ static bool ofputil_decode_switch_config(const struct ofp_header *oh, struct ofputil_switch_config *config) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); const struct ofp_switch_config *osc = ofpbuf_pull(&b, sizeof *osc); config->frag = ntohs(osc->flags) & OFPC_FRAG_MASK; config->miss_send_len = ntohs(osc->miss_send_len); ovs_be16 valid_mask = htons(OFPC_FRAG_MASK); if (oh->version < OFP13_VERSION) { const ovs_be16 ttl_bit = htons(OFPC_INVALID_TTL_TO_CONTROLLER); valid_mask |= ttl_bit; config->invalid_ttl_to_controller = (osc->flags & ttl_bit) != 0; } else { config->invalid_ttl_to_controller = -1; } return !(osc->flags & ~valid_mask); } void ofputil_decode_get_config_reply(const struct ofp_header *oh, struct ofputil_switch_config *config) { ofputil_decode_switch_config(oh, config); } enum ofperr ofputil_decode_set_config(const struct ofp_header *oh, struct ofputil_switch_config *config) { return (ofputil_decode_switch_config(oh, config) ? 
0 : OFPERR_OFPSCFC_BAD_FLAGS); } static struct ofpbuf * ofputil_put_switch_config(const struct ofputil_switch_config *config, struct ofpbuf *b) { const struct ofp_header *oh = b->data; struct ofp_switch_config *osc = ofpbuf_put_zeros(b, sizeof *osc); osc->flags = htons(config->frag); if (config->invalid_ttl_to_controller > 0 && oh->version < OFP13_VERSION) { osc->flags |= htons(OFPC_INVALID_TTL_TO_CONTROLLER); } osc->miss_send_len = htons(config->miss_send_len); return b; } struct ofpbuf * ofputil_encode_get_config_reply(const struct ofp_header *request, const struct ofputil_switch_config *config) { struct ofpbuf *b = ofpraw_alloc_reply(OFPRAW_OFPT_GET_CONFIG_REPLY, request, 0); return ofputil_put_switch_config(config, b); } struct ofpbuf * ofputil_encode_set_config(const struct ofputil_switch_config *config, enum ofp_version version) { struct ofpbuf *b = ofpraw_alloc(OFPRAW_OFPT_SET_CONFIG, version, 0); return ofputil_put_switch_config(config, b); } /* ofputil_switch_features */ #define OFPC_COMMON (OFPC_FLOW_STATS | OFPC_TABLE_STATS | OFPC_PORT_STATS | \ OFPC_IP_REASM | OFPC_QUEUE_STATS) BUILD_ASSERT_DECL((int) OFPUTIL_C_FLOW_STATS == OFPC_FLOW_STATS); BUILD_ASSERT_DECL((int) OFPUTIL_C_TABLE_STATS == OFPC_TABLE_STATS); BUILD_ASSERT_DECL((int) OFPUTIL_C_PORT_STATS == OFPC_PORT_STATS); BUILD_ASSERT_DECL((int) OFPUTIL_C_IP_REASM == OFPC_IP_REASM); BUILD_ASSERT_DECL((int) OFPUTIL_C_QUEUE_STATS == OFPC_QUEUE_STATS); BUILD_ASSERT_DECL((int) OFPUTIL_C_ARP_MATCH_IP == OFPC_ARP_MATCH_IP); BUILD_ASSERT_DECL((int) OFPUTIL_C_PORT_BLOCKED == OFPC12_PORT_BLOCKED); BUILD_ASSERT_DECL((int) OFPUTIL_C_BUNDLES == OFPC14_BUNDLES); BUILD_ASSERT_DECL((int) OFPUTIL_C_FLOW_MONITORING == OFPC14_FLOW_MONITORING); static uint32_t ofputil_capabilities_mask(enum ofp_version ofp_version) { /* Handle capabilities whose bit is unique for all OpenFlow versions */ switch (ofp_version) { case OFP10_VERSION: case OFP11_VERSION: return OFPC_COMMON | OFPC_ARP_MATCH_IP; case OFP12_VERSION: case OFP13_VERSION: return OFPC_COMMON | OFPC12_PORT_BLOCKED; case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: return OFPC_COMMON | OFPC12_PORT_BLOCKED | OFPC14_BUNDLES | OFPC14_FLOW_MONITORING; default: /* Caller needs to check osf->header.version itself */ return 0; } } /* Pulls an OpenFlow "switch_features" structure from 'b' and decodes it into * an abstract representation in '*features', readying 'b' to iterate over the * OpenFlow port structures following 'osf' with later calls to * ofputil_pull_phy_port(). Returns 0 if successful, otherwise an OFPERR_* * value. 
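 *
 * For example, a caller decoding a features reply and then iterating over any
 * trailing ports might look like this sketch (ofputil_pull_phy_port() is used
 * the same way as in ofputil_decode_port_status() below, returning 0 per port
 * and EOF when the ports are exhausted):
 *
 *     struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
 *     struct ofputil_switch_features features;
 *     enum ofperr err = ofputil_pull_switch_features(&b, &features);
 *     struct ofputil_phy_port pp;
 *     while (!err && !ofputil_pull_phy_port(oh->version, &b, &pp)) {
 *         ...one port description is now in 'pp'...
 *     }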
*/
enum ofperr
ofputil_pull_switch_features(struct ofpbuf *b,
                             struct ofputil_switch_features *features)
{
    const struct ofp_header *oh = b->data;
    enum ofpraw raw = ofpraw_pull_assert(b);
    const struct ofp_switch_features *osf = ofpbuf_pull(b, sizeof *osf);

    features->datapath_id = ntohll(osf->datapath_id);
    features->n_buffers = ntohl(osf->n_buffers);
    features->n_tables = osf->n_tables;
    features->auxiliary_id = 0;

    features->capabilities = ntohl(osf->capabilities) &
        ofputil_capabilities_mask(oh->version);

    if (raw == OFPRAW_OFPT10_FEATURES_REPLY) {
        if (osf->capabilities & htonl(OFPC10_STP)) {
            features->capabilities |= OFPUTIL_C_STP;
        }
        features->ofpacts = ofpact_bitmap_from_openflow(osf->actions,
                                                        OFP10_VERSION);
    } else if (raw == OFPRAW_OFPT11_FEATURES_REPLY
               || raw == OFPRAW_OFPT13_FEATURES_REPLY) {
        if (osf->capabilities & htonl(OFPC11_GROUP_STATS)) {
            features->capabilities |= OFPUTIL_C_GROUP_STATS;
        }
        features->ofpacts = 0;
        if (raw == OFPRAW_OFPT13_FEATURES_REPLY) {
            features->auxiliary_id = osf->auxiliary_id;
        }
    } else {
        return OFPERR_OFPBRC_BAD_VERSION;
    }

    return 0;
}

/* In OpenFlow 1.0, 1.1, and 1.2, an OFPT_FEATURES_REPLY message lists all the
 * switch's ports, unless there are too many to fit.  In OpenFlow 1.3 and
 * later, an OFPT_FEATURES_REPLY does not list ports at all.
 *
 * Given a buffer 'b' that contains a Features Reply message, this function
 * checks if it contains a complete list of the switch's ports.  Returns true
 * if so.  Returns false if the list is missing (OF1.3+) or incomplete
 * (OF1.0/1.1/1.2), and in the latter case removes all of the ports from the
 * message.
 *
 * When this function returns false, the caller should send an OFPST_PORT_DESC
 * stats request to get the ports. */
bool
ofputil_switch_features_has_ports(struct ofpbuf *b)
{
    struct ofp_header *oh = b->data;
    size_t phy_port_size;

    if (oh->version >= OFP13_VERSION) {
        /* OpenFlow 1.3+ never has ports in the feature reply. */
        return false;
    }

    phy_port_size = (oh->version == OFP10_VERSION
                     ? sizeof(struct ofp10_phy_port)
                     : sizeof(struct ofp11_port));
    if (ntohs(oh->length) + phy_port_size <= UINT16_MAX) {
        /* There's room for additional ports in the feature reply.
         * Assume that the list is complete. */
        return true;
    }

    /* The feature reply has no room for more ports.  Probably the list is
     * truncated.  Drop the ports and tell the caller to retrieve them with
     * OFPST_PORT_DESC. */
    b->size = sizeof *oh + sizeof(struct ofp_switch_features);
    ofpmsg_update_length(b);
    return false;
}

/* Returns a buffer owned by the caller that encodes 'features' in the format
 * required by 'protocol' with the given 'xid'.  The caller should append port
 * information to the buffer with subsequent calls to
 * ofputil_put_switch_features_port().
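 *
 * A sketch of the intended encode-then-append pattern ('features', 'pp',
 * 'protocol', and 'xid' are assumed to come from the caller):
 *
 *     struct ofpbuf *b = ofputil_encode_switch_features(&features, protocol,
 *                                                       xid);
 *     ofputil_put_switch_features_port(&pp, b);    -- repeat once per port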
*/ struct ofpbuf * ofputil_encode_switch_features(const struct ofputil_switch_features *features, enum ofputil_protocol protocol, ovs_be32 xid) { struct ofp_switch_features *osf; struct ofpbuf *b; enum ofp_version version; enum ofpraw raw; version = ofputil_protocol_to_ofp_version(protocol); switch (version) { case OFP10_VERSION: raw = OFPRAW_OFPT10_FEATURES_REPLY; break; case OFP11_VERSION: case OFP12_VERSION: raw = OFPRAW_OFPT11_FEATURES_REPLY; break; case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: raw = OFPRAW_OFPT13_FEATURES_REPLY; break; default: OVS_NOT_REACHED(); } b = ofpraw_alloc_xid(raw, version, xid, 0); osf = ofpbuf_put_zeros(b, sizeof *osf); osf->datapath_id = htonll(features->datapath_id); osf->n_buffers = htonl(features->n_buffers); osf->n_tables = features->n_tables; osf->capabilities = htonl(features->capabilities & ofputil_capabilities_mask(version)); switch (version) { case OFP10_VERSION: if (features->capabilities & OFPUTIL_C_STP) { osf->capabilities |= htonl(OFPC10_STP); } osf->actions = ofpact_bitmap_to_openflow(features->ofpacts, OFP10_VERSION); break; case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: osf->auxiliary_id = features->auxiliary_id; /* fall through */ case OFP11_VERSION: case OFP12_VERSION: if (features->capabilities & OFPUTIL_C_GROUP_STATS) { osf->capabilities |= htonl(OFPC11_GROUP_STATS); } break; default: OVS_NOT_REACHED(); } return b; } /* Encodes 'pp' into the format required by the switch_features message already * in 'b', which should have been returned by ofputil_encode_switch_features(), * and appends the encoded version to 'b'. */ void ofputil_put_switch_features_port(const struct ofputil_phy_port *pp, struct ofpbuf *b) { const struct ofp_header *oh = b->data; if (oh->version < OFP13_VERSION) { /* Try adding a port description to the message, but drop it again if * the buffer overflows. (This possibility for overflow is why * OpenFlow 1.3+ moved port descriptions into a multipart message.) */ size_t start_ofs = b->size; ofputil_put_phy_port(oh->version, pp, b); if (b->size > UINT16_MAX) { b->size = start_ofs; } } } /* ofputil_port_status */ /* Decodes the OpenFlow "port status" message in '*ops' into an abstract form * in '*ps'. Returns 0 if successful, otherwise an OFPERR_* value. */ enum ofperr ofputil_decode_port_status(const struct ofp_header *oh, struct ofputil_port_status *ps) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); const struct ofp_port_status *ops = ofpbuf_pull(&b, sizeof *ops); if (ops->reason != OFPPR_ADD && ops->reason != OFPPR_DELETE && ops->reason != OFPPR_MODIFY) { return OFPERR_NXBRC_BAD_REASON; } ps->reason = ops->reason; int retval = ofputil_pull_phy_port(oh->version, &b, &ps->desc); ovs_assert(retval != EOF); return retval; } /* Converts the abstract form of a "port status" message in '*ps' into an * OpenFlow message suitable for 'protocol', and returns that encoded form in * a buffer owned by the caller. 
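 *
 * For example (a sketch; 'desc' is an already-filled ofputil_phy_port
 * assumed to come from the caller):
 *
 *     struct ofputil_port_status ps = { .reason = OFPPR_MODIFY,
 *                                       .desc = desc };
 *     struct ofpbuf *msg = ofputil_encode_port_status(&ps, protocol);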
*/ struct ofpbuf * ofputil_encode_port_status(const struct ofputil_port_status *ps, enum ofputil_protocol protocol) { struct ofp_port_status *ops; struct ofpbuf *b; enum ofp_version version; enum ofpraw raw; version = ofputil_protocol_to_ofp_version(protocol); switch (version) { case OFP10_VERSION: raw = OFPRAW_OFPT10_PORT_STATUS; break; case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: raw = OFPRAW_OFPT11_PORT_STATUS; break; case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: raw = OFPRAW_OFPT14_PORT_STATUS; break; default: OVS_NOT_REACHED(); } b = ofpraw_alloc_xid(raw, version, htonl(0), 0); ops = ofpbuf_put_zeros(b, sizeof *ops); ops->reason = ps->reason; ofputil_put_phy_port(version, &ps->desc, b); ofpmsg_update_length(b); return b; } /* ofputil_port_mod */ static enum ofperr parse_port_mod_ethernet_property(struct ofpbuf *property, struct ofputil_port_mod *pm) { ovs_be32 advertise; enum ofperr error; error = ofpprop_parse_be32(property, &advertise); if (!error) { pm->advertise = netdev_port_features_from_ofp11(advertise); } return error; } /* Decodes the OpenFlow "port mod" message in '*oh' into an abstract form in * '*pm'. Returns 0 if successful, otherwise an OFPERR_* value. */ enum ofperr ofputil_decode_port_mod(const struct ofp_header *oh, struct ofputil_port_mod *pm, bool loose) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT10_PORT_MOD) { const struct ofp10_port_mod *opm = b.data; pm->port_no = u16_to_ofp(ntohs(opm->port_no)); pm->hw_addr = opm->hw_addr; pm->config = ntohl(opm->config) & OFPPC10_ALL; pm->mask = ntohl(opm->mask) & OFPPC10_ALL; pm->advertise = netdev_port_features_from_ofp10(opm->advertise); } else if (raw == OFPRAW_OFPT11_PORT_MOD) { const struct ofp11_port_mod *opm = b.data; enum ofperr error; error = ofputil_port_from_ofp11(opm->port_no, &pm->port_no); if (error) { return error; } pm->hw_addr = opm->hw_addr; pm->config = ntohl(opm->config) & OFPPC11_ALL; pm->mask = ntohl(opm->mask) & OFPPC11_ALL; pm->advertise = netdev_port_features_from_ofp11(opm->advertise); } else if (raw == OFPRAW_OFPT14_PORT_MOD) { const struct ofp14_port_mod *opm = ofpbuf_pull(&b, sizeof *opm); enum ofperr error; memset(pm, 0, sizeof *pm); error = ofputil_port_from_ofp11(opm->port_no, &pm->port_no); if (error) { return error; } pm->hw_addr = opm->hw_addr; pm->config = ntohl(opm->config) & OFPPC11_ALL; pm->mask = ntohl(opm->mask) & OFPPC11_ALL; while (b.size > 0) { struct ofpbuf property; enum ofperr error; uint64_t type; error = ofpprop_pull(&b, &property, &type); if (error) { return error; } switch (type) { case OFPPMPT14_ETHERNET: error = parse_port_mod_ethernet_property(&property, pm); break; default: error = OFPPROP_UNKNOWN(loose, "port_mod", type); break; } if (error) { return error; } } } else { return OFPERR_OFPBRC_BAD_TYPE; } pm->config &= pm->mask; return 0; } /* Converts the abstract form of a "port mod" message in '*pm' into an OpenFlow * message suitable for 'protocol', and returns that encoded form in a buffer * owned by the caller. 
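 *
 * A sketch of bringing a port administratively down ('port_no' and 'hw_addr'
 * are assumed; OFPPC_PORT_DOWN is the standard OpenFlow port-config bit, not
 * defined in this file):
 *
 *     struct ofputil_port_mod pm = { .port_no = port_no,
 *                                    .hw_addr = hw_addr,
 *                                    .config = OFPPC_PORT_DOWN,
 *                                    .mask = OFPPC_PORT_DOWN };
 *     struct ofpbuf *msg = ofputil_encode_port_mod(&pm, protocol);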
*/ struct ofpbuf * ofputil_encode_port_mod(const struct ofputil_port_mod *pm, enum ofputil_protocol protocol) { enum ofp_version ofp_version = ofputil_protocol_to_ofp_version(protocol); struct ofpbuf *b; switch (ofp_version) { case OFP10_VERSION: { struct ofp10_port_mod *opm; b = ofpraw_alloc(OFPRAW_OFPT10_PORT_MOD, ofp_version, 0); opm = ofpbuf_put_zeros(b, sizeof *opm); opm->port_no = htons(ofp_to_u16(pm->port_no)); opm->hw_addr = pm->hw_addr; opm->config = htonl(pm->config & OFPPC10_ALL); opm->mask = htonl(pm->mask & OFPPC10_ALL); opm->advertise = netdev_port_features_to_ofp10(pm->advertise); break; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: { struct ofp11_port_mod *opm; b = ofpraw_alloc(OFPRAW_OFPT11_PORT_MOD, ofp_version, 0); opm = ofpbuf_put_zeros(b, sizeof *opm); opm->port_no = ofputil_port_to_ofp11(pm->port_no); opm->hw_addr = pm->hw_addr; opm->config = htonl(pm->config & OFPPC11_ALL); opm->mask = htonl(pm->mask & OFPPC11_ALL); opm->advertise = netdev_port_features_to_ofp11(pm->advertise); break; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp14_port_mod *opm; b = ofpraw_alloc(OFPRAW_OFPT14_PORT_MOD, ofp_version, 0); opm = ofpbuf_put_zeros(b, sizeof *opm); opm->port_no = ofputil_port_to_ofp11(pm->port_no); opm->hw_addr = pm->hw_addr; opm->config = htonl(pm->config & OFPPC11_ALL); opm->mask = htonl(pm->mask & OFPPC11_ALL); if (pm->advertise) { ofpprop_put_be32(b, OFPPMPT14_ETHERNET, netdev_port_features_to_ofp11(pm->advertise)); } break; } default: OVS_NOT_REACHED(); } return b; } /* Table features. */ static enum ofperr pull_table_feature_property(struct ofpbuf *msg, struct ofpbuf *payload, uint64_t *typep) { enum ofperr error; error = ofpprop_pull(msg, payload, typep); if (payload && !error) { ofpbuf_pull(payload, (char *)payload->msg - (char *)payload->header); } return error; } static enum ofperr parse_action_bitmap(struct ofpbuf *payload, enum ofp_version ofp_version, uint64_t *ofpacts) { uint32_t types = 0; while (payload->size > 0) { enum ofperr error; uint64_t type; error = ofpprop_pull__(payload, NULL, 1, 0x10000, &type); if (error) { return error; } if (type < CHAR_BIT * sizeof types) { types |= 1u << type; } } *ofpacts = ofpact_bitmap_from_openflow(htonl(types), ofp_version); return 0; } static enum ofperr parse_instruction_ids(struct ofpbuf *payload, bool loose, uint32_t *insts) { *insts = 0; while (payload->size > 0) { enum ovs_instruction_type inst; enum ofperr error; uint64_t ofpit; /* OF1.3 and OF1.4 aren't clear about padding in the instruction IDs. * It seems clear that they aren't padded to 8 bytes, though, because * both standards say that "non-experimenter instructions are 4 bytes" * and do not mention any padding before the first instruction ID. * (There wouldn't be any point in padding to 8 bytes if the IDs were * aligned on an odd 4-byte boundary.) * * Anyway, we just assume they're all glommed together on byte * boundaries. 
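 *
 * For example, a payload advertising Goto-Table (type 1) and Write-Actions
 * (type 3) would be the 8 bytes 00 01 00 04 00 03 00 04: two (type, length)
 * pairs of 4 bytes each, back to back with no padding between them.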
*/ error = ofpprop_pull__(payload, NULL, 1, 0x10000, &ofpit); if (error) { return error; } error = ovs_instruction_type_from_inst_type(&inst, ofpit); if (!error) { *insts |= 1u << inst; } else if (!loose) { return error; } } return 0; } static enum ofperr parse_table_features_next_table(struct ofpbuf *payload, unsigned long int *next_tables) { size_t i; memset(next_tables, 0, bitmap_n_bytes(255)); for (i = 0; i < payload->size; i++) { uint8_t id = ((const uint8_t *) payload->data)[i]; if (id >= 255) { return OFPERR_OFPBPC_BAD_VALUE; } bitmap_set1(next_tables, id); } return 0; } static enum ofperr parse_oxms(struct ofpbuf *payload, bool loose, struct mf_bitmap *exactp, struct mf_bitmap *maskedp) { struct mf_bitmap exact = MF_BITMAP_INITIALIZER; struct mf_bitmap masked = MF_BITMAP_INITIALIZER; while (payload->size > 0) { const struct mf_field *field; enum ofperr error; bool hasmask; error = nx_pull_header(payload, NULL, &field, &hasmask); if (!error) { bitmap_set1(hasmask ? masked.bm : exact.bm, field->id); } else if (error != OFPERR_OFPBMC_BAD_FIELD || !loose) { return error; } } if (exactp) { *exactp = exact; } else if (!bitmap_is_all_zeros(exact.bm, MFF_N_IDS)) { return OFPERR_OFPBMC_BAD_MASK; } if (maskedp) { *maskedp = masked; } else if (!bitmap_is_all_zeros(masked.bm, MFF_N_IDS)) { return OFPERR_OFPBMC_BAD_MASK; } return 0; } /* Converts an OFPMP_TABLE_FEATURES request or reply in 'msg' into an abstract * ofputil_table_features in 'tf'. * * If 'loose' is true, this function ignores properties and values that it does * not understand, as a controller would want to do when interpreting * capabilities provided by a switch. If 'loose' is false, this function * treats unknown properties and values as an error, as a switch would want to * do when interpreting a configuration request made by a controller. * * A single OpenFlow message can specify features for multiple tables. Calling * this function multiple times for a single 'msg' iterates through the tables * in the message. The caller must initially leave 'msg''s layer pointers null * and not modify them between calls. * * Returns 0 if successful, EOF if no tables were left in this 'msg', otherwise * a positive "enum ofperr" value. 
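 *
 * So a typical decoding loop looks like this sketch:
 *
 *     struct ofputil_table_features tf;
 *     int retval;
 *     while (!(retval = ofputil_decode_table_features(msg, &tf, true))) {
 *         ...the features of one table are now in 'tf'...
 *     }
 *     if (retval != EOF) {
 *         ...handle the error...
 *     }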
*/ int ofputil_decode_table_features(struct ofpbuf *msg, struct ofputil_table_features *tf, bool loose) { memset(tf, 0, sizeof *tf); if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } const struct ofp_header *oh = msg->header; struct ofp13_table_features *otf = msg->data; if (msg->size < sizeof *otf) { return OFPERR_OFPBPC_BAD_LEN; } unsigned int len = ntohs(otf->length); if (len < sizeof *otf || len % 8 || len > msg->size) { return OFPERR_OFPBPC_BAD_LEN; } tf->table_id = otf->table_id; if (tf->table_id == OFPTT_ALL) { return OFPERR_OFPTFFC_BAD_TABLE; } ovs_strlcpy(tf->name, otf->name, OFP_MAX_TABLE_NAME_LEN); tf->metadata_match = otf->metadata_match; tf->metadata_write = otf->metadata_write; tf->miss_config = OFPUTIL_TABLE_MISS_DEFAULT; if (oh->version >= OFP14_VERSION) { uint32_t caps = ntohl(otf->capabilities); tf->supports_eviction = (caps & OFPTC14_EVICTION) != 0; tf->supports_vacancy_events = (caps & OFPTC14_VACANCY_EVENTS) != 0; } else { tf->supports_eviction = -1; tf->supports_vacancy_events = -1; } tf->max_entries = ntohl(otf->max_entries); struct ofpbuf properties = ofpbuf_const_initializer(ofpbuf_pull(msg, len), len); ofpbuf_pull(&properties, sizeof *otf); while (properties.size > 0) { struct ofpbuf payload; enum ofperr error; uint64_t type; error = pull_table_feature_property(&properties, &payload, &type); if (error) { return error; } switch ((enum ofp13_table_feature_prop_type) type) { case OFPTFPT13_INSTRUCTIONS: error = parse_instruction_ids(&payload, loose, &tf->nonmiss.instructions); break; case OFPTFPT13_INSTRUCTIONS_MISS: error = parse_instruction_ids(&payload, loose, &tf->miss.instructions); break; case OFPTFPT13_NEXT_TABLES: error = parse_table_features_next_table(&payload, tf->nonmiss.next); break; case OFPTFPT13_NEXT_TABLES_MISS: error = parse_table_features_next_table(&payload, tf->miss.next); break; case OFPTFPT13_WRITE_ACTIONS: error = parse_action_bitmap(&payload, oh->version, &tf->nonmiss.write.ofpacts); break; case OFPTFPT13_WRITE_ACTIONS_MISS: error = parse_action_bitmap(&payload, oh->version, &tf->miss.write.ofpacts); break; case OFPTFPT13_APPLY_ACTIONS: error = parse_action_bitmap(&payload, oh->version, &tf->nonmiss.apply.ofpacts); break; case OFPTFPT13_APPLY_ACTIONS_MISS: error = parse_action_bitmap(&payload, oh->version, &tf->miss.apply.ofpacts); break; case OFPTFPT13_MATCH: error = parse_oxms(&payload, loose, &tf->match, &tf->mask); break; case OFPTFPT13_WILDCARDS: error = parse_oxms(&payload, loose, &tf->wildcard, NULL); break; case OFPTFPT13_WRITE_SETFIELD: error = parse_oxms(&payload, loose, &tf->nonmiss.write.set_fields, NULL); break; case OFPTFPT13_WRITE_SETFIELD_MISS: error = parse_oxms(&payload, loose, &tf->miss.write.set_fields, NULL); break; case OFPTFPT13_APPLY_SETFIELD: error = parse_oxms(&payload, loose, &tf->nonmiss.apply.set_fields, NULL); break; case OFPTFPT13_APPLY_SETFIELD_MISS: error = parse_oxms(&payload, loose, &tf->miss.apply.set_fields, NULL); break; case OFPTFPT13_EXPERIMENTER: case OFPTFPT13_EXPERIMENTER_MISS: default: error = OFPPROP_UNKNOWN(loose, "table features", type); break; } if (error) { return error; } } /* Fix inconsistencies: * * - Turn on 'match' bits that are set in 'mask', because maskable * fields are matchable. * * - Turn on 'wildcard' bits that are set in 'mask', because a field * that is arbitrarily maskable can be wildcarded entirely. * * - Turn off 'wildcard' bits that are not in 'match', because a field * must be matchable for it to be meaningfully wildcarded. 
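 *
 * As a concrete example: if a switch reports MFF_ETH_SRC only in 'mask',
 * the fix-ups below also set it in 'match' (a maskable field is matchable)
 * and in 'wildcard' (an arbitrarily maskable field can be wildcarded
 * entirely), while a field reported in 'wildcard' but absent from 'match'
 * is cleared from 'wildcard'.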
*/ bitmap_or(tf->match.bm, tf->mask.bm, MFF_N_IDS); bitmap_or(tf->wildcard.bm, tf->mask.bm, MFF_N_IDS); bitmap_and(tf->wildcard.bm, tf->match.bm, MFF_N_IDS); return 0; } /* Encodes and returns a request to obtain the table features of a switch. * The message is encoded for OpenFlow version 'ofp_version'. */ struct ofpbuf * ofputil_encode_table_features_request(enum ofp_version ofp_version) { struct ofpbuf *request = NULL; switch (ofp_version) { case OFP10_VERSION: case OFP11_VERSION: case OFP12_VERSION: ovs_fatal(0, "dump-table-features needs OpenFlow 1.3 or later " "(\'-O OpenFlow13\')"); case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: request = ofpraw_alloc(OFPRAW_OFPST13_TABLE_FEATURES_REQUEST, ofp_version, 0); break; default: OVS_NOT_REACHED(); } return request; } static void put_fields_property(struct ofpbuf *reply, const struct mf_bitmap *fields, const struct mf_bitmap *masks, enum ofp13_table_feature_prop_type property, enum ofp_version version) { size_t start_ofs; int field; start_ofs = ofpprop_start(reply, property); BITMAP_FOR_EACH_1 (field, MFF_N_IDS, fields->bm) { nx_put_header(reply, field, version, masks && bitmap_is_set(masks->bm, field)); } ofpprop_end(reply, start_ofs); } static void put_table_action_features(struct ofpbuf *reply, const struct ofputil_table_action_features *taf, enum ofp13_table_feature_prop_type actions_type, enum ofp13_table_feature_prop_type set_fields_type, int miss_offset, enum ofp_version version) { ofpprop_put_bitmap(reply, actions_type + miss_offset, ntohl(ofpact_bitmap_to_openflow(taf->ofpacts, version))); put_fields_property(reply, &taf->set_fields, NULL, set_fields_type + miss_offset, version); } static void put_table_instruction_features( struct ofpbuf *reply, const struct ofputil_table_instruction_features *tif, int miss_offset, enum ofp_version version) { size_t start_ofs; uint8_t table_id; ofpprop_put_bitmap(reply, OFPTFPT13_INSTRUCTIONS + miss_offset, ntohl(ovsinst_bitmap_to_openflow(tif->instructions, version))); start_ofs = ofpprop_start(reply, OFPTFPT13_NEXT_TABLES + miss_offset); BITMAP_FOR_EACH_1 (table_id, 255, tif->next) { ofpbuf_put(reply, &table_id, 1); } ofpprop_end(reply, start_ofs); put_table_action_features(reply, &tif->write, OFPTFPT13_WRITE_ACTIONS, OFPTFPT13_WRITE_SETFIELD, miss_offset, version); put_table_action_features(reply, &tif->apply, OFPTFPT13_APPLY_ACTIONS, OFPTFPT13_APPLY_SETFIELD, miss_offset, version); } void ofputil_append_table_features_reply(const struct ofputil_table_features *tf, struct ovs_list *replies) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); enum ofp_version version = ofpmp_version(replies); size_t start_ofs = reply->size; struct ofp13_table_features *otf; otf = ofpbuf_put_zeros(reply, sizeof *otf); otf->table_id = tf->table_id; ovs_strlcpy(otf->name, tf->name, sizeof otf->name); otf->metadata_match = tf->metadata_match; otf->metadata_write = tf->metadata_write; if (version >= OFP14_VERSION) { if (tf->supports_eviction) { otf->capabilities |= htonl(OFPTC14_EVICTION); } if (tf->supports_vacancy_events) { otf->capabilities |= htonl(OFPTC14_VACANCY_EVENTS); } } otf->max_entries = htonl(tf->max_entries); put_table_instruction_features(reply, &tf->nonmiss, 0, version); put_table_instruction_features(reply, &tf->miss, 1, version); put_fields_property(reply, &tf->match, &tf->mask, OFPTFPT13_MATCH, version); put_fields_property(reply, &tf->wildcard, NULL, OFPTFPT13_WILDCARDS, version); otf = ofpbuf_at_assert(reply, start_ofs, sizeof *otf); otf->length = 
htons(reply->size - start_ofs); ofpmp_postappend(replies, start_ofs); } static enum ofperr parse_table_desc_vacancy_property(struct ofpbuf *property, struct ofputil_table_desc *td) { struct ofp14_table_mod_prop_vacancy *otv = property->data; if (property->size != sizeof *otv) { return OFPERR_OFPBPC_BAD_LEN; } td->table_vacancy.vacancy_down = otv->vacancy_down; td->table_vacancy.vacancy_up = otv->vacancy_up; td->table_vacancy.vacancy = otv->vacancy; return 0; } /* Decodes the next OpenFlow "table desc" message (of possibly several) from * 'msg' into an abstract form in '*td'. Returns 0 if successful, EOF if the * last "table desc" in 'msg' was already decoded, otherwise an OFPERR_* * value. */ int ofputil_decode_table_desc(struct ofpbuf *msg, struct ofputil_table_desc *td, enum ofp_version version) { memset(td, 0, sizeof *td); if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } struct ofp14_table_desc *otd = ofpbuf_try_pull(msg, sizeof *otd); if (!otd) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFP14_TABLE_DESC reply has %"PRIu32" " "leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } td->table_id = otd->table_id; size_t length = ntohs(otd->length); if (length < sizeof *otd || length - sizeof *otd > msg->size) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFP14_TABLE_DESC reply claims invalid " "length %"PRIuSIZE, length); return OFPERR_OFPBRC_BAD_LEN; } length -= sizeof *otd; td->eviction = ofputil_decode_table_eviction(otd->config, version); td->vacancy = ofputil_decode_table_vacancy(otd->config, version); td->eviction_flags = UINT32_MAX; struct ofpbuf properties = ofpbuf_const_initializer( ofpbuf_pull(msg, length), length); while (properties.size > 0) { struct ofpbuf payload; enum ofperr error; uint64_t type; error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case OFPTMPT14_EVICTION: error = ofpprop_parse_u32(&payload, &td->eviction_flags); break; case OFPTMPT14_VACANCY: error = parse_table_desc_vacancy_property(&payload, td); break; default: error = OFPPROP_UNKNOWN(true, "table_desc", type); break; } if (error) { return error; } } return 0; } /* Encodes and returns a request to obtain description of tables of a switch. * The message is encoded for OpenFlow version 'ofp_version'. */ struct ofpbuf * ofputil_encode_table_desc_request(enum ofp_version ofp_version) { struct ofpbuf *request = NULL; if (ofp_version >= OFP14_VERSION) { request = ofpraw_alloc(OFPRAW_OFPST14_TABLE_DESC_REQUEST, ofp_version, 0); } else { ovs_fatal(0, "dump-table-desc needs OpenFlow 1.4 or later " "(\'-O OpenFlow14\')"); } return request; } /* Function to append Table desc information in a reply list. 
*/ void ofputil_append_table_desc_reply(const struct ofputil_table_desc *td, struct ovs_list *replies, enum ofp_version version) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); size_t start_otd; struct ofp14_table_desc *otd; start_otd = reply->size; ofpbuf_put_zeros(reply, sizeof *otd); if (td->eviction_flags != UINT32_MAX) { ofpprop_put_u32(reply, OFPTMPT14_EVICTION, td->eviction_flags); } if (td->vacancy == OFPUTIL_TABLE_VACANCY_ON) { struct ofp14_table_mod_prop_vacancy *otv; otv = ofpprop_put_zeros(reply, OFPTMPT14_VACANCY, sizeof *otv); otv->vacancy_down = td->table_vacancy.vacancy_down; otv->vacancy_up = td->table_vacancy.vacancy_up; otv->vacancy = td->table_vacancy.vacancy; } otd = ofpbuf_at_assert(reply, start_otd, sizeof *otd); otd->length = htons(reply->size - start_otd); otd->table_id = td->table_id; otd->config = ofputil_encode_table_config(OFPUTIL_TABLE_MISS_DEFAULT, td->eviction, td->vacancy, version); ofpmp_postappend(replies, start_otd); } /* This function parses Vacancy property, and decodes the * ofp14_table_mod_prop_vacancy in ofputil_table_mod. * Returns OFPERR_OFPBPC_BAD_VALUE error code when vacancy_down is * greater than vacancy_up and also when current vacancy has non-zero * value. Returns 0 on success. */ static enum ofperr parse_table_mod_vacancy_property(struct ofpbuf *property, struct ofputil_table_mod *tm) { struct ofp14_table_mod_prop_vacancy *otv = property->data; if (property->size != sizeof *otv) { return OFPERR_OFPBPC_BAD_LEN; } tm->table_vacancy.vacancy_down = otv->vacancy_down; tm->table_vacancy.vacancy_up = otv->vacancy_up; if (tm->table_vacancy.vacancy_down > tm->table_vacancy.vacancy_up) { OFPPROP_LOG(&bad_ofmsg_rl, false, "Value of vacancy_down is greater than vacancy_up"); return OFPERR_OFPBPC_BAD_VALUE; } if (tm->table_vacancy.vacancy_down > 100 || tm->table_vacancy.vacancy_up > 100) { OFPPROP_LOG(&bad_ofmsg_rl, false, "Vacancy threshold percentage " "should not be greater than 100"); return OFPERR_OFPBPC_BAD_VALUE; } tm->table_vacancy.vacancy = otv->vacancy; if (tm->table_vacancy.vacancy) { OFPPROP_LOG(&bad_ofmsg_rl, false, "Vacancy value should be zero for table-mod messages"); return OFPERR_OFPBPC_BAD_VALUE; } return 0; } /* Given 'config', taken from an OpenFlow 'version' message that specifies * table configuration (a table mod, table stats, or table features message), * returns the table vacancy configuration that it specifies. * * Only OpenFlow 1.4 and later specify table vacancy configuration this way, * so for other 'version' this function always returns * OFPUTIL_TABLE_VACANCY_DEFAULT. */ static enum ofputil_table_vacancy ofputil_decode_table_vacancy(ovs_be32 config, enum ofp_version version) { return (version < OFP14_VERSION ? OFPUTIL_TABLE_VACANCY_DEFAULT : config & htonl(OFPTC14_VACANCY_EVENTS) ? OFPUTIL_TABLE_VACANCY_ON : OFPUTIL_TABLE_VACANCY_OFF); } /* Given 'config', taken from an OpenFlow 'version' message that specifies * table configuration (a table mod, table stats, or table features message), * returns the table eviction configuration that it specifies. * * Only OpenFlow 1.4 and later specify table eviction configuration this way, * so for other 'version' values this function always returns * OFPUTIL_TABLE_EVICTION_DEFAULT. */ static enum ofputil_table_eviction ofputil_decode_table_eviction(ovs_be32 config, enum ofp_version version) { return (version < OFP14_VERSION ? OFPUTIL_TABLE_EVICTION_DEFAULT : config & htonl(OFPTC14_EVICTION) ? 
OFPUTIL_TABLE_EVICTION_ON : OFPUTIL_TABLE_EVICTION_OFF); } /* Returns a bitmap of OFPTC* values suitable for 'config' fields in various * OpenFlow messages of the given 'version', based on the provided 'miss' and * 'eviction' values. */ static ovs_be32 ofputil_encode_table_config(enum ofputil_table_miss miss, enum ofputil_table_eviction eviction, enum ofputil_table_vacancy vacancy, enum ofp_version version) { uint32_t config = 0; /* Search for "OFPTC_* Table Configuration" in the documentation for more * information on the crazy evolution of this field. */ switch (version) { case OFP10_VERSION: /* OpenFlow 1.0 didn't have such a field, any value ought to do. */ return htonl(0); case OFP11_VERSION: case OFP12_VERSION: /* OpenFlow 1.1 and 1.2 define only OFPTC11_TABLE_MISS_*. */ switch (miss) { case OFPUTIL_TABLE_MISS_DEFAULT: /* Really this shouldn't be used for encoding (the caller should * provide a specific value) but I can't imagine that defaulting to * the fall-through case here will hurt. */ case OFPUTIL_TABLE_MISS_CONTROLLER: default: return htonl(OFPTC11_TABLE_MISS_CONTROLLER); case OFPUTIL_TABLE_MISS_CONTINUE: return htonl(OFPTC11_TABLE_MISS_CONTINUE); case OFPUTIL_TABLE_MISS_DROP: return htonl(OFPTC11_TABLE_MISS_DROP); } OVS_NOT_REACHED(); case OFP13_VERSION: /* OpenFlow 1.3 removed OFPTC11_TABLE_MISS_* and didn't define any new * flags, so this is correct. */ return htonl(0); case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: /* OpenFlow 1.4 introduced OFPTC14_EVICTION and * OFPTC14_VACANCY_EVENTS. */ if (eviction == OFPUTIL_TABLE_EVICTION_ON) { config |= OFPTC14_EVICTION; } if (vacancy == OFPUTIL_TABLE_VACANCY_ON) { config |= OFPTC14_VACANCY_EVENTS; } return htonl(config); } OVS_NOT_REACHED(); } /* Given 'config', taken from an OpenFlow 'version' message that specifies * table configuration (a table mod, table stats, or table features message), * returns the table miss configuration that it specifies. * * Only OpenFlow 1.1 and 1.2 specify table miss configurations this way, so for * other 'version' values this function always returns * OFPUTIL_TABLE_MISS_DEFAULT. */ static enum ofputil_table_miss ofputil_decode_table_miss(ovs_be32 config_, enum ofp_version version) { uint32_t config = ntohl(config_); if (version == OFP11_VERSION || version == OFP12_VERSION) { switch (config & OFPTC11_TABLE_MISS_MASK) { case OFPTC11_TABLE_MISS_CONTROLLER: return OFPUTIL_TABLE_MISS_CONTROLLER; case OFPTC11_TABLE_MISS_CONTINUE: return OFPUTIL_TABLE_MISS_CONTINUE; case OFPTC11_TABLE_MISS_DROP: return OFPUTIL_TABLE_MISS_DROP; default: VLOG_WARN_RL(&bad_ofmsg_rl, "bad table miss config %d", config); return OFPUTIL_TABLE_MISS_CONTROLLER; } } else { return OFPUTIL_TABLE_MISS_DEFAULT; } } /* Decodes the OpenFlow "table mod" message in '*oh' into an abstract form in * '*pm'. Returns 0 if successful, otherwise an OFPERR_* value. 
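 *
 * Minimal caller sketch (illustrative; error handling beyond the return
 * value is elided, and 'oh' is assumed to be a received OFPT_TABLE_MOD):
 *
 *     struct ofputil_table_mod tm;
 *     enum ofperr error = ofputil_decode_table_mod(oh, &tm);
 *     if (!error) {
 *         ... act on tm.table_id, tm.miss, tm.eviction, tm.vacancy ...
 *     }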
*/
enum ofperr
ofputil_decode_table_mod(const struct ofp_header *oh,
                         struct ofputil_table_mod *pm)
{
    memset(pm, 0, sizeof *pm);
    pm->miss = OFPUTIL_TABLE_MISS_DEFAULT;
    pm->eviction = OFPUTIL_TABLE_EVICTION_DEFAULT;
    pm->eviction_flags = UINT32_MAX;
    pm->vacancy = OFPUTIL_TABLE_VACANCY_DEFAULT;

    struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
    enum ofpraw raw = ofpraw_pull_assert(&b);
    if (raw == OFPRAW_OFPT11_TABLE_MOD) {
        const struct ofp11_table_mod *otm = b.data;

        pm->table_id = otm->table_id;
        pm->miss = ofputil_decode_table_miss(otm->config, oh->version);
    } else if (raw == OFPRAW_OFPT14_TABLE_MOD) {
        const struct ofp14_table_mod *otm = ofpbuf_pull(&b, sizeof *otm);

        pm->table_id = otm->table_id;
        pm->miss = ofputil_decode_table_miss(otm->config, oh->version);
        pm->eviction = ofputil_decode_table_eviction(otm->config, oh->version);
        pm->vacancy = ofputil_decode_table_vacancy(otm->config, oh->version);
        while (b.size > 0) {
            struct ofpbuf property;
            enum ofperr error;
            uint64_t type;

            error = ofpprop_pull(&b, &property, &type);
            if (error) {
                return error;
            }

            switch (type) {
            case OFPTMPT14_EVICTION:
                error = ofpprop_parse_u32(&property, &pm->eviction_flags);
                break;

            case OFPTMPT14_VACANCY:
                error = parse_table_mod_vacancy_property(&property, pm);
                break;

            default:
                error = OFPERR_OFPBRC_BAD_TYPE;
                break;
            }
            if (error) {
                return error;
            }
        }
    } else {
        return OFPERR_OFPBRC_BAD_TYPE;
    }

    return 0;
}

/* Converts the abstract form of a "table mod" message in '*tm' into an
 * OpenFlow message suitable for 'protocol', and returns that encoded form in
 * a buffer owned by the caller. */
struct ofpbuf *
ofputil_encode_table_mod(const struct ofputil_table_mod *tm,
                         enum ofputil_protocol protocol)
{
    enum ofp_version ofp_version = ofputil_protocol_to_ofp_version(protocol);
    struct ofpbuf *b;

    switch (ofp_version) {
    case OFP10_VERSION: {
        ovs_fatal(0, "table mod needs OpenFlow 1.1 or later "
                  "(\'-O OpenFlow11\')");
        break;
    }
    case OFP11_VERSION:
    case OFP12_VERSION:
    case OFP13_VERSION: {
        struct ofp11_table_mod *otm;

        b = ofpraw_alloc(OFPRAW_OFPT11_TABLE_MOD, ofp_version, 0);
        otm = ofpbuf_put_zeros(b, sizeof *otm);
        otm->table_id = tm->table_id;
        otm->config = ofputil_encode_table_config(tm->miss, tm->eviction,
                                                  tm->vacancy, ofp_version);
        break;
    }
    case OFP14_VERSION:
    case OFP15_VERSION:
    case OFP16_VERSION: {
        struct ofp14_table_mod *otm;

        b = ofpraw_alloc(OFPRAW_OFPT14_TABLE_MOD, ofp_version, 0);
        otm = ofpbuf_put_zeros(b, sizeof *otm);
        otm->table_id = tm->table_id;
        otm->config = ofputil_encode_table_config(tm->miss, tm->eviction,
                                                  tm->vacancy, ofp_version);

        if (tm->eviction_flags != UINT32_MAX) {
            ofpprop_put_u32(b, OFPTMPT14_EVICTION, tm->eviction_flags);
        }
        if (tm->vacancy == OFPUTIL_TABLE_VACANCY_ON) {
            struct ofp14_table_mod_prop_vacancy *otv;

            otv = ofpprop_put_zeros(b, OFPTMPT14_VACANCY, sizeof *otv);
            otv->vacancy_down = tm->table_vacancy.vacancy_down;
            otv->vacancy_up = tm->table_vacancy.vacancy_up;
        }
        break;
    }
    default:
        OVS_NOT_REACHED();
    }

    return b;
}

/* ofputil_role_request */

/* Decodes the OpenFlow "role request" or "role reply" message in '*oh' into
 * an abstract form in '*rr'.  Returns 0 if successful, otherwise an
 * OFPERR_* value.
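 *
 * For example (a sketch; 'oh' is assumed to be a received role message):
 *
 *     struct ofputil_role_request rr;
 *     enum ofperr error = ofputil_decode_role_message(oh, &rr);
 *     if (!error && rr.role == OFPCR12_ROLE_MASTER && rr.have_generation_id) {
 *         ... check rr.generation_id against the stored generation ...
 *     }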
*/ enum ofperr ofputil_decode_role_message(const struct ofp_header *oh, struct ofputil_role_request *rr) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT12_ROLE_REQUEST || raw == OFPRAW_OFPT12_ROLE_REPLY) { const struct ofp12_role_request *orr = b.msg; if (orr->role != htonl(OFPCR12_ROLE_NOCHANGE) && orr->role != htonl(OFPCR12_ROLE_EQUAL) && orr->role != htonl(OFPCR12_ROLE_MASTER) && orr->role != htonl(OFPCR12_ROLE_SLAVE)) { return OFPERR_OFPRRFC_BAD_ROLE; } rr->role = ntohl(orr->role); if (raw == OFPRAW_OFPT12_ROLE_REQUEST ? orr->role == htonl(OFPCR12_ROLE_NOCHANGE) : orr->generation_id == OVS_BE64_MAX) { rr->have_generation_id = false; rr->generation_id = 0; } else { rr->have_generation_id = true; rr->generation_id = ntohll(orr->generation_id); } } else if (raw == OFPRAW_NXT_ROLE_REQUEST || raw == OFPRAW_NXT_ROLE_REPLY) { const struct nx_role_request *nrr = b.msg; BUILD_ASSERT(NX_ROLE_OTHER + 1 == OFPCR12_ROLE_EQUAL); BUILD_ASSERT(NX_ROLE_MASTER + 1 == OFPCR12_ROLE_MASTER); BUILD_ASSERT(NX_ROLE_SLAVE + 1 == OFPCR12_ROLE_SLAVE); if (nrr->role != htonl(NX_ROLE_OTHER) && nrr->role != htonl(NX_ROLE_MASTER) && nrr->role != htonl(NX_ROLE_SLAVE)) { return OFPERR_OFPRRFC_BAD_ROLE; } rr->role = ntohl(nrr->role) + 1; rr->have_generation_id = false; rr->generation_id = 0; } else { OVS_NOT_REACHED(); } return 0; } /* Returns an encoded form of a role reply suitable for the "request" in a * buffer owned by the caller. */ struct ofpbuf * ofputil_encode_role_reply(const struct ofp_header *request, const struct ofputil_role_request *rr) { struct ofpbuf *buf; enum ofpraw raw; raw = ofpraw_decode_assert(request); if (raw == OFPRAW_OFPT12_ROLE_REQUEST) { struct ofp12_role_request *orr; buf = ofpraw_alloc_reply(OFPRAW_OFPT12_ROLE_REPLY, request, 0); orr = ofpbuf_put_zeros(buf, sizeof *orr); orr->role = htonl(rr->role); orr->generation_id = htonll(rr->have_generation_id ? rr->generation_id : UINT64_MAX); } else if (raw == OFPRAW_NXT_ROLE_REQUEST) { struct nx_role_request *nrr; BUILD_ASSERT(NX_ROLE_OTHER == OFPCR12_ROLE_EQUAL - 1); BUILD_ASSERT(NX_ROLE_MASTER == OFPCR12_ROLE_MASTER - 1); BUILD_ASSERT(NX_ROLE_SLAVE == OFPCR12_ROLE_SLAVE - 1); buf = ofpraw_alloc_reply(OFPRAW_NXT_ROLE_REPLY, request, 0); nrr = ofpbuf_put_zeros(buf, sizeof *nrr); nrr->role = htonl(rr->role - 1); } else { OVS_NOT_REACHED(); } return buf; } /* Encodes "role status" message 'status' for sending in the given * 'protocol'. Returns the role status message, if 'protocol' supports them, * otherwise a null pointer. 
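 *
 * Sketch of a caller (illustrative; OFPCRR_MASTER_REQUEST is one of the
 * standard OpenFlow 1.4 role status reasons):
 *
 *     struct ofputil_role_status status = {
 *         .role = OFPCR12_ROLE_SLAVE,
 *         .reason = OFPCRR_MASTER_REQUEST,
 *         .generation_id = generation_id,
 *     };
 *     struct ofpbuf *msg = ofputil_encode_role_status(&status, protocol);
 *     if (msg) {
 *         ... send 'msg' ...   (msg is NULL before OpenFlow 1.4)
 *     }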
*/ struct ofpbuf * ofputil_encode_role_status(const struct ofputil_role_status *status, enum ofputil_protocol protocol) { enum ofp_version version; version = ofputil_protocol_to_ofp_version(protocol); if (version >= OFP14_VERSION) { struct ofp14_role_status *rstatus; struct ofpbuf *buf; buf = ofpraw_alloc_xid(OFPRAW_OFPT14_ROLE_STATUS, version, htonl(0), 0); rstatus = ofpbuf_put_zeros(buf, sizeof *rstatus); rstatus->role = htonl(status->role); rstatus->reason = status->reason; rstatus->generation_id = htonll(status->generation_id); return buf; } else { return NULL; } } enum ofperr ofputil_decode_role_status(const struct ofp_header *oh, struct ofputil_role_status *rs) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); ovs_assert(raw == OFPRAW_OFPT14_ROLE_STATUS); const struct ofp14_role_status *r = b.msg; if (r->role != htonl(OFPCR12_ROLE_NOCHANGE) && r->role != htonl(OFPCR12_ROLE_EQUAL) && r->role != htonl(OFPCR12_ROLE_MASTER) && r->role != htonl(OFPCR12_ROLE_SLAVE)) { return OFPERR_OFPRRFC_BAD_ROLE; } rs->role = ntohl(r->role); rs->generation_id = ntohll(r->generation_id); rs->reason = r->reason; return 0; } /* Encodes 'rf' according to 'protocol', and returns the encoded message. * 'protocol' must be for OpenFlow 1.4 or later. */ struct ofpbuf * ofputil_encode_requestforward(const struct ofputil_requestforward *rf, enum ofputil_protocol protocol) { enum ofp_version ofp_version = ofputil_protocol_to_ofp_version(protocol); struct ofpbuf *inner; switch (rf->reason) { case OFPRFR_GROUP_MOD: inner = ofputil_encode_group_mod(ofp_version, rf->group_mod); break; case OFPRFR_METER_MOD: inner = ofputil_encode_meter_mod(ofp_version, rf->meter_mod); break; case OFPRFR_N_REASONS: default: OVS_NOT_REACHED(); } struct ofp_header *inner_oh = inner->data; inner_oh->xid = rf->xid; inner_oh->length = htons(inner->size); struct ofpbuf *outer = ofpraw_alloc_xid(OFPRAW_OFPT14_REQUESTFORWARD, ofp_version, htonl(0), inner->size); ofpbuf_put(outer, inner->data, inner->size); ofpbuf_delete(inner); return outer; } /* Decodes OFPT_REQUESTFORWARD message 'outer'. On success, puts the decoded * form into '*rf' and returns 0, and the caller is later responsible for * freeing the content of 'rf', with ofputil_destroy_requestforward(rf). On * failure, returns an ofperr and '*rf' is indeterminate. */ enum ofperr ofputil_decode_requestforward(const struct ofp_header *outer, struct ofputil_requestforward *rf) { struct ofpbuf b = ofpbuf_const_initializer(outer, ntohs(outer->length)); /* Skip past outer message. */ enum ofpraw outer_raw = ofpraw_pull_assert(&b); ovs_assert(outer_raw == OFPRAW_OFPT14_REQUESTFORWARD); /* Validate inner message. */ if (b.size < sizeof(struct ofp_header)) { return OFPERR_OFPBFC_MSG_BAD_LEN; } const struct ofp_header *inner = b.data; unsigned int inner_len = ntohs(inner->length); if (inner_len < sizeof(struct ofp_header) || inner_len > b.size) { return OFPERR_OFPBFC_MSG_BAD_LEN; } if (inner->version != outer->version) { return OFPERR_OFPBRC_BAD_VERSION; } /* Parse inner message. 
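 * The inner message carries a complete OpenFlow header of its own;
 * ofptype_decode() below validates it, and only group mod and meter mod
 * messages are accepted, mirroring the OFPRFR_* reasons.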
*/ enum ofptype type; enum ofperr error = ofptype_decode(&type, inner); if (error) { return error; } rf->xid = inner->xid; if (type == OFPTYPE_GROUP_MOD) { rf->reason = OFPRFR_GROUP_MOD; rf->group_mod = xmalloc(sizeof *rf->group_mod); error = ofputil_decode_group_mod(inner, rf->group_mod); if (error) { free(rf->group_mod); return error; } } else if (type == OFPTYPE_METER_MOD) { rf->reason = OFPRFR_METER_MOD; rf->meter_mod = xmalloc(sizeof *rf->meter_mod); ofpbuf_init(&rf->bands, 64); error = ofputil_decode_meter_mod(inner, rf->meter_mod, &rf->bands); if (error) { free(rf->meter_mod); ofpbuf_uninit(&rf->bands); return error; } } else { return OFPERR_OFPBFC_MSG_UNSUP; } return 0; } /* Frees the content of 'rf', which should have been initialized through a * successful call to ofputil_decode_requestforward(). */ void ofputil_destroy_requestforward(struct ofputil_requestforward *rf) { if (!rf) { return; } switch (rf->reason) { case OFPRFR_GROUP_MOD: ofputil_uninit_group_mod(rf->group_mod); free(rf->group_mod); break; case OFPRFR_METER_MOD: ofpbuf_uninit(&rf->bands); free(rf->meter_mod); break; case OFPRFR_N_REASONS: OVS_NOT_REACHED(); } } /* Table stats. */ /* OpenFlow 1.0 and 1.1 don't distinguish between a field that cannot be * matched and a field that must be wildcarded. This function returns a bitmap * that contains both kinds of fields. */ static struct mf_bitmap wild_or_nonmatchable_fields(const struct ofputil_table_features *features) { struct mf_bitmap wc = features->match; bitmap_not(wc.bm, MFF_N_IDS); bitmap_or(wc.bm, features->wildcard.bm, MFF_N_IDS); return wc; } struct ofp10_wc_map { enum ofp10_flow_wildcards wc10; enum mf_field_id mf; }; static const struct ofp10_wc_map ofp10_wc_map[] = { { OFPFW10_IN_PORT, MFF_IN_PORT }, { OFPFW10_DL_VLAN, MFF_VLAN_VID }, { OFPFW10_DL_SRC, MFF_ETH_SRC }, { OFPFW10_DL_DST, MFF_ETH_DST}, { OFPFW10_DL_TYPE, MFF_ETH_TYPE }, { OFPFW10_NW_PROTO, MFF_IP_PROTO }, { OFPFW10_TP_SRC, MFF_TCP_SRC }, { OFPFW10_TP_DST, MFF_TCP_DST }, { OFPFW10_NW_SRC_MASK, MFF_IPV4_SRC }, { OFPFW10_NW_DST_MASK, MFF_IPV4_DST }, { OFPFW10_DL_VLAN_PCP, MFF_VLAN_PCP }, { OFPFW10_NW_TOS, MFF_IP_DSCP }, }; static ovs_be32 mf_bitmap_to_of10(const struct mf_bitmap *fields) { const struct ofp10_wc_map *p; uint32_t wc10 = 0; for (p = ofp10_wc_map; p < &ofp10_wc_map[ARRAY_SIZE(ofp10_wc_map)]; p++) { if (bitmap_is_set(fields->bm, p->mf)) { wc10 |= p->wc10; } } return htonl(wc10); } static struct mf_bitmap mf_bitmap_from_of10(ovs_be32 wc10_) { struct mf_bitmap fields = MF_BITMAP_INITIALIZER; const struct ofp10_wc_map *p; uint32_t wc10 = ntohl(wc10_); for (p = ofp10_wc_map; p < &ofp10_wc_map[ARRAY_SIZE(ofp10_wc_map)]; p++) { if (wc10 & p->wc10) { bitmap_set1(fields.bm, p->mf); } } return fields; } static void ofputil_put_ofp10_table_stats(const struct ofputil_table_stats *stats, const struct ofputil_table_features *features, struct ofpbuf *buf) { struct mf_bitmap wc = wild_or_nonmatchable_fields(features); struct ofp10_table_stats *out; out = ofpbuf_put_zeros(buf, sizeof *out); out->table_id = features->table_id; ovs_strlcpy(out->name, features->name, sizeof out->name); out->wildcards = mf_bitmap_to_of10(&wc); out->max_entries = htonl(features->max_entries); out->active_count = htonl(stats->active_count); put_32aligned_be64(&out->lookup_count, htonll(stats->lookup_count)); put_32aligned_be64(&out->matched_count, htonll(stats->matched_count)); } struct ofp11_wc_map { enum ofp11_flow_match_fields wc11; enum mf_field_id mf; }; static const struct ofp11_wc_map ofp11_wc_map[] = { { 
OFPFMF11_IN_PORT, MFF_IN_PORT }, { OFPFMF11_DL_VLAN, MFF_VLAN_VID }, { OFPFMF11_DL_VLAN_PCP, MFF_VLAN_PCP }, { OFPFMF11_DL_TYPE, MFF_ETH_TYPE }, { OFPFMF11_NW_TOS, MFF_IP_DSCP }, { OFPFMF11_NW_PROTO, MFF_IP_PROTO }, { OFPFMF11_TP_SRC, MFF_TCP_SRC }, { OFPFMF11_TP_DST, MFF_TCP_DST }, { OFPFMF11_MPLS_LABEL, MFF_MPLS_LABEL }, { OFPFMF11_MPLS_TC, MFF_MPLS_TC }, /* I don't know what OFPFMF11_TYPE means. */ { OFPFMF11_DL_SRC, MFF_ETH_SRC }, { OFPFMF11_DL_DST, MFF_ETH_DST }, { OFPFMF11_NW_SRC, MFF_IPV4_SRC }, { OFPFMF11_NW_DST, MFF_IPV4_DST }, { OFPFMF11_METADATA, MFF_METADATA }, }; static ovs_be32 mf_bitmap_to_of11(const struct mf_bitmap *fields) { const struct ofp11_wc_map *p; uint32_t wc11 = 0; for (p = ofp11_wc_map; p < &ofp11_wc_map[ARRAY_SIZE(ofp11_wc_map)]; p++) { if (bitmap_is_set(fields->bm, p->mf)) { wc11 |= p->wc11; } } return htonl(wc11); } static struct mf_bitmap mf_bitmap_from_of11(ovs_be32 wc11_) { struct mf_bitmap fields = MF_BITMAP_INITIALIZER; const struct ofp11_wc_map *p; uint32_t wc11 = ntohl(wc11_); for (p = ofp11_wc_map; p < &ofp11_wc_map[ARRAY_SIZE(ofp11_wc_map)]; p++) { if (wc11 & p->wc11) { bitmap_set1(fields.bm, p->mf); } } return fields; } static void ofputil_put_ofp11_table_stats(const struct ofputil_table_stats *stats, const struct ofputil_table_features *features, struct ofpbuf *buf) { struct mf_bitmap wc = wild_or_nonmatchable_fields(features); struct ofp11_table_stats *out; out = ofpbuf_put_zeros(buf, sizeof *out); out->table_id = features->table_id; ovs_strlcpy(out->name, features->name, sizeof out->name); out->wildcards = mf_bitmap_to_of11(&wc); out->match = mf_bitmap_to_of11(&features->match); out->instructions = ovsinst_bitmap_to_openflow( features->nonmiss.instructions, OFP11_VERSION); out->write_actions = ofpact_bitmap_to_openflow( features->nonmiss.write.ofpacts, OFP11_VERSION); out->apply_actions = ofpact_bitmap_to_openflow( features->nonmiss.apply.ofpacts, OFP11_VERSION); out->config = htonl(features->miss_config); out->max_entries = htonl(features->max_entries); out->active_count = htonl(stats->active_count); out->lookup_count = htonll(stats->lookup_count); out->matched_count = htonll(stats->matched_count); } static void ofputil_put_ofp12_table_stats(const struct ofputil_table_stats *stats, const struct ofputil_table_features *features, struct ofpbuf *buf) { struct ofp12_table_stats *out; out = ofpbuf_put_zeros(buf, sizeof *out); out->table_id = features->table_id; ovs_strlcpy(out->name, features->name, sizeof out->name); out->match = oxm_bitmap_from_mf_bitmap(&features->match, OFP12_VERSION); out->wildcards = oxm_bitmap_from_mf_bitmap(&features->wildcard, OFP12_VERSION); out->write_actions = ofpact_bitmap_to_openflow( features->nonmiss.write.ofpacts, OFP12_VERSION); out->apply_actions = ofpact_bitmap_to_openflow( features->nonmiss.apply.ofpacts, OFP12_VERSION); out->write_setfields = oxm_bitmap_from_mf_bitmap( &features->nonmiss.write.set_fields, OFP12_VERSION); out->apply_setfields = oxm_bitmap_from_mf_bitmap( &features->nonmiss.apply.set_fields, OFP12_VERSION); out->metadata_match = features->metadata_match; out->metadata_write = features->metadata_write; out->instructions = ovsinst_bitmap_to_openflow( features->nonmiss.instructions, OFP12_VERSION); out->config = ofputil_encode_table_config(features->miss_config, OFPUTIL_TABLE_EVICTION_DEFAULT, OFPUTIL_TABLE_VACANCY_DEFAULT, OFP12_VERSION); out->max_entries = htonl(features->max_entries); out->active_count = htonl(stats->active_count); out->lookup_count = htonll(stats->lookup_count); out->matched_count 
= htonll(stats->matched_count);
}

static void
ofputil_put_ofp13_table_stats(const struct ofputil_table_stats *stats,
                              struct ofpbuf *buf)
{
    struct ofp13_table_stats *out;

    out = ofpbuf_put_zeros(buf, sizeof *out);
    out->table_id = stats->table_id;
    out->active_count = htonl(stats->active_count);
    out->lookup_count = htonll(stats->lookup_count);
    out->matched_count = htonll(stats->matched_count);
}

struct ofpbuf *
ofputil_encode_table_stats_reply(const struct ofp_header *request)
{
    return ofpraw_alloc_stats_reply(request, 0);
}

void
ofputil_append_table_stats_reply(struct ofpbuf *reply,
                                 const struct ofputil_table_stats *stats,
                                 const struct ofputil_table_features *features)
{
    struct ofp_header *oh = reply->header;

    ovs_assert(stats->table_id == features->table_id);

    switch ((enum ofp_version) oh->version) {
    case OFP10_VERSION:
        ofputil_put_ofp10_table_stats(stats, features, reply);
        break;

    case OFP11_VERSION:
        ofputil_put_ofp11_table_stats(stats, features, reply);
        break;

    case OFP12_VERSION:
        ofputil_put_ofp12_table_stats(stats, features, reply);
        break;

    case OFP13_VERSION:
    case OFP14_VERSION:
    case OFP15_VERSION:
    case OFP16_VERSION:
        ofputil_put_ofp13_table_stats(stats, reply);
        break;

    default:
        OVS_NOT_REACHED();
    }
}

static int
ofputil_decode_ofp10_table_stats(struct ofpbuf *msg,
                                 struct ofputil_table_stats *stats,
                                 struct ofputil_table_features *features)
{
    struct ofp10_table_stats *ots;

    ots = ofpbuf_try_pull(msg, sizeof *ots);
    if (!ots) {
        return OFPERR_OFPBRC_BAD_LEN;
    }

    features->table_id = ots->table_id;
    ovs_strlcpy(features->name, ots->name, sizeof features->name);
    features->max_entries = ntohl(ots->max_entries);
    features->match = features->wildcard = mf_bitmap_from_of10(ots->wildcards);

    stats->table_id = ots->table_id;
    stats->active_count = ntohl(ots->active_count);
    stats->lookup_count = ntohll(get_32aligned_be64(&ots->lookup_count));
    stats->matched_count = ntohll(get_32aligned_be64(&ots->matched_count));

    return 0;
}

static int
ofputil_decode_ofp11_table_stats(struct ofpbuf *msg,
                                 struct ofputil_table_stats *stats,
                                 struct ofputil_table_features *features)
{
    struct ofp11_table_stats *ots;

    ots = ofpbuf_try_pull(msg, sizeof *ots);
    if (!ots) {
        return OFPERR_OFPBRC_BAD_LEN;
    }

    features->table_id = ots->table_id;
    ovs_strlcpy(features->name, ots->name, sizeof features->name);
    features->max_entries = ntohl(ots->max_entries);
    features->nonmiss.instructions = ovsinst_bitmap_from_openflow(
        ots->instructions, OFP11_VERSION);
    features->nonmiss.write.ofpacts = ofpact_bitmap_from_openflow(
        ots->write_actions, OFP11_VERSION);
    features->nonmiss.apply.ofpacts = ofpact_bitmap_from_openflow(
        ots->apply_actions, OFP11_VERSION);
    features->miss = features->nonmiss;
    features->miss_config = ofputil_decode_table_miss(ots->config,
                                                      OFP11_VERSION);
    features->match = mf_bitmap_from_of11(ots->match);
    features->wildcard = mf_bitmap_from_of11(ots->wildcards);
    bitmap_or(features->match.bm, features->wildcard.bm, MFF_N_IDS);

    stats->table_id = ots->table_id;
    stats->active_count = ntohl(ots->active_count);
    stats->lookup_count = ntohll(ots->lookup_count);
    stats->matched_count = ntohll(ots->matched_count);

    return 0;
}

static int
ofputil_decode_ofp12_table_stats(struct ofpbuf *msg,
                                 struct ofputil_table_stats *stats,
                                 struct ofputil_table_features *features)
{
    struct ofp12_table_stats *ots;

    ots = ofpbuf_try_pull(msg, sizeof *ots);
    if (!ots) {
        return OFPERR_OFPBRC_BAD_LEN;
    }

    features->table_id = ots->table_id;
    ovs_strlcpy(features->name, ots->name, sizeof features->name);
    features->metadata_match = ots->metadata_match;
    features->metadata_write =
ots->metadata_write; features->miss_config = ofputil_decode_table_miss(ots->config, OFP12_VERSION); features->max_entries = ntohl(ots->max_entries); features->nonmiss.instructions = ovsinst_bitmap_from_openflow( ots->instructions, OFP12_VERSION); features->nonmiss.write.ofpacts = ofpact_bitmap_from_openflow( ots->write_actions, OFP12_VERSION); features->nonmiss.apply.ofpacts = ofpact_bitmap_from_openflow( ots->apply_actions, OFP12_VERSION); features->nonmiss.write.set_fields = oxm_bitmap_to_mf_bitmap( ots->write_setfields, OFP12_VERSION); features->nonmiss.apply.set_fields = oxm_bitmap_to_mf_bitmap( ots->apply_setfields, OFP12_VERSION); features->miss = features->nonmiss; features->match = oxm_bitmap_to_mf_bitmap(ots->match, OFP12_VERSION); features->wildcard = oxm_bitmap_to_mf_bitmap(ots->wildcards, OFP12_VERSION); bitmap_or(features->match.bm, features->wildcard.bm, MFF_N_IDS); stats->table_id = ots->table_id; stats->active_count = ntohl(ots->active_count); stats->lookup_count = ntohll(ots->lookup_count); stats->matched_count = ntohll(ots->matched_count); return 0; } static int ofputil_decode_ofp13_table_stats(struct ofpbuf *msg, struct ofputil_table_stats *stats, struct ofputil_table_features *features) { struct ofp13_table_stats *ots; ots = ofpbuf_try_pull(msg, sizeof *ots); if (!ots) { return OFPERR_OFPBRC_BAD_LEN; } features->table_id = ots->table_id; stats->table_id = ots->table_id; stats->active_count = ntohl(ots->active_count); stats->lookup_count = ntohll(ots->lookup_count); stats->matched_count = ntohll(ots->matched_count); return 0; } int ofputil_decode_table_stats_reply(struct ofpbuf *msg, struct ofputil_table_stats *stats, struct ofputil_table_features *features) { const struct ofp_header *oh; if (!msg->header) { ofpraw_pull_assert(msg); } oh = msg->header; if (!msg->size) { return EOF; } memset(stats, 0, sizeof *stats); memset(features, 0, sizeof *features); features->supports_eviction = -1; features->supports_vacancy_events = -1; switch ((enum ofp_version) oh->version) { case OFP10_VERSION: return ofputil_decode_ofp10_table_stats(msg, stats, features); case OFP11_VERSION: return ofputil_decode_ofp11_table_stats(msg, stats, features); case OFP12_VERSION: return ofputil_decode_ofp12_table_stats(msg, stats, features); case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: return ofputil_decode_ofp13_table_stats(msg, stats, features); default: OVS_NOT_REACHED(); } } /* ofputil_flow_monitor_request */ /* Converts an NXST_FLOW_MONITOR request in 'msg' into an abstract * ofputil_flow_monitor_request in 'rq'. * * Multiple NXST_FLOW_MONITOR requests can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the requests. The caller must initially leave 'msg''s layer * pointers null and not modify them between calls. * * Returns 0 if successful, EOF if no requests were left in this 'msg', * otherwise an OFPERR_* value. 
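 *
 * Typical iteration over all requests in 'msg' (a sketch, mirroring the
 * contract above):
 *
 *     struct ofputil_flow_monitor_request rq;
 *     int retval;
 *     while (!(retval = ofputil_decode_flow_monitor_request(&rq, msg))) {
 *         ... install a monitor described by 'rq' ...
 *     }
 *     if (retval != EOF) {
 *         ... 'retval' is an OFPERR_* value; send an error reply ...
 *     }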
*/ int ofputil_decode_flow_monitor_request(struct ofputil_flow_monitor_request *rq, struct ofpbuf *msg) { struct nx_flow_monitor_request *nfmr; uint16_t flags; if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } nfmr = ofpbuf_try_pull(msg, sizeof *nfmr); if (!nfmr) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW_MONITOR request has %"PRIu32" " "leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } flags = ntohs(nfmr->flags); if (!(flags & (NXFMF_ADD | NXFMF_DELETE | NXFMF_MODIFY)) || flags & ~(NXFMF_INITIAL | NXFMF_ADD | NXFMF_DELETE | NXFMF_MODIFY | NXFMF_ACTIONS | NXFMF_OWN)) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW_MONITOR has bad flags %#"PRIx16, flags); return OFPERR_OFPMOFC_BAD_FLAGS; } if (!is_all_zeros(nfmr->zeros, sizeof nfmr->zeros)) { return OFPERR_NXBRC_MUST_BE_ZERO; } rq->id = ntohl(nfmr->id); rq->flags = flags; rq->out_port = u16_to_ofp(ntohs(nfmr->out_port)); rq->table_id = nfmr->table_id; return nx_pull_match(msg, ntohs(nfmr->match_len), &rq->match, NULL, NULL, NULL, NULL); } void ofputil_append_flow_monitor_request( const struct ofputil_flow_monitor_request *rq, struct ofpbuf *msg) { struct nx_flow_monitor_request *nfmr; size_t start_ofs; int match_len; if (!msg->size) { ofpraw_put(OFPRAW_NXST_FLOW_MONITOR_REQUEST, OFP10_VERSION, msg); } start_ofs = msg->size; ofpbuf_put_zeros(msg, sizeof *nfmr); match_len = nx_put_match(msg, &rq->match, htonll(0), htonll(0)); nfmr = ofpbuf_at_assert(msg, start_ofs, sizeof *nfmr); nfmr->id = htonl(rq->id); nfmr->flags = htons(rq->flags); nfmr->out_port = htons(ofp_to_u16(rq->out_port)); nfmr->match_len = htons(match_len); nfmr->table_id = rq->table_id; } /* Converts an NXST_FLOW_MONITOR reply (also known as a flow update) in 'msg' * into an abstract ofputil_flow_update in 'update'. The caller must have * initialized update->match to point to space allocated for a match. * * Uses 'ofpacts' to store the abstract OFPACT_* version of the update's * actions (except for NXFME_ABBREV, which never includes actions). The caller * must initialize 'ofpacts' and retains ownership of it. 'update->ofpacts' * will point into the 'ofpacts' buffer. * * Multiple flow updates can be packed into a single OpenFlow message. Calling * this function multiple times for a single 'msg' iterates through the * updates. The caller must initially leave 'msg''s layer pointers null and * not modify them between calls. * * Returns 0 if successful, EOF if no updates were left in this 'msg', * otherwise an OFPERR_* value. 
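 *
 * Caller sketch (illustrative; 'msg' is assumed to hold a just-received
 * reply with its layer pointers still null):
 *
 *     struct ofputil_flow_update update;
 *     struct ofpbuf ofpacts;
 *     int retval;
 *
 *     ofpbuf_init(&ofpacts, 64);
 *     while (!(retval = ofputil_decode_flow_update(&update, msg, &ofpacts))) {
 *         ... consume 'update' before the next call clears 'ofpacts' ...
 *     }
 *     ofpbuf_uninit(&ofpacts);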
*/ int ofputil_decode_flow_update(struct ofputil_flow_update *update, struct ofpbuf *msg, struct ofpbuf *ofpacts) { struct nx_flow_update_header *nfuh; unsigned int length; struct ofp_header *oh; if (!msg->header) { ofpraw_pull_assert(msg); } ofpbuf_clear(ofpacts); if (!msg->size) { return EOF; } if (msg->size < sizeof(struct nx_flow_update_header)) { goto bad_len; } oh = msg->header; nfuh = msg->data; update->event = ntohs(nfuh->event); length = ntohs(nfuh->length); if (length > msg->size || length % 8) { goto bad_len; } if (update->event == NXFME_ABBREV) { struct nx_flow_update_abbrev *nfua; if (length != sizeof *nfua) { goto bad_len; } nfua = ofpbuf_pull(msg, sizeof *nfua); update->xid = nfua->xid; return 0; } else if (update->event == NXFME_ADDED || update->event == NXFME_DELETED || update->event == NXFME_MODIFIED) { struct nx_flow_update_full *nfuf; unsigned int actions_len; unsigned int match_len; enum ofperr error; if (length < sizeof *nfuf) { goto bad_len; } nfuf = ofpbuf_pull(msg, sizeof *nfuf); match_len = ntohs(nfuf->match_len); if (sizeof *nfuf + match_len > length) { goto bad_len; } update->reason = ntohs(nfuf->reason); update->idle_timeout = ntohs(nfuf->idle_timeout); update->hard_timeout = ntohs(nfuf->hard_timeout); update->table_id = nfuf->table_id; update->cookie = nfuf->cookie; update->priority = ntohs(nfuf->priority); error = nx_pull_match(msg, match_len, &update->match, NULL, NULL, NULL, NULL); if (error) { return error; } actions_len = length - sizeof *nfuf - ROUND_UP(match_len, 8); error = ofpacts_pull_openflow_actions(msg, actions_len, oh->version, NULL, NULL, ofpacts); if (error) { return error; } update->ofpacts = ofpacts->data; update->ofpacts_len = ofpacts->size; return 0; } else { VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW_MONITOR reply has bad event %"PRIu16, ntohs(nfuh->event)); return OFPERR_NXBRC_FM_BAD_EVENT; } bad_len: VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW_MONITOR reply has %"PRIu32" " "leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } uint32_t ofputil_decode_flow_monitor_cancel(const struct ofp_header *oh) { const struct nx_flow_monitor_cancel *cancel = ofpmsg_body(oh); return ntohl(cancel->id); } struct ofpbuf * ofputil_encode_flow_monitor_cancel(uint32_t id) { struct nx_flow_monitor_cancel *nfmc; struct ofpbuf *msg; msg = ofpraw_alloc(OFPRAW_NXT_FLOW_MONITOR_CANCEL, OFP10_VERSION, 0); nfmc = ofpbuf_put_uninit(msg, sizeof *nfmc); nfmc->id = htonl(id); return msg; } void ofputil_start_flow_update(struct ovs_list *replies) { struct ofpbuf *msg; msg = ofpraw_alloc_xid(OFPRAW_NXST_FLOW_MONITOR_REPLY, OFP10_VERSION, htonl(0), 1024); ovs_list_init(replies); ovs_list_push_back(replies, &msg->list_node); } void ofputil_append_flow_update(const struct ofputil_flow_update *update, struct ovs_list *replies, const struct tun_table *tun_table) { struct ofputil_flow_update *update_ = CONST_CAST(struct ofputil_flow_update *, update); const struct tun_table *orig_tun_table; enum ofp_version version = ofpmp_version(replies); struct nx_flow_update_header *nfuh; struct ofpbuf *msg; size_t start_ofs; orig_tun_table = update->match.flow.tunnel.metadata.tab; update_->match.flow.tunnel.metadata.tab = tun_table; msg = ofpbuf_from_list(ovs_list_back(replies)); start_ofs = msg->size; if (update->event == NXFME_ABBREV) { struct nx_flow_update_abbrev *nfua; nfua = ofpbuf_put_zeros(msg, sizeof *nfua); nfua->xid = update->xid; } else { struct nx_flow_update_full *nfuf; int match_len; ofpbuf_put_zeros(msg, sizeof *nfuf); match_len = nx_put_match(msg, &update->match, 
htonll(0), htonll(0)); ofpacts_put_openflow_actions(update->ofpacts, update->ofpacts_len, msg, version); nfuf = ofpbuf_at_assert(msg, start_ofs, sizeof *nfuf); nfuf->reason = htons(update->reason); nfuf->priority = htons(update->priority); nfuf->idle_timeout = htons(update->idle_timeout); nfuf->hard_timeout = htons(update->hard_timeout); nfuf->match_len = htons(match_len); nfuf->table_id = update->table_id; nfuf->cookie = update->cookie; } nfuh = ofpbuf_at_assert(msg, start_ofs, sizeof *nfuh); nfuh->length = htons(msg->size - start_ofs); nfuh->event = htons(update->event); ofpmp_postappend(replies, start_ofs); update_->match.flow.tunnel.metadata.tab = orig_tun_table; } struct ofpbuf * ofputil_encode_packet_out(const struct ofputil_packet_out *po, enum ofputil_protocol protocol) { enum ofp_version ofp_version = ofputil_protocol_to_ofp_version(protocol); struct ofpbuf *msg; size_t size; size = po->ofpacts_len; if (po->buffer_id == UINT32_MAX) { size += po->packet_len; } switch (ofp_version) { case OFP10_VERSION: { struct ofp10_packet_out *opo; size_t actions_ofs; msg = ofpraw_alloc(OFPRAW_OFPT10_PACKET_OUT, OFP10_VERSION, size); ofpbuf_put_zeros(msg, sizeof *opo); actions_ofs = msg->size; ofpacts_put_openflow_actions(po->ofpacts, po->ofpacts_len, msg, ofp_version); opo = msg->msg; opo->buffer_id = htonl(po->buffer_id); opo->in_port = htons(ofp_to_u16(po->in_port)); opo->actions_len = htons(msg->size - actions_ofs); break; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp11_packet_out *opo; size_t len; msg = ofpraw_alloc(OFPRAW_OFPT11_PACKET_OUT, ofp_version, size); ofpbuf_put_zeros(msg, sizeof *opo); len = ofpacts_put_openflow_actions(po->ofpacts, po->ofpacts_len, msg, ofp_version); opo = msg->msg; opo->buffer_id = htonl(po->buffer_id); opo->in_port = ofputil_port_to_ofp11(po->in_port); opo->actions_len = htons(len); break; } default: OVS_NOT_REACHED(); } if (po->buffer_id == UINT32_MAX) { ofpbuf_put(msg, po->packet, po->packet_len); } ofpmsg_update_length(msg); return msg; } /* Creates and returns an OFPT_ECHO_REQUEST message with an empty payload. */ struct ofpbuf * make_echo_request(enum ofp_version ofp_version) { return ofpraw_alloc_xid(OFPRAW_OFPT_ECHO_REQUEST, ofp_version, htonl(0), 0); } /* Creates and returns an OFPT_ECHO_REPLY message matching the * OFPT_ECHO_REQUEST message in 'rq'. 
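 *
 * A connection handler might use it like this (sketch; the send function is
 * a hypothetical stand-in for whatever transport the caller uses):
 *
 *     if (type == OFPTYPE_ECHO_REQUEST) {
 *         send_openflow_message(conn, make_echo_reply(oh));
 *     }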
*/ struct ofpbuf * make_echo_reply(const struct ofp_header *rq) { struct ofpbuf rq_buf = ofpbuf_const_initializer(rq, ntohs(rq->length)); ofpraw_pull_assert(&rq_buf); struct ofpbuf *reply = ofpraw_alloc_reply(OFPRAW_OFPT_ECHO_REPLY, rq, rq_buf.size); ofpbuf_put(reply, rq_buf.data, rq_buf.size); return reply; } struct ofpbuf * ofputil_encode_barrier_request(enum ofp_version ofp_version) { enum ofpraw type; switch (ofp_version) { case OFP16_VERSION: case OFP15_VERSION: case OFP14_VERSION: case OFP13_VERSION: case OFP12_VERSION: case OFP11_VERSION: type = OFPRAW_OFPT11_BARRIER_REQUEST; break; case OFP10_VERSION: type = OFPRAW_OFPT10_BARRIER_REQUEST; break; default: OVS_NOT_REACHED(); } return ofpraw_alloc(type, ofp_version, 0); } const char * ofputil_frag_handling_to_string(enum ofputil_frag_handling frag) { switch (frag) { case OFPUTIL_FRAG_NORMAL: return "normal"; case OFPUTIL_FRAG_DROP: return "drop"; case OFPUTIL_FRAG_REASM: return "reassemble"; case OFPUTIL_FRAG_NX_MATCH: return "nx-match"; } OVS_NOT_REACHED(); } bool ofputil_frag_handling_from_string(const char *s, enum ofputil_frag_handling *frag) { if (!strcasecmp(s, "normal")) { *frag = OFPUTIL_FRAG_NORMAL; } else if (!strcasecmp(s, "drop")) { *frag = OFPUTIL_FRAG_DROP; } else if (!strcasecmp(s, "reassemble")) { *frag = OFPUTIL_FRAG_REASM; } else if (!strcasecmp(s, "nx-match")) { *frag = OFPUTIL_FRAG_NX_MATCH; } else { return false; } return true; } /* Converts the OpenFlow 1.1+ port number 'ofp11_port' into an OpenFlow 1.0 * port number and stores the latter in '*ofp10_port', for the purpose of * decoding OpenFlow 1.1+ protocol messages. Returns 0 if successful, * otherwise an OFPERR_* number. On error, stores OFPP_NONE in '*ofp10_port'. * * See the definition of OFP11_MAX for an explanation of the mapping. */ enum ofperr ofputil_port_from_ofp11(ovs_be32 ofp11_port, ofp_port_t *ofp10_port) { uint32_t ofp11_port_h = ntohl(ofp11_port); if (ofp11_port_h < ofp_to_u16(OFPP_MAX)) { *ofp10_port = u16_to_ofp(ofp11_port_h); return 0; } else if (ofp11_port_h >= ofp11_to_u32(OFPP11_MAX)) { *ofp10_port = u16_to_ofp(ofp11_port_h - OFPP11_OFFSET); return 0; } else { *ofp10_port = OFPP_NONE; VLOG_WARN_RL(&bad_ofmsg_rl, "port %"PRIu32" is outside the supported " "range 0 through %d or 0x%"PRIx32" through 0x%"PRIx32, ofp11_port_h, ofp_to_u16(OFPP_MAX) - 1, ofp11_to_u32(OFPP11_MAX), UINT32_MAX); return OFPERR_OFPBAC_BAD_OUT_PORT; } } /* Returns the OpenFlow 1.1+ port number equivalent to the OpenFlow 1.0 port * number 'ofp10_port', for encoding OpenFlow 1.1+ protocol messages. * * See the definition of OFP11_MAX for an explanation of the mapping. */ ovs_be32 ofputil_port_to_ofp11(ofp_port_t ofp10_port) { return htonl(ofp_to_u16(ofp10_port) < ofp_to_u16(OFPP_MAX) ? ofp_to_u16(ofp10_port) : ofp_to_u16(ofp10_port) + OFPP11_OFFSET); } #define OFPUTIL_NAMED_PORTS \ OFPUTIL_NAMED_PORT(IN_PORT) \ OFPUTIL_NAMED_PORT(TABLE) \ OFPUTIL_NAMED_PORT(NORMAL) \ OFPUTIL_NAMED_PORT(FLOOD) \ OFPUTIL_NAMED_PORT(ALL) \ OFPUTIL_NAMED_PORT(CONTROLLER) \ OFPUTIL_NAMED_PORT(LOCAL) \ OFPUTIL_NAMED_PORT(ANY) \ OFPUTIL_NAMED_PORT(UNSET) /* For backwards compatibility, so that "none" is recognized as OFPP_ANY */ #define OFPUTIL_NAMED_PORTS_WITH_NONE \ OFPUTIL_NAMED_PORTS \ OFPUTIL_NAMED_PORT(NONE) /* Stores the port number represented by 's' into '*portp'. 's' may be an * integer or, for reserved ports, the standard OpenFlow name for the port * (e.g. "LOCAL"). * * Returns true if successful, false if 's' is not a valid OpenFlow port number * or name. 
The caller should issue an error message in this case, because * this function usually does not. (This gives the caller an opportunity to * look up the port name another way, e.g. by contacting the switch and listing * the names of all its ports). * * This function accepts OpenFlow 1.0 port numbers. It also accepts a subset * of OpenFlow 1.1+ port numbers, mapping those port numbers into the 16-bit * range as described in include/openflow/openflow-1.1.h. */ bool ofputil_port_from_string(const char *s, ofp_port_t *portp) { unsigned int port32; /* int is at least 32 bits wide. */ if (*s == '-') { VLOG_WARN("Negative value %s is not a valid port number.", s); return false; } *portp = 0; if (str_to_uint(s, 10, &port32)) { if (port32 < ofp_to_u16(OFPP_MAX)) { /* Pass. */ } else if (port32 < ofp_to_u16(OFPP_FIRST_RESV)) { VLOG_WARN("port %u is a reserved OF1.0 port number that will " "be translated to %u when talking to an OF1.1 or " "later controller", port32, port32 + OFPP11_OFFSET); } else if (port32 <= ofp_to_u16(OFPP_LAST_RESV)) { char name[OFP_MAX_PORT_NAME_LEN]; ofputil_port_to_string(u16_to_ofp(port32), name, sizeof name); VLOG_WARN_ONCE("referring to port %s as %"PRIu32" is deprecated " "for compatibility with OpenFlow 1.1 and later", name, port32); } else if (port32 < ofp11_to_u32(OFPP11_MAX)) { VLOG_WARN("port %u is outside the supported range 0 through " "%"PRIx16" or 0x%x through 0x%"PRIx32, port32, UINT16_MAX, ofp11_to_u32(OFPP11_MAX), UINT32_MAX); return false; } else { port32 -= OFPP11_OFFSET; } *portp = u16_to_ofp(port32); return true; } else { struct pair { const char *name; ofp_port_t value; }; static const struct pair pairs[] = { #define OFPUTIL_NAMED_PORT(NAME) {#NAME, OFPP_##NAME}, OFPUTIL_NAMED_PORTS_WITH_NONE #undef OFPUTIL_NAMED_PORT }; const struct pair *p; for (p = pairs; p < &pairs[ARRAY_SIZE(pairs)]; p++) { if (!strcasecmp(s, p->name)) { *portp = p->value; return true; } } return false; } } /* Appends to 's' a string representation of the OpenFlow port number 'port'. * Most ports' string representation is just the port number, but for special * ports, e.g. OFPP_LOCAL, it is the name, e.g. "LOCAL". */ void ofputil_format_port(ofp_port_t port, struct ds *s) { char name[OFP_MAX_PORT_NAME_LEN]; ofputil_port_to_string(port, name, sizeof name); ds_put_cstr(s, name); } /* Puts in the 'bufsize' byte in 'namebuf' a null-terminated string * representation of OpenFlow port number 'port'. Most ports are represented * as just the port number, but special ports, e.g. OFPP_LOCAL, are represented * by name, e.g. "LOCAL". */ void ofputil_port_to_string(ofp_port_t port, char namebuf[OFP_MAX_PORT_NAME_LEN], size_t bufsize) { switch (port) { #define OFPUTIL_NAMED_PORT(NAME) \ case OFPP_##NAME: \ ovs_strlcpy(namebuf, #NAME, bufsize); \ break; OFPUTIL_NAMED_PORTS #undef OFPUTIL_NAMED_PORT default: snprintf(namebuf, bufsize, "%"PRIu32, port); break; } } /* Stores the group id represented by 's' into '*group_idp'. 's' may be an * integer or, for reserved group IDs, the standard OpenFlow name for the group * (either "ANY" or "ALL"). * * Returns true if successful, false if 's' is not a valid OpenFlow group ID or * name. */ bool ofputil_group_from_string(const char *s, uint32_t *group_idp) { if (!strcasecmp(s, "any")) { *group_idp = OFPG_ANY; } else if (!strcasecmp(s, "all")) { *group_idp = OFPG_ALL; } else if (!str_to_uint(s, 10, group_idp)) { VLOG_WARN("%s is not a valid group ID. 
(Valid group IDs are " "32-bit nonnegative integers or the keywords ANY or " "ALL.)", s); return false; } return true; } /* Appends to 's' a string representation of the OpenFlow group ID 'group_id'. * Most groups' string representation is just the number, but for special * groups, e.g. OFPG_ALL, it is the name, e.g. "ALL". */ void ofputil_format_group(uint32_t group_id, struct ds *s) { char name[MAX_GROUP_NAME_LEN]; ofputil_group_to_string(group_id, name, sizeof name); ds_put_cstr(s, name); } /* Puts in the 'bufsize' byte in 'namebuf' a null-terminated string * representation of OpenFlow group ID 'group_id'. Most group are represented * as just their number, but special groups, e.g. OFPG_ALL, are represented * by name, e.g. "ALL". */ void ofputil_group_to_string(uint32_t group_id, char namebuf[MAX_GROUP_NAME_LEN + 1], size_t bufsize) { switch (group_id) { case OFPG_ALL: ovs_strlcpy(namebuf, "ALL", bufsize); break; case OFPG_ANY: ovs_strlcpy(namebuf, "ANY", bufsize); break; default: snprintf(namebuf, bufsize, "%"PRIu32, group_id); break; } } /* Given a buffer 'b' that contains an array of OpenFlow ports of type * 'ofp_version', tries to pull the first element from the array. If * successful, initializes '*pp' with an abstract representation of the * port and returns 0. If no ports remain to be decoded, returns EOF. * On an error, returns a positive OFPERR_* value. */ int ofputil_pull_phy_port(enum ofp_version ofp_version, struct ofpbuf *b, struct ofputil_phy_port *pp) { memset(pp, 0, sizeof *pp); switch (ofp_version) { case OFP10_VERSION: { const struct ofp10_phy_port *opp = ofpbuf_try_pull(b, sizeof *opp); return opp ? ofputil_decode_ofp10_phy_port(pp, opp) : EOF; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: { const struct ofp11_port *op = ofpbuf_try_pull(b, sizeof *op); return op ? ofputil_decode_ofp11_port(pp, op) : EOF; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: return b->size ? ofputil_pull_ofp14_port(pp, b) : EOF; default: OVS_NOT_REACHED(); } } static void ofputil_normalize_match__(struct match *match, bool may_log) { enum { MAY_NW_ADDR = 1 << 0, /* nw_src, nw_dst */ MAY_TP_ADDR = 1 << 1, /* tp_src, tp_dst */ MAY_NW_PROTO = 1 << 2, /* nw_proto */ MAY_IPVx = 1 << 3, /* tos, frag, ttl */ MAY_ARP_SHA = 1 << 4, /* arp_sha */ MAY_ARP_THA = 1 << 5, /* arp_tha */ MAY_IPV6 = 1 << 6, /* ipv6_src, ipv6_dst, ipv6_label */ MAY_ND_TARGET = 1 << 7, /* nd_target */ MAY_MPLS = 1 << 8, /* mpls label and tc */ } may_match; struct flow_wildcards wc; /* Figure out what fields may be matched. 
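 * The logic is layered: dl_type selects the meaningful network-layer fields,
 * and for IPv4/IPv6 the nw_proto value then selects the transport-layer and
 * ND fields.  Anything not enabled in 'may_match' is wildcarded below.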
*/
    if (match->flow.dl_type == htons(ETH_TYPE_IP)) {
        may_match = MAY_NW_PROTO | MAY_IPVx | MAY_NW_ADDR;
        if (match->flow.nw_proto == IPPROTO_TCP ||
            match->flow.nw_proto == IPPROTO_UDP ||
            match->flow.nw_proto == IPPROTO_SCTP ||
            match->flow.nw_proto == IPPROTO_ICMP) {
            may_match |= MAY_TP_ADDR;
        }
    } else if (match->flow.dl_type == htons(ETH_TYPE_IPV6)) {
        may_match = MAY_NW_PROTO | MAY_IPVx | MAY_IPV6;
        if (match->flow.nw_proto == IPPROTO_TCP ||
            match->flow.nw_proto == IPPROTO_UDP ||
            match->flow.nw_proto == IPPROTO_SCTP) {
            may_match |= MAY_TP_ADDR;
        } else if (match->flow.nw_proto == IPPROTO_ICMPV6) {
            may_match |= MAY_TP_ADDR;
            if (match->flow.tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
                may_match |= MAY_ND_TARGET | MAY_ARP_SHA;
            } else if (match->flow.tp_src == htons(ND_NEIGHBOR_ADVERT)) {
                may_match |= MAY_ND_TARGET | MAY_ARP_THA;
            }
        }
    } else if (match->flow.dl_type == htons(ETH_TYPE_ARP) ||
               match->flow.dl_type == htons(ETH_TYPE_RARP)) {
        may_match = MAY_NW_PROTO | MAY_NW_ADDR | MAY_ARP_SHA | MAY_ARP_THA;
    } else if (eth_type_mpls(match->flow.dl_type)) {
        may_match = MAY_MPLS;
    } else {
        may_match = 0;
    }

    /* Clear the fields that may not be matched. */
    wc = match->wc;
    if (!(may_match & MAY_NW_ADDR)) {
        wc.masks.nw_src = wc.masks.nw_dst = htonl(0);
    }
    if (!(may_match & MAY_TP_ADDR)) {
        wc.masks.tp_src = wc.masks.tp_dst = htons(0);
    }
    if (!(may_match & MAY_NW_PROTO)) {
        wc.masks.nw_proto = 0;
    }
    if (!(may_match & MAY_IPVx)) {
        wc.masks.nw_tos = 0;
        wc.masks.nw_ttl = 0;
    }
    if (!(may_match & MAY_ARP_SHA)) {
        WC_UNMASK_FIELD(&wc, arp_sha);
    }
    if (!(may_match & MAY_ARP_THA)) {
        WC_UNMASK_FIELD(&wc, arp_tha);
    }
    if (!(may_match & MAY_IPV6)) {
        wc.masks.ipv6_src = wc.masks.ipv6_dst = in6addr_any;
        wc.masks.ipv6_label = htonl(0);
    }
    if (!(may_match & MAY_ND_TARGET)) {
        wc.masks.nd_target = in6addr_any;
    }
    if (!(may_match & MAY_MPLS)) {
        memset(wc.masks.mpls_lse, 0, sizeof wc.masks.mpls_lse);
    }

    /* Log any changes. */
    if (!flow_wildcards_equal(&wc, &match->wc)) {
        bool log = may_log && !VLOG_DROP_INFO(&bad_ofmsg_rl);
        char *pre = log ? match_to_string(match, OFP_DEFAULT_PRIORITY) : NULL;

        match->wc = wc;
        match_zero_wildcarded_fields(match);

        if (log) {
            char *post = match_to_string(match, OFP_DEFAULT_PRIORITY);
            VLOG_INFO("normalization changed ofp_match, details:");
            VLOG_INFO(" pre: %s", pre);
            VLOG_INFO("post: %s", post);
            free(pre);
            free(post);
        }
    }
}

/* "Normalizes" the wildcards in 'match'.  That means:
 *
 *    1. If the type of level N is known, then only the valid fields for that
 *       level may be specified.  For example, ARP does not have a TOS field,
 *       so nw_tos must be wildcarded if 'match' specifies an ARP flow.
 *       Similarly, IPv4 does not have any IPv6 addresses, so ipv6_src and
 *       ipv6_dst (and other fields) must be wildcarded if 'match' specifies
 *       an IPv4 flow.
 *
 *    2. If the type of level N is not known (or not understood by Open
 *       vSwitch), then no fields at all for that level may be specified.  For
 *       example, if 'match' specifies an IP protocol that Open vSwitch does
 *       not recognize, then the L4 fields tp_src and tp_dst must be
 *       wildcarded.
 *
 * If this function changes 'match', it logs a rate-limited informational
 * message. */
void
ofputil_normalize_match(struct match *match)
{
    ofputil_normalize_match__(match, true);
}

/* Same as ofputil_normalize_match() without the logging.  Thus, this function
 * is suitable for a program's internal use, whereas ofputil_normalize_match()
 * makes sense for use on flows received from elsewhere (so that a bug in the
 * program that sent them can be reported and corrected).
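 *
 * Example of the effect (a sketch, not from the original source): a match on
 * dl_type=ARP that also sets an IP TTL will have the TTL quietly wildcarded,
 * since ARP has no TTL field:
 *
 *     struct match m;
 *     match_init_catchall(&m);
 *     match_set_dl_type(&m, htons(ETH_TYPE_ARP));
 *     match_set_nw_ttl(&m, 64);
 *     ofputil_normalize_match_quiet(&m);    (m.wc.masks.nw_ttl is now 0)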
*/ void ofputil_normalize_match_quiet(struct match *match) { ofputil_normalize_match__(match, false); } static size_t parse_value(const char *s, const char *delimiters) { size_t n = 0; /* Iterate until we reach a delimiter. * * strchr(s, '\0') returns s+strlen(s), so this test handles the null * terminator at the end of 's'. */ while (!strchr(delimiters, s[n])) { if (s[n] == '(') { int level = 0; do { switch (s[n]) { case '\0': return n; case '(': level++; break; case ')': level--; break; } n++; } while (level > 0); } else { n++; } } return n; } /* Parses a key or a key-value pair from '*stringp'. * * On success: Stores the key into '*keyp'. Stores the value, if present, into * '*valuep', otherwise an empty string. Advances '*stringp' past the end of * the key-value pair, preparing it for another call. '*keyp' and '*valuep' * are substrings of '*stringp' created by replacing some of its bytes by null * terminators. Returns true. * * If '*stringp' is just white space or commas, sets '*keyp' and '*valuep' to * NULL and returns false. */ bool ofputil_parse_key_value(char **stringp, char **keyp, char **valuep) { /* Skip white space and delimiters. If that brings us to the end of the * input string, we are done and there are no more key-value pairs. */ *stringp += strspn(*stringp, ", \t\r\n"); if (**stringp == '\0') { *keyp = *valuep = NULL; return false; } /* Extract the key and the delimiter that ends the key-value pair or begins * the value. Advance the input position past the key and delimiter. */ char *key = *stringp; size_t key_len = strcspn(key, ":=(, \t\r\n"); char key_delim = key[key_len]; key[key_len] = '\0'; *stringp += key_len + (key_delim != '\0'); /* Figure out what delimiter ends the value: * * - If key_delim is ":" or "=", the value extends until white space * or a comma. * * - If key_delim is "(", the value extends until ")". * * If there is no value, we are done. */ const char *value_delims; if (key_delim == ':' || key_delim == '=') { value_delims = ", \t\r\n"; } else if (key_delim == '(') { value_delims = ")"; } else { *keyp = key; *valuep = key + key_len; /* Empty string. */ return true; } /* Extract the value. Advance the input position past the value and * delimiter. */ char *value = *stringp; size_t value_len = parse_value(value, value_delims); char value_delim = value[value_len]; value[value_len] = '\0'; *stringp += value_len + (value_delim != '\0'); *keyp = key; *valuep = value; return true; } /* Encode a dump ports request for 'port', the encoded message * will be for OpenFlow version 'ofp_version'. Returns message * as a struct ofpbuf. 
Returns encoded message on success, NULL on error */ struct ofpbuf * ofputil_encode_dump_ports_request(enum ofp_version ofp_version, ofp_port_t port) { struct ofpbuf *request; switch (ofp_version) { case OFP10_VERSION: { struct ofp10_port_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST10_PORT_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->port_no = htons(ofp_to_u16(port)); break; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp11_port_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST11_PORT_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->port_no = ofputil_port_to_ofp11(port); break; } default: OVS_NOT_REACHED(); } return request; } static void ofputil_port_stats_to_ofp10(const struct ofputil_port_stats *ops, struct ofp10_port_stats *ps10) { ps10->port_no = htons(ofp_to_u16(ops->port_no)); memset(ps10->pad, 0, sizeof ps10->pad); put_32aligned_be64(&ps10->rx_packets, htonll(ops->stats.rx_packets)); put_32aligned_be64(&ps10->tx_packets, htonll(ops->stats.tx_packets)); put_32aligned_be64(&ps10->rx_bytes, htonll(ops->stats.rx_bytes)); put_32aligned_be64(&ps10->tx_bytes, htonll(ops->stats.tx_bytes)); put_32aligned_be64(&ps10->rx_dropped, htonll(ops->stats.rx_dropped)); put_32aligned_be64(&ps10->tx_dropped, htonll(ops->stats.tx_dropped)); put_32aligned_be64(&ps10->rx_errors, htonll(ops->stats.rx_errors)); put_32aligned_be64(&ps10->tx_errors, htonll(ops->stats.tx_errors)); put_32aligned_be64(&ps10->rx_frame_err, htonll(ops->stats.rx_frame_errors)); put_32aligned_be64(&ps10->rx_over_err, htonll(ops->stats.rx_over_errors)); put_32aligned_be64(&ps10->rx_crc_err, htonll(ops->stats.rx_crc_errors)); put_32aligned_be64(&ps10->collisions, htonll(ops->stats.collisions)); } static void ofputil_port_stats_to_ofp11(const struct ofputil_port_stats *ops, struct ofp11_port_stats *ps11) { ps11->port_no = ofputil_port_to_ofp11(ops->port_no); memset(ps11->pad, 0, sizeof ps11->pad); ps11->rx_packets = htonll(ops->stats.rx_packets); ps11->tx_packets = htonll(ops->stats.tx_packets); ps11->rx_bytes = htonll(ops->stats.rx_bytes); ps11->tx_bytes = htonll(ops->stats.tx_bytes); ps11->rx_dropped = htonll(ops->stats.rx_dropped); ps11->tx_dropped = htonll(ops->stats.tx_dropped); ps11->rx_errors = htonll(ops->stats.rx_errors); ps11->tx_errors = htonll(ops->stats.tx_errors); ps11->rx_frame_err = htonll(ops->stats.rx_frame_errors); ps11->rx_over_err = htonll(ops->stats.rx_over_errors); ps11->rx_crc_err = htonll(ops->stats.rx_crc_errors); ps11->collisions = htonll(ops->stats.collisions); } static void ofputil_port_stats_to_ofp13(const struct ofputil_port_stats *ops, struct ofp13_port_stats *ps13) { ofputil_port_stats_to_ofp11(ops, &ps13->ps); ps13->duration_sec = htonl(ops->duration_sec); ps13->duration_nsec = htonl(ops->duration_nsec); } static void ofputil_append_ofp14_port_stats(const struct ofputil_port_stats *ops, struct ovs_list *replies) { struct ofp14_port_stats_prop_ethernet *eth; struct intel_port_stats_rfc2819 *stats_rfc2819; struct ofp14_port_stats *ps14; struct ofpbuf *reply; reply = ofpmp_reserve(replies, sizeof *ps14 + sizeof *eth + sizeof *stats_rfc2819); ps14 = ofpbuf_put_uninit(reply, sizeof *ps14); ps14->length = htons(sizeof *ps14 + sizeof *eth + sizeof *stats_rfc2819); memset(ps14->pad, 0, sizeof ps14->pad); ps14->port_no = ofputil_port_to_ofp11(ops->port_no); ps14->duration_sec = htonl(ops->duration_sec); ps14->duration_nsec = 
htonl(ops->duration_nsec); ps14->rx_packets = htonll(ops->stats.rx_packets); ps14->tx_packets = htonll(ops->stats.tx_packets); ps14->rx_bytes = htonll(ops->stats.rx_bytes); ps14->tx_bytes = htonll(ops->stats.tx_bytes); ps14->rx_dropped = htonll(ops->stats.rx_dropped); ps14->tx_dropped = htonll(ops->stats.tx_dropped); ps14->rx_errors = htonll(ops->stats.rx_errors); ps14->tx_errors = htonll(ops->stats.tx_errors); eth = ofpprop_put_zeros(reply, OFPPSPT14_ETHERNET, sizeof *eth); eth->rx_frame_err = htonll(ops->stats.rx_frame_errors); eth->rx_over_err = htonll(ops->stats.rx_over_errors); eth->rx_crc_err = htonll(ops->stats.rx_crc_errors); eth->collisions = htonll(ops->stats.collisions); uint64_t prop_type = OFPPROP_EXP(INTEL_VENDOR_ID, INTEL_PORT_STATS_RFC2819); stats_rfc2819 = ofpprop_put_zeros(reply, prop_type, sizeof *stats_rfc2819); memset(stats_rfc2819->pad, 0, sizeof stats_rfc2819->pad); stats_rfc2819->rx_1_to_64_packets = htonll(ops->stats.rx_1_to_64_packets); stats_rfc2819->rx_65_to_127_packets = htonll(ops->stats.rx_65_to_127_packets); stats_rfc2819->rx_128_to_255_packets = htonll(ops->stats.rx_128_to_255_packets); stats_rfc2819->rx_256_to_511_packets = htonll(ops->stats.rx_256_to_511_packets); stats_rfc2819->rx_512_to_1023_packets = htonll(ops->stats.rx_512_to_1023_packets); stats_rfc2819->rx_1024_to_1522_packets = htonll(ops->stats.rx_1024_to_1522_packets); stats_rfc2819->rx_1523_to_max_packets = htonll(ops->stats.rx_1523_to_max_packets); stats_rfc2819->tx_1_to_64_packets = htonll(ops->stats.tx_1_to_64_packets); stats_rfc2819->tx_65_to_127_packets = htonll(ops->stats.tx_65_to_127_packets); stats_rfc2819->tx_128_to_255_packets = htonll(ops->stats.tx_128_to_255_packets); stats_rfc2819->tx_256_to_511_packets = htonll(ops->stats.tx_256_to_511_packets); stats_rfc2819->tx_512_to_1023_packets = htonll(ops->stats.tx_512_to_1023_packets); stats_rfc2819->tx_1024_to_1522_packets = htonll(ops->stats.tx_1024_to_1522_packets); stats_rfc2819->tx_1523_to_max_packets = htonll(ops->stats.tx_1523_to_max_packets); stats_rfc2819->tx_multicast_packets = htonll(ops->stats.tx_multicast_packets); stats_rfc2819->rx_broadcast_packets = htonll(ops->stats.rx_broadcast_packets); stats_rfc2819->tx_broadcast_packets = htonll(ops->stats.tx_broadcast_packets); stats_rfc2819->rx_undersized_errors = htonll(ops->stats.rx_undersized_errors); stats_rfc2819->rx_oversize_errors = htonll(ops->stats.rx_oversize_errors); stats_rfc2819->rx_fragmented_errors = htonll(ops->stats.rx_fragmented_errors); stats_rfc2819->rx_jabber_errors = htonll(ops->stats.rx_jabber_errors); } /* Encode a ports stat for 'ops' and append it to 'replies'. 
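 *
 * Sketch of building a port stats reply (assumes 'replies' was started with
 * ofpmp_init() from the controller's request):
 *
 *     struct ofputil_port_stats ops;
 *     memset(&ops, 0, sizeof ops);
 *     ops.port_no = port_no;
 *     ... fill in ops.stats and the duration fields ...
 *     ofputil_append_port_stat(&replies, &ops);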
*/ void ofputil_append_port_stat(struct ovs_list *replies, const struct ofputil_port_stats *ops) { switch (ofpmp_version(replies)) { case OFP13_VERSION: { struct ofp13_port_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_port_stats_to_ofp13(ops, reply); break; } case OFP12_VERSION: case OFP11_VERSION: { struct ofp11_port_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_port_stats_to_ofp11(ops, reply); break; } case OFP10_VERSION: { struct ofp10_port_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_port_stats_to_ofp10(ops, reply); break; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: ofputil_append_ofp14_port_stats(ops, replies); break; default: OVS_NOT_REACHED(); } } static enum ofperr ofputil_port_stats_from_ofp10(struct ofputil_port_stats *ops, const struct ofp10_port_stats *ps10) { ops->port_no = u16_to_ofp(ntohs(ps10->port_no)); ops->stats.rx_packets = ntohll(get_32aligned_be64(&ps10->rx_packets)); ops->stats.tx_packets = ntohll(get_32aligned_be64(&ps10->tx_packets)); ops->stats.rx_bytes = ntohll(get_32aligned_be64(&ps10->rx_bytes)); ops->stats.tx_bytes = ntohll(get_32aligned_be64(&ps10->tx_bytes)); ops->stats.rx_dropped = ntohll(get_32aligned_be64(&ps10->rx_dropped)); ops->stats.tx_dropped = ntohll(get_32aligned_be64(&ps10->tx_dropped)); ops->stats.rx_errors = ntohll(get_32aligned_be64(&ps10->rx_errors)); ops->stats.tx_errors = ntohll(get_32aligned_be64(&ps10->tx_errors)); ops->stats.rx_frame_errors = ntohll(get_32aligned_be64(&ps10->rx_frame_err)); ops->stats.rx_over_errors = ntohll(get_32aligned_be64(&ps10->rx_over_err)); ops->stats.rx_crc_errors = ntohll(get_32aligned_be64(&ps10->rx_crc_err)); ops->stats.collisions = ntohll(get_32aligned_be64(&ps10->collisions)); ops->duration_sec = ops->duration_nsec = UINT32_MAX; return 0; } static enum ofperr ofputil_port_stats_from_ofp11(struct ofputil_port_stats *ops, const struct ofp11_port_stats *ps11) { enum ofperr error; error = ofputil_port_from_ofp11(ps11->port_no, &ops->port_no); if (error) { return error; } ops->stats.rx_packets = ntohll(ps11->rx_packets); ops->stats.tx_packets = ntohll(ps11->tx_packets); ops->stats.rx_bytes = ntohll(ps11->rx_bytes); ops->stats.tx_bytes = ntohll(ps11->tx_bytes); ops->stats.rx_dropped = ntohll(ps11->rx_dropped); ops->stats.tx_dropped = ntohll(ps11->tx_dropped); ops->stats.rx_errors = ntohll(ps11->rx_errors); ops->stats.tx_errors = ntohll(ps11->tx_errors); ops->stats.rx_frame_errors = ntohll(ps11->rx_frame_err); ops->stats.rx_over_errors = ntohll(ps11->rx_over_err); ops->stats.rx_crc_errors = ntohll(ps11->rx_crc_err); ops->stats.collisions = ntohll(ps11->collisions); ops->duration_sec = ops->duration_nsec = UINT32_MAX; return 0; } static enum ofperr ofputil_port_stats_from_ofp13(struct ofputil_port_stats *ops, const struct ofp13_port_stats *ps13) { enum ofperr error = ofputil_port_stats_from_ofp11(ops, &ps13->ps); if (!error) { ops->duration_sec = ntohl(ps13->duration_sec); ops->duration_nsec = ntohl(ps13->duration_nsec); } return error; } static enum ofperr parse_ofp14_port_stats_ethernet_property(const struct ofpbuf *payload, struct ofputil_port_stats *ops) { const struct ofp14_port_stats_prop_ethernet *eth = payload->data; if (payload->size != sizeof *eth) { return OFPERR_OFPBPC_BAD_LEN; } ops->stats.rx_frame_errors = ntohll(eth->rx_frame_err); ops->stats.rx_over_errors = ntohll(eth->rx_over_err); ops->stats.rx_crc_errors = ntohll(eth->rx_crc_err); ops->stats.collisions = ntohll(eth->collisions); return 0; } static enum ofperr 
parse_intel_port_stats_rfc2819_property(const struct ofpbuf *payload, struct ofputil_port_stats *ops) { const struct intel_port_stats_rfc2819 *rfc2819 = payload->data; if (payload->size != sizeof *rfc2819) { return OFPERR_OFPBPC_BAD_LEN; } ops->stats.rx_1_to_64_packets = ntohll(rfc2819->rx_1_to_64_packets); ops->stats.rx_65_to_127_packets = ntohll(rfc2819->rx_65_to_127_packets); ops->stats.rx_128_to_255_packets = ntohll(rfc2819->rx_128_to_255_packets); ops->stats.rx_256_to_511_packets = ntohll(rfc2819->rx_256_to_511_packets); ops->stats.rx_512_to_1023_packets = ntohll(rfc2819->rx_512_to_1023_packets); ops->stats.rx_1024_to_1522_packets = ntohll(rfc2819->rx_1024_to_1522_packets); ops->stats.rx_1523_to_max_packets = ntohll(rfc2819->rx_1523_to_max_packets); ops->stats.tx_1_to_64_packets = ntohll(rfc2819->tx_1_to_64_packets); ops->stats.tx_65_to_127_packets = ntohll(rfc2819->tx_65_to_127_packets); ops->stats.tx_128_to_255_packets = ntohll(rfc2819->tx_128_to_255_packets); ops->stats.tx_256_to_511_packets = ntohll(rfc2819->tx_256_to_511_packets); ops->stats.tx_512_to_1023_packets = ntohll(rfc2819->tx_512_to_1023_packets); ops->stats.tx_1024_to_1522_packets = ntohll(rfc2819->tx_1024_to_1522_packets); ops->stats.tx_1523_to_max_packets = ntohll(rfc2819->tx_1523_to_max_packets); ops->stats.tx_multicast_packets = ntohll(rfc2819->tx_multicast_packets); ops->stats.rx_broadcast_packets = ntohll(rfc2819->rx_broadcast_packets); ops->stats.tx_broadcast_packets = ntohll(rfc2819->tx_broadcast_packets); ops->stats.rx_undersized_errors = ntohll(rfc2819->rx_undersized_errors); ops->stats.rx_oversize_errors = ntohll(rfc2819->rx_oversize_errors); ops->stats.rx_fragmented_errors = ntohll(rfc2819->rx_fragmented_errors); ops->stats.rx_jabber_errors = ntohll(rfc2819->rx_jabber_errors); return 0; } static enum ofperr parse_intel_port_stats_property(const struct ofpbuf *payload, uint32_t exp_type, struct ofputil_port_stats *ops) { enum ofperr error; switch (exp_type) { case INTEL_PORT_STATS_RFC2819: error = parse_intel_port_stats_rfc2819_property(payload, ops); break; default: error = OFPERR_OFPBPC_BAD_EXP_TYPE; break; } return error; } static enum ofperr ofputil_pull_ofp14_port_stats(struct ofputil_port_stats *ops, struct ofpbuf *msg) { const struct ofp14_port_stats *ps14 = ofpbuf_try_pull(msg, sizeof *ps14); if (!ps14) { return OFPERR_OFPBRC_BAD_LEN; } size_t len = ntohs(ps14->length); if (len < sizeof *ps14 || len - sizeof *ps14 > msg->size) { return OFPERR_OFPBRC_BAD_LEN; } len -= sizeof *ps14; enum ofperr error = ofputil_port_from_ofp11(ps14->port_no, &ops->port_no); if (error) { return error; } ops->duration_sec = ntohl(ps14->duration_sec); ops->duration_nsec = ntohl(ps14->duration_nsec); ops->stats.rx_packets = ntohll(ps14->rx_packets); ops->stats.tx_packets = ntohll(ps14->tx_packets); ops->stats.rx_bytes = ntohll(ps14->rx_bytes); ops->stats.tx_bytes = ntohll(ps14->tx_bytes); ops->stats.rx_dropped = ntohll(ps14->rx_dropped); ops->stats.tx_dropped = ntohll(ps14->tx_dropped); ops->stats.rx_errors = ntohll(ps14->rx_errors); ops->stats.tx_errors = ntohll(ps14->tx_errors); struct ofpbuf properties = ofpbuf_const_initializer(ofpbuf_pull(msg, len), len); while (properties.size > 0) { struct ofpbuf payload; enum ofperr error; uint64_t type = 0; error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case OFPPSPT14_ETHERNET: error = parse_ofp14_port_stats_ethernet_property(&payload, ops); break; case OFPPROP_EXP(INTEL_VENDOR_ID, INTEL_PORT_STATS_RFC2819): error = 
parse_intel_port_stats_property(&payload, INTEL_PORT_STATS_RFC2819, ops); break; default: error = OFPPROP_UNKNOWN(true, "port stats", type); break; } if (error) { return error; } } return 0; } /* Returns the number of port stats elements in OFPTYPE_PORT_STATS_REPLY * message 'oh'. */ size_t ofputil_count_port_stats(const struct ofp_header *oh) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); for (size_t n = 0; ; n++) { struct ofputil_port_stats ps; if (ofputil_decode_port_stats(&ps, &b)) { return n; } } } /* Converts an OFPST_PORT_STATS reply in 'msg' into an abstract * ofputil_port_stats in 'ps'. * * Multiple OFPST_PORT_STATS replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. The caller must initially leave 'msg''s layer pointers * null and not modify them between calls. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_port_stats(struct ofputil_port_stats *ps, struct ofpbuf *msg) { enum ofperr error; enum ofpraw raw; memset(&(ps->stats), 0xFF, sizeof (ps->stats)); error = (msg->header ? ofpraw_decode(&raw, msg->header) : ofpraw_pull(&raw, msg)); if (error) { return error; } if (!msg->size) { return EOF; } else if (raw == OFPRAW_OFPST14_PORT_REPLY) { return ofputil_pull_ofp14_port_stats(ps, msg); } else if (raw == OFPRAW_OFPST13_PORT_REPLY) { const struct ofp13_port_stats *ps13; ps13 = ofpbuf_try_pull(msg, sizeof *ps13); if (!ps13) { goto bad_len; } return ofputil_port_stats_from_ofp13(ps, ps13); } else if (raw == OFPRAW_OFPST11_PORT_REPLY) { const struct ofp11_port_stats *ps11; ps11 = ofpbuf_try_pull(msg, sizeof *ps11); if (!ps11) { goto bad_len; } return ofputil_port_stats_from_ofp11(ps, ps11); } else if (raw == OFPRAW_OFPST10_PORT_REPLY) { const struct ofp10_port_stats *ps10; ps10 = ofpbuf_try_pull(msg, sizeof *ps10); if (!ps10) { goto bad_len; } return ofputil_port_stats_from_ofp10(ps, ps10); } else { OVS_NOT_REACHED(); } bad_len: VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_PORT reply has %"PRIu32" leftover " "bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } /* Parse a port status request message into a 16 bit OpenFlow 1.0 * port number and stores the latter in '*ofp10_port'. * Returns 0 if successful, otherwise an OFPERR_* number. 
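 *
 * Illustrative usage sketch (an added example, not upstream text; 'oh'
 * is the request's ofp_header):
 *
 *     ofp_port_t port_no;
 *     enum ofperr error = ofputil_decode_port_stats_request(oh, &port_no);
 *     if (!error) {
 *         ...collect stats for 'port_no', where OFPP_ANY means all ports...
 *     }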
*/
enum ofperr
ofputil_decode_port_stats_request(const struct ofp_header *request,
                                  ofp_port_t *ofp10_port)
{
    switch ((enum ofp_version)request->version) {
    case OFP16_VERSION:
    case OFP15_VERSION:
    case OFP14_VERSION:
    case OFP13_VERSION:
    case OFP12_VERSION:
    case OFP11_VERSION: {
        const struct ofp11_port_stats_request *psr11 = ofpmsg_body(request);
        return ofputil_port_from_ofp11(psr11->port_no, ofp10_port);
    }

    case OFP10_VERSION: {
        const struct ofp10_port_stats_request *psr10 = ofpmsg_body(request);
        *ofp10_port = u16_to_ofp(ntohs(psr10->port_no));
        return 0;
    }

    default:
        OVS_NOT_REACHED();
    }
}

static void
ofputil_ipfix_stats_to_reply(const struct ofputil_ipfix_stats *ois,
                             struct nx_ipfix_stats_reply *reply)
{
    reply->collector_set_id = htonl(ois->collector_set_id);
    reply->total_flows = htonll(ois->total_flows);
    reply->current_flows = htonll(ois->current_flows);
    reply->pkts = htonll(ois->pkts);
    reply->ipv4_pkts = htonll(ois->ipv4_pkts);
    reply->ipv6_pkts = htonll(ois->ipv6_pkts);
    reply->error_pkts = htonll(ois->error_pkts);
    reply->ipv4_error_pkts = htonll(ois->ipv4_error_pkts);
    reply->ipv6_error_pkts = htonll(ois->ipv6_error_pkts);
    reply->tx_pkts = htonll(ois->tx_pkts);
    reply->tx_errors = htonll(ois->tx_errors);
    memset(reply->pad, 0, sizeof reply->pad);
}

/* Encode an ipfix stat for 'ois' and append it to 'replies'. */
void
ofputil_append_ipfix_stat(struct ovs_list *replies,
                          const struct ofputil_ipfix_stats *ois)
{
    struct nx_ipfix_stats_reply *reply = ofpmp_append(replies, sizeof *reply);
    ofputil_ipfix_stats_to_reply(ois, reply);
}

static enum ofperr
ofputil_ipfix_stats_from_nx(struct ofputil_ipfix_stats *is,
                            const struct nx_ipfix_stats_reply *reply)
{
    is->collector_set_id = ntohl(reply->collector_set_id);
    is->total_flows = ntohll(reply->total_flows);
    is->current_flows = ntohll(reply->current_flows);
    is->pkts = ntohll(reply->pkts);
    is->ipv4_pkts = ntohll(reply->ipv4_pkts);
    is->ipv6_pkts = ntohll(reply->ipv6_pkts);
    is->error_pkts = ntohll(reply->error_pkts);
    is->ipv4_error_pkts = ntohll(reply->ipv4_error_pkts);
    is->ipv6_error_pkts = ntohll(reply->ipv6_error_pkts);
    is->tx_pkts = ntohll(reply->tx_pkts);
    is->tx_errors = ntohll(reply->tx_errors);

    return 0;
}

int
ofputil_pull_ipfix_stats(struct ofputil_ipfix_stats *is, struct ofpbuf *msg)
{
    enum ofperr error;
    enum ofpraw raw;

    memset(is, 0xFF, sizeof (*is));
    error = (msg->header ? ofpraw_decode(&raw, msg->header)
             : ofpraw_pull(&raw, msg));
    if (error) {
        return error;
    }

    if (!msg->size) {
        return EOF;
    } else if (raw == OFPRAW_NXST_IPFIX_BRIDGE_REPLY ||
               raw == OFPRAW_NXST_IPFIX_FLOW_REPLY) {
        struct nx_ipfix_stats_reply *reply;

        reply = ofpbuf_try_pull(msg, sizeof *reply);
        if (!reply) {
            /* Check the pull result: a truncated reply must not be passed
             * on, or ofputil_ipfix_stats_from_nx() would read past the end
             * of the message. */
            return OFPERR_OFPBRC_BAD_LEN;
        }
        return ofputil_ipfix_stats_from_nx(is, reply);
    } else {
        OVS_NOT_REACHED();
    }
}

/* Returns the number of ipfix stats elements in
 * OFPTYPE_IPFIX_BRIDGE_STATS_REPLY or OFPTYPE_IPFIX_FLOW_STATS_REPLY
 * message 'oh'. */
size_t
ofputil_count_ipfix_stats(const struct ofp_header *oh)
{
    struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
    ofpraw_pull_assert(&b);

    return b.size / sizeof(struct ofputil_ipfix_stats);
}

/* Frees all of the "struct ofputil_bucket"s in the 'buckets' list.
*/
void
ofputil_bucket_list_destroy(struct ovs_list *buckets)
{
    struct ofputil_bucket *bucket;

    LIST_FOR_EACH_POP (bucket, list_node, buckets) {
        free(bucket->ofpacts);
        free(bucket);
    }
}

/* Clones 'bucket' and its ofpacts data */
static struct ofputil_bucket *
ofputil_bucket_clone_data(const struct ofputil_bucket *bucket)
{
    struct ofputil_bucket *new;

    new = xmemdup(bucket, sizeof *bucket);
    new->ofpacts = xmemdup(bucket->ofpacts, bucket->ofpacts_len);

    return new;
}

/* Clones each of the buckets in the list 'src', appending them
 * in turn to 'dest', which should be an initialised list.
 * An exception is that if the pointer value of a bucket in 'src'
 * matches 'skip' then it is not cloned or appended to 'dest'.
 * This allows all of 'src', or all of 'src' except 'skip', to
 * be cloned and appended to 'dest'. */
void
ofputil_bucket_clone_list(struct ovs_list *dest, const struct ovs_list *src,
                          const struct ofputil_bucket *skip)
{
    struct ofputil_bucket *bucket;

    LIST_FOR_EACH (bucket, list_node, src) {
        struct ofputil_bucket *new_bucket;

        if (bucket == skip) {
            continue;
        }

        new_bucket = ofputil_bucket_clone_data(bucket);
        ovs_list_push_back(dest, &new_bucket->list_node);
    }
}

/* Finds a bucket in the list 'buckets' whose bucket id is 'bucket_id'.
 * Returns the first bucket found or NULL if no buckets are found. */
struct ofputil_bucket *
ofputil_bucket_find(const struct ovs_list *buckets, uint32_t bucket_id)
{
    struct ofputil_bucket *bucket;

    if (bucket_id > OFPG15_BUCKET_MAX) {
        return NULL;
    }

    LIST_FOR_EACH (bucket, list_node, buckets) {
        if (bucket->bucket_id == bucket_id) {
            return bucket;
        }
    }

    return NULL;
}

/* Returns true if more than one bucket in the list 'buckets'
 * has the same bucket id.  Returns false otherwise. */
bool
ofputil_bucket_check_duplicate_id(const struct ovs_list *buckets)
{
    struct ofputil_bucket *i, *j;

    LIST_FOR_EACH (i, list_node, buckets) {
        LIST_FOR_EACH_REVERSE (j, list_node, buckets) {
            if (i == j) {
                break;
            }
            if (i->bucket_id == j->bucket_id) {
                return true;
            }
        }
    }

    return false;
}

/* Returns the bucket at the front of the list 'buckets'.
 * Undefined if 'buckets' is empty. */
struct ofputil_bucket *
ofputil_bucket_list_front(const struct ovs_list *buckets)
{
    static struct ofputil_bucket *bucket;

    ASSIGN_CONTAINER(bucket, ovs_list_front(buckets), list_node);

    return bucket;
}

/* Returns the bucket at the back of the list 'buckets'.
 * Undefined if 'buckets' is empty. */
struct ofputil_bucket *
ofputil_bucket_list_back(const struct ovs_list *buckets)
{
    static struct ofputil_bucket *bucket;

    ASSIGN_CONTAINER(bucket, ovs_list_back(buckets), list_node);

    return bucket;
}

/* Returns an OpenFlow group stats request for OpenFlow version 'ofp_version',
 * that requests stats for group 'group_id'.  (Use OFPG_ALL to request stats
 * for all groups.)
 *
 * Group statistics include packet and byte counts for each group.
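 *
 * Illustrative usage sketch (an added example, not upstream text;
 * 'vconn' is an assumed open connection to the switch):
 *
 *     struct ofpbuf *request
 *         = ofputil_encode_group_stats_request(OFP13_VERSION, OFPG_ALL);
 *     ...send 'request' on 'vconn' and collect the multipart reply...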
*/ struct ofpbuf * ofputil_encode_group_stats_request(enum ofp_version ofp_version, uint32_t group_id) { struct ofpbuf *request; switch (ofp_version) { case OFP10_VERSION: ovs_fatal(0, "dump-group-stats needs OpenFlow 1.1 or later " "(\'-O OpenFlow11\')"); case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp11_group_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST11_GROUP_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->group_id = htonl(group_id); break; } default: OVS_NOT_REACHED(); } return request; } void ofputil_uninit_group_desc(struct ofputil_group_desc *gd) { ofputil_bucket_list_destroy(&gd->buckets); ofputil_group_properties_destroy(&gd->props); } /* Decodes the OpenFlow group description request in 'oh', returning the group * whose description is requested, or OFPG_ALL if stats for all groups was * requested. */ uint32_t ofputil_decode_group_desc_request(const struct ofp_header *oh) { struct ofpbuf request = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&request); if (raw == OFPRAW_OFPST11_GROUP_DESC_REQUEST) { return OFPG_ALL; } else if (raw == OFPRAW_OFPST15_GROUP_DESC_REQUEST) { ovs_be32 *group_id = ofpbuf_pull(&request, sizeof *group_id); return ntohl(*group_id); } else { OVS_NOT_REACHED(); } } /* Returns an OpenFlow group description request for OpenFlow version * 'ofp_version', that requests stats for group 'group_id'. Use OFPG_ALL to * request stats for all groups (OpenFlow 1.4 and earlier always request all * groups). * * Group descriptions include the bucket and action configuration for each * group. */ struct ofpbuf * ofputil_encode_group_desc_request(enum ofp_version ofp_version, uint32_t group_id) { struct ofpbuf *request; switch (ofp_version) { case OFP10_VERSION: ovs_fatal(0, "dump-groups needs OpenFlow 1.1 or later " "(\'-O OpenFlow11\')"); case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: request = ofpraw_alloc(OFPRAW_OFPST11_GROUP_DESC_REQUEST, ofp_version, 0); break; case OFP15_VERSION: case OFP16_VERSION: { struct ofp15_group_desc_request *req; request = ofpraw_alloc(OFPRAW_OFPST15_GROUP_DESC_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->group_id = htonl(group_id); break; } default: OVS_NOT_REACHED(); } return request; } static void ofputil_group_bucket_counters_to_ofp11(const struct ofputil_group_stats *gs, struct ofp11_bucket_counter bucket_cnts[]) { int i; for (i = 0; i < gs->n_buckets; i++) { bucket_cnts[i].packet_count = htonll(gs->bucket_stats[i].packet_count); bucket_cnts[i].byte_count = htonll(gs->bucket_stats[i].byte_count); } } static void ofputil_group_stats_to_ofp11(const struct ofputil_group_stats *gs, struct ofp11_group_stats *gs11, size_t length, struct ofp11_bucket_counter bucket_cnts[]) { memset(gs11, 0, sizeof *gs11); gs11->length = htons(length); gs11->group_id = htonl(gs->group_id); gs11->ref_count = htonl(gs->ref_count); gs11->packet_count = htonll(gs->packet_count); gs11->byte_count = htonll(gs->byte_count); ofputil_group_bucket_counters_to_ofp11(gs, bucket_cnts); } static void ofputil_group_stats_to_ofp13(const struct ofputil_group_stats *gs, struct ofp13_group_stats *gs13, size_t length, struct ofp11_bucket_counter bucket_cnts[]) { ofputil_group_stats_to_ofp11(gs, &gs13->gs, length, bucket_cnts); gs13->duration_sec = htonl(gs->duration_sec); gs13->duration_nsec = htonl(gs->duration_nsec); } /* Encodes 'gs' properly for the 
format of the list of group statistics * replies already begun in 'replies' and appends it to the list. 'replies' * must have originally been initialized with ofpmp_init(). */ void ofputil_append_group_stats(struct ovs_list *replies, const struct ofputil_group_stats *gs) { size_t bucket_counter_size; struct ofp11_bucket_counter *bucket_counters; size_t length; bucket_counter_size = gs->n_buckets * sizeof(struct ofp11_bucket_counter); switch (ofpmp_version(replies)) { case OFP11_VERSION: case OFP12_VERSION:{ struct ofp11_group_stats *gs11; length = sizeof *gs11 + bucket_counter_size; gs11 = ofpmp_append(replies, length); bucket_counters = (struct ofp11_bucket_counter *)(gs11 + 1); ofputil_group_stats_to_ofp11(gs, gs11, length, bucket_counters); break; } case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp13_group_stats *gs13; length = sizeof *gs13 + bucket_counter_size; gs13 = ofpmp_append(replies, length); bucket_counters = (struct ofp11_bucket_counter *)(gs13 + 1); ofputil_group_stats_to_ofp13(gs, gs13, length, bucket_counters); break; } case OFP10_VERSION: default: OVS_NOT_REACHED(); } } /* Returns an OpenFlow group features request for OpenFlow version * 'ofp_version'. */ struct ofpbuf * ofputil_encode_group_features_request(enum ofp_version ofp_version) { struct ofpbuf *request = NULL; switch (ofp_version) { case OFP10_VERSION: case OFP11_VERSION: ovs_fatal(0, "dump-group-features needs OpenFlow 1.2 or later " "(\'-O OpenFlow12\')"); case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: request = ofpraw_alloc(OFPRAW_OFPST12_GROUP_FEATURES_REQUEST, ofp_version, 0); break; default: OVS_NOT_REACHED(); } return request; } /* Returns a OpenFlow message that encodes 'features' properly as a reply to * group features request 'request'. */ struct ofpbuf * ofputil_encode_group_features_reply( const struct ofputil_group_features *features, const struct ofp_header *request) { struct ofp12_group_features_stats *ogf; struct ofpbuf *reply; int i; reply = ofpraw_alloc_xid(OFPRAW_OFPST12_GROUP_FEATURES_REPLY, request->version, request->xid, 0); ogf = ofpbuf_put_zeros(reply, sizeof *ogf); ogf->types = htonl(features->types); ogf->capabilities = htonl(features->capabilities); for (i = 0; i < OFPGT12_N_TYPES; i++) { ogf->max_groups[i] = htonl(features->max_groups[i]); ogf->actions[i] = ofpact_bitmap_to_openflow(features->ofpacts[i], request->version); } return reply; } /* Decodes group features reply 'oh' into 'features'. */ void ofputil_decode_group_features_reply(const struct ofp_header *oh, struct ofputil_group_features *features) { const struct ofp12_group_features_stats *ogf = ofpmsg_body(oh); int i; features->types = ntohl(ogf->types); features->capabilities = ntohl(ogf->capabilities); for (i = 0; i < OFPGT12_N_TYPES; i++) { features->max_groups[i] = ntohl(ogf->max_groups[i]); features->ofpacts[i] = ofpact_bitmap_from_openflow( ogf->actions[i], oh->version); } } /* Parse a group status request message into a 32 bit OpenFlow 1.1 * group ID and stores the latter in '*group_id'. * Returns 0 if successful, otherwise an OFPERR_* number. */ enum ofperr ofputil_decode_group_stats_request(const struct ofp_header *request, uint32_t *group_id) { const struct ofp11_group_stats_request *gsr11 = ofpmsg_body(request); *group_id = ntohl(gsr11->group_id); return 0; } /* Converts a group stats reply in 'msg' into an abstract ofputil_group_stats * in 'gs'. 
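 *
 * (Added illustration, not upstream text: a typical decode loop, with
 * error handling elided; note that each successful call allocates
 * 'gs.bucket_stats':
 *
 *     struct ofputil_group_stats gs;
 *     struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
 *     while (!ofputil_decode_group_stats_reply(&b, &gs)) {
 *         ...use 'gs'...
 *         free(gs.bucket_stats);
 *     }
 * )
 *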
 * Assigns freshly allocated memory to gs->bucket_stats for the
 * caller to eventually free.
 *
 * Multiple group stats replies can be packed into a single OpenFlow message.
 * Calling this function multiple times for a single 'msg' iterates through
 * the replies.  The caller must initially leave 'msg''s layer pointers null
 * and not modify them between calls.
 *
 * Returns 0 if successful, EOF if no replies were left in this 'msg',
 * otherwise a positive errno value. */
int
ofputil_decode_group_stats_reply(struct ofpbuf *msg,
                                 struct ofputil_group_stats *gs)
{
    struct ofp11_bucket_counter *obc;
    struct ofp11_group_stats *ogs11;
    enum ofpraw raw;
    enum ofperr error;
    size_t base_len;
    size_t length;
    size_t i;

    gs->bucket_stats = NULL;
    error = (msg->header ? ofpraw_decode(&raw, msg->header)
             : ofpraw_pull(&raw, msg));
    if (error) {
        return error;
    }

    if (!msg->size) {
        return EOF;
    }

    if (raw == OFPRAW_OFPST11_GROUP_REPLY) {
        base_len = sizeof *ogs11;
        ogs11 = ofpbuf_try_pull(msg, sizeof *ogs11);
        gs->duration_sec = gs->duration_nsec = UINT32_MAX;
    } else if (raw == OFPRAW_OFPST13_GROUP_REPLY) {
        struct ofp13_group_stats *ogs13;

        base_len = sizeof *ogs13;
        ogs13 = ofpbuf_try_pull(msg, sizeof *ogs13);
        if (ogs13) {
            ogs11 = &ogs13->gs;
            gs->duration_sec = ntohl(ogs13->duration_sec);
            gs->duration_nsec = ntohl(ogs13->duration_nsec);
        } else {
            ogs11 = NULL;
        }
    } else {
        OVS_NOT_REACHED();
    }

    if (!ogs11) {
        VLOG_WARN_RL(&bad_ofmsg_rl,
                     "%s reply has %"PRIu32" leftover bytes at end",
                     ofpraw_get_name(raw), msg->size);
        return OFPERR_OFPBRC_BAD_LEN;
    }
    length = ntohs(ogs11->length);
    if (length < base_len) {
        /* Compare against the full header size 'base_len', not
         * 'sizeof base_len': the latter is merely sizeof(size_t), so a
         * claimed length between it and 'base_len' would make the bucket
         * count computation below underflow. */
        VLOG_WARN_RL(&bad_ofmsg_rl,
                     "%s reply claims invalid length %"PRIuSIZE,
                     ofpraw_get_name(raw), length);
        return OFPERR_OFPBRC_BAD_LEN;
    }

    gs->group_id = ntohl(ogs11->group_id);
    gs->ref_count = ntohl(ogs11->ref_count);
    gs->packet_count = ntohll(ogs11->packet_count);
    gs->byte_count = ntohll(ogs11->byte_count);

    gs->n_buckets = (length - base_len) / sizeof *obc;
    obc = ofpbuf_try_pull(msg, gs->n_buckets * sizeof *obc);
    if (!obc) {
        VLOG_WARN_RL(&bad_ofmsg_rl,
                     "%s reply has %"PRIu32" leftover bytes at end",
                     ofpraw_get_name(raw), msg->size);
        return OFPERR_OFPBRC_BAD_LEN;
    }

    gs->bucket_stats = xmalloc(gs->n_buckets * sizeof *gs->bucket_stats);
    for (i = 0; i < gs->n_buckets; i++) {
        gs->bucket_stats[i].packet_count = ntohll(obc[i].packet_count);
        gs->bucket_stats[i].byte_count = ntohll(obc[i].byte_count);
    }

    return 0;
}

static void
ofputil_put_ofp11_bucket(const struct ofputil_bucket *bucket,
                         struct ofpbuf *openflow, enum ofp_version ofp_version)
{
    struct ofp11_bucket *ob;
    size_t start;

    start = openflow->size;
    ofpbuf_put_zeros(openflow, sizeof *ob);
    ofpacts_put_openflow_actions(bucket->ofpacts, bucket->ofpacts_len,
                                 openflow, ofp_version);
    ob = ofpbuf_at_assert(openflow, start, sizeof *ob);
    ob->len = htons(openflow->size - start);
    ob->weight = htons(bucket->weight);
    ob->watch_port = ofputil_port_to_ofp11(bucket->watch_port);
    ob->watch_group = htonl(bucket->watch_group);
}

static void
ofputil_put_ofp15_bucket(const struct ofputil_bucket *bucket,
                         uint32_t bucket_id, enum ofp11_group_type group_type,
                         struct ofpbuf *openflow, enum ofp_version ofp_version)
{
    struct ofp15_bucket *ob;
    size_t start, actions_start, actions_len;

    start = openflow->size;
    ofpbuf_put_zeros(openflow, sizeof *ob);

    actions_start = openflow->size;
    ofpacts_put_openflow_actions(bucket->ofpacts, bucket->ofpacts_len,
                                 openflow, ofp_version);
    actions_len = openflow->size - actions_start;

    if (group_type == OFPGT11_SELECT) {
        ofpprop_put_u16(openflow, OFPGBPT15_WEIGHT, bucket->weight);
    }
    if
(bucket->watch_port != OFPP_ANY) { ofpprop_put_be32(openflow, OFPGBPT15_WATCH_PORT, ofputil_port_to_ofp11(bucket->watch_port)); } if (bucket->watch_group != OFPG_ANY) { ofpprop_put_u32(openflow, OFPGBPT15_WATCH_GROUP, bucket->watch_group); } ob = ofpbuf_at_assert(openflow, start, sizeof *ob); ob->len = htons(openflow->size - start); ob->action_array_len = htons(actions_len); ob->bucket_id = htonl(bucket_id); } static void ofputil_put_group_prop_ntr_selection_method(enum ofp_version ofp_version, const struct ofputil_group_props *gp, struct ofpbuf *openflow) { struct ntr_group_prop_selection_method *prop; size_t start; start = openflow->size; ofpbuf_put_zeros(openflow, sizeof *prop); oxm_put_field_array(openflow, &gp->fields, ofp_version); prop = ofpbuf_at_assert(openflow, start, sizeof *prop); prop->type = htons(OFPGPT15_EXPERIMENTER); prop->experimenter = htonl(NTR_VENDOR_ID); prop->exp_type = htonl(NTRT_SELECTION_METHOD); strcpy(prop->selection_method, gp->selection_method); prop->selection_method_param = htonll(gp->selection_method_param); ofpprop_end(openflow, start); } static void ofputil_append_ofp11_group_desc_reply(const struct ofputil_group_desc *gds, const struct ovs_list *buckets, struct ovs_list *replies, enum ofp_version version) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); struct ofp11_group_desc_stats *ogds; struct ofputil_bucket *bucket; size_t start_ogds; start_ogds = reply->size; ofpbuf_put_zeros(reply, sizeof *ogds); LIST_FOR_EACH (bucket, list_node, buckets) { ofputil_put_ofp11_bucket(bucket, reply, version); } ogds = ofpbuf_at_assert(reply, start_ogds, sizeof *ogds); ogds->length = htons(reply->size - start_ogds); ogds->type = gds->type; ogds->group_id = htonl(gds->group_id); ofpmp_postappend(replies, start_ogds); } static void ofputil_append_ofp15_group_desc_reply(const struct ofputil_group_desc *gds, const struct ovs_list *buckets, struct ovs_list *replies, enum ofp_version version) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); struct ofp15_group_desc_stats *ogds; struct ofputil_bucket *bucket; size_t start_ogds, start_buckets; start_ogds = reply->size; ofpbuf_put_zeros(reply, sizeof *ogds); start_buckets = reply->size; LIST_FOR_EACH (bucket, list_node, buckets) { ofputil_put_ofp15_bucket(bucket, bucket->bucket_id, gds->type, reply, version); } ogds = ofpbuf_at_assert(reply, start_ogds, sizeof *ogds); ogds->type = gds->type; ogds->group_id = htonl(gds->group_id); ogds->bucket_list_len = htons(reply->size - start_buckets); /* Add group properties */ if (gds->props.selection_method[0]) { ofputil_put_group_prop_ntr_selection_method(version, &gds->props, reply); } ogds = ofpbuf_at_assert(reply, start_ogds, sizeof *ogds); ogds->length = htons(reply->size - start_ogds); ofpmp_postappend(replies, start_ogds); } /* Appends a group stats reply that contains the data in 'gds' to those already * present in the list of ofpbufs in 'replies'. 'replies' should have been * initialized with ofpmp_init(). 
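 *
 * Illustrative usage sketch (an added example, not upstream text;
 * 'gd' is an assumed ofputil_group_desc describing one group and
 * 'request' is the header of the request being answered):
 *
 *     struct ovs_list replies;
 *     ofpmp_init(&replies, request);
 *     ofputil_append_group_desc_reply(&gd, &gd.buckets, &replies);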
*/ void ofputil_append_group_desc_reply(const struct ofputil_group_desc *gds, const struct ovs_list *buckets, struct ovs_list *replies) { enum ofp_version version = ofpmp_version(replies); switch (version) { case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: ofputil_append_ofp11_group_desc_reply(gds, buckets, replies, version); break; case OFP15_VERSION: case OFP16_VERSION: ofputil_append_ofp15_group_desc_reply(gds, buckets, replies, version); break; case OFP10_VERSION: default: OVS_NOT_REACHED(); } } static enum ofperr ofputil_pull_ofp11_buckets(struct ofpbuf *msg, size_t buckets_length, enum ofp_version version, struct ovs_list *buckets) { struct ofp11_bucket *ob; uint32_t bucket_id = 0; ovs_list_init(buckets); while (buckets_length > 0) { struct ofputil_bucket *bucket; struct ofpbuf ofpacts; enum ofperr error; size_t ob_len; ob = (buckets_length >= sizeof *ob ? ofpbuf_try_pull(msg, sizeof *ob) : NULL); if (!ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "buckets end with %"PRIuSIZE" leftover bytes", buckets_length); ofputil_bucket_list_destroy(buckets); return OFPERR_OFPGMFC_BAD_BUCKET; } ob_len = ntohs(ob->len); if (ob_len < sizeof *ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length " "%"PRIuSIZE" is not valid", ob_len); ofputil_bucket_list_destroy(buckets); return OFPERR_OFPGMFC_BAD_BUCKET; } else if (ob_len > buckets_length) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length " "%"PRIuSIZE" exceeds remaining buckets data size %"PRIuSIZE, ob_len, buckets_length); ofputil_bucket_list_destroy(buckets); return OFPERR_OFPGMFC_BAD_BUCKET; } buckets_length -= ob_len; ofpbuf_init(&ofpacts, 0); error = ofpacts_pull_openflow_actions(msg, ob_len - sizeof *ob, version, NULL, NULL, &ofpacts); if (error) { ofpbuf_uninit(&ofpacts); ofputil_bucket_list_destroy(buckets); return error; } bucket = xzalloc(sizeof *bucket); bucket->weight = ntohs(ob->weight); error = ofputil_port_from_ofp11(ob->watch_port, &bucket->watch_port); if (error) { ofpbuf_uninit(&ofpacts); ofputil_bucket_list_destroy(buckets); free(bucket); return OFPERR_OFPGMFC_BAD_WATCH; } bucket->watch_group = ntohl(ob->watch_group); bucket->bucket_id = bucket_id++; bucket->ofpacts = ofpbuf_steal_data(&ofpacts); bucket->ofpacts_len = ofpacts.size; ovs_list_push_back(buckets, &bucket->list_node); } return 0; } static enum ofperr ofputil_pull_ofp15_buckets(struct ofpbuf *msg, size_t buckets_length, enum ofp_version version, uint8_t group_type, struct ovs_list *buckets) { struct ofp15_bucket *ob; ovs_list_init(buckets); while (buckets_length > 0) { struct ofputil_bucket *bucket = NULL; struct ofpbuf ofpacts; enum ofperr err = OFPERR_OFPGMFC_BAD_BUCKET; size_t ob_len, actions_len, properties_len; ovs_be32 watch_port = ofputil_port_to_ofp11(OFPP_ANY); ovs_be32 watch_group = htonl(OFPG_ANY); ovs_be16 weight = htons(group_type == OFPGT11_SELECT ? 
1 : 0); ofpbuf_init(&ofpacts, 0); ob = ofpbuf_try_pull(msg, sizeof *ob); if (!ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "buckets end with %"PRIuSIZE " leftover bytes", buckets_length); goto err; } ob_len = ntohs(ob->len); actions_len = ntohs(ob->action_array_len); if (ob_len < sizeof *ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length " "%"PRIuSIZE" is not valid", ob_len); goto err; } else if (ob_len > buckets_length) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length " "%"PRIuSIZE" exceeds remaining buckets data size %" PRIuSIZE, ob_len, buckets_length); goto err; } else if (actions_len > ob_len - sizeof *ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket actions " "length %"PRIuSIZE" exceeds remaining bucket " "data size %"PRIuSIZE, actions_len, ob_len - sizeof *ob); goto err; } buckets_length -= ob_len; err = ofpacts_pull_openflow_actions(msg, actions_len, version, NULL, NULL, &ofpacts); if (err) { goto err; } properties_len = ob_len - sizeof *ob - actions_len; struct ofpbuf properties = ofpbuf_const_initializer( ofpbuf_pull(msg, properties_len), properties_len); while (properties.size > 0) { struct ofpbuf payload; uint64_t type; err = ofpprop_pull(&properties, &payload, &type); if (err) { goto err; } switch (type) { case OFPGBPT15_WEIGHT: err = ofpprop_parse_be16(&payload, &weight); break; case OFPGBPT15_WATCH_PORT: err = ofpprop_parse_be32(&payload, &watch_port); break; case OFPGBPT15_WATCH_GROUP: err = ofpprop_parse_be32(&payload, &watch_group); break; default: err = OFPPROP_UNKNOWN(false, "group bucket", type); break; } if (err) { goto err; } } bucket = xzalloc(sizeof *bucket); bucket->weight = ntohs(weight); err = ofputil_port_from_ofp11(watch_port, &bucket->watch_port); if (err) { err = OFPERR_OFPGMFC_BAD_WATCH; goto err; } bucket->watch_group = ntohl(watch_group); bucket->bucket_id = ntohl(ob->bucket_id); if (bucket->bucket_id > OFPG15_BUCKET_MAX) { VLOG_WARN_RL(&bad_ofmsg_rl, "bucket id (%u) is out of range", bucket->bucket_id); err = OFPERR_OFPGMFC_BAD_BUCKET; goto err; } bucket->ofpacts = ofpbuf_steal_data(&ofpacts); bucket->ofpacts_len = ofpacts.size; ovs_list_push_back(buckets, &bucket->list_node); continue; err: free(bucket); ofpbuf_uninit(&ofpacts); ofputil_bucket_list_destroy(buckets); return err; } if (ofputil_bucket_check_duplicate_id(buckets)) { VLOG_WARN_RL(&bad_ofmsg_rl, "Duplicate bucket id"); ofputil_bucket_list_destroy(buckets); return OFPERR_OFPGMFC_BAD_BUCKET; } return 0; } static void ofputil_init_group_properties(struct ofputil_group_props *gp) { memset(gp, 0, sizeof *gp); } void ofputil_group_properties_copy(struct ofputil_group_props *to, const struct ofputil_group_props *from) { *to = *from; to->fields.values = xmemdup(from->fields.values, from->fields.values_size); } void ofputil_group_properties_destroy(struct ofputil_group_props *gp) { free(gp->fields.values); } static enum ofperr parse_group_prop_ntr_selection_method(struct ofpbuf *payload, enum ofp11_group_type group_type, enum ofp15_group_mod_command group_cmd, struct ofputil_group_props *gp) { struct ntr_group_prop_selection_method *prop = payload->data; size_t fields_len, method_len; enum ofperr error; switch (group_type) { case OFPGT11_SELECT: break; case OFPGT11_ALL: case OFPGT11_INDIRECT: case OFPGT11_FF: OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method property is " "only allowed for select groups"); return OFPERR_OFPBPC_BAD_VALUE; default: OVS_NOT_REACHED(); } switch (group_cmd) { case OFPGC15_ADD: case OFPGC15_MODIFY: case OFPGC15_ADD_OR_MOD: break; case 
OFPGC15_DELETE: case OFPGC15_INSERT_BUCKET: case OFPGC15_REMOVE_BUCKET: OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method property is " "only allowed for add and delete group modifications"); return OFPERR_OFPBPC_BAD_VALUE; default: OVS_NOT_REACHED(); } if (payload->size < sizeof *prop) { OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method property " "length %u is not valid", payload->size); return OFPERR_OFPBPC_BAD_LEN; } method_len = strnlen(prop->selection_method, NTR_MAX_SELECTION_METHOD_LEN); if (method_len == NTR_MAX_SELECTION_METHOD_LEN) { OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method is not null terminated"); return OFPERR_OFPBPC_BAD_VALUE; } if (strcmp("hash", prop->selection_method) && strcmp("dp_hash", prop->selection_method)) { OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method '%s' is not supported", prop->selection_method); return OFPERR_OFPBPC_BAD_VALUE; } /* 'method_len' is now non-zero. */ strcpy(gp->selection_method, prop->selection_method); gp->selection_method_param = ntohll(prop->selection_method_param); ofpbuf_pull(payload, sizeof *prop); fields_len = ntohs(prop->length) - sizeof *prop; if (fields_len && strcmp("hash", gp->selection_method)) { OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method %s " "does not support fields", gp->selection_method); return OFPERR_OFPBPC_BAD_VALUE; } error = oxm_pull_field_array(payload->data, fields_len, &gp->fields); if (error) { OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method fields are invalid"); return error; } return 0; } static enum ofperr parse_ofp15_group_properties(struct ofpbuf *msg, enum ofp11_group_type group_type, enum ofp15_group_mod_command group_cmd, struct ofputil_group_props *gp, size_t properties_len) { struct ofpbuf properties = ofpbuf_const_initializer( ofpbuf_pull(msg, properties_len), properties_len); while (properties.size > 0) { struct ofpbuf payload; enum ofperr error; uint64_t type; error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case OFPPROP_EXP(NTR_VENDOR_ID, NTRT_SELECTION_METHOD): case OFPPROP_EXP(NTR_COMPAT_VENDOR_ID, NTRT_SELECTION_METHOD): error = parse_group_prop_ntr_selection_method(&payload, group_type, group_cmd, gp); break; default: error = OFPPROP_UNKNOWN(false, "group", type); break; } if (error) { return error; } } return 0; } static int ofputil_decode_ofp11_group_desc_reply(struct ofputil_group_desc *gd, struct ofpbuf *msg, enum ofp_version version) { struct ofp11_group_desc_stats *ogds; size_t length; if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } ogds = ofpbuf_try_pull(msg, sizeof *ogds); if (!ogds) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST11_GROUP_DESC reply has %"PRIu32" " "leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } gd->type = ogds->type; gd->group_id = ntohl(ogds->group_id); length = ntohs(ogds->length); if (length < sizeof *ogds || length - sizeof *ogds > msg->size) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST11_GROUP_DESC reply claims invalid " "length %"PRIuSIZE, length); return OFPERR_OFPBRC_BAD_LEN; } return ofputil_pull_ofp11_buckets(msg, length - sizeof *ogds, version, &gd->buckets); } static int ofputil_decode_ofp15_group_desc_reply(struct ofputil_group_desc *gd, struct ofpbuf *msg, enum ofp_version version) { struct ofp15_group_desc_stats *ogds; uint16_t length, bucket_list_len; int error; if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } ogds = ofpbuf_try_pull(msg, sizeof *ogds); if (!ogds) { VLOG_WARN_RL(&bad_ofmsg_rl, 
"OFPST11_GROUP_DESC reply has %"PRIu32" " "leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } gd->type = ogds->type; gd->group_id = ntohl(ogds->group_id); length = ntohs(ogds->length); if (length < sizeof *ogds || length - sizeof *ogds > msg->size) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST11_GROUP_DESC reply claims invalid " "length %u", length); return OFPERR_OFPBRC_BAD_LEN; } bucket_list_len = ntohs(ogds->bucket_list_len); if (length < bucket_list_len + sizeof *ogds) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST11_GROUP_DESC reply claims invalid " "bucket list length %u", bucket_list_len); return OFPERR_OFPBRC_BAD_LEN; } error = ofputil_pull_ofp15_buckets(msg, bucket_list_len, version, gd->type, &gd->buckets); if (error) { return error; } /* By definition group desc messages don't have a group mod command. * However, parse_group_prop_ntr_selection_method() checks to make sure * that the command is OFPGC15_ADD or OFPGC15_DELETE to guard * against group mod messages with other commands supplying * a NTR selection method group experimenter property. * Such properties are valid for group desc replies so * claim that the group mod command is OFPGC15_ADD to * satisfy the check in parse_group_prop_ntr_selection_method() */ error = parse_ofp15_group_properties( msg, gd->type, OFPGC15_ADD, &gd->props, length - sizeof *ogds - bucket_list_len); if (error) { ofputil_bucket_list_destroy(&gd->buckets); } return error; } /* Converts a group description reply in 'msg' into an abstract * ofputil_group_desc in 'gd'. * * Multiple group description replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. The caller must initially leave 'msg''s layer pointers * null and not modify them between calls. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. 
*/ int ofputil_decode_group_desc_reply(struct ofputil_group_desc *gd, struct ofpbuf *msg, enum ofp_version version) { ofputil_init_group_properties(&gd->props); switch (version) { case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: return ofputil_decode_ofp11_group_desc_reply(gd, msg, version); case OFP15_VERSION: case OFP16_VERSION: return ofputil_decode_ofp15_group_desc_reply(gd, msg, version); case OFP10_VERSION: default: OVS_NOT_REACHED(); } } void ofputil_uninit_group_mod(struct ofputil_group_mod *gm) { ofputil_bucket_list_destroy(&gm->buckets); ofputil_group_properties_destroy(&gm->props); } static struct ofpbuf * ofputil_encode_ofp11_group_mod(enum ofp_version ofp_version, const struct ofputil_group_mod *gm) { struct ofpbuf *b; struct ofp11_group_mod *ogm; size_t start_ogm; struct ofputil_bucket *bucket; b = ofpraw_alloc(OFPRAW_OFPT11_GROUP_MOD, ofp_version, 0); start_ogm = b->size; ofpbuf_put_zeros(b, sizeof *ogm); LIST_FOR_EACH (bucket, list_node, &gm->buckets) { ofputil_put_ofp11_bucket(bucket, b, ofp_version); } ogm = ofpbuf_at_assert(b, start_ogm, sizeof *ogm); ogm->command = htons(gm->command); ogm->type = gm->type; ogm->group_id = htonl(gm->group_id); return b; } static struct ofpbuf * ofputil_encode_ofp15_group_mod(enum ofp_version ofp_version, const struct ofputil_group_mod *gm) { struct ofpbuf *b; struct ofp15_group_mod *ogm; size_t start_ogm; struct ofputil_bucket *bucket; struct id_pool *bucket_ids = NULL; b = ofpraw_alloc(OFPRAW_OFPT15_GROUP_MOD, ofp_version, 0); start_ogm = b->size; ofpbuf_put_zeros(b, sizeof *ogm); LIST_FOR_EACH (bucket, list_node, &gm->buckets) { uint32_t bucket_id; /* Generate a bucket id if none was supplied */ if (bucket->bucket_id > OFPG15_BUCKET_MAX) { if (!bucket_ids) { const struct ofputil_bucket *bkt; bucket_ids = id_pool_create(0, OFPG15_BUCKET_MAX + 1); /* Mark all bucket_ids that are present in gm * as used in the pool. 
*/ LIST_FOR_EACH_REVERSE (bkt, list_node, &gm->buckets) { if (bkt == bucket) { break; } if (bkt->bucket_id <= OFPG15_BUCKET_MAX) { id_pool_add(bucket_ids, bkt->bucket_id); } } } if (!id_pool_alloc_id(bucket_ids, &bucket_id)) { OVS_NOT_REACHED(); } } else { bucket_id = bucket->bucket_id; } ofputil_put_ofp15_bucket(bucket, bucket_id, gm->type, b, ofp_version); } ogm = ofpbuf_at_assert(b, start_ogm, sizeof *ogm); ogm->command = htons(gm->command); ogm->type = gm->type; ogm->group_id = htonl(gm->group_id); ogm->command_bucket_id = htonl(gm->command_bucket_id); ogm->bucket_array_len = htons(b->size - start_ogm - sizeof *ogm); /* Add group properties */ if (gm->props.selection_method[0]) { ofputil_put_group_prop_ntr_selection_method(ofp_version, &gm->props, b); } id_pool_destroy(bucket_ids); return b; } static void bad_group_cmd(enum ofp15_group_mod_command cmd) { const char *opt_version; const char *version; const char *cmd_str; switch (cmd) { case OFPGC15_ADD: case OFPGC15_MODIFY: case OFPGC15_ADD_OR_MOD: case OFPGC15_DELETE: version = "1.1"; opt_version = "11"; break; case OFPGC15_INSERT_BUCKET: case OFPGC15_REMOVE_BUCKET: version = "1.5"; opt_version = "15"; break; default: OVS_NOT_REACHED(); } switch (cmd) { case OFPGC15_ADD: cmd_str = "add-group"; break; case OFPGC15_MODIFY: case OFPGC15_ADD_OR_MOD: cmd_str = "mod-group"; break; case OFPGC15_DELETE: cmd_str = "del-group"; break; case OFPGC15_INSERT_BUCKET: cmd_str = "insert-bucket"; break; case OFPGC15_REMOVE_BUCKET: cmd_str = "remove-bucket"; break; default: OVS_NOT_REACHED(); } ovs_fatal(0, "%s needs OpenFlow %s or later (\'-O OpenFlow%s\')", cmd_str, version, opt_version); } /* Converts abstract group mod 'gm' into a message for OpenFlow version * 'ofp_version' and returns the message. */ struct ofpbuf * ofputil_encode_group_mod(enum ofp_version ofp_version, const struct ofputil_group_mod *gm) { switch (ofp_version) { case OFP10_VERSION: bad_group_cmd(gm->command); case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: if (gm->command > OFPGC11_DELETE && gm->command != OFPGC11_ADD_OR_MOD) { bad_group_cmd(gm->command); } return ofputil_encode_ofp11_group_mod(ofp_version, gm); case OFP15_VERSION: case OFP16_VERSION: return ofputil_encode_ofp15_group_mod(ofp_version, gm); default: OVS_NOT_REACHED(); } } static enum ofperr ofputil_pull_ofp11_group_mod(struct ofpbuf *msg, enum ofp_version ofp_version, struct ofputil_group_mod *gm) { const struct ofp11_group_mod *ogm; enum ofperr error; ogm = ofpbuf_pull(msg, sizeof *ogm); gm->command = ntohs(ogm->command); gm->type = ogm->type; gm->group_id = ntohl(ogm->group_id); gm->command_bucket_id = OFPG15_BUCKET_ALL; error = ofputil_pull_ofp11_buckets(msg, msg->size, ofp_version, &gm->buckets); /* OF1.3.5+ prescribes an error when an OFPGC_DELETE includes buckets. 
*/ if (!error && ofp_version >= OFP13_VERSION && gm->command == OFPGC11_DELETE && !ovs_list_is_empty(&gm->buckets)) { error = OFPERR_OFPGMFC_INVALID_GROUP; ofputil_bucket_list_destroy(&gm->buckets); } return error; } static enum ofperr ofputil_pull_ofp15_group_mod(struct ofpbuf *msg, enum ofp_version ofp_version, struct ofputil_group_mod *gm) { const struct ofp15_group_mod *ogm; uint16_t bucket_list_len; enum ofperr error = OFPERR_OFPGMFC_BAD_BUCKET; ogm = ofpbuf_pull(msg, sizeof *ogm); gm->command = ntohs(ogm->command); gm->type = ogm->type; gm->group_id = ntohl(ogm->group_id); gm->command_bucket_id = ntohl(ogm->command_bucket_id); switch (gm->command) { case OFPGC15_REMOVE_BUCKET: if (gm->command_bucket_id == OFPG15_BUCKET_ALL) { error = 0; } /* Fall through */ case OFPGC15_INSERT_BUCKET: if (gm->command_bucket_id <= OFPG15_BUCKET_MAX || gm->command_bucket_id == OFPG15_BUCKET_FIRST || gm->command_bucket_id == OFPG15_BUCKET_LAST) { error = 0; } break; case OFPGC11_ADD: case OFPGC11_MODIFY: case OFPGC11_ADD_OR_MOD: case OFPGC11_DELETE: default: if (gm->command_bucket_id == OFPG15_BUCKET_ALL) { error = 0; } break; } if (error) { VLOG_WARN_RL(&bad_ofmsg_rl, "group command bucket id (%u) is out of range", gm->command_bucket_id); return OFPERR_OFPGMFC_BAD_BUCKET; } bucket_list_len = ntohs(ogm->bucket_array_len); if (bucket_list_len > msg->size) { return OFPERR_OFPBRC_BAD_LEN; } error = ofputil_pull_ofp15_buckets(msg, bucket_list_len, ofp_version, gm->type, &gm->buckets); if (error) { return error; } error = parse_ofp15_group_properties(msg, gm->type, gm->command, &gm->props, msg->size); if (error) { ofputil_bucket_list_destroy(&gm->buckets); } return error; } static enum ofperr ofputil_check_group_mod(const struct ofputil_group_mod *gm) { switch (gm->type) { case OFPGT11_INDIRECT: if (gm->command != OFPGC11_DELETE && !ovs_list_is_singleton(&gm->buckets) ) { return OFPERR_OFPGMFC_INVALID_GROUP; } break; case OFPGT11_ALL: case OFPGT11_SELECT: case OFPGT11_FF: break; default: return OFPERR_OFPGMFC_BAD_TYPE; } switch (gm->command) { case OFPGC11_ADD: case OFPGC11_MODIFY: case OFPGC11_ADD_OR_MOD: case OFPGC11_DELETE: case OFPGC15_INSERT_BUCKET: break; case OFPGC15_REMOVE_BUCKET: if (!ovs_list_is_empty(&gm->buckets)) { return OFPERR_OFPGMFC_BAD_BUCKET; } break; default: return OFPERR_OFPGMFC_BAD_COMMAND; } struct ofputil_bucket *bucket; LIST_FOR_EACH (bucket, list_node, &gm->buckets) { if (bucket->weight && gm->type != OFPGT11_SELECT) { return OFPERR_OFPGMFC_INVALID_GROUP; } switch (gm->type) { case OFPGT11_ALL: case OFPGT11_INDIRECT: if (ofputil_bucket_has_liveness(bucket)) { return OFPERR_OFPGMFC_WATCH_UNSUPPORTED; } break; case OFPGT11_SELECT: break; case OFPGT11_FF: if (!ofputil_bucket_has_liveness(bucket)) { return OFPERR_OFPGMFC_INVALID_GROUP; } break; default: /* Returning BAD TYPE to be consistent * though gm->type has been checked already. */ return OFPERR_OFPGMFC_BAD_TYPE; } } return 0; } /* Converts OpenFlow group mod message 'oh' into an abstract group mod in * 'gm'. Returns 0 if successful, otherwise an OpenFlow error code. 
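 *
 * Illustrative usage sketch (an added example, not upstream text): on
 * success the caller owns the decoded buckets and properties and must
 * eventually release them:
 *
 *     struct ofputil_group_mod gm;
 *     enum ofperr error = ofputil_decode_group_mod(oh, &gm);
 *     if (!error) {
 *         ...apply 'gm' to the switch state...
 *         ofputil_uninit_group_mod(&gm);
 *     }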
*/ enum ofperr ofputil_decode_group_mod(const struct ofp_header *oh, struct ofputil_group_mod *gm) { ofputil_init_group_properties(&gm->props); enum ofp_version ofp_version = oh->version; struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&msg); enum ofperr err; switch (ofp_version) { case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: err = ofputil_pull_ofp11_group_mod(&msg, ofp_version, gm); break; case OFP15_VERSION: case OFP16_VERSION: err = ofputil_pull_ofp15_group_mod(&msg, ofp_version, gm); break; case OFP10_VERSION: default: OVS_NOT_REACHED(); } if (err) { return err; } err = ofputil_check_group_mod(gm); if (err) { ofputil_uninit_group_mod(gm); } return err; } /* Destroys 'bms'. */ void ofputil_free_bundle_msgs(struct ofputil_bundle_msg *bms, size_t n_bms) { for (size_t i = 0; i < n_bms; i++) { switch ((int)bms[i].type) { case OFPTYPE_FLOW_MOD: free(CONST_CAST(struct ofpact *, bms[i].fm.ofpacts)); break; case OFPTYPE_GROUP_MOD: ofputil_uninit_group_mod(&bms[i].gm); break; case OFPTYPE_PACKET_OUT: free(bms[i].po.ofpacts); free(CONST_CAST(void *, bms[i].po.packet)); break; default: break; } } free(bms); } void ofputil_encode_bundle_msgs(const struct ofputil_bundle_msg *bms, size_t n_bms, struct ovs_list *requests, enum ofputil_protocol protocol) { enum ofp_version version = ofputil_protocol_to_ofp_version(protocol); for (size_t i = 0; i < n_bms; i++) { struct ofpbuf *request = NULL; switch ((int)bms[i].type) { case OFPTYPE_FLOW_MOD: request = ofputil_encode_flow_mod(&bms[i].fm, protocol); break; case OFPTYPE_GROUP_MOD: request = ofputil_encode_group_mod(version, &bms[i].gm); break; case OFPTYPE_PACKET_OUT: request = ofputil_encode_packet_out(&bms[i].po, protocol); break; default: break; } if (request) { ovs_list_push_back(requests, &request->list_node); } } } /* Parse a queue status request message into 'oqsr'. * Returns 0 if successful, otherwise an OFPERR_* number. */ enum ofperr ofputil_decode_queue_stats_request(const struct ofp_header *request, struct ofputil_queue_stats_request *oqsr) { switch ((enum ofp_version)request->version) { case OFP16_VERSION: case OFP15_VERSION: case OFP14_VERSION: case OFP13_VERSION: case OFP12_VERSION: case OFP11_VERSION: { const struct ofp11_queue_stats_request *qsr11 = ofpmsg_body(request); oqsr->queue_id = ntohl(qsr11->queue_id); return ofputil_port_from_ofp11(qsr11->port_no, &oqsr->port_no); } case OFP10_VERSION: { const struct ofp10_queue_stats_request *qsr10 = ofpmsg_body(request); oqsr->queue_id = ntohl(qsr10->queue_id); oqsr->port_no = u16_to_ofp(ntohs(qsr10->port_no)); /* OF 1.0 uses OFPP_ALL for OFPP_ANY */ if (oqsr->port_no == OFPP_ALL) { oqsr->port_no = OFPP_ANY; } return 0; } default: OVS_NOT_REACHED(); } } /* Encode a queue stats request for 'oqsr', the encoded message * will be for OpenFlow version 'ofp_version'. Returns message * as a struct ofpbuf. Returns encoded message on success, NULL on error. 
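 *
 * Illustrative usage sketch (an added example, not upstream text;
 * OFPP_ANY and OFPQ_ALL request stats for every port and queue):
 *
 *     struct ofputil_queue_stats_request oqsr = {
 *         .port_no = OFPP_ANY,
 *         .queue_id = OFPQ_ALL,
 *     };
 *     struct ofpbuf *request
 *         = ofputil_encode_queue_stats_request(OFP13_VERSION, &oqsr);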
*/ struct ofpbuf * ofputil_encode_queue_stats_request(enum ofp_version ofp_version, const struct ofputil_queue_stats_request *oqsr) { struct ofpbuf *request; switch (ofp_version) { case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp11_queue_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST11_QUEUE_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->port_no = ofputil_port_to_ofp11(oqsr->port_no); req->queue_id = htonl(oqsr->queue_id); break; } case OFP10_VERSION: { struct ofp10_queue_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST10_QUEUE_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); /* OpenFlow 1.0 needs OFPP_ALL instead of OFPP_ANY */ req->port_no = htons(ofp_to_u16(oqsr->port_no == OFPP_ANY ? OFPP_ALL : oqsr->port_no)); req->queue_id = htonl(oqsr->queue_id); break; } default: OVS_NOT_REACHED(); } return request; } /* Returns the number of queue stats elements in OFPTYPE_QUEUE_STATS_REPLY * message 'oh'. */ size_t ofputil_count_queue_stats(const struct ofp_header *oh) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); for (size_t n = 0; ; n++) { struct ofputil_queue_stats qs; if (ofputil_decode_queue_stats(&qs, &b)) { return n; } } } static enum ofperr ofputil_queue_stats_from_ofp10(struct ofputil_queue_stats *oqs, const struct ofp10_queue_stats *qs10) { oqs->port_no = u16_to_ofp(ntohs(qs10->port_no)); oqs->queue_id = ntohl(qs10->queue_id); oqs->tx_bytes = ntohll(get_32aligned_be64(&qs10->tx_bytes)); oqs->tx_packets = ntohll(get_32aligned_be64(&qs10->tx_packets)); oqs->tx_errors = ntohll(get_32aligned_be64(&qs10->tx_errors)); oqs->duration_sec = oqs->duration_nsec = UINT32_MAX; return 0; } static enum ofperr ofputil_queue_stats_from_ofp11(struct ofputil_queue_stats *oqs, const struct ofp11_queue_stats *qs11) { enum ofperr error; error = ofputil_port_from_ofp11(qs11->port_no, &oqs->port_no); if (error) { return error; } oqs->queue_id = ntohl(qs11->queue_id); oqs->tx_bytes = ntohll(qs11->tx_bytes); oqs->tx_packets = ntohll(qs11->tx_packets); oqs->tx_errors = ntohll(qs11->tx_errors); oqs->duration_sec = oqs->duration_nsec = UINT32_MAX; return 0; } static enum ofperr ofputil_queue_stats_from_ofp13(struct ofputil_queue_stats *oqs, const struct ofp13_queue_stats *qs13) { enum ofperr error = ofputil_queue_stats_from_ofp11(oqs, &qs13->qs); if (!error) { oqs->duration_sec = ntohl(qs13->duration_sec); oqs->duration_nsec = ntohl(qs13->duration_nsec); } return error; } static enum ofperr ofputil_pull_ofp14_queue_stats(struct ofputil_queue_stats *oqs, struct ofpbuf *msg) { const struct ofp14_queue_stats *qs14; size_t len; qs14 = ofpbuf_try_pull(msg, sizeof *qs14); if (!qs14) { return OFPERR_OFPBRC_BAD_LEN; } len = ntohs(qs14->length); if (len < sizeof *qs14 || len - sizeof *qs14 > msg->size) { return OFPERR_OFPBRC_BAD_LEN; } ofpbuf_pull(msg, len - sizeof *qs14); /* No properties yet defined, so ignore them for now. */ return ofputil_queue_stats_from_ofp13(oqs, &qs14->qs); } /* Converts an OFPST_QUEUE_STATS reply in 'msg' into an abstract * ofputil_queue_stats in 'qs'. * * Multiple OFPST_QUEUE_STATS replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. The caller must initially leave 'msg''s layer pointers * null and not modify them between calls. 
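 *
 * Illustrative iteration sketch (an added example, not upstream text;
 * error handling elided):
 *
 *     struct ofputil_queue_stats qs;
 *     struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
 *     while (!ofputil_decode_queue_stats(&qs, &b)) {
 *         ...use 'qs'...
 *     }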
* * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_queue_stats(struct ofputil_queue_stats *qs, struct ofpbuf *msg) { enum ofperr error; enum ofpraw raw; error = (msg->header ? ofpraw_decode(&raw, msg->header) : ofpraw_pull(&raw, msg)); if (error) { return error; } if (!msg->size) { return EOF; } else if (raw == OFPRAW_OFPST14_QUEUE_REPLY) { return ofputil_pull_ofp14_queue_stats(qs, msg); } else if (raw == OFPRAW_OFPST13_QUEUE_REPLY) { const struct ofp13_queue_stats *qs13; qs13 = ofpbuf_try_pull(msg, sizeof *qs13); if (!qs13) { goto bad_len; } return ofputil_queue_stats_from_ofp13(qs, qs13); } else if (raw == OFPRAW_OFPST11_QUEUE_REPLY) { const struct ofp11_queue_stats *qs11; qs11 = ofpbuf_try_pull(msg, sizeof *qs11); if (!qs11) { goto bad_len; } return ofputil_queue_stats_from_ofp11(qs, qs11); } else if (raw == OFPRAW_OFPST10_QUEUE_REPLY) { const struct ofp10_queue_stats *qs10; qs10 = ofpbuf_try_pull(msg, sizeof *qs10); if (!qs10) { goto bad_len; } return ofputil_queue_stats_from_ofp10(qs, qs10); } else { OVS_NOT_REACHED(); } bad_len: VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_QUEUE reply has %"PRIu32" leftover " "bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } static void ofputil_queue_stats_to_ofp10(const struct ofputil_queue_stats *oqs, struct ofp10_queue_stats *qs10) { qs10->port_no = htons(ofp_to_u16(oqs->port_no)); memset(qs10->pad, 0, sizeof qs10->pad); qs10->queue_id = htonl(oqs->queue_id); put_32aligned_be64(&qs10->tx_bytes, htonll(oqs->tx_bytes)); put_32aligned_be64(&qs10->tx_packets, htonll(oqs->tx_packets)); put_32aligned_be64(&qs10->tx_errors, htonll(oqs->tx_errors)); } static void ofputil_queue_stats_to_ofp11(const struct ofputil_queue_stats *oqs, struct ofp11_queue_stats *qs11) { qs11->port_no = ofputil_port_to_ofp11(oqs->port_no); qs11->queue_id = htonl(oqs->queue_id); qs11->tx_bytes = htonll(oqs->tx_bytes); qs11->tx_packets = htonll(oqs->tx_packets); qs11->tx_errors = htonll(oqs->tx_errors); } static void ofputil_queue_stats_to_ofp13(const struct ofputil_queue_stats *oqs, struct ofp13_queue_stats *qs13) { ofputil_queue_stats_to_ofp11(oqs, &qs13->qs); if (oqs->duration_sec != UINT32_MAX) { qs13->duration_sec = htonl(oqs->duration_sec); qs13->duration_nsec = htonl(oqs->duration_nsec); } else { qs13->duration_sec = OVS_BE32_MAX; qs13->duration_nsec = OVS_BE32_MAX; } } static void ofputil_queue_stats_to_ofp14(const struct ofputil_queue_stats *oqs, struct ofp14_queue_stats *qs14) { qs14->length = htons(sizeof *qs14); memset(qs14->pad, 0, sizeof qs14->pad); ofputil_queue_stats_to_ofp13(oqs, &qs14->qs); } /* Encode a queue stat for 'oqs' and append it to 'replies'. 
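 *
 * Illustrative usage sketch (an added example, not upstream text; it
 * mirrors ofputil_append_port_stat() above):
 *
 *     struct ovs_list replies;
 *     ofpmp_init(&replies, request);
 *     ofputil_append_queue_stat(&replies, &oqs);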
*/ void ofputil_append_queue_stat(struct ovs_list *replies, const struct ofputil_queue_stats *oqs) { switch (ofpmp_version(replies)) { case OFP13_VERSION: { struct ofp13_queue_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_queue_stats_to_ofp13(oqs, reply); break; } case OFP12_VERSION: case OFP11_VERSION: { struct ofp11_queue_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_queue_stats_to_ofp11(oqs, reply); break; } case OFP10_VERSION: { struct ofp10_queue_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_queue_stats_to_ofp10(oqs, reply); break; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp14_queue_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_queue_stats_to_ofp14(oqs, reply); break; } default: OVS_NOT_REACHED(); } } enum ofperr ofputil_decode_bundle_ctrl(const struct ofp_header *oh, struct ofputil_bundle_ctrl_msg *msg) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); ovs_assert(raw == OFPRAW_OFPT14_BUNDLE_CONTROL || raw == OFPRAW_ONFT13_BUNDLE_CONTROL); const struct ofp14_bundle_ctrl_msg *m = b.msg; msg->bundle_id = ntohl(m->bundle_id); msg->type = ntohs(m->type); msg->flags = ntohs(m->flags); return 0; } struct ofpbuf * ofputil_encode_bundle_ctrl_request(enum ofp_version ofp_version, struct ofputil_bundle_ctrl_msg *bc) { struct ofpbuf *request; struct ofp14_bundle_ctrl_msg *m; switch (ofp_version) { case OFP10_VERSION: case OFP11_VERSION: case OFP12_VERSION: ovs_fatal(0, "bundles need OpenFlow 1.3 or later " "(\'-O OpenFlow14\')"); case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: request = ofpraw_alloc(ofp_version == OFP13_VERSION ? OFPRAW_ONFT13_BUNDLE_CONTROL : OFPRAW_OFPT14_BUNDLE_CONTROL, ofp_version, 0); m = ofpbuf_put_zeros(request, sizeof *m); m->bundle_id = htonl(bc->bundle_id); m->type = htons(bc->type); m->flags = htons(bc->flags); break; default: OVS_NOT_REACHED(); } return request; } struct ofpbuf * ofputil_encode_bundle_ctrl_reply(const struct ofp_header *oh, struct ofputil_bundle_ctrl_msg *msg) { struct ofpbuf *buf; struct ofp14_bundle_ctrl_msg *m; buf = ofpraw_alloc_reply(oh->version == OFP13_VERSION ? OFPRAW_ONFT13_BUNDLE_CONTROL : OFPRAW_OFPT14_BUNDLE_CONTROL, oh, 0); m = ofpbuf_put_zeros(buf, sizeof *m); m->bundle_id = htonl(msg->bundle_id); m->type = htons(msg->type); m->flags = htons(msg->flags); return buf; } /* Return true for bundlable state change requests, false for other messages. */ static bool ofputil_is_bundlable(enum ofptype type) { switch (type) { /* Minimum required by OpenFlow 1.4. */ case OFPTYPE_PORT_MOD: case OFPTYPE_FLOW_MOD: /* Other supported types. */ case OFPTYPE_GROUP_MOD: case OFPTYPE_PACKET_OUT: return true; /* Nice to have later. */ case OFPTYPE_FLOW_MOD_TABLE_ID: case OFPTYPE_TABLE_MOD: case OFPTYPE_METER_MOD: case OFPTYPE_NXT_TLV_TABLE_MOD: /* Not to be bundlable. 
*/ case OFPTYPE_ECHO_REQUEST: case OFPTYPE_FEATURES_REQUEST: case OFPTYPE_GET_CONFIG_REQUEST: case OFPTYPE_SET_CONFIG: case OFPTYPE_BARRIER_REQUEST: case OFPTYPE_ROLE_REQUEST: case OFPTYPE_ECHO_REPLY: case OFPTYPE_SET_FLOW_FORMAT: case OFPTYPE_SET_PACKET_IN_FORMAT: case OFPTYPE_SET_CONTROLLER_ID: case OFPTYPE_FLOW_AGE: case OFPTYPE_FLOW_MONITOR_CANCEL: case OFPTYPE_SET_ASYNC_CONFIG: case OFPTYPE_GET_ASYNC_REQUEST: case OFPTYPE_DESC_STATS_REQUEST: case OFPTYPE_FLOW_STATS_REQUEST: case OFPTYPE_AGGREGATE_STATS_REQUEST: case OFPTYPE_TABLE_STATS_REQUEST: case OFPTYPE_TABLE_FEATURES_STATS_REQUEST: case OFPTYPE_TABLE_DESC_REQUEST: case OFPTYPE_PORT_STATS_REQUEST: case OFPTYPE_QUEUE_STATS_REQUEST: case OFPTYPE_PORT_DESC_STATS_REQUEST: case OFPTYPE_FLOW_MONITOR_STATS_REQUEST: case OFPTYPE_METER_STATS_REQUEST: case OFPTYPE_METER_CONFIG_STATS_REQUEST: case OFPTYPE_METER_FEATURES_STATS_REQUEST: case OFPTYPE_GROUP_STATS_REQUEST: case OFPTYPE_GROUP_DESC_STATS_REQUEST: case OFPTYPE_GROUP_FEATURES_STATS_REQUEST: case OFPTYPE_QUEUE_GET_CONFIG_REQUEST: case OFPTYPE_BUNDLE_CONTROL: case OFPTYPE_BUNDLE_ADD_MESSAGE: case OFPTYPE_HELLO: case OFPTYPE_ERROR: case OFPTYPE_FEATURES_REPLY: case OFPTYPE_GET_CONFIG_REPLY: case OFPTYPE_PACKET_IN: case OFPTYPE_FLOW_REMOVED: case OFPTYPE_PORT_STATUS: case OFPTYPE_BARRIER_REPLY: case OFPTYPE_QUEUE_GET_CONFIG_REPLY: case OFPTYPE_DESC_STATS_REPLY: case OFPTYPE_FLOW_STATS_REPLY: case OFPTYPE_QUEUE_STATS_REPLY: case OFPTYPE_PORT_STATS_REPLY: case OFPTYPE_TABLE_STATS_REPLY: case OFPTYPE_AGGREGATE_STATS_REPLY: case OFPTYPE_PORT_DESC_STATS_REPLY: case OFPTYPE_ROLE_REPLY: case OFPTYPE_FLOW_MONITOR_PAUSED: case OFPTYPE_FLOW_MONITOR_RESUMED: case OFPTYPE_FLOW_MONITOR_STATS_REPLY: case OFPTYPE_GET_ASYNC_REPLY: case OFPTYPE_GROUP_STATS_REPLY: case OFPTYPE_GROUP_DESC_STATS_REPLY: case OFPTYPE_GROUP_FEATURES_STATS_REPLY: case OFPTYPE_METER_STATS_REPLY: case OFPTYPE_METER_CONFIG_STATS_REPLY: case OFPTYPE_METER_FEATURES_STATS_REPLY: case OFPTYPE_TABLE_FEATURES_STATS_REPLY: case OFPTYPE_TABLE_DESC_REPLY: case OFPTYPE_ROLE_STATUS: case OFPTYPE_REQUESTFORWARD: case OFPTYPE_TABLE_STATUS: case OFPTYPE_NXT_TLV_TABLE_REQUEST: case OFPTYPE_NXT_TLV_TABLE_REPLY: case OFPTYPE_NXT_RESUME: case OFPTYPE_IPFIX_BRIDGE_STATS_REQUEST: case OFPTYPE_IPFIX_BRIDGE_STATS_REPLY: case OFPTYPE_IPFIX_FLOW_STATS_REQUEST: case OFPTYPE_IPFIX_FLOW_STATS_REPLY: case OFPTYPE_CT_FLUSH_ZONE: break; } return false; } enum ofperr ofputil_decode_bundle_add(const struct ofp_header *oh, struct ofputil_bundle_add_msg *msg, enum ofptype *typep) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); /* Pull the outer ofp_header. */ enum ofpraw raw = ofpraw_pull_assert(&b); ovs_assert(raw == OFPRAW_OFPT14_BUNDLE_ADD_MESSAGE || raw == OFPRAW_ONFT13_BUNDLE_ADD_MESSAGE); /* Pull the bundle_ctrl header. */ const struct ofp14_bundle_ctrl_msg *m = ofpbuf_pull(&b, sizeof *m); msg->bundle_id = ntohl(m->bundle_id); msg->flags = ntohs(m->flags); /* Pull the inner ofp_header. */ if (b.size < sizeof(struct ofp_header)) { return OFPERR_OFPBFC_MSG_BAD_LEN; } msg->msg = b.data; if (msg->msg->version != oh->version) { return OFPERR_OFPBFC_BAD_VERSION; } size_t inner_len = ntohs(msg->msg->length); if (inner_len < sizeof(struct ofp_header) || inner_len > b.size) { return OFPERR_OFPBFC_MSG_BAD_LEN; } if (msg->msg->xid != oh->xid) { return OFPERR_OFPBFC_MSG_BAD_XID; } /* Reject unbundlable messages. 
*/ enum ofptype type; enum ofperr error = ofptype_decode(&type, msg->msg); if (error) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPT14_BUNDLE_ADD_MESSAGE contained " "message is unparsable (%s)", ofperr_get_name(error)); return OFPERR_OFPBFC_MSG_UNSUP; /* 'error' would be confusing. */ } if (!ofputil_is_bundlable(type)) { VLOG_WARN_RL(&bad_ofmsg_rl, "%s message not allowed inside " "OFPT14_BUNDLE_ADD_MESSAGE", ofptype_get_name(type)); return OFPERR_OFPBFC_MSG_UNSUP; } if (typep) { *typep = type; } return 0; } struct ofpbuf * ofputil_encode_bundle_add(enum ofp_version ofp_version, struct ofputil_bundle_add_msg *msg) { struct ofpbuf *request; struct ofp14_bundle_ctrl_msg *m; /* Must use the same xid as the embedded message. */ request = ofpraw_alloc_xid(ofp_version == OFP13_VERSION ? OFPRAW_ONFT13_BUNDLE_ADD_MESSAGE : OFPRAW_OFPT14_BUNDLE_ADD_MESSAGE, ofp_version, msg->msg->xid, ntohs(msg->msg->length)); m = ofpbuf_put_zeros(request, sizeof *m); m->bundle_id = htonl(msg->bundle_id); m->flags = htons(msg->flags); ofpbuf_put(request, msg->msg, ntohs(msg->msg->length)); ofpmsg_update_length(request); return request; } static void encode_tlv_table_mappings(struct ofpbuf *b, struct ovs_list *mappings) { struct ofputil_tlv_map *map; LIST_FOR_EACH (map, list_node, mappings) { struct nx_tlv_map *nx_map; nx_map = ofpbuf_put_zeros(b, sizeof *nx_map); nx_map->option_class = htons(map->option_class); nx_map->option_type = map->option_type; nx_map->option_len = map->option_len; nx_map->index = htons(map->index); } } struct ofpbuf * ofputil_encode_tlv_table_mod(enum ofp_version ofp_version, struct ofputil_tlv_table_mod *ttm) { struct ofpbuf *b; struct nx_tlv_table_mod *nx_ttm; b = ofpraw_alloc(OFPRAW_NXT_TLV_TABLE_MOD, ofp_version, 0); nx_ttm = ofpbuf_put_zeros(b, sizeof *nx_ttm); nx_ttm->command = htons(ttm->command); encode_tlv_table_mappings(b, &ttm->mappings); return b; } static enum ofperr decode_tlv_table_mappings(struct ofpbuf *msg, unsigned int max_fields, struct ovs_list *mappings) { ovs_list_init(mappings); while (msg->size) { struct nx_tlv_map *nx_map; struct ofputil_tlv_map *map; nx_map = ofpbuf_pull(msg, sizeof *nx_map); map = xmalloc(sizeof *map); ovs_list_push_back(mappings, &map->list_node); map->option_class = ntohs(nx_map->option_class); map->option_type = nx_map->option_type; map->option_len = nx_map->option_len; if (map->option_len % 4 || map->option_len > TLV_MAX_OPT_SIZE) { VLOG_WARN_RL(&bad_ofmsg_rl, "tlv table option length (%u) is not a valid option size", map->option_len); ofputil_uninit_tlv_table(mappings); return OFPERR_NXTTMFC_BAD_OPT_LEN; } map->index = ntohs(nx_map->index); if (map->index >= max_fields) { VLOG_WARN_RL(&bad_ofmsg_rl, "tlv table field index (%u) is too large (max %u)", map->index, max_fields - 1); ofputil_uninit_tlv_table(mappings); return OFPERR_NXTTMFC_BAD_FIELD_IDX; } } return 0; } enum ofperr ofputil_decode_tlv_table_mod(const struct ofp_header *oh, struct ofputil_tlv_table_mod *ttm) { struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&msg); struct nx_tlv_table_mod *nx_ttm = ofpbuf_pull(&msg, sizeof *nx_ttm); ttm->command = ntohs(nx_ttm->command); if (ttm->command > NXTTMC_CLEAR) { VLOG_WARN_RL(&bad_ofmsg_rl, "tlv table mod command (%u) is out of range", ttm->command); return OFPERR_NXTTMFC_BAD_COMMAND; } return decode_tlv_table_mappings(&msg, TUN_METADATA_NUM_OPTS, &ttm->mappings); } struct ofpbuf * ofputil_encode_tlv_table_reply(const struct ofp_header *oh, struct ofputil_tlv_table_reply *ttr) { struct ofpbuf *b; struct 
nx_tlv_table_reply *nx_ttr; b = ofpraw_alloc_reply(OFPRAW_NXT_TLV_TABLE_REPLY, oh, 0); nx_ttr = ofpbuf_put_zeros(b, sizeof *nx_ttr); nx_ttr->max_option_space = htonl(ttr->max_option_space); nx_ttr->max_fields = htons(ttr->max_fields); encode_tlv_table_mappings(b, &ttr->mappings); return b; } /* Decodes the NXT_TLV_TABLE_REPLY message in 'oh' into '*ttr'. Returns 0 * if successful, otherwise an ofperr. * * The decoder verifies that the indexes in 'ttr->mappings' are less than * 'ttr->max_fields', but the caller must ensure, if necessary, that they are * less than TUN_METADATA_NUM_OPTS. */ enum ofperr ofputil_decode_tlv_table_reply(const struct ofp_header *oh, struct ofputil_tlv_table_reply *ttr) { struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&msg); struct nx_tlv_table_reply *nx_ttr = ofpbuf_pull(&msg, sizeof *nx_ttr); ttr->max_option_space = ntohl(nx_ttr->max_option_space); ttr->max_fields = ntohs(nx_ttr->max_fields); return decode_tlv_table_mappings(&msg, ttr->max_fields, &ttr->mappings); } void ofputil_uninit_tlv_table(struct ovs_list *mappings) { struct ofputil_tlv_map *map; LIST_FOR_EACH_POP (map, list_node, mappings) { free(map); } } const char * ofputil_async_msg_type_to_string(enum ofputil_async_msg_type type) { switch (type) { case OAM_PACKET_IN: return "PACKET_IN"; case OAM_PORT_STATUS: return "PORT_STATUS"; case OAM_FLOW_REMOVED: return "FLOW_REMOVED"; case OAM_ROLE_STATUS: return "ROLE_STATUS"; case OAM_TABLE_STATUS: return "TABLE_STATUS"; case OAM_REQUESTFORWARD: return "REQUESTFORWARD"; case OAM_N_TYPES: default: OVS_NOT_REACHED(); } } struct ofp14_async_prop { uint64_t prop_type; enum ofputil_async_msg_type oam; bool master; uint32_t allowed10, allowed14; }; #define AP_PAIR(SLAVE_PROP_TYPE, OAM, A10, A14) \ { SLAVE_PROP_TYPE, OAM, false, A10, (A14) ? (A14) : (A10) }, \ { (SLAVE_PROP_TYPE + 1), OAM, true, A10, (A14) ? (A14) : (A10) } static const struct ofp14_async_prop async_props[] = { AP_PAIR( 0, OAM_PACKET_IN, OFPR10_BITS, OFPR14_BITS), AP_PAIR( 2, OAM_PORT_STATUS, (1 << OFPPR_N_REASONS) - 1, 0), AP_PAIR( 4, OAM_FLOW_REMOVED, (1 << OVS_OFPRR_NONE) - 1, 0), AP_PAIR( 6, OAM_ROLE_STATUS, (1 << OFPCRR_N_REASONS) - 1, 0), AP_PAIR( 8, OAM_TABLE_STATUS, OFPTR_BITS, 0), AP_PAIR(10, OAM_REQUESTFORWARD, (1 << OFPRFR_N_REASONS) - 1, 0), }; #define FOR_EACH_ASYNC_PROP(VAR) \ for (const struct ofp14_async_prop *VAR = async_props; \ VAR < &async_props[ARRAY_SIZE(async_props)]; VAR++) static const struct ofp14_async_prop * get_ofp14_async_config_prop_by_prop_type(uint64_t prop_type) { FOR_EACH_ASYNC_PROP (ap) { if (prop_type == ap->prop_type) { return ap; } } return NULL; } static const struct ofp14_async_prop * get_ofp14_async_config_prop_by_oam(enum ofputil_async_msg_type oam, bool master) { FOR_EACH_ASYNC_PROP (ap) { if (ap->oam == oam && ap->master == master) { return ap; } } return NULL; } static uint32_t ofp14_async_prop_allowed(const struct ofp14_async_prop *prop, enum ofp_version version) { return version >= OFP14_VERSION ? prop->allowed14 : prop->allowed10; } static ovs_be32 encode_async_mask(const struct ofputil_async_cfg *src, const struct ofp14_async_prop *ap, enum ofp_version version) { uint32_t mask = ap->master ? 
src->master[ap->oam] : src->slave[ap->oam]; return htonl(mask & ofp14_async_prop_allowed(ap, version)); } static enum ofperr decode_async_mask(ovs_be32 src, const struct ofp14_async_prop *ap, enum ofp_version version, bool loose, struct ofputil_async_cfg *dst) { uint32_t mask = ntohl(src); uint32_t allowed = ofp14_async_prop_allowed(ap, version); if (mask & ~allowed) { OFPPROP_LOG(&bad_ofmsg_rl, loose, "bad value %#x for %s (allowed mask %#x)", mask, ofputil_async_msg_type_to_string(ap->oam), allowed); mask &= allowed; if (!loose) { return OFPERR_OFPACFC_INVALID; } } if (ap->oam == OAM_PACKET_IN) { if (mask & (1u << OFPR_NO_MATCH)) { mask |= 1u << OFPR_EXPLICIT_MISS; if (version < OFP13_VERSION) { mask |= 1u << OFPR_IMPLICIT_MISS; } } } uint32_t *array = ap->master ? dst->master : dst->slave; array[ap->oam] = mask; return 0; } static enum ofperr parse_async_tlv(const struct ofpbuf *property, const struct ofp14_async_prop *ap, struct ofputil_async_cfg *ac, enum ofp_version version, bool loose) { enum ofperr error; ovs_be32 mask; error = ofpprop_parse_be32(property, &mask); if (error) { return error; } if (ofpprop_is_experimenter(ap->prop_type)) { /* For experimenter properties, whether a property is for the master or * slave role is indicated by both 'type' and 'exp_type' in struct * ofp_prop_experimenter. Check that these are consistent. */ const struct ofp_prop_experimenter *ope = property->data; bool should_be_master = ope->type == htons(0xffff); if (should_be_master != ap->master) { VLOG_WARN_RL(&bad_ofmsg_rl, "async property type %#"PRIx16" " "indicates %s role but exp_type %"PRIu32" indicates " "%s role", ntohs(ope->type), should_be_master ? "master" : "slave", ntohl(ope->exp_type), ap->master ? "master" : "slave"); return OFPERR_OFPBPC_BAD_EXP_TYPE; } } return decode_async_mask(mask, ap, version, loose, ac); } static void decode_legacy_async_masks(const ovs_be32 masks[2], enum ofputil_async_msg_type oam, enum ofp_version version, struct ofputil_async_cfg *dst) { for (int i = 0; i < 2; i++) { bool master = i == 0; const struct ofp14_async_prop *ap = get_ofp14_async_config_prop_by_oam(oam, master); decode_async_mask(masks[i], ap, version, true, dst); } } /* Decodes the OpenFlow "set async config" request and "get async config * reply" message in '*oh' into an abstract form in 'ac'. * * Some versions of the "set async config" request change only some of the * settings and leave the others alone. This function uses 'basis' as the * initial state for decoding these. Other versions of the request change all * the settings; this function ignores 'basis' when decoding these. * * If 'loose' is true, this function ignores properties and values that it does * not understand, as a controller would want to do when interpreting * capabilities provided by a switch. If 'loose' is false, this function * treats unknown properties and values as an error, as a switch would want to * do when interpreting a configuration request made by a controller. * * Returns 0 if successful, otherwise an OFPERR_* value. * * Returns error code OFPERR_OFPACFC_INVALID if the value of mask is not in * the valid range of mask. 
* * Returns error code OFPERR_OFPACFC_UNSUPPORTED if the configuration is not * supported. */ enum ofperr ofputil_decode_set_async_config(const struct ofp_header *oh, bool loose, const struct ofputil_async_cfg *basis, struct ofputil_async_cfg *ac) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT13_SET_ASYNC || raw == OFPRAW_NXT_SET_ASYNC_CONFIG || raw == OFPRAW_OFPT13_GET_ASYNC_REPLY) { const struct nx_async_config *msg = ofpmsg_body(oh); *ac = OFPUTIL_ASYNC_CFG_INIT; decode_legacy_async_masks(msg->packet_in_mask, OAM_PACKET_IN, oh->version, ac); decode_legacy_async_masks(msg->port_status_mask, OAM_PORT_STATUS, oh->version, ac); decode_legacy_async_masks(msg->flow_removed_mask, OAM_FLOW_REMOVED, oh->version, ac); } else if (raw == OFPRAW_OFPT14_SET_ASYNC || raw == OFPRAW_OFPT14_GET_ASYNC_REPLY || raw == OFPRAW_NXT_SET_ASYNC_CONFIG2) { *ac = *basis; while (b.size > 0) { struct ofpbuf property; enum ofperr error; uint64_t type; error = ofpprop_pull__(&b, &property, 8, 0xfffe, &type); if (error) { return error; } const struct ofp14_async_prop *ap = get_ofp14_async_config_prop_by_prop_type(type); error = (ap ? parse_async_tlv(&property, ap, ac, oh->version, loose) : OFPPROP_UNKNOWN(loose, "async config", type)); if (error) { /* Most messages use OFPBPC_BAD_TYPE but async has its own (who * knows why; it's OpenFlow). */ if (error == OFPERR_OFPBPC_BAD_TYPE) { error = OFPERR_OFPACFC_UNSUPPORTED; } return error; } } } else { return OFPERR_OFPBRC_BAD_VERSION; } return 0; } static void encode_legacy_async_masks(const struct ofputil_async_cfg *ac, enum ofputil_async_msg_type oam, enum ofp_version version, ovs_be32 masks[2]) { for (int i = 0; i < 2; i++) { bool master = i == 0; const struct ofp14_async_prop *ap = get_ofp14_async_config_prop_by_oam(oam, master); masks[i] = encode_async_mask(ac, ap, version); } } static void ofputil_put_async_config__(const struct ofputil_async_cfg *ac, struct ofpbuf *buf, bool tlv, enum ofp_version version, uint32_t oams) { if (!tlv) { struct nx_async_config *msg = ofpbuf_put_zeros(buf, sizeof *msg); encode_legacy_async_masks(ac, OAM_PACKET_IN, version, msg->packet_in_mask); encode_legacy_async_masks(ac, OAM_PORT_STATUS, version, msg->port_status_mask); encode_legacy_async_masks(ac, OAM_FLOW_REMOVED, version, msg->flow_removed_mask); } else { FOR_EACH_ASYNC_PROP (ap) { if (oams & (1u << ap->oam)) { size_t ofs = buf->size; ofpprop_put_be32(buf, ap->prop_type, encode_async_mask(ac, ap, version)); /* For experimenter properties, we need to use type 0xffff for * master and 0xfffe for slave, which is what parse_async_tlv() * expects on the decode side. */ if (ofpprop_is_experimenter(ap->prop_type)) { struct ofp_prop_experimenter *ope = ofpbuf_at_assert(buf, ofs, sizeof *ope); ope->type = ap->master ? htons(0xffff) : htons(0xfffe); } } } } } /* Encodes and returns a reply to the OFPT_GET_ASYNC_REQUEST in 'oh' that * states that the asynchronous message configuration is 'ac'. */ struct ofpbuf * ofputil_encode_get_async_reply(const struct ofp_header *oh, const struct ofputil_async_cfg *ac) { enum ofpraw raw = (oh->version < OFP14_VERSION ? OFPRAW_OFPT13_GET_ASYNC_REPLY : OFPRAW_OFPT14_GET_ASYNC_REPLY); struct ofpbuf *reply = ofpraw_alloc_reply(raw, oh, 0); ofputil_put_async_config__(ac, reply, raw == OFPRAW_OFPT14_GET_ASYNC_REPLY, oh->version, UINT32_MAX); return reply; } /* Encodes and returns a message, in a format appropriate for OpenFlow version * 'ofp_version', that sets the asynchronous message configuration to 'ac'. 
* * Specify 'oams' as a bitmap of OAM_* that indicate the asynchronous messages * to configure. OF1.0 through OF1.3 can't natively configure a subset of * messages, so more messages than requested may be configured. OF1.0 through * OF1.3 also can't configure OVS extension OAM_* values, so if 'oams' includes * any extensions then this function encodes an Open vSwitch extension message * that does support configuring OVS extension OAM_*. */ struct ofpbuf * ofputil_encode_set_async_config(const struct ofputil_async_cfg *ac, uint32_t oams, enum ofp_version ofp_version) { enum ofpraw raw = (ofp_version >= OFP14_VERSION ? OFPRAW_OFPT14_SET_ASYNC : oams & OAM_EXTENSIONS ? OFPRAW_NXT_SET_ASYNC_CONFIG2 : ofp_version >= OFP13_VERSION ? OFPRAW_OFPT13_SET_ASYNC : OFPRAW_NXT_SET_ASYNC_CONFIG); struct ofpbuf *request = ofpraw_alloc(raw, ofp_version, 0); ofputil_put_async_config__(ac, request, (raw == OFPRAW_OFPT14_SET_ASYNC || raw == OFPRAW_NXT_SET_ASYNC_CONFIG2), ofp_version, oams); return request; } struct ofputil_async_cfg ofputil_async_cfg_default(enum ofp_version version) { /* We enable all of the OF1.4 reasons regardless of 'version' because the * reasons added in OF1.4 are just refinements of the OFPR_ACTION * introduced in OF1.0, breaking it into more specific categories. When we * encode these for earlier OpenFlow versions, we translate them into * OFPR_ACTION. */ uint32_t pin = OFPR14_BITS & ~(1u << OFPR_INVALID_TTL); pin |= 1u << OFPR_EXPLICIT_MISS; if (version <= OFP12_VERSION) { pin |= 1u << OFPR_IMPLICIT_MISS; } return (struct ofputil_async_cfg) { .master[OAM_PACKET_IN] = pin, .master[OAM_FLOW_REMOVED] = (version >= OFP14_VERSION ? OFPRR14_BITS : OFPRR10_BITS), .master[OAM_PORT_STATUS] = OFPPR_BITS, .slave[OAM_PORT_STATUS] = OFPPR_BITS, }; } static void ofputil_put_ofp14_table_desc(const struct ofputil_table_desc *td, struct ofpbuf *b, enum ofp_version version) { struct ofp14_table_desc *otd; struct ofp14_table_mod_prop_vacancy *otv; size_t start_otd; start_otd = b->size; ofpbuf_put_zeros(b, sizeof *otd); ofpprop_put_u32(b, OFPTMPT14_EVICTION, td->eviction_flags); otv = ofpbuf_put_zeros(b, sizeof *otv); otv->type = htons(OFPTMPT14_VACANCY); otv->length = htons(sizeof *otv); otv->vacancy_down = td->table_vacancy.vacancy_down; otv->vacancy_up = td->table_vacancy.vacancy_up; otv->vacancy = td->table_vacancy.vacancy; otd = ofpbuf_at_assert(b, start_otd, sizeof *otd); otd->length = htons(b->size - start_otd); otd->table_id = td->table_id; otd->config = ofputil_encode_table_config(OFPUTIL_TABLE_MISS_DEFAULT, td->eviction, td->vacancy, version); } /* Converts the abstract form of a "table status" message in '*ts' into an * OpenFlow message suitable for 'protocol', and returns that encoded form in * a buffer owned by the caller. */ struct ofpbuf * ofputil_encode_table_status(const struct ofputil_table_status *ts, enum ofputil_protocol protocol) { enum ofp_version version; struct ofpbuf *b; version = ofputil_protocol_to_ofp_version(protocol); if (version >= OFP14_VERSION) { enum ofpraw raw; struct ofp14_table_status *ots; raw = OFPRAW_OFPT14_TABLE_STATUS; b = ofpraw_alloc_xid(raw, version, htonl(0), 0); ots = ofpbuf_put_zeros(b, sizeof *ots); ots->reason = ts->reason; ofputil_put_ofp14_table_desc(&ts->desc, b, version); ofpmsg_update_length(b); return b; } else { return NULL; } } /* Decodes the OpenFlow "table status" message in '*oh' into an abstract form * in '*ts'. Returns 0 if successful, otherwise an OFPERR_* value. 
*/ enum ofperr ofputil_decode_table_status(const struct ofp_header *oh, struct ofputil_table_status *ts) { const struct ofp14_table_status *ots; struct ofpbuf b; enum ofperr error; enum ofpraw raw; ofpbuf_use_const(&b, oh, ntohs(oh->length)); raw = ofpraw_pull_assert(&b); ots = ofpbuf_pull(&b, sizeof *ots); if (raw == OFPRAW_OFPT14_TABLE_STATUS) { if (ots->reason != OFPTR_VACANCY_DOWN && ots->reason != OFPTR_VACANCY_UP) { return OFPERR_OFPBPC_BAD_VALUE; } ts->reason = ots->reason; error = ofputil_decode_table_desc(&b, &ts->desc, oh->version); return error; } else { return OFPERR_OFPBRC_BAD_VERSION; } }
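/* A minimal caller-side sketch of the iterative decode protocol documented
 * at ofputil_decode_queue_stats() above: each call pulls one stats entry
 * from 'msg' and returns 0, returns EOF once the message is exhausted, and
 * returns a positive errno value for a malformed entry.  The 'process_one'
 * callback is hypothetical, purely for illustration. */
static int
consume_queue_stats_reply(struct ofpbuf *msg,
                          void (*process_one)(
                              const struct ofputil_queue_stats *))
{
    struct ofputil_queue_stats qs;

    for (;;) {
        int retval = ofputil_decode_queue_stats(&qs, msg);
        if (retval == EOF) {
            return 0;           /* Consumed every entry successfully. */
        } else if (retval) {
            return retval;      /* Malformed entry: positive errno value. */
        }
        process_one(&qs);
    }
}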
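/* A minimal sketch of the encode side of bundles, assuming 'inner' already
 * holds a complete, encoded OpenFlow message: wrap it in a bundle-add for
 * 'bundle_id'.  ofputil_encode_bundle_add() reuses the inner message's xid
 * for the outer header, which ofputil_decode_bundle_add() on the receiving
 * side verifies.  The flag choice here is illustrative only. */
static struct ofpbuf *
wrap_in_bundle_add(enum ofp_version version, uint32_t bundle_id,
                   const struct ofpbuf *inner)
{
    struct ofputil_bundle_add_msg add = {
        .bundle_id = bundle_id,
        .flags = OFPBF_ATOMIC | OFPBF_ORDERED,
        .msg = inner->data,
    };

    return ofputil_encode_bundle_add(version, &add);
}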
./CrossVul/dataset_final_sorted/CWE-617/c/bad_389_0
crossvul-cpp_data_good_2571_0
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* kdc/do_as_req.c */ /* * Portions Copyright (C) 2007 Apple Inc. * Copyright 1990, 1991, 2007, 2008, 2009, 2013, 2014 by the * Massachusetts Institute of Technology. All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. * * * KDC Routines to deal with AS_REQ's */ /* * Copyright (c) 2006-2008, Novell, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The copyright holder's name is not used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "k5-int.h" #include "com_err.h" #include <syslog.h> #ifdef HAVE_NETINET_IN_H #include <sys/types.h> #include <netinet/in.h> #ifndef hpux #include <arpa/inet.h> #endif /* hpux */ #endif /* HAVE_NETINET_IN_H */ #include "kdc_util.h" #include "kdc_audit.h" #include "policy.h" #include <kadm5/admin.h> #include "adm_proto.h" #include "extern.h" static krb5_error_code prepare_error_as(struct kdc_request_state *, krb5_kdc_req *, krb5_db_entry *, int, krb5_pa_data **, krb5_boolean, krb5_principal, krb5_data **, const char *); /* Determine the key-expiration value according to RFC 4120 section 5.4.2. 
*/ static krb5_timestamp get_key_exp(krb5_db_entry *entry) { if (entry->expiration == 0) return entry->pw_expiration; if (entry->pw_expiration == 0) return entry->expiration; return ts_min(entry->expiration, entry->pw_expiration); } /* * Find the key in client for the most preferred enctype in req_enctypes. Fill * in *kb_out with the decrypted keyblock (which the caller must free) and set * *kd_out to an alias to that key data entry. Set *kd_out to NULL and leave * *kb_out zeroed if no key is found for any of the requested enctypes. * kb_out->enctype may differ from the enctype of *kd_out for DES enctypes; in * this case, kb_out->enctype is the requested enctype used to match the key * data entry. */ static krb5_error_code select_client_key(krb5_context context, krb5_db_entry *client, krb5_enctype *req_enctypes, int n_req_enctypes, krb5_keyblock *kb_out, krb5_key_data **kd_out) { krb5_error_code ret; krb5_key_data *kd; krb5_enctype etype; int i; memset(kb_out, 0, sizeof(*kb_out)); *kd_out = NULL; for (i = 0; i < n_req_enctypes; i++) { etype = req_enctypes[i]; if (!krb5_c_valid_enctype(etype)) continue; if (krb5_dbe_find_enctype(context, client, etype, -1, 0, &kd) == 0) { /* Decrypt the client key data and set its enctype to the request * enctype (which may differ from the key data enctype for DES). */ ret = krb5_dbe_decrypt_key_data(context, NULL, kd, kb_out, NULL); if (ret) return ret; kb_out->enctype = etype; *kd_out = kd; return 0; } } return 0; } struct as_req_state { loop_respond_fn respond; void *arg; krb5_principal_data client_princ; krb5_enc_tkt_part enc_tkt_reply; krb5_enc_kdc_rep_part reply_encpart; krb5_ticket ticket_reply; krb5_keyblock server_keyblock; krb5_keyblock client_keyblock; krb5_db_entry *client; krb5_db_entry *server; krb5_db_entry *local_tgt; krb5_db_entry *local_tgt_storage; krb5_key_data *client_key; krb5_kdc_req *request; struct krb5_kdcpreauth_rock_st rock; const char *status; krb5_pa_data **e_data; krb5_boolean typed_e_data; krb5_kdc_rep reply; krb5_timestamp kdc_time; krb5_timestamp authtime; krb5_keyblock session_key; unsigned int c_flags; krb5_data *req_pkt; krb5_data *inner_body; struct kdc_request_state *rstate; char *sname, *cname; void *pa_context; const krb5_fulladdr *local_addr; const krb5_fulladdr *remote_addr; krb5_data **auth_indicators; krb5_error_code preauth_err; kdc_realm_t *active_realm; krb5_audit_state *au_state; }; static void finish_process_as_req(struct as_req_state *state, krb5_error_code errcode) { krb5_key_data *server_key; krb5_keyblock *as_encrypting_key = NULL; krb5_data *response = NULL; const char *emsg = 0; int did_log = 0; loop_respond_fn oldrespond; void *oldarg; kdc_realm_t *kdc_active_realm = state->active_realm; krb5_audit_state *au_state = state->au_state; assert(state); oldrespond = state->respond; oldarg = state->arg; if (errcode) goto egress; au_state->stage = ENCR_REP; if ((errcode = validate_forwardable(state->request, *state->client, *state->server, state->kdc_time, &state->status))) { errcode += ERROR_TABLE_BASE_krb5; goto egress; } errcode = check_indicators(kdc_context, state->server, state->auth_indicators); if (errcode) { state->status = "HIGHER_AUTHENTICATION_REQUIRED"; goto egress; } state->ticket_reply.enc_part2 = &state->enc_tkt_reply; /* * Find the server key */ if ((errcode = krb5_dbe_find_enctype(kdc_context, state->server, -1, /* ignore keytype */ -1, /* Ignore salttype */ 0, /* Get highest kvno */ &server_key))) { state->status = "FINDING_SERVER_KEY"; goto egress; } /* * Convert server->key into a real 
key * (it may be encrypted in the database) * * server_keyblock is later used to generate auth data signatures */ if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL, server_key, &state->server_keyblock, NULL))) { state->status = "DECRYPT_SERVER_KEY"; goto egress; } /* Start assembling the response */ state->reply.msg_type = KRB5_AS_REP; state->reply.client = state->enc_tkt_reply.client; /* post canonicalization */ state->reply.ticket = &state->ticket_reply; state->reply_encpart.session = &state->session_key; if ((errcode = fetch_last_req_info(state->client, &state->reply_encpart.last_req))) { state->status = "FETCH_LAST_REQ"; goto egress; } state->reply_encpart.nonce = state->request->nonce; state->reply_encpart.key_exp = get_key_exp(state->client); state->reply_encpart.flags = state->enc_tkt_reply.flags; state->reply_encpart.server = state->ticket_reply.server; /* copy the time fields EXCEPT for authtime; its location * is used for ktime */ state->reply_encpart.times = state->enc_tkt_reply.times; state->reply_encpart.times.authtime = state->authtime = state->kdc_time; state->reply_encpart.caddrs = state->enc_tkt_reply.caddrs; state->reply_encpart.enc_padata = NULL; /* Fetch the padata info to be returned (do this before * authdata to handle possible replacement of reply key) */ errcode = return_padata(kdc_context, &state->rock, state->req_pkt, state->request, &state->reply, &state->client_keyblock, &state->pa_context); if (errcode) { state->status = "KDC_RETURN_PADATA"; goto egress; } /* If we didn't find a client long-term key and no preauth mechanism * replaced the reply key, error out now. */ if (state->client_keyblock.enctype == ENCTYPE_NULL) { state->status = "CANT_FIND_CLIENT_KEY"; errcode = KRB5KDC_ERR_ETYPE_NOSUPP; goto egress; } errcode = handle_authdata(kdc_context, state->c_flags, state->client, state->server, NULL, state->local_tgt, &state->client_keyblock, &state->server_keyblock, NULL, state->req_pkt, state->request, NULL, /* for_user_princ */ NULL, /* enc_tkt_request */ state->auth_indicators, &state->enc_tkt_reply); if (errcode) { krb5_klog_syslog(LOG_INFO, _("AS_REQ : handle_authdata (%d)"), errcode); state->status = "HANDLE_AUTHDATA"; goto egress; } errcode = krb5_encrypt_tkt_part(kdc_context, &state->server_keyblock, &state->ticket_reply); if (errcode) { state->status = "ENCRYPT_TICKET"; goto egress; } errcode = kau_make_tkt_id(kdc_context, &state->ticket_reply, &au_state->tkt_out_id); if (errcode) { state->status = "GENERATE_TICKET_ID"; goto egress; } state->ticket_reply.enc_part.kvno = server_key->key_data_kvno; errcode = kdc_fast_response_handle_padata(state->rstate, state->request, &state->reply, state->client_keyblock.enctype); if (errcode) { state->status = "MAKE_FAST_RESPONSE"; goto egress; } /* now encode/encrypt the response */ state->reply.enc_part.enctype = state->client_keyblock.enctype; errcode = kdc_fast_handle_reply_key(state->rstate, &state->client_keyblock, &as_encrypting_key); if (errcode) { state->status = "MAKE_FAST_REPLY_KEY"; goto egress; } errcode = return_enc_padata(kdc_context, state->req_pkt, state->request, as_encrypting_key, state->server, &state->reply_encpart, FALSE); if (errcode) { state->status = "KDC_RETURN_ENC_PADATA"; goto egress; } if (kdc_fast_hide_client(state->rstate)) state->reply.client = (krb5_principal)krb5_anonymous_principal(); errcode = krb5_encode_kdc_rep(kdc_context, KRB5_AS_REP, &state->reply_encpart, 0, as_encrypting_key, &state->reply, &response); if (state->client_key != NULL) state->reply.enc_part.kvno = 
state->client_key->key_data_kvno; if (errcode) { state->status = "ENCODE_KDC_REP"; goto egress; } /* these parts are left on as a courtesy from krb5_encode_kdc_rep so we can use them in raw form if needed. But, we don't... */ memset(state->reply.enc_part.ciphertext.data, 0, state->reply.enc_part.ciphertext.length); free(state->reply.enc_part.ciphertext.data); log_as_req(kdc_context, state->local_addr, state->remote_addr, state->request, &state->reply, state->client, state->cname, state->server, state->sname, state->authtime, 0, 0, 0); did_log = 1; egress: if (errcode != 0 && state->status == NULL) state->status = "UNKNOWN_REASON"; au_state->status = state->status; au_state->reply = &state->reply; kau_as_req(kdc_context, (errcode || state->preauth_err) ? FALSE : TRUE, au_state); kau_free_kdc_req(au_state); free_padata_context(kdc_context, state->pa_context); if (as_encrypting_key) krb5_free_keyblock(kdc_context, as_encrypting_key); if (errcode) emsg = krb5_get_error_message(kdc_context, errcode); if (state->status) { log_as_req(kdc_context, state->local_addr, state->remote_addr, state->request, &state->reply, state->client, state->cname, state->server, state->sname, state->authtime, state->status, errcode, emsg); did_log = 1; } if (errcode) { if (state->status == 0) { state->status = emsg; } if (errcode != KRB5KDC_ERR_DISCARD) { errcode -= ERROR_TABLE_BASE_krb5; if (errcode < 0 || errcode > KRB_ERR_MAX) errcode = KRB_ERR_GENERIC; errcode = prepare_error_as(state->rstate, state->request, state->local_tgt, errcode, state->e_data, state->typed_e_data, ((state->client != NULL) ? state->client->princ : NULL), &response, state->status); state->status = 0; } } if (emsg) krb5_free_error_message(kdc_context, emsg); if (state->enc_tkt_reply.authorization_data != NULL) krb5_free_authdata(kdc_context, state->enc_tkt_reply.authorization_data); if (state->server_keyblock.contents != NULL) krb5_free_keyblock_contents(kdc_context, &state->server_keyblock); if (state->client_keyblock.contents != NULL) krb5_free_keyblock_contents(kdc_context, &state->client_keyblock); if (state->reply.padata != NULL) krb5_free_pa_data(kdc_context, state->reply.padata); if (state->reply_encpart.enc_padata) krb5_free_pa_data(kdc_context, state->reply_encpart.enc_padata); if (state->cname != NULL) free(state->cname); if (state->sname != NULL) free(state->sname); krb5_db_free_principal(kdc_context, state->client); krb5_db_free_principal(kdc_context, state->server); krb5_db_free_principal(kdc_context, state->local_tgt_storage); if (state->session_key.contents != NULL) krb5_free_keyblock_contents(kdc_context, &state->session_key); if (state->ticket_reply.enc_part.ciphertext.data != NULL) { memset(state->ticket_reply.enc_part.ciphertext.data , 0, state->ticket_reply.enc_part.ciphertext.length); free(state->ticket_reply.enc_part.ciphertext.data); } krb5_free_pa_data(kdc_context, state->e_data); krb5_free_data(kdc_context, state->inner_body); kdc_free_rstate(state->rstate); krb5_free_kdc_req(kdc_context, state->request); k5_free_data_ptr_list(state->auth_indicators); assert(did_log != 0); free(state); (*oldrespond)(oldarg, errcode, response); } static void finish_missing_required_preauth(void *arg) { struct as_req_state *state = (struct as_req_state *)arg; finish_process_as_req(state, state->preauth_err); } static void finish_preauth(void *arg, krb5_error_code code) { struct as_req_state *state = arg; krb5_error_code real_code = code; if (code) { if (vague_errors) code = KRB5KRB_ERR_GENERIC; state->status = "PREAUTH_FAILED"; if 
(real_code == KRB5KDC_ERR_PREAUTH_FAILED) { state->preauth_err = code; get_preauth_hint_list(state->request, &state->rock, &state->e_data, finish_missing_required_preauth, state); return; } } else { /* * Final check before handing out ticket: If the client requires * preauthentication, verify that the proper kind of * preauthentication was carried out. */ state->status = missing_required_preauth(state->client, state->server, &state->enc_tkt_reply); if (state->status) { state->preauth_err = KRB5KDC_ERR_PREAUTH_REQUIRED; get_preauth_hint_list(state->request, &state->rock, &state->e_data, finish_missing_required_preauth, state); return; } } finish_process_as_req(state, code); } /*ARGSUSED*/ void process_as_req(krb5_kdc_req *request, krb5_data *req_pkt, const krb5_fulladdr *local_addr, const krb5_fulladdr *remote_addr, kdc_realm_t *kdc_active_realm, verto_ctx *vctx, loop_respond_fn respond, void *arg) { krb5_error_code errcode; unsigned int s_flags = 0; krb5_data encoded_req_body; krb5_enctype useenctype; struct as_req_state *state; krb5_audit_state *au_state = NULL; state = k5alloc(sizeof(*state), &errcode); if (state == NULL) { (*respond)(arg, errcode, NULL); return; } state->respond = respond; state->arg = arg; state->request = request; state->req_pkt = req_pkt; state->local_addr = local_addr; state->remote_addr = remote_addr; state->active_realm = kdc_active_realm; errcode = kdc_make_rstate(kdc_active_realm, &state->rstate); if (errcode != 0) { (*respond)(arg, errcode, NULL); free(state); return; } /* Initialize audit state. */ errcode = kau_init_kdc_req(kdc_context, state->request, remote_addr, &au_state); if (errcode) { (*respond)(arg, errcode, NULL); kdc_free_rstate(state->rstate); free(state); return; } state->au_state = au_state; if (state->request->msg_type != KRB5_AS_REQ) { state->status = "VALIDATE_MESSAGE_TYPE"; errcode = KRB5_BADMSGTYPE; goto errout; } /* Seed the audit trail with the request ID and basic information. */ kau_as_req(kdc_context, TRUE, au_state); if (fetch_asn1_field((unsigned char *) req_pkt->data, 1, 4, &encoded_req_body) != 0) { errcode = ASN1_BAD_ID; state->status = "FETCH_REQ_BODY"; goto errout; } errcode = kdc_find_fast(&state->request, &encoded_req_body, NULL, NULL, state->rstate, &state->inner_body); if (errcode) { state->status = "FIND_FAST"; goto errout; } if (state->inner_body == NULL) { /* Not a FAST request; copy the encoded request body. */ errcode = krb5_copy_data(kdc_context, &encoded_req_body, &state->inner_body); if (errcode) { state->status = "COPY_REQ_BODY"; goto errout; } } au_state->request = state->request; state->rock.request = state->request; state->rock.inner_body = state->inner_body; state->rock.rstate = state->rstate; state->rock.vctx = vctx; state->rock.auth_indicators = &state->auth_indicators; if (!state->request->client) { state->status = "NULL_CLIENT"; errcode = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; goto errout; } if ((errcode = krb5_unparse_name(kdc_context, state->request->client, &state->cname))) { state->status = "UNPARSE_CLIENT"; goto errout; } limit_string(state->cname); if (!state->request->server) { state->status = "NULL_SERVER"; errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto errout; } if ((errcode = krb5_unparse_name(kdc_context, state->request->server, &state->sname))) { state->status = "UNPARSE_SERVER"; goto errout; } limit_string(state->sname); /* * We set KRB5_KDB_FLAG_CLIENT_REFERRALS_ONLY as a hint * to the backend to return naming information in lieu * of cross realm TGS entries. 
*/ setflag(state->c_flags, KRB5_KDB_FLAG_CLIENT_REFERRALS_ONLY); /* * Note that according to the referrals draft we should * always canonicalize enterprise principal names. */ if (isflagset(state->request->kdc_options, KDC_OPT_CANONICALIZE) || state->request->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) { setflag(state->c_flags, KRB5_KDB_FLAG_CANONICALIZE); setflag(state->c_flags, KRB5_KDB_FLAG_ALIAS_OK); } if (include_pac_p(kdc_context, state->request)) { setflag(state->c_flags, KRB5_KDB_FLAG_INCLUDE_PAC); } errcode = krb5_db_get_principal(kdc_context, state->request->client, state->c_flags, &state->client); if (errcode == KRB5_KDB_CANTLOCK_DB) errcode = KRB5KDC_ERR_SVC_UNAVAILABLE; if (errcode == KRB5_KDB_NOENTRY) { state->status = "CLIENT_NOT_FOUND"; if (vague_errors) errcode = KRB5KRB_ERR_GENERIC; else errcode = KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; goto errout; } else if (errcode) { state->status = "LOOKING_UP_CLIENT"; goto errout; } state->rock.client = state->client; /* * If the backend returned a principal that is not in the local * realm, then we need to refer the client to that realm. */ if (!is_local_principal(kdc_active_realm, state->client->princ)) { /* Entry is a referral to another realm */ state->status = "REFERRAL"; au_state->cl_realm = &state->client->princ->realm; errcode = KRB5KDC_ERR_WRONG_REALM; goto errout; } au_state->stage = SRVC_PRINC; s_flags = 0; setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK); if (isflagset(state->request->kdc_options, KDC_OPT_CANONICALIZE)) { setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE); } errcode = krb5_db_get_principal(kdc_context, state->request->server, s_flags, &state->server); if (errcode == KRB5_KDB_CANTLOCK_DB) errcode = KRB5KDC_ERR_SVC_UNAVAILABLE; if (errcode == KRB5_KDB_NOENTRY) { state->status = "SERVER_NOT_FOUND"; errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto errout; } else if (errcode) { state->status = "LOOKING_UP_SERVER"; goto errout; } errcode = get_local_tgt(kdc_context, &state->request->server->realm, state->server, &state->local_tgt, &state->local_tgt_storage); if (errcode) { state->status = "GET_LOCAL_TGT"; goto errout; } au_state->stage = VALIDATE_POL; if ((errcode = krb5_timeofday(kdc_context, &state->kdc_time))) { state->status = "TIMEOFDAY"; goto errout; } state->authtime = state->kdc_time; /* for audit_as_request() */ if ((errcode = validate_as_request(kdc_active_realm, state->request, *state->client, *state->server, state->kdc_time, &state->status, &state->e_data))) { if (!state->status) state->status = "UNKNOWN_REASON"; errcode += ERROR_TABLE_BASE_krb5; goto errout; } au_state->stage = ISSUE_TKT; /* * Select the keytype for the ticket session key. */ if ((useenctype = select_session_keytype(kdc_active_realm, state->server, state->request->nktypes, state->request->ktype)) == 0) { /* unsupported ktype */ state->status = "BAD_ENCRYPTION_TYPE"; errcode = KRB5KDC_ERR_ETYPE_NOSUPP; goto errout; } if ((errcode = krb5_c_make_random_key(kdc_context, useenctype, &state->session_key))) { state->status = "MAKE_RANDOM_KEY"; goto errout; } /* * Canonicalization is only effective if we are issuing a TGT * (the intention is to allow support for Windows "short" realm * aliases, nothing more). */ if (isflagset(s_flags, KRB5_KDB_FLAG_CANONICALIZE) && krb5_is_tgs_principal(state->request->server) && krb5_is_tgs_principal(state->server->princ)) { state->ticket_reply.server = state->server->princ; } else { state->ticket_reply.server = state->request->server; } /* Copy options that request the corresponding ticket flags. 
*/ state->enc_tkt_reply.flags = OPTS2FLAGS(state->request->kdc_options); state->enc_tkt_reply.times.authtime = state->authtime; setflag(state->enc_tkt_reply.flags, TKT_FLG_INITIAL); setflag(state->enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP); /* * It should be noted that local policy may affect the * processing of any of these flags. For example, some * realms may refuse to issue renewable tickets */ state->enc_tkt_reply.session = &state->session_key; if (isflagset(state->c_flags, KRB5_KDB_FLAG_CANONICALIZE)) { state->client_princ = *(state->client->princ); } else { state->client_princ = *(state->request->client); /* The realm is always canonicalized */ state->client_princ.realm = state->client->princ->realm; } state->enc_tkt_reply.client = &state->client_princ; state->enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS; state->enc_tkt_reply.transited.tr_contents = empty_string; if (isflagset(state->request->kdc_options, KDC_OPT_POSTDATED)) { setflag(state->enc_tkt_reply.flags, TKT_FLG_INVALID); state->enc_tkt_reply.times.starttime = state->request->from; } else state->enc_tkt_reply.times.starttime = state->kdc_time; kdc_get_ticket_endtime(kdc_active_realm, state->enc_tkt_reply.times.starttime, kdc_infinity, state->request->till, state->client, state->server, &state->enc_tkt_reply.times.endtime); kdc_get_ticket_renewtime(kdc_active_realm, state->request, NULL, state->client, state->server, &state->enc_tkt_reply); /* * starttime is optional, and treated as authtime if not present. * so we can nuke it if it matches */ if (state->enc_tkt_reply.times.starttime == state->enc_tkt_reply.times.authtime) state->enc_tkt_reply.times.starttime = 0; state->enc_tkt_reply.caddrs = state->request->addresses; state->enc_tkt_reply.authorization_data = 0; /* If anonymous requests are being used, adjust the realm of the client * principal. */ if (isflagset(state->request->kdc_options, KDC_OPT_REQUEST_ANONYMOUS)) { if (!krb5_principal_compare_any_realm(kdc_context, state->request->client, krb5_anonymous_principal())) { errcode = KRB5KDC_ERR_BADOPTION; /* Anonymous requested but anonymous principal not used.*/ state->status = "VALIDATE_ANONYMOUS_PRINCIPAL"; goto errout; } krb5_free_principal(kdc_context, state->request->client); state->request->client = NULL; errcode = krb5_copy_principal(kdc_context, krb5_anonymous_principal(), &state->request->client); if (errcode) { state->status = "COPY_ANONYMOUS_PRINCIPAL"; goto errout; } state->enc_tkt_reply.client = state->request->client; setflag(state->client->attributes, KRB5_KDB_REQUIRES_PRE_AUTH); } errcode = select_client_key(kdc_context, state->client, state->request->ktype, state->request->nktypes, &state->client_keyblock, &state->client_key); if (errcode) { state->status = "DECRYPT_CLIENT_KEY"; goto errout; } if (state->client_key != NULL) { state->rock.client_key = state->client_key; state->rock.client_keyblock = &state->client_keyblock; } errcode = kdc_fast_read_cookie(kdc_context, state->rstate, state->request, state->local_tgt); if (errcode) { state->status = "READ_COOKIE"; goto errout; } /* * Check the preauthentication if it is there. 
*/ if (state->request->padata) { check_padata(kdc_context, &state->rock, state->req_pkt, state->request, &state->enc_tkt_reply, &state->pa_context, &state->e_data, &state->typed_e_data, finish_preauth, state); } else finish_preauth(state, 0); return; errout: finish_process_as_req(state, errcode); } static krb5_error_code prepare_error_as(struct kdc_request_state *rstate, krb5_kdc_req *request, krb5_db_entry *local_tgt, int error, krb5_pa_data **e_data_in, krb5_boolean typed_e_data, krb5_principal canon_client, krb5_data **response, const char *status) { krb5_error errpkt; krb5_error_code retval; krb5_data *scratch = NULL, *e_data_asn1 = NULL, *fast_edata = NULL; krb5_pa_data **e_data = NULL, *cookie = NULL; kdc_realm_t *kdc_active_realm = rstate->realm_data; size_t count; errpkt.magic = KV5M_ERROR; if (e_data_in != NULL) { /* Add a PA-FX-COOKIE to e_data_in. e_data is a shallow copy * containing aliases. */ for (count = 0; e_data_in[count] != NULL; count++); e_data = calloc(count + 2, sizeof(*e_data)); if (e_data == NULL) return ENOMEM; memcpy(e_data, e_data_in, count * sizeof(*e_data)); retval = kdc_fast_make_cookie(kdc_context, rstate, local_tgt, request->client, &cookie); e_data[count] = cookie; } errpkt.ctime = request->nonce; errpkt.cusec = 0; retval = krb5_us_timeofday(kdc_context, &errpkt.stime, &errpkt.susec); if (retval) goto cleanup; errpkt.error = error; errpkt.server = request->server; errpkt.client = (error == KDC_ERR_WRONG_REALM) ? canon_client : request->client; errpkt.text = string2data((char *)status); if (e_data != NULL) { if (typed_e_data) retval = encode_krb5_typed_data(e_data, &e_data_asn1); else retval = encode_krb5_padata_sequence(e_data, &e_data_asn1); if (retval) goto cleanup; errpkt.e_data = *e_data_asn1; } else errpkt.e_data = empty_data(); retval = kdc_fast_handle_error(kdc_context, rstate, request, e_data, &errpkt, &fast_edata); if (retval) goto cleanup; if (fast_edata != NULL) errpkt.e_data = *fast_edata; scratch = k5alloc(sizeof(*scratch), &retval); if (scratch == NULL) goto cleanup; if (kdc_fast_hide_client(rstate) && errpkt.client != NULL) errpkt.client = (krb5_principal)krb5_anonymous_principal(); retval = krb5_mk_error(kdc_context, &errpkt, scratch); if (retval) goto cleanup; *response = scratch; scratch = NULL; cleanup: krb5_free_data(kdc_context, fast_edata); krb5_free_data(kdc_context, e_data_asn1); free(scratch); free(e_data); if (cookie != NULL) free(cookie->contents); free(cookie); return retval; }
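/* An illustrative sketch (not called from this file) of how
 * select_client_key() above honors enctype preference order: the first
 * enctype in the request list for which 'client' has key data wins, and
 * the returned keyblock's enctype is forced to the *requested* enctype,
 * which matters for enctype families that share key data.  The preference
 * list below is an arbitrary example. */
static krb5_error_code
pick_reply_key_example(krb5_context context, krb5_db_entry *client)
{
    krb5_enctype prefs[] = { ENCTYPE_AES256_CTS_HMAC_SHA1_96,
                             ENCTYPE_AES128_CTS_HMAC_SHA1_96 };
    krb5_keyblock kb;
    krb5_key_data *kd;
    krb5_error_code ret;

    ret = select_client_key(context, client, prefs, 2, &kb, &kd);
    if (ret)
        return ret;
    if (kd == NULL) {
        /* No long-term key exists for any requested enctype. */
        return KRB5KDC_ERR_ETYPE_NOSUPP;
    }
    /* ... kb would serve as the AS reply key here ... */
    krb5_free_keyblock_contents(context, &kb);
    return 0;
}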
./CrossVul/dataset_final_sorted/CWE-617/c/good_2571_0
crossvul-cpp_data_bad_390_0
/* * Copyright (c) 2009-2017 Nicira, Inc. * Copyright (c) 2010 Jean Tourrilhes - HP-Labs. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <config.h> #include <errno.h> #include <inttypes.h> #include <stdbool.h> #include <stdlib.h> #include <unistd.h> #include "bitmap.h" #include "bundles.h" #include "byte-order.h" #include "classifier.h" #include "connectivity.h" #include "connmgr.h" #include "coverage.h" #include "dp-packet.h" #include "hash.h" #include "openvswitch/hmap.h" #include "netdev.h" #include "nx-match.h" #include "ofproto.h" #include "ofproto-provider.h" #include "openflow/nicira-ext.h" #include "openflow/openflow.h" #include "openvswitch/dynamic-string.h" #include "openvswitch/meta-flow.h" #include "openvswitch/ofp-actions.h" #include "openvswitch/ofp-errors.h" #include "openvswitch/ofp-msgs.h" #include "openvswitch/ofp-print.h" #include "openvswitch/ofp-util.h" #include "openvswitch/ofpbuf.h" #include "openvswitch/vlog.h" #include "ovs-rcu.h" #include "packets.h" #include "pinsched.h" #include "poll-loop.h" #include "random.h" #include "seq.h" #include "openvswitch/shash.h" #include "simap.h" #include "smap.h" #include "sset.h" #include "timeval.h" #include "tun-metadata.h" #include "unaligned.h" #include "unixctl.h" #include "util.h" VLOG_DEFINE_THIS_MODULE(ofproto); COVERAGE_DEFINE(ofproto_flush); COVERAGE_DEFINE(ofproto_packet_out); COVERAGE_DEFINE(ofproto_queue_req); COVERAGE_DEFINE(ofproto_recv_openflow); COVERAGE_DEFINE(ofproto_reinit_ports); COVERAGE_DEFINE(ofproto_update_port); /* Default fields to use for prefix tries in each flow table, unless something * else is configured. */ const enum mf_field_id default_prefix_fields[2] = { MFF_IPV4_DST, MFF_IPV4_SRC }; /* oftable. */ static void oftable_init(struct oftable *); static void oftable_destroy(struct oftable *); static void oftable_set_name(struct oftable *, const char *name); static enum ofperr evict_rules_from_table(struct oftable *) OVS_REQUIRES(ofproto_mutex); static void oftable_configure_eviction(struct oftable *, unsigned int eviction, const struct mf_subfield *fields, size_t n_fields) OVS_REQUIRES(ofproto_mutex); /* This is the only combination of OpenFlow eviction flags that OVS supports: a * combination of OF1.4+ importance, the remaining lifetime of the flow, and * fairness based on user-specified fields. */ #define OFPROTO_EVICTION_FLAGS \ (OFPTMPEF14_OTHER | OFPTMPEF14_IMPORTANCE | OFPTMPEF14_LIFETIME) /* A set of rules within a single OpenFlow table (oftable) that have the same * values for the oftable's eviction_fields. A rule to be evicted, when one is * needed, is taken from the eviction group that contains the greatest number * of rules. * * An oftable owns any number of eviction groups, each of which contains any * number of rules. * * Membership in an eviction group is imprecise, based on the hash of the * oftable's eviction_fields (in the eviction_group's id_node.hash member). 
* That is, if two rules have different eviction_fields, but those * eviction_fields hash to the same value, then they will belong to the same * eviction_group anyway. * * (When eviction is not enabled on an oftable, we don't track any eviction * groups, to save time and space.) */ struct eviction_group { struct hmap_node id_node; /* In oftable's "eviction_groups_by_id". */ struct heap_node size_node; /* In oftable's "eviction_groups_by_size". */ struct heap rules; /* Contains "struct rule"s. */ }; static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep) OVS_REQUIRES(ofproto_mutex); static uint64_t rule_eviction_priority(struct ofproto *ofproto, struct rule *) OVS_REQUIRES(ofproto_mutex); static void eviction_group_add_rule(struct rule *) OVS_REQUIRES(ofproto_mutex); static void eviction_group_remove_rule(struct rule *) OVS_REQUIRES(ofproto_mutex); static void rule_criteria_init(struct rule_criteria *, uint8_t table_id, const struct match *match, int priority, ovs_version_t version, ovs_be64 cookie, ovs_be64 cookie_mask, ofp_port_t out_port, uint32_t out_group); static void rule_criteria_require_rw(struct rule_criteria *, bool can_write_readonly); static void rule_criteria_destroy(struct rule_criteria *); static enum ofperr collect_rules_loose(struct ofproto *, const struct rule_criteria *, struct rule_collection *); struct learned_cookie { union { /* In struct ofproto's 'learned_cookies' hmap. */ struct hmap_node hmap_node OVS_GUARDED_BY(ofproto_mutex); /* In 'dead_cookies' list when removed from hmap. */ struct ovs_list list_node; } u; /* Key. */ ovs_be64 cookie OVS_GUARDED_BY(ofproto_mutex); uint8_t table_id OVS_GUARDED_BY(ofproto_mutex); /* Number of references from "learn" actions. * * When this drops to 0, all of the flows in 'table_id' with the specified * 'cookie' are deleted. */ int n OVS_GUARDED_BY(ofproto_mutex); }; static const struct ofpact_learn *next_learn_with_delete( const struct rule_actions *, const struct ofpact_learn *start); static void learned_cookies_inc(struct ofproto *, const struct rule_actions *) OVS_REQUIRES(ofproto_mutex); static void learned_cookies_dec(struct ofproto *, const struct rule_actions *, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex); static void learned_cookies_flush(struct ofproto *, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex); /* ofport. */ static void ofport_destroy__(struct ofport *) OVS_EXCLUDED(ofproto_mutex); static void ofport_destroy(struct ofport *, bool del); static bool ofport_is_mtu_overridden(const struct ofproto *, const struct ofport *); static int update_port(struct ofproto *, const char *devname); static int init_ports(struct ofproto *); static void reinit_ports(struct ofproto *); static long long int ofport_get_usage(const struct ofproto *, ofp_port_t ofp_port); static void ofport_set_usage(struct ofproto *, ofp_port_t ofp_port, long long int last_used); static void ofport_remove_usage(struct ofproto *, ofp_port_t ofp_port); /* Ofport usage. * * Keeps track of the currently used and recently used ofport values and is * used to prevent immediate recycling of ofport values. */ struct ofport_usage { struct hmap_node hmap_node; /* In struct ofproto's "ofport_usage" hmap. */ ofp_port_t ofp_port; /* OpenFlow port number. */ long long int last_used; /* Last time the 'ofp_port' was used. LLONG_MAX represents in-use ofports. */ }; /* rule. 
*/ static void ofproto_rule_send_removed(struct rule *) OVS_EXCLUDED(ofproto_mutex); static bool rule_is_readonly(const struct rule *); static void ofproto_rule_insert__(struct ofproto *, struct rule *) OVS_REQUIRES(ofproto_mutex); static void ofproto_rule_remove__(struct ofproto *, struct rule *) OVS_REQUIRES(ofproto_mutex); /* The source of an OpenFlow request. * * A table modification request can be generated externally, via OpenFlow, or * internally through a function call. This structure indicates the source of * an OpenFlow-generated table modification. For an internal flow_mod, it * isn't meaningful and thus supplied as NULL. */ struct openflow_mod_requester { struct ofconn *ofconn; /* Connection on which flow_mod arrived. */ const struct ofp_header *request; }; /* OpenFlow. */ static enum ofperr ofproto_rule_create(struct ofproto *, struct cls_rule *, uint8_t table_id, ovs_be64 new_cookie, uint16_t idle_timeout, uint16_t hard_timeout, enum ofputil_flow_mod_flags flags, uint16_t importance, const struct ofpact *ofpacts, size_t ofpacts_len, uint64_t match_tlv_bitmap, uint64_t ofpacts_tlv_bitmap, struct rule **new_rule) OVS_NO_THREAD_SAFETY_ANALYSIS; static void replace_rule_start(struct ofproto *, struct ofproto_flow_mod *, struct rule *old_rule, struct rule *new_rule) OVS_REQUIRES(ofproto_mutex); static void replace_rule_revert(struct ofproto *, struct rule *old_rule, struct rule *new_rule) OVS_REQUIRES(ofproto_mutex); static void replace_rule_finish(struct ofproto *, struct ofproto_flow_mod *, const struct openflow_mod_requester *, struct rule *old_rule, struct rule *new_rule, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex); static void delete_flows__(struct rule_collection *, enum ofp_flow_removed_reason, const struct openflow_mod_requester *) OVS_REQUIRES(ofproto_mutex); static bool ofproto_group_exists(const struct ofproto *, uint32_t group_id); static void handle_openflow(struct ofconn *, const struct ofpbuf *); static enum ofperr ofproto_flow_mod_init(struct ofproto *, struct ofproto_flow_mod *, const struct ofputil_flow_mod *fm, struct rule *) OVS_EXCLUDED(ofproto_mutex); static enum ofperr ofproto_flow_mod_start(struct ofproto *, struct ofproto_flow_mod *) OVS_REQUIRES(ofproto_mutex); static void ofproto_flow_mod_revert(struct ofproto *, struct ofproto_flow_mod *) OVS_REQUIRES(ofproto_mutex); static void ofproto_flow_mod_finish(struct ofproto *, struct ofproto_flow_mod *, const struct openflow_mod_requester *) OVS_REQUIRES(ofproto_mutex); static enum ofperr handle_flow_mod__(struct ofproto *, const struct ofputil_flow_mod *, const struct openflow_mod_requester *) OVS_EXCLUDED(ofproto_mutex); static void calc_duration(long long int start, long long int now, uint32_t *sec, uint32_t *nsec); /* ofproto. */ static uint64_t pick_datapath_id(const struct ofproto *); static uint64_t pick_fallback_dpid(void); static void ofproto_destroy__(struct ofproto *); static void update_mtu(struct ofproto *, struct ofport *); static void update_mtu_ofproto(struct ofproto *); static void meter_delete(struct ofproto *, uint32_t first, uint32_t last); static void meter_insert_rule(struct rule *); /* unixctl. */ static void ofproto_unixctl_init(void); /* All registered ofproto classes, in probe order. */ static const struct ofproto_class **ofproto_classes; static size_t n_ofproto_classes; static size_t allocated_ofproto_classes; /* Global lock that protects all flow table operations. 
 */
struct ovs_mutex ofproto_mutex = OVS_MUTEX_INITIALIZER;

unsigned ofproto_flow_limit = OFPROTO_FLOW_LIMIT_DEFAULT;
unsigned ofproto_max_idle = OFPROTO_MAX_IDLE_DEFAULT;

size_t n_handlers, n_revalidators;
char *pmd_cpu_mask;

/* Map from datapath name to struct ofproto, for use by unixctl commands. */
static struct hmap all_ofprotos = HMAP_INITIALIZER(&all_ofprotos);

/* Initial mappings of port names to OpenFlow port numbers. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* The default value of true means that we wait for flow restore. */
static bool flow_restore_wait = true;

/* Must be called to initialize the ofproto library.
 *
 * The caller may pass in 'iface_hints', which contains an shash of
 * "iface_hint" elements indexed by the interface's name.  The provider
 * may use these hints to describe the startup configuration in order to
 * reinitialize its state.  The caller owns the provided data, so a
 * provider will make copies of anything required.  An ofproto provider
 * will remove any existing state that is not described by the hint, and
 * may choose to remove it all. */
void
ofproto_init(const struct shash *iface_hints)
{
    struct shash_node *node;
    size_t i;

    ofproto_class_register(&ofproto_dpif_class);

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);
        const char *br_type = ofproto_normalize_type(orig_hint->br_type);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);
    }

    for (i = 0; i < n_ofproto_classes; i++) {
        ofproto_classes[i]->init(&init_ofp_ports);
    }

    ofproto_unixctl_init();
}

/* 'type' should be a normalized datapath type, as returned by
 * ofproto_normalize_type().  Returns the corresponding ofproto_class
 * structure, or a null pointer if there is none registered for 'type'. */
static const struct ofproto_class *
ofproto_class_find__(const char *type)
{
    size_t i;

    for (i = 0; i < n_ofproto_classes; i++) {
        const struct ofproto_class *class = ofproto_classes[i];
        struct sset types;
        bool found;

        sset_init(&types);
        class->enumerate_types(&types);
        found = sset_contains(&types, type);
        sset_destroy(&types);

        if (found) {
            return class;
        }
    }
    VLOG_WARN("unknown datapath type %s", type);
    return NULL;
}

/* Registers a new ofproto class.  After successful registration, new ofprotos
 * of that type can be created using ofproto_create(). */
int
ofproto_class_register(const struct ofproto_class *new_class)
{
    size_t i;

    for (i = 0; i < n_ofproto_classes; i++) {
        if (ofproto_classes[i] == new_class) {
            return EEXIST;
        }
    }

    if (n_ofproto_classes >= allocated_ofproto_classes) {
        ofproto_classes = x2nrealloc(ofproto_classes,
                                     &allocated_ofproto_classes,
                                     sizeof *ofproto_classes);
    }
    ofproto_classes[n_ofproto_classes++] = new_class;
    return 0;
}

/* Unregisters a datapath provider.  'class' must have been previously
 * registered and not currently be in use by any ofprotos.  After
 * unregistration, new datapaths of that type cannot be opened using
 * ofproto_create().
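 *
 * A hypothetical sketch (the class pointer is illustrative):
 *
 *     int error = ofproto_class_unregister(&some_ofproto_class);
 *     if (error) {
 *         VLOG_WARN("unregister failed (%s)", ovs_strerror(error));
 *     }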
 */
int
ofproto_class_unregister(const struct ofproto_class *class)
{
    size_t i;

    for (i = 0; i < n_ofproto_classes; i++) {
        if (ofproto_classes[i] == class) {
            for (i++; i < n_ofproto_classes; i++) {
                ofproto_classes[i - 1] = ofproto_classes[i];
            }
            n_ofproto_classes--;
            return 0;
        }
    }
    VLOG_WARN("attempted to unregister an ofproto class that is not "
              "registered");
    return EAFNOSUPPORT;
}

/* Clears 'types' and enumerates all registered ofproto types into it.  The
 * caller must first initialize the sset. */
void
ofproto_enumerate_types(struct sset *types)
{
    size_t i;

    sset_clear(types);
    for (i = 0; i < n_ofproto_classes; i++) {
        ofproto_classes[i]->enumerate_types(types);
    }
}

/* Returns the fully spelled out name for the given ofproto 'type'.
 *
 * Normalized type strings can be compared with strcmp().  Unnormalized type
 * strings might refer to the same type even if they have different
 * spellings. */
const char *
ofproto_normalize_type(const char *type)
{
    return type && type[0] ? type : "system";
}

/* Clears 'names' and enumerates the names of all known created ofprotos with
 * the given 'type'.  The caller must first initialize the sset.  Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * Some kinds of datapaths might not be practically enumerable.  This is not
 * considered an error. */
int
ofproto_enumerate_names(const char *type, struct sset *names)
{
    const struct ofproto_class *class = ofproto_class_find__(type);
    return class ? class->enumerate_names(type, names) : EAFNOSUPPORT;
}

static void
ofproto_bump_tables_version(struct ofproto *ofproto)
{
    ++ofproto->tables_version;
    ofproto->ofproto_class->set_tables_version(ofproto,
                                               ofproto->tables_version);
}

int
ofproto_create(const char *datapath_name, const char *datapath_type,
               struct ofproto **ofprotop)
    OVS_EXCLUDED(ofproto_mutex)
{
    const struct ofproto_class *class;
    struct ofproto *ofproto;
    int error;
    int i;

    *ofprotop = NULL;

    datapath_type = ofproto_normalize_type(datapath_type);
    class = ofproto_class_find__(datapath_type);
    if (!class) {
        VLOG_WARN("could not create datapath %s of unknown type %s",
                  datapath_name, datapath_type);
        return EAFNOSUPPORT;
    }

    ofproto = class->alloc();
    if (!ofproto) {
        VLOG_ERR("failed to allocate datapath %s of type %s",
                 datapath_name, datapath_type);
        return ENOMEM;
    }

    /* Initialize.
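 *
 * (Everything from here down to the ->construct() call just sets neutral
 * defaults; the ofproto class's constructor fills in the interesting
 * state.)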
*/ ovs_mutex_lock(&ofproto_mutex); memset(ofproto, 0, sizeof *ofproto); ofproto->ofproto_class = class; ofproto->name = xstrdup(datapath_name); ofproto->type = xstrdup(datapath_type); hmap_insert(&all_ofprotos, &ofproto->hmap_node, hash_string(ofproto->name, 0)); ofproto->datapath_id = 0; ofproto->forward_bpdu = false; ofproto->fallback_dpid = pick_fallback_dpid(); ofproto->mfr_desc = NULL; ofproto->hw_desc = NULL; ofproto->sw_desc = NULL; ofproto->serial_desc = NULL; ofproto->dp_desc = NULL; ofproto->frag_handling = OFPUTIL_FRAG_NORMAL; hmap_init(&ofproto->ports); hmap_init(&ofproto->ofport_usage); shash_init(&ofproto->port_by_name); simap_init(&ofproto->ofp_requests); ofproto->max_ports = ofp_to_u16(OFPP_MAX); ofproto->eviction_group_timer = LLONG_MIN; ofproto->tables = NULL; ofproto->n_tables = 0; ofproto->tables_version = OVS_VERSION_MIN; hindex_init(&ofproto->cookies); hmap_init(&ofproto->learned_cookies); ovs_list_init(&ofproto->expirable); ofproto->connmgr = connmgr_create(ofproto, datapath_name, datapath_name); ofproto->min_mtu = INT_MAX; cmap_init(&ofproto->groups); ovs_mutex_unlock(&ofproto_mutex); ofproto->ogf.types = 0xf; ofproto->ogf.capabilities = OFPGFC_CHAINING | OFPGFC_SELECT_LIVENESS | OFPGFC_SELECT_WEIGHT; for (i = 0; i < 4; i++) { ofproto->ogf.max_groups[i] = OFPG_MAX; ofproto->ogf.ofpacts[i] = (UINT64_C(1) << N_OFPACTS) - 1; } ovsrcu_set(&ofproto->metadata_tab, tun_metadata_alloc(NULL)); ovs_mutex_init(&ofproto->vl_mff_map.mutex); cmap_init(&ofproto->vl_mff_map.cmap); error = ofproto->ofproto_class->construct(ofproto); if (error) { VLOG_ERR("failed to open datapath %s: %s", datapath_name, ovs_strerror(error)); ovs_mutex_lock(&ofproto_mutex); connmgr_destroy(ofproto->connmgr); ofproto->connmgr = NULL; ovs_mutex_unlock(&ofproto_mutex); ofproto_destroy__(ofproto); return error; } /* Check that hidden tables, if any, are at the end. */ ovs_assert(ofproto->n_tables); for (i = 0; i + 1 < ofproto->n_tables; i++) { enum oftable_flags flags = ofproto->tables[i].flags; enum oftable_flags next_flags = ofproto->tables[i + 1].flags; ovs_assert(!(flags & OFTABLE_HIDDEN) || next_flags & OFTABLE_HIDDEN); } ofproto->datapath_id = pick_datapath_id(ofproto); init_ports(ofproto); /* Initialize meters table. */ if (ofproto->ofproto_class->meter_get_features) { ofproto->ofproto_class->meter_get_features(ofproto, &ofproto->meter_features); } else { memset(&ofproto->meter_features, 0, sizeof ofproto->meter_features); } ofproto->meters = xzalloc((ofproto->meter_features.max_meters + 1) * sizeof(struct meter *)); /* Set the initial tables version. */ ofproto_bump_tables_version(ofproto); *ofprotop = ofproto; return 0; } /* Must be called (only) by an ofproto implementation in its constructor * function. See the large comment on 'construct' in struct ofproto_class for * details. */ void ofproto_init_tables(struct ofproto *ofproto, int n_tables) { struct oftable *table; ovs_assert(!ofproto->n_tables); ovs_assert(n_tables >= 1 && n_tables <= 255); ofproto->n_tables = n_tables; ofproto->tables = xmalloc(n_tables * sizeof *ofproto->tables); OFPROTO_FOR_EACH_TABLE (table, ofproto) { oftable_init(table); } } /* To be optionally called (only) by an ofproto implementation in its * constructor function. See the large comment on 'construct' in struct * ofproto_class for details. * * Sets the maximum number of ports to 'max_ports'. The ofproto generic layer * will then ensure that actions passed into the ofproto implementation will * not refer to OpenFlow ports numbered 'max_ports' or higher. 
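 * (For example, with 'max_ports' of 256, an action that outputs to port 300
 * would be rejected by the generic layer; the numbers are illustrative.)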
 * If this function is not called, there will be no such restriction.
 *
 * Reserved ports numbered OFPP_MAX and higher are special and not subject to
 * the 'max_ports' restriction. */
void
ofproto_init_max_ports(struct ofproto *ofproto, uint16_t max_ports)
{
    ovs_assert(max_ports <= ofp_to_u16(OFPP_MAX));
    ofproto->max_ports = max_ports;
}

uint64_t
ofproto_get_datapath_id(const struct ofproto *ofproto)
{
    return ofproto->datapath_id;
}

void
ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id)
{
    uint64_t old_dpid = p->datapath_id;
    p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p);
    if (p->datapath_id != old_dpid) {
        /* Force all active connections to reconnect, since there is no way to
         * notify a controller that the datapath ID has changed. */
        ofproto_reconnect_controllers(p);
    }
}

void
ofproto_set_controllers(struct ofproto *p,
                        const struct ofproto_controller *controllers,
                        size_t n_controllers, uint32_t allowed_versions)
{
    connmgr_set_controllers(p->connmgr, controllers, n_controllers,
                            allowed_versions);
}

void
ofproto_set_fail_mode(struct ofproto *p, enum ofproto_fail_mode fail_mode)
{
    connmgr_set_fail_mode(p->connmgr, fail_mode);
}

/* Drops the connections between 'ofproto' and all of its controllers, forcing
 * them to reconnect. */
void
ofproto_reconnect_controllers(struct ofproto *ofproto)
{
    connmgr_reconnect(ofproto->connmgr);
}

/* Sets the 'n' TCP port addresses in 'extras' as ones to which 'ofproto''s
 * in-band control should guarantee access, in the same way that in-band
 * control guarantees access to OpenFlow controllers. */
void
ofproto_set_extra_in_band_remotes(struct ofproto *ofproto,
                                  const struct sockaddr_in *extras, size_t n)
{
    connmgr_set_extra_in_band_remotes(ofproto->connmgr, extras, n);
}

/* Sets the OpenFlow queue used by flows set up by in-band control on
 * 'ofproto' to 'queue_id'.  If 'queue_id' is negative, then in-band control
 * flows will use the default queue. */
void
ofproto_set_in_band_queue(struct ofproto *ofproto, int queue_id)
{
    connmgr_set_in_band_queue(ofproto->connmgr, queue_id);
}

/* Sets the number of flows at which eviction from the kernel flow table
 * will occur. */
void
ofproto_set_flow_limit(unsigned limit)
{
    ofproto_flow_limit = limit;
}

/* Sets the maximum idle time for flows in the datapath before they are
 * expired. */
void
ofproto_set_max_idle(unsigned max_idle)
{
    ofproto_max_idle = max_idle;
}

/* If 'forward_bpdu' is true, the NORMAL action will forward frames with
 * reserved (e.g. STP) destination Ethernet addresses.  If 'forward_bpdu' is
 * false, the NORMAL action will drop these frames. */
void
ofproto_set_forward_bpdu(struct ofproto *ofproto, bool forward_bpdu)
{
    bool old_val = ofproto->forward_bpdu;
    ofproto->forward_bpdu = forward_bpdu;
    if (old_val != ofproto->forward_bpdu) {
        if (ofproto->ofproto_class->forward_bpdu_changed) {
            ofproto->ofproto_class->forward_bpdu_changed(ofproto);
        }
    }
}

/* Sets the MAC aging timeout for the OFPP_NORMAL action on 'ofproto' to
 * 'idle_time', in seconds, and the maximum number of MAC table entries to
 * 'max_entries'. */
void
ofproto_set_mac_table_config(struct ofproto *ofproto, unsigned idle_time,
                             size_t max_entries)
{
    if (ofproto->ofproto_class->set_mac_table_config) {
        ofproto->ofproto_class->set_mac_table_config(ofproto, idle_time,
                                                     max_entries);
    }
}

/* Multicast snooping configuration. */

/* Configures multicast snooping on 'ofproto' using the settings
 * defined in 's'.  If 's' is NULL, disables multicast snooping.
 *
 * Returns 0 if successful, otherwise a positive errno value.
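 *
 * A hypothetical configuration sketch (the member values are illustrative;
 * see struct ofproto_mcast_snooping_settings for the full set of members):
 *
 *     struct ofproto_mcast_snooping_settings s = {
 *         .idle_time = 300,
 *         .max_entries = 1024,
 *     };
 *     ofproto_set_mcast_snooping(ofproto, &s);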
*/ int ofproto_set_mcast_snooping(struct ofproto *ofproto, const struct ofproto_mcast_snooping_settings *s) { return (ofproto->ofproto_class->set_mcast_snooping ? ofproto->ofproto_class->set_mcast_snooping(ofproto, s) : EOPNOTSUPP); } /* Configures multicast snooping flood settings on 'ofp_port' of 'ofproto'. * * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_set_mcast_snooping(struct ofproto *ofproto, void *aux, const struct ofproto_mcast_snooping_port_settings *s) { return (ofproto->ofproto_class->set_mcast_snooping_port ? ofproto->ofproto_class->set_mcast_snooping_port(ofproto, aux, s) : EOPNOTSUPP); } void ofproto_set_cpu_mask(const char *cmask) { free(pmd_cpu_mask); pmd_cpu_mask = nullable_xstrdup(cmask); } void ofproto_set_threads(int n_handlers_, int n_revalidators_) { int threads = MAX(count_cpu_cores(), 2); n_revalidators = MAX(n_revalidators_, 0); n_handlers = MAX(n_handlers_, 0); if (!n_revalidators) { n_revalidators = n_handlers ? MAX(threads - (int) n_handlers, 1) : threads / 4 + 1; } if (!n_handlers) { n_handlers = MAX(threads - (int) n_revalidators, 1); } } void ofproto_set_dp_desc(struct ofproto *p, const char *dp_desc) { free(p->dp_desc); p->dp_desc = nullable_xstrdup(dp_desc); } int ofproto_set_snoops(struct ofproto *ofproto, const struct sset *snoops) { return connmgr_set_snoops(ofproto->connmgr, snoops); } int ofproto_set_netflow(struct ofproto *ofproto, const struct netflow_options *nf_options) { if (nf_options && sset_is_empty(&nf_options->collectors)) { nf_options = NULL; } if (ofproto->ofproto_class->set_netflow) { return ofproto->ofproto_class->set_netflow(ofproto, nf_options); } else { return nf_options ? EOPNOTSUPP : 0; } } int ofproto_set_sflow(struct ofproto *ofproto, const struct ofproto_sflow_options *oso) { if (oso && sset_is_empty(&oso->targets)) { oso = NULL; } if (ofproto->ofproto_class->set_sflow) { return ofproto->ofproto_class->set_sflow(ofproto, oso); } else { return oso ? EOPNOTSUPP : 0; } } int ofproto_set_ipfix(struct ofproto *ofproto, const struct ofproto_ipfix_bridge_exporter_options *bo, const struct ofproto_ipfix_flow_exporter_options *fo, size_t n_fo) { if (ofproto->ofproto_class->set_ipfix) { return ofproto->ofproto_class->set_ipfix(ofproto, bo, fo, n_fo); } else { return (bo || fo) ? 
EOPNOTSUPP : 0; } } static int ofproto_get_ipfix_stats(struct ofproto *ofproto, bool bridge_ipfix, struct ovs_list *replies) { int error; if (ofproto->ofproto_class->get_ipfix_stats) { error = ofproto->ofproto_class->get_ipfix_stats(ofproto, bridge_ipfix, replies); } else { error = EOPNOTSUPP; } return error; } static enum ofperr handle_ipfix_bridge_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ovs_list replies; enum ofperr error; ofpmp_init(&replies, request); error = ofproto_get_ipfix_stats(ofproto, true, &replies); if (!error) { ofconn_send_replies(ofconn, &replies); } else { ofpbuf_list_delete(&replies); } return error; } static enum ofperr handle_ipfix_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ovs_list replies; enum ofperr error; ofpmp_init(&replies, request); error = ofproto_get_ipfix_stats(ofproto, false, &replies); if (!error) { ofconn_send_replies(ofconn, &replies); } else { ofpbuf_list_delete(&replies); } return error; } static enum ofperr handle_nxt_ct_flush_zone(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); const struct nx_zone_id *nzi = ofpmsg_body(oh); if (!is_all_zeros(nzi->zero, sizeof nzi->zero)) { return OFPERR_NXBRC_MUST_BE_ZERO; } uint16_t zone = ntohs(nzi->zone_id); if (ofproto->ofproto_class->ct_flush) { ofproto->ofproto_class->ct_flush(ofproto, &zone); } else { return EOPNOTSUPP; } return 0; } void ofproto_set_flow_restore_wait(bool flow_restore_wait_db) { flow_restore_wait = flow_restore_wait_db; } bool ofproto_get_flow_restore_wait(void) { return flow_restore_wait; } /* Spanning Tree Protocol (STP) configuration. */ /* Configures STP on 'ofproto' using the settings defined in 's'. If * 's' is NULL, disables STP. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_set_stp(struct ofproto *ofproto, const struct ofproto_stp_settings *s) { return (ofproto->ofproto_class->set_stp ? ofproto->ofproto_class->set_stp(ofproto, s) : EOPNOTSUPP); } /* Retrieves STP status of 'ofproto' and stores it in 's'. If the * 'enabled' member of 's' is false, then the other members are not * meaningful. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_get_stp_status(struct ofproto *ofproto, struct ofproto_stp_status *s) { return (ofproto->ofproto_class->get_stp_status ? ofproto->ofproto_class->get_stp_status(ofproto, s) : EOPNOTSUPP); } /* Configures STP on 'ofp_port' of 'ofproto' using the settings defined * in 's'. The caller is responsible for assigning STP port numbers * (using the 'port_num' member in the range of 1 through 255, inclusive) * and ensuring there are no duplicates. If the 's' is NULL, then STP * is disabled on the port. * * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_set_stp(struct ofproto *ofproto, ofp_port_t ofp_port, const struct ofproto_port_stp_settings *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure STP on nonexistent port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } return (ofproto->ofproto_class->set_stp_port ? ofproto->ofproto_class->set_stp_port(ofport, s) : EOPNOTSUPP); } /* Retrieves STP port status of 'ofp_port' on 'ofproto' and stores it in * 's'. If the 'enabled' member in 's' is false, then the other members * are not meaningful. 
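 *
 * A minimal polling sketch (hypothetical caller):
 *
 *     struct ofproto_port_stp_status status;
 *     if (!ofproto_port_get_stp_status(ofproto, ofp_port, &status)
 *         && status.enabled) {
 *         ...consume 'status'...
 *     }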
* * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_get_stp_status(struct ofproto *ofproto, ofp_port_t ofp_port, struct ofproto_port_stp_status *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN_RL(&rl, "%s: cannot get STP status on nonexistent " "port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } return (ofproto->ofproto_class->get_stp_port_status ? ofproto->ofproto_class->get_stp_port_status(ofport, s) : EOPNOTSUPP); } /* Retrieves STP port statistics of 'ofp_port' on 'ofproto' and stores it in * 's'. If the 'enabled' member in 's' is false, then the other members * are not meaningful. * * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_get_stp_stats(struct ofproto *ofproto, ofp_port_t ofp_port, struct ofproto_port_stp_stats *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN_RL(&rl, "%s: cannot get STP stats on nonexistent " "port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } return (ofproto->ofproto_class->get_stp_port_stats ? ofproto->ofproto_class->get_stp_port_stats(ofport, s) : EOPNOTSUPP); } /* Rapid Spanning Tree Protocol (RSTP) configuration. */ /* Configures RSTP on 'ofproto' using the settings defined in 's'. If * 's' is NULL, disables RSTP. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_set_rstp(struct ofproto *ofproto, const struct ofproto_rstp_settings *s) { if (!ofproto->ofproto_class->set_rstp) { return EOPNOTSUPP; } ofproto->ofproto_class->set_rstp(ofproto, s); return 0; } /* Retrieves RSTP status of 'ofproto' and stores it in 's'. If the * 'enabled' member of 's' is false, then the other members are not * meaningful. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_get_rstp_status(struct ofproto *ofproto, struct ofproto_rstp_status *s) { if (!ofproto->ofproto_class->get_rstp_status) { return EOPNOTSUPP; } ofproto->ofproto_class->get_rstp_status(ofproto, s); return 0; } /* Configures RSTP on 'ofp_port' of 'ofproto' using the settings defined * in 's'. The caller is responsible for assigning RSTP port numbers * (using the 'port_num' member in the range of 1 through 255, inclusive) * and ensuring there are no duplicates. If the 's' is NULL, then RSTP * is disabled on the port. * * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_set_rstp(struct ofproto *ofproto, ofp_port_t ofp_port, const struct ofproto_port_rstp_settings *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure RSTP on nonexistent port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } if (!ofproto->ofproto_class->set_rstp_port) { return EOPNOTSUPP; } ofproto->ofproto_class->set_rstp_port(ofport, s); return 0; } /* Retrieves RSTP port status of 'ofp_port' on 'ofproto' and stores it in * 's'. If the 'enabled' member in 's' is false, then the other members * are not meaningful. 
* * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_get_rstp_status(struct ofproto *ofproto, ofp_port_t ofp_port, struct ofproto_port_rstp_status *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN_RL(&rl, "%s: cannot get RSTP status on nonexistent " "port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } if (!ofproto->ofproto_class->get_rstp_port_status) { return EOPNOTSUPP; } ofproto->ofproto_class->get_rstp_port_status(ofport, s); return 0; } /* Queue DSCP configuration. */ /* Registers meta-data associated with the 'n_qdscp' Qualities of Service * 'queues' attached to 'ofport'. This data is not intended to be sufficient * to implement QoS. Instead, it is used to implement features which require * knowledge of what queues exist on a port, and some basic information about * them. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_port_set_queues(struct ofproto *ofproto, ofp_port_t ofp_port, const struct ofproto_port_queue *queues, size_t n_queues) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot set queues on nonexistent port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } return (ofproto->ofproto_class->set_queues ? ofproto->ofproto_class->set_queues(ofport, queues, n_queues) : EOPNOTSUPP); } /* LLDP configuration. */ void ofproto_port_set_lldp(struct ofproto *ofproto, ofp_port_t ofp_port, const struct smap *cfg) { struct ofport *ofport; int error; ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure LLDP on nonexistent port %"PRIu32, ofproto->name, ofp_port); return; } error = (ofproto->ofproto_class->set_lldp ? ofproto->ofproto_class->set_lldp(ofport, cfg) : EOPNOTSUPP); if (error) { VLOG_WARN("%s: lldp configuration on port %"PRIu32" (%s) failed (%s)", ofproto->name, ofp_port, netdev_get_name(ofport->netdev), ovs_strerror(error)); } } int ofproto_set_aa(struct ofproto *ofproto, void *aux OVS_UNUSED, const struct aa_settings *s) { if (!ofproto->ofproto_class->set_aa) { return EOPNOTSUPP; } ofproto->ofproto_class->set_aa(ofproto, s); return 0; } int ofproto_aa_mapping_register(struct ofproto *ofproto, void *aux, const struct aa_mapping_settings *s) { if (!ofproto->ofproto_class->aa_mapping_set) { return EOPNOTSUPP; } ofproto->ofproto_class->aa_mapping_set(ofproto, aux, s); return 0; } int ofproto_aa_mapping_unregister(struct ofproto *ofproto, void *aux) { if (!ofproto->ofproto_class->aa_mapping_unset) { return EOPNOTSUPP; } ofproto->ofproto_class->aa_mapping_unset(ofproto, aux); return 0; } int ofproto_aa_vlan_get_queued(struct ofproto *ofproto, struct ovs_list *list) { if (!ofproto->ofproto_class->aa_vlan_get_queued) { return EOPNOTSUPP; } ofproto->ofproto_class->aa_vlan_get_queued(ofproto, list); return 0; } unsigned int ofproto_aa_vlan_get_queue_size(struct ofproto *ofproto) { if (!ofproto->ofproto_class->aa_vlan_get_queue_size) { return EOPNOTSUPP; } return ofproto->ofproto_class->aa_vlan_get_queue_size(ofproto); } /* Connectivity Fault Management configuration. */ /* Clears the CFM configuration from 'ofp_port' on 'ofproto'. */ void ofproto_port_clear_cfm(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (ofport && ofproto->ofproto_class->set_cfm) { ofproto->ofproto_class->set_cfm(ofport, NULL); } } /* Configures connectivity fault management on 'ofp_port' in 'ofproto'. 
Takes * basic configuration from the configuration members in 'cfm', and the remote * maintenance point ID from remote_mpid. Ignores the statistics members of * 'cfm'. * * This function has no effect if 'ofproto' does not have a port 'ofp_port'. */ void ofproto_port_set_cfm(struct ofproto *ofproto, ofp_port_t ofp_port, const struct cfm_settings *s) { struct ofport *ofport; int error; ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure CFM on nonexistent port %"PRIu32, ofproto->name, ofp_port); return; } /* XXX: For configuration simplicity, we only support one remote_mpid * outside of the CFM module. It's not clear if this is the correct long * term solution or not. */ error = (ofproto->ofproto_class->set_cfm ? ofproto->ofproto_class->set_cfm(ofport, s) : EOPNOTSUPP); if (error) { VLOG_WARN("%s: CFM configuration on port %"PRIu32" (%s) failed (%s)", ofproto->name, ofp_port, netdev_get_name(ofport->netdev), ovs_strerror(error)); } } /* Configures BFD on 'ofp_port' in 'ofproto'. This function has no effect if * 'ofproto' does not have a port 'ofp_port'. */ void ofproto_port_set_bfd(struct ofproto *ofproto, ofp_port_t ofp_port, const struct smap *cfg) { struct ofport *ofport; int error; ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure bfd on nonexistent port %"PRIu32, ofproto->name, ofp_port); return; } error = (ofproto->ofproto_class->set_bfd ? ofproto->ofproto_class->set_bfd(ofport, cfg) : EOPNOTSUPP); if (error) { VLOG_WARN("%s: bfd configuration on port %"PRIu32" (%s) failed (%s)", ofproto->name, ofp_port, netdev_get_name(ofport->netdev), ovs_strerror(error)); } } /* Checks the status change of BFD on 'ofport'. * * Returns true if 'ofproto_class' does not support 'bfd_status_changed'. */ bool ofproto_port_bfd_status_changed(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->bfd_status_changed ? ofproto->ofproto_class->bfd_status_changed(ofport) : true); } /* Populates 'status' with the status of BFD on 'ofport'. Returns 0 on * success. Returns a positive errno otherwise. Has no effect if 'ofp_port' * is not an OpenFlow port in 'ofproto'. * * The caller must provide and own '*status'. */ int ofproto_port_get_bfd_status(struct ofproto *ofproto, ofp_port_t ofp_port, struct smap *status) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->get_bfd_status ? ofproto->ofproto_class->get_bfd_status(ofport, status) : EOPNOTSUPP); } /* Checks the status of LACP negotiation for 'ofp_port' within ofproto. * Returns 1 if LACP partner information for 'ofp_port' is up-to-date, * 0 if LACP partner information is not current (generally indicating a * connectivity problem), or -1 if LACP is not enabled on 'ofp_port'. */ int ofproto_port_is_lacp_current(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->port_is_lacp_current ? ofproto->ofproto_class->port_is_lacp_current(ofport) : -1); } int ofproto_port_get_lacp_stats(const struct ofport *port, struct lacp_slave_stats *stats) { struct ofproto *ofproto = port->ofproto; int error; if (ofproto->ofproto_class->port_get_lacp_stats) { error = ofproto->ofproto_class->port_get_lacp_stats(port, stats); } else { error = EOPNOTSUPP; } return error; } /* Bundles. 
 */

/* Registers a "bundle" associated with client data pointer 'aux' in
 * 'ofproto'.  A bundle is the same concept as a Port in OVSDB, that is, it
 * consists of one or more "slave" devices (Interfaces, in OVSDB) along with
 * a VLAN configuration plus, if there is more than one slave, a bonding
 * configuration.
 *
 * If 'aux' is already registered then this function updates its configuration
 * to 's'.  Otherwise, this function registers a new bundle.
 *
 * Bundles only affect the NXAST_AUTOPATH action and output to the OFPP_NORMAL
 * port. */
int
ofproto_bundle_register(struct ofproto *ofproto, void *aux,
                        const struct ofproto_bundle_settings *s)
{
    return (ofproto->ofproto_class->bundle_set
            ? ofproto->ofproto_class->bundle_set(ofproto, aux, s)
            : EOPNOTSUPP);
}

/* Unregisters the bundle registered on 'ofproto' with auxiliary data 'aux'.
 * If no such bundle has been registered, this has no effect. */
int
ofproto_bundle_unregister(struct ofproto *ofproto, void *aux)
{
    return ofproto_bundle_register(ofproto, aux, NULL);
}

/* Registers a mirror associated with client data pointer 'aux' in 'ofproto'.
 * If 'aux' is already registered then this function updates its configuration
 * to 's'.  Otherwise, this function registers a new mirror. */
int
ofproto_mirror_register(struct ofproto *ofproto, void *aux,
                        const struct ofproto_mirror_settings *s)
{
    return (ofproto->ofproto_class->mirror_set
            ? ofproto->ofproto_class->mirror_set(ofproto, aux, s)
            : EOPNOTSUPP);
}

/* Unregisters the mirror registered on 'ofproto' with auxiliary data 'aux'.
 * If no mirror has been registered, this has no effect. */
int
ofproto_mirror_unregister(struct ofproto *ofproto, void *aux)
{
    return ofproto_mirror_register(ofproto, aux, NULL);
}

/* Retrieves statistics from the mirror associated with client data pointer
 * 'aux' in 'ofproto'.  Stores packet and byte counts in 'packets' and
 * 'bytes', respectively.  If a particular counter is not supported,
 * the appropriate argument is set to UINT64_MAX. */
int
ofproto_mirror_get_stats(struct ofproto *ofproto, void *aux,
                         uint64_t *packets, uint64_t *bytes)
{
    if (!ofproto->ofproto_class->mirror_get_stats) {
        *packets = *bytes = UINT64_MAX;
        return EOPNOTSUPP;
    }

    return ofproto->ofproto_class->mirror_get_stats(ofproto, aux,
                                                    packets, bytes);
}

/* Configures the VLANs whose bits are set to 1 in 'flood_vlans' as VLANs on
 * which all packets are flooded, instead of using MAC learning.  If
 * 'flood_vlans' is NULL, then MAC learning applies to all VLANs.
 *
 * Flood VLANs affect only the treatment of packets output to the OFPP_NORMAL
 * port. */
int
ofproto_set_flood_vlans(struct ofproto *ofproto, unsigned long *flood_vlans)
{
    return (ofproto->ofproto_class->set_flood_vlans
            ? ofproto->ofproto_class->set_flood_vlans(ofproto, flood_vlans)
            : EOPNOTSUPP);
}

/* Returns true if 'aux' is a registered bundle that is currently in use as
 * the output for a mirror. */
bool
ofproto_is_mirror_output_bundle(const struct ofproto *ofproto, void *aux)
{
    return (ofproto->ofproto_class->is_mirror_output_bundle
            ? ofproto->ofproto_class->is_mirror_output_bundle(ofproto, aux)
            : false);
}

/* Configuration of OpenFlow tables. */

/* Returns the number of OpenFlow tables in 'ofproto'. */
int
ofproto_get_n_tables(const struct ofproto *ofproto)
{
    return ofproto->n_tables;
}

/* Returns the number of controller-visible OpenFlow tables in 'ofproto'.
 * This number excludes hidden tables.  This function's return value is less
 * than or equal to that of ofproto_get_n_tables().
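 *
 * For example (illustrative figures), with n_tables == 254 and a single
 * hidden table at the end, this returns 253.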
 */
uint8_t
ofproto_get_n_visible_tables(const struct ofproto *ofproto)
{
    uint8_t n = ofproto->n_tables;

    /* Count only non-hidden tables in the number of tables.  (Hidden tables,
     * if present, are always at the end.) */
    while (n && (ofproto->tables[n - 1].flags & OFTABLE_HIDDEN)) {
        n--;
    }

    return n;
}

/* Configures the OpenFlow table in 'ofproto' with id 'table_id' with the
 * settings from 's'.  'table_id' must be in the range 0 through the number of
 * OpenFlow tables in 'ofproto' minus 1, inclusive.
 *
 * For read-only tables, only the name may be configured. */
void
ofproto_configure_table(struct ofproto *ofproto, int table_id,
                        const struct ofproto_table_settings *s)
{
    struct oftable *table;

    ovs_assert(table_id >= 0 && table_id < ofproto->n_tables);
    table = &ofproto->tables[table_id];

    oftable_set_name(table, s->name);

    if (table->flags & OFTABLE_READONLY) {
        return;
    }

    if (classifier_set_prefix_fields(&table->cls,
                                     s->prefix_fields, s->n_prefix_fields)) {
        /* XXX: Trigger revalidation. */
    }

    ovs_mutex_lock(&ofproto_mutex);
    unsigned int new_eviction = (s->enable_eviction
                                 ? table->eviction | EVICTION_CLIENT
                                 : table->eviction & ~EVICTION_CLIENT);
    oftable_configure_eviction(table, new_eviction, s->groups, s->n_groups);
    table->max_flows = s->max_flows;
    evict_rules_from_table(table);
    ovs_mutex_unlock(&ofproto_mutex);
}

bool
ofproto_has_snoops(const struct ofproto *ofproto)
{
    return connmgr_has_snoops(ofproto->connmgr);
}

void
ofproto_get_snoops(const struct ofproto *ofproto, struct sset *snoops)
{
    connmgr_get_snoops(ofproto->connmgr, snoops);
}

/* Deletes 'rule' from 'ofproto'.
 *
 * Within an ofproto implementation, this function allows an ofproto
 * implementation to destroy any rules that remain when its ->destruct()
 * function is called.  This function is not suitable for use elsewhere in an
 * ofproto implementation.
 *
 * This function implements steps 4.4 and 4.5 in the section titled "Rule Life
 * Cycle" in ofproto-provider.h. */
void
ofproto_rule_delete(struct ofproto *ofproto, struct rule *rule)
    OVS_EXCLUDED(ofproto_mutex)
{
    /* This skips the ofmonitor and flow-removed notifications because the
     * switch is being deleted and any OpenFlow channels have been or soon
     * will be killed. */
    ovs_mutex_lock(&ofproto_mutex);

    if (rule->state == RULE_INSERTED) {
        /* Make sure there is no postponed removal of the rule. */
        ovs_assert(cls_rule_visible_in_version(&rule->cr, OVS_VERSION_MAX));

        if (!classifier_remove(&rule->ofproto->tables[rule->table_id].cls,
                               &rule->cr)) {
            OVS_NOT_REACHED();
        }
        ofproto_rule_remove__(rule->ofproto, rule);
        if (ofproto->ofproto_class->rule_delete) {
            ofproto->ofproto_class->rule_delete(rule);
        }

        /* This may not be the last reference to the rule. */
        ofproto_rule_unref(rule);
    }
    ovs_mutex_unlock(&ofproto_mutex);
}

static void
ofproto_flush__(struct ofproto *ofproto)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct oftable *table;

    /* This will flush all datapath flows. */
    if (ofproto->ofproto_class->flush) {
        ofproto->ofproto_class->flush(ofproto);
    }

    /* XXX: There is a small race window here, where new datapath flows can be
     * created by upcall handlers based on the existing flow table.  We cannot
     * call the ofproto class flush while holding 'ofproto_mutex' to prevent
     * this, as then we could deadlock on syncing with the handler threads
     * waiting on the same mutex.
*/ ovs_mutex_lock(&ofproto_mutex); OFPROTO_FOR_EACH_TABLE (table, ofproto) { struct rule_collection rules; struct rule *rule; if (table->flags & OFTABLE_HIDDEN) { continue; } rule_collection_init(&rules); CLS_FOR_EACH (rule, cr, &table->cls) { rule_collection_add(&rules, rule); } delete_flows__(&rules, OFPRR_DELETE, NULL); } /* XXX: Concurrent handler threads may insert new learned flows based on * learn actions of the now deleted flows right after we release * 'ofproto_mutex'. */ ovs_mutex_unlock(&ofproto_mutex); } static void ofproto_destroy__(struct ofproto *ofproto) OVS_EXCLUDED(ofproto_mutex) { struct oftable *table; cmap_destroy(&ofproto->groups); hmap_remove(&all_ofprotos, &ofproto->hmap_node); free(ofproto->name); free(ofproto->type); free(ofproto->mfr_desc); free(ofproto->hw_desc); free(ofproto->sw_desc); free(ofproto->serial_desc); free(ofproto->dp_desc); hmap_destroy(&ofproto->ports); hmap_destroy(&ofproto->ofport_usage); shash_destroy(&ofproto->port_by_name); simap_destroy(&ofproto->ofp_requests); OFPROTO_FOR_EACH_TABLE (table, ofproto) { oftable_destroy(table); } free(ofproto->tables); ovs_mutex_lock(&ofproto->vl_mff_map.mutex); mf_vl_mff_map_clear(&ofproto->vl_mff_map, true); ovs_mutex_unlock(&ofproto->vl_mff_map.mutex); cmap_destroy(&ofproto->vl_mff_map.cmap); ovs_mutex_destroy(&ofproto->vl_mff_map.mutex); tun_metadata_free(ovsrcu_get_protected(struct tun_table *, &ofproto->metadata_tab)); ovs_assert(hindex_is_empty(&ofproto->cookies)); hindex_destroy(&ofproto->cookies); ovs_assert(hmap_is_empty(&ofproto->learned_cookies)); hmap_destroy(&ofproto->learned_cookies); ofproto->ofproto_class->dealloc(ofproto); } /* Destroying rules is doubly deferred, must have 'ofproto' around for them. * - 1st we defer the removal of the rules from the classifier * - 2nd we defer the actual destruction of the rules. */ static void ofproto_destroy_defer__(struct ofproto *ofproto) OVS_EXCLUDED(ofproto_mutex) { ovsrcu_postpone(ofproto_destroy__, ofproto); } void ofproto_destroy(struct ofproto *p, bool del) OVS_EXCLUDED(ofproto_mutex) { struct ofport *ofport, *next_ofport; struct ofport_usage *usage; if (!p) { return; } if (p->meters) { meter_delete(p, 1, p->meter_features.max_meters); p->meter_features.max_meters = 0; free(p->meters); p->meters = NULL; } ofproto_flush__(p); HMAP_FOR_EACH_SAFE (ofport, next_ofport, hmap_node, &p->ports) { ofport_destroy(ofport, del); } HMAP_FOR_EACH_POP (usage, hmap_node, &p->ofport_usage) { free(usage); } p->ofproto_class->destruct(p, del); /* We should not postpone this because it involves deleting a listening * socket which we may want to reopen soon. 'connmgr' may be used by other * threads only if they take the ofproto_mutex and read a non-NULL * 'ofproto->connmgr'. */ ovs_mutex_lock(&ofproto_mutex); connmgr_destroy(p->connmgr); p->connmgr = NULL; ovs_mutex_unlock(&ofproto_mutex); /* Destroying rules is deferred, must have 'ofproto' around for them. */ ovsrcu_postpone(ofproto_destroy_defer__, p); } /* Destroys the datapath with the respective 'name' and 'type'. With the Linux * kernel datapath, for example, this destroys the datapath in the kernel, and * with the netdev-based datapath, it tears down the data structures that * represent the datapath. * * The datapath should not be currently open as an ofproto. */ int ofproto_delete(const char *name, const char *type) { const struct ofproto_class *class = ofproto_class_find__(type); return (!class ? EAFNOSUPPORT : !class->del ? 
EACCES : class->del(type, name)); } static void process_port_change(struct ofproto *ofproto, int error, char *devname) { if (error == ENOBUFS) { reinit_ports(ofproto); } else if (!error) { update_port(ofproto, devname); free(devname); } } int ofproto_type_run(const char *datapath_type) { const struct ofproto_class *class; int error; datapath_type = ofproto_normalize_type(datapath_type); class = ofproto_class_find__(datapath_type); error = class->type_run ? class->type_run(datapath_type) : 0; if (error && error != EAGAIN) { VLOG_ERR_RL(&rl, "%s: type_run failed (%s)", datapath_type, ovs_strerror(error)); } return error; } void ofproto_type_wait(const char *datapath_type) { const struct ofproto_class *class; datapath_type = ofproto_normalize_type(datapath_type); class = ofproto_class_find__(datapath_type); if (class->type_wait) { class->type_wait(datapath_type); } } int ofproto_run(struct ofproto *p) { int error; uint64_t new_seq; error = p->ofproto_class->run(p); if (error && error != EAGAIN) { VLOG_ERR_RL(&rl, "%s: run failed (%s)", p->name, ovs_strerror(error)); } /* Restore the eviction group heap invariant occasionally. */ if (p->eviction_group_timer < time_msec()) { size_t i; p->eviction_group_timer = time_msec() + 1000; for (i = 0; i < p->n_tables; i++) { struct oftable *table = &p->tables[i]; struct eviction_group *evg; struct rule *rule; if (!table->eviction) { continue; } if (table->n_flows > 100000) { static struct vlog_rate_limit count_rl = VLOG_RATE_LIMIT_INIT(1, 1); VLOG_WARN_RL(&count_rl, "Table %"PRIuSIZE" has an excessive" " number of rules: %d", i, table->n_flows); } ovs_mutex_lock(&ofproto_mutex); CLS_FOR_EACH (rule, cr, &table->cls) { if (rule->idle_timeout || rule->hard_timeout) { if (!rule->eviction_group) { eviction_group_add_rule(rule); } else { heap_raw_change(&rule->evg_node, rule_eviction_priority(p, rule)); } } } HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) { heap_rebuild(&evg->rules); } ovs_mutex_unlock(&ofproto_mutex); } } if (p->ofproto_class->port_poll) { char *devname; while ((error = p->ofproto_class->port_poll(p, &devname)) != EAGAIN) { process_port_change(p, error, devname); } } new_seq = seq_read(connectivity_seq_get()); if (new_seq != p->change_seq) { struct sset devnames; const char *devname; struct ofport *ofport; /* Update OpenFlow port status for any port whose netdev has changed. * * Refreshing a given 'ofport' can cause an arbitrary ofport to be * destroyed, so it's not safe to update ports directly from the * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we * need this two-phase approach. */ sset_init(&devnames); HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { uint64_t port_change_seq; port_change_seq = netdev_get_change_seq(ofport->netdev); if (ofport->change_seq != port_change_seq) { ofport->change_seq = port_change_seq; sset_add(&devnames, netdev_get_name(ofport->netdev)); } } SSET_FOR_EACH (devname, &devnames) { update_port(p, devname); } sset_destroy(&devnames); p->change_seq = new_seq; } connmgr_run(p->connmgr, handle_openflow); return error; } void ofproto_wait(struct ofproto *p) { p->ofproto_class->wait(p); if (p->ofproto_class->port_poll_wait) { p->ofproto_class->port_poll_wait(p); } seq_wait(connectivity_seq_get(), p->change_seq); connmgr_wait(p->connmgr); } bool ofproto_is_alive(const struct ofproto *p) { return connmgr_has_controllers(p->connmgr); } /* Adds some memory usage statistics for 'ofproto' into 'usage', for use with * memory_report(). 
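 *
 * A hypothetical sketch of collecting and discarding the statistics:
 *
 *     struct simap usage = SIMAP_INITIALIZER(&usage);
 *     ofproto_get_memory_usage(ofproto, &usage);
 *     ...report the "ports" and "rules" entries...
 *     simap_destroy(&usage);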
*/ void ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage) { const struct oftable *table; unsigned int n_rules; simap_increase(usage, "ports", hmap_count(&ofproto->ports)); n_rules = 0; OFPROTO_FOR_EACH_TABLE (table, ofproto) { n_rules += table->n_flows; } simap_increase(usage, "rules", n_rules); if (ofproto->ofproto_class->get_memory_usage) { ofproto->ofproto_class->get_memory_usage(ofproto, usage); } connmgr_get_memory_usage(ofproto->connmgr, usage); } void ofproto_type_get_memory_usage(const char *datapath_type, struct simap *usage) { const struct ofproto_class *class; datapath_type = ofproto_normalize_type(datapath_type); class = ofproto_class_find__(datapath_type); if (class && class->type_get_memory_usage) { class->type_get_memory_usage(datapath_type, usage); } } void ofproto_get_ofproto_controller_info(const struct ofproto *ofproto, struct shash *info) { connmgr_get_controller_info(ofproto->connmgr, info); } void ofproto_free_ofproto_controller_info(struct shash *info) { connmgr_free_controller_info(info); } /* Makes a deep copy of 'old' into 'port'. */ void ofproto_port_clone(struct ofproto_port *port, const struct ofproto_port *old) { port->name = xstrdup(old->name); port->type = xstrdup(old->type); port->ofp_port = old->ofp_port; } /* Frees memory allocated to members of 'ofproto_port'. * * Do not call this function on an ofproto_port obtained from * ofproto_port_dump_next(): that function retains ownership of the data in the * ofproto_port. */ void ofproto_port_destroy(struct ofproto_port *ofproto_port) { free(ofproto_port->name); free(ofproto_port->type); } /* Initializes 'dump' to begin dumping the ports in an ofproto. * * This function provides no status indication. An error status for the entire * dump operation is provided when it is completed by calling * ofproto_port_dump_done(). */ void ofproto_port_dump_start(struct ofproto_port_dump *dump, const struct ofproto *ofproto) { dump->ofproto = ofproto; dump->error = ofproto->ofproto_class->port_dump_start(ofproto, &dump->state); } /* Attempts to retrieve another port from 'dump', which must have been created * with ofproto_port_dump_start(). On success, stores a new ofproto_port into * 'port' and returns true. On failure, returns false. * * Failure might indicate an actual error or merely that the last port has been * dumped. An error status for the entire dump operation is provided when it * is completed by calling ofproto_port_dump_done(). * * The ofproto owns the data stored in 'port'. It will remain valid until at * least the next time 'dump' is passed to ofproto_port_dump_next() or * ofproto_port_dump_done(). */ bool ofproto_port_dump_next(struct ofproto_port_dump *dump, struct ofproto_port *port) { const struct ofproto *ofproto = dump->ofproto; if (dump->error) { return false; } dump->error = ofproto->ofproto_class->port_dump_next(ofproto, dump->state, port); if (dump->error) { ofproto->ofproto_class->port_dump_done(ofproto, dump->state); return false; } return true; } /* Completes port table dump operation 'dump', which must have been created * with ofproto_port_dump_start(). Returns 0 if the dump operation was * error-free, otherwise a positive errno value describing the problem. */ int ofproto_port_dump_done(struct ofproto_port_dump *dump) { const struct ofproto *ofproto = dump->ofproto; if (!dump->error) { dump->error = ofproto->ofproto_class->port_dump_done(ofproto, dump->state); } return dump->error == EOF ? 
0 : dump->error; } /* Returns the type to pass to netdev_open() when a datapath of type * 'datapath_type' has a port of type 'port_type', for a few special * cases when a netdev type differs from a port type. For example, when * using the userspace datapath, a port of type "internal" needs to be * opened as "tap". * * Returns either 'type' itself or a string literal, which must not be * freed. */ const char * ofproto_port_open_type(const char *datapath_type, const char *port_type) { const struct ofproto_class *class; datapath_type = ofproto_normalize_type(datapath_type); class = ofproto_class_find__(datapath_type); if (!class) { return port_type; } return (class->port_open_type ? class->port_open_type(datapath_type, port_type) : port_type); } /* Attempts to add 'netdev' as a port on 'ofproto'. If 'ofp_portp' is * non-null and '*ofp_portp' is not OFPP_NONE, attempts to use that as * the port's OpenFlow port number. * * If successful, returns 0 and sets '*ofp_portp' to the new port's * OpenFlow port number (if 'ofp_portp' is non-null). On failure, * returns a positive errno value and sets '*ofp_portp' to OFPP_NONE (if * 'ofp_portp' is non-null). */ int ofproto_port_add(struct ofproto *ofproto, struct netdev *netdev, ofp_port_t *ofp_portp) { ofp_port_t ofp_port = ofp_portp ? *ofp_portp : OFPP_NONE; int error; error = ofproto->ofproto_class->port_add(ofproto, netdev); if (!error) { const char *netdev_name = netdev_get_name(netdev); simap_put(&ofproto->ofp_requests, netdev_name, ofp_to_u16(ofp_port)); error = update_port(ofproto, netdev_name); } if (ofp_portp) { *ofp_portp = OFPP_NONE; if (!error) { struct ofproto_port ofproto_port; error = ofproto_port_query_by_name(ofproto, netdev_get_name(netdev), &ofproto_port); if (!error) { *ofp_portp = ofproto_port.ofp_port; ofproto_port_destroy(&ofproto_port); } } } return error; } /* Looks up a port named 'devname' in 'ofproto'. On success, returns 0 and * initializes '*port' appropriately; on failure, returns a positive errno * value. * * The caller owns the data in 'ofproto_port' and must free it with * ofproto_port_destroy() when it is no longer needed. */ int ofproto_port_query_by_name(const struct ofproto *ofproto, const char *devname, struct ofproto_port *port) { int error; error = ofproto->ofproto_class->port_query_by_name(ofproto, devname, port); if (error) { memset(port, 0, sizeof *port); } return error; } /* Deletes port number 'ofp_port' from the datapath for 'ofproto'. * Returns 0 if successful, otherwise a positive errno. */ int ofproto_port_del(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); const char *name = ofport ? netdev_get_name(ofport->netdev) : "<unknown>"; struct simap_node *ofp_request_node; int error; ofp_request_node = simap_find(&ofproto->ofp_requests, name); if (ofp_request_node) { simap_delete(&ofproto->ofp_requests, ofp_request_node); } error = ofproto->ofproto_class->port_del(ofproto, ofp_port); if (!error && ofport) { /* 'name' is the netdev's name and update_port() is going to close the * netdev. Just in case update_port() refers to 'name' after it * destroys 'ofport', make a copy of it around the update_port() * call. */ char *devname = xstrdup(name); update_port(ofproto, devname); free(devname); } return error; } /* Refreshes datapath configuration of port number 'ofp_port' in 'ofproto'. * * This function has no effect if 'ofproto' does not have a port 'ofp_port'. 
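 *
 * A hypothetical sketch ('key' and 'value' are placeholders; the supported
 * keys depend on the ofproto class):
 *
 *     struct smap cfg = SMAP_INITIALIZER(&cfg);
 *     smap_add(&cfg, "key", "value");
 *     ofproto_port_set_config(ofproto, ofp_port, &cfg);
 *     smap_destroy(&cfg);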
 */
void
ofproto_port_set_config(struct ofproto *ofproto, ofp_port_t ofp_port,
                        const struct smap *cfg)
{
    struct ofport *ofport;
    int error;

    ofport = ofproto_get_port(ofproto, ofp_port);
    if (!ofport) {
        VLOG_WARN("%s: cannot configure datapath on nonexistent port %"PRIu32,
                  ofproto->name, ofp_port);
        return;
    }

    error = (ofproto->ofproto_class->port_set_config
             ? ofproto->ofproto_class->port_set_config(ofport, cfg)
             : EOPNOTSUPP);
    if (error) {
        VLOG_WARN("%s: datapath configuration on port %"PRIu32
                  " (%s) failed (%s)",
                  ofproto->name, ofp_port, netdev_get_name(ofport->netdev),
                  ovs_strerror(error));
    }
}

static void
flow_mod_init(struct ofputil_flow_mod *fm,
              const struct match *match, int priority,
              const struct ofpact *ofpacts, size_t ofpacts_len,
              enum ofp_flow_mod_command command)
{
    *fm = (struct ofputil_flow_mod) {
        .match = *match,
        .priority = priority,
        .table_id = 0,
        .command = command,
        .buffer_id = UINT32_MAX,
        .out_port = OFPP_ANY,
        .out_group = OFPG_ANY,
        .ofpacts = CONST_CAST(struct ofpact *, ofpacts),
        .ofpacts_len = ofpacts_len,
    };
}

static int
simple_flow_mod(struct ofproto *ofproto,
                const struct match *match, int priority,
                const struct ofpact *ofpacts, size_t ofpacts_len,
                enum ofp_flow_mod_command command)
{
    struct ofputil_flow_mod fm;

    flow_mod_init(&fm, match, priority, ofpacts, ofpacts_len, command);

    return handle_flow_mod__(ofproto, &fm, NULL);
}

/* Adds a flow to OpenFlow flow table 0 in 'p' that matches 'cls_rule' and
 * performs the 'n_actions' actions in 'actions'.  The new flow will not
 * timeout.
 *
 * If cls_rule->priority is in the range of priorities supported by OpenFlow
 * (0...65535, inclusive) then the flow will be visible to OpenFlow
 * controllers; otherwise, it will be hidden.
 *
 * The caller retains ownership of 'cls_rule' and 'ofpacts'.
 *
 * This is a helper function for in-band control and fail-open. */
void
ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
                 int priority, const struct ofpact *ofpacts,
                 size_t ofpacts_len)
    OVS_EXCLUDED(ofproto_mutex)
{
    const struct rule *rule;
    bool must_add;

    /* First do a cheap check whether the rule we're looking for already
     * exists with the actions that we want.  If it does, then we're done. */
    rule = rule_from_cls_rule(classifier_find_match_exactly(
                                  &ofproto->tables[0].cls, match, priority,
                                  OVS_VERSION_MAX));
    if (rule) {
        const struct rule_actions *actions = rule_get_actions(rule);
        must_add = !ofpacts_equal(actions->ofpacts, actions->ofpacts_len,
                                  ofpacts, ofpacts_len);
    } else {
        must_add = true;
    }

    /* If there's no such rule or the rule doesn't have the actions we want,
     * fall back to executing a full flow mod.  We can't optimize this at all
     * because we didn't take enough locks above to ensure that the flow table
     * didn't already change beneath us. */
    if (must_add) {
        simple_flow_mod(ofproto, match, priority, ofpacts, ofpacts_len,
                        OFPFC_MODIFY_STRICT);
    }
}

/* Executes the flow modification specified in 'fm'.  Returns 0 on success, or
 * an OFPERR_* OpenFlow error code on failure.
 *
 * This is a helper function for in-band control and fail-open. */
enum ofperr
ofproto_flow_mod(struct ofproto *ofproto, const struct ofputil_flow_mod *fm)
    OVS_EXCLUDED(ofproto_mutex)
{
    return handle_flow_mod__(ofproto, fm, NULL);
}

/* Searches for a rule with matching criteria exactly equal to 'target' in
 * ofproto's table 0 and, if it finds one, deletes it.
 *
 * This is a helper function for in-band control and fail-open.
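 *
 * A hypothetical sketch (the caller must hold 'ofproto_mutex', per the
 * OVS_REQUIRES annotation below; the match construction is illustrative):
 *
 *     struct match match;
 *     match_init_catchall(&match);
 *     match_set_in_port(&match, in_port);
 *     ofproto_delete_flow(ofproto, &match, priority);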
*/ void ofproto_delete_flow(struct ofproto *ofproto, const struct match *target, int priority) OVS_REQUIRES(ofproto_mutex) { struct classifier *cls = &ofproto->tables[0].cls; struct rule *rule; /* First do a cheap check whether the rule we're looking for has already * been deleted. If so, then we're done. */ rule = rule_from_cls_rule(classifier_find_match_exactly( cls, target, priority, OVS_VERSION_MAX)); if (!rule) { return; } struct rule_collection rules; rule_collection_init(&rules); rule_collection_add(&rules, rule); delete_flows__(&rules, OFPRR_DELETE, NULL); rule_collection_destroy(&rules); } /* Delete all of the flows from all of ofproto's flow tables, then reintroduce * the flows required by in-band control and fail-open. */ void ofproto_flush_flows(struct ofproto *ofproto) { COVERAGE_INC(ofproto_flush); ofproto_flush__(ofproto); connmgr_flushed(ofproto->connmgr); } static void reinit_ports(struct ofproto *p) { struct ofproto_port_dump dump; struct sset devnames; struct ofport *ofport; struct ofproto_port ofproto_port; const char *devname; COVERAGE_INC(ofproto_reinit_ports); sset_init(&devnames); HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { sset_add(&devnames, netdev_get_name(ofport->netdev)); } OFPROTO_PORT_FOR_EACH (&ofproto_port, &dump, p) { sset_add(&devnames, ofproto_port.name); } SSET_FOR_EACH (devname, &devnames) { update_port(p, devname); } sset_destroy(&devnames); } static ofp_port_t alloc_ofp_port(struct ofproto *ofproto, const char *netdev_name) { uint16_t port_idx; port_idx = simap_get(&ofproto->ofp_requests, netdev_name); port_idx = port_idx ? port_idx : UINT16_MAX; if (port_idx >= ofproto->max_ports || ofport_get_usage(ofproto, u16_to_ofp(port_idx)) == LLONG_MAX) { uint16_t lru_ofport = 0, end_port_no = ofproto->alloc_port_no; long long int last_used_at, lru = LLONG_MAX; /* Search for a free OpenFlow port number. We try not to * immediately reuse them to prevent problems due to old * flows. * * We limit the automatically assigned port numbers to the lower half * of the port range, to reserve the upper half for assignment by * controllers. */ for (;;) { if (++ofproto->alloc_port_no >= MIN(ofproto->max_ports, 32768)) { ofproto->alloc_port_no = 1; } last_used_at = ofport_get_usage(ofproto, u16_to_ofp(ofproto->alloc_port_no)); if (!last_used_at) { port_idx = ofproto->alloc_port_no; break; } else if ( last_used_at < time_msec() - 60*60*1000) { /* If the port with ofport 'ofproto->alloc_port_no' was deleted * more than an hour ago, consider it usable. */ ofport_remove_usage(ofproto, u16_to_ofp(ofproto->alloc_port_no)); port_idx = ofproto->alloc_port_no; break; } else if (last_used_at < lru) { lru = last_used_at; lru_ofport = ofproto->alloc_port_no; } if (ofproto->alloc_port_no == end_port_no) { if (lru_ofport) { port_idx = lru_ofport; break; } return OFPP_NONE; } } } ofport_set_usage(ofproto, u16_to_ofp(port_idx), LLONG_MAX); return u16_to_ofp(port_idx); } static void dealloc_ofp_port(struct ofproto *ofproto, ofp_port_t ofp_port) { if (ofp_to_u16(ofp_port) < ofproto->max_ports) { ofport_set_usage(ofproto, ofp_port, time_msec()); } } /* Opens and returns a netdev for 'ofproto_port' in 'ofproto', or a null * pointer if the netdev cannot be opened. On success, also fills in * '*pp'. 
*/ static struct netdev * ofport_open(struct ofproto *ofproto, struct ofproto_port *ofproto_port, struct ofputil_phy_port *pp) { enum netdev_flags flags; struct netdev *netdev; int error; error = netdev_open(ofproto_port->name, ofproto_port->type, &netdev); if (error) { VLOG_WARN_RL(&rl, "%s: ignoring port %s (%"PRIu32") because netdev %s " "cannot be opened (%s)", ofproto->name, ofproto_port->name, ofproto_port->ofp_port, ofproto_port->name, ovs_strerror(error)); return NULL; } if (ofproto_port->ofp_port == OFPP_NONE) { if (!strcmp(ofproto->name, ofproto_port->name)) { ofproto_port->ofp_port = OFPP_LOCAL; } else { ofproto_port->ofp_port = alloc_ofp_port(ofproto, ofproto_port->name); } } pp->port_no = ofproto_port->ofp_port; netdev_get_etheraddr(netdev, &pp->hw_addr); ovs_strlcpy(pp->name, ofproto_port->name, sizeof pp->name); netdev_get_flags(netdev, &flags); pp->config = flags & NETDEV_UP ? 0 : OFPUTIL_PC_PORT_DOWN; pp->state = netdev_get_carrier(netdev) ? 0 : OFPUTIL_PS_LINK_DOWN; netdev_get_features(netdev, &pp->curr, &pp->advertised, &pp->supported, &pp->peer); pp->curr_speed = netdev_features_to_bps(pp->curr, 0) / 1000; pp->max_speed = netdev_features_to_bps(pp->supported, 0) / 1000; return netdev; } /* Returns true if most fields of 'a' and 'b' are equal. Differences in name, * port number, and 'config' bits other than OFPUTIL_PC_PORT_DOWN are * disregarded. */ static bool ofport_equal(const struct ofputil_phy_port *a, const struct ofputil_phy_port *b) { return (eth_addr_equals(a->hw_addr, b->hw_addr) && a->state == b->state && !((a->config ^ b->config) & OFPUTIL_PC_PORT_DOWN) && a->curr == b->curr && a->advertised == b->advertised && a->supported == b->supported && a->peer == b->peer && a->curr_speed == b->curr_speed && a->max_speed == b->max_speed); } /* Adds an ofport to 'p' initialized based on the given 'netdev' and 'opp'. * The caller must ensure that 'p' does not have a conflicting ofport (that is, * one with the same name or port number). */ static int ofport_install(struct ofproto *p, struct netdev *netdev, const struct ofputil_phy_port *pp) { const char *netdev_name = netdev_get_name(netdev); struct ofport *ofport; int error; /* Create ofport. */ ofport = p->ofproto_class->port_alloc(); if (!ofport) { error = ENOMEM; goto error; } ofport->ofproto = p; ofport->netdev = netdev; ofport->change_seq = netdev_get_change_seq(netdev); ofport->pp = *pp; ofport->ofp_port = pp->port_no; ofport->created = time_msec(); /* Add port to 'p'. */ hmap_insert(&p->ports, &ofport->hmap_node, hash_ofp_port(ofport->ofp_port)); shash_add(&p->port_by_name, netdev_name, ofport); update_mtu(p, ofport); /* Let the ofproto_class initialize its private data. */ error = p->ofproto_class->port_construct(ofport); if (error) { goto error; } connmgr_send_port_status(p->connmgr, NULL, pp, OFPPR_ADD); return 0; error: VLOG_WARN_RL(&rl, "%s: could not add port %s (%s)", p->name, netdev_name, ovs_strerror(error)); if (ofport) { ofport_destroy__(ofport); } else { netdev_close(netdev); } return error; } /* Removes 'ofport' from 'p' and destroys it. */ static void ofport_remove(struct ofport *ofport) { struct ofproto *p = ofport->ofproto; bool is_mtu_overridden = ofport_is_mtu_overridden(p, ofport); connmgr_send_port_status(ofport->ofproto->connmgr, NULL, &ofport->pp, OFPPR_DELETE); ofport_destroy(ofport, true); if (!is_mtu_overridden) { update_mtu_ofproto(p); } } /* If 'ofproto' contains an ofport named 'name', removes it from 'ofproto' and * destroys it. 
*/ static void ofport_remove_with_name(struct ofproto *ofproto, const char *name) { struct ofport *port = shash_find_data(&ofproto->port_by_name, name); if (port) { ofport_remove(port); } } /* Updates 'port' with new 'pp' description. * * Does not handle a name or port number change. The caller must implement * such a change as a delete followed by an add. */ static void ofport_modified(struct ofport *port, struct ofputil_phy_port *pp) { port->pp.hw_addr = pp->hw_addr; port->pp.config = ((port->pp.config & ~OFPUTIL_PC_PORT_DOWN) | (pp->config & OFPUTIL_PC_PORT_DOWN)); port->pp.state = ((port->pp.state & ~OFPUTIL_PS_LINK_DOWN) | (pp->state & OFPUTIL_PS_LINK_DOWN)); port->pp.curr = pp->curr; port->pp.advertised = pp->advertised; port->pp.supported = pp->supported; port->pp.peer = pp->peer; port->pp.curr_speed = pp->curr_speed; port->pp.max_speed = pp->max_speed; connmgr_send_port_status(port->ofproto->connmgr, NULL, &port->pp, OFPPR_MODIFY); } /* Update OpenFlow 'state' in 'port' and notify controller. */ void ofproto_port_set_state(struct ofport *port, enum ofputil_port_state state) { if (port->pp.state != state) { port->pp.state = state; connmgr_send_port_status(port->ofproto->connmgr, NULL, &port->pp, OFPPR_MODIFY); } } void ofproto_port_unregister(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *port = ofproto_get_port(ofproto, ofp_port); if (port) { if (port->ofproto->ofproto_class->set_stp_port) { port->ofproto->ofproto_class->set_stp_port(port, NULL); } if (port->ofproto->ofproto_class->set_rstp_port) { port->ofproto->ofproto_class->set_rstp_port(port, NULL); } if (port->ofproto->ofproto_class->set_cfm) { port->ofproto->ofproto_class->set_cfm(port, NULL); } if (port->ofproto->ofproto_class->bundle_remove) { port->ofproto->ofproto_class->bundle_remove(port); } } } static void ofport_destroy__(struct ofport *port) { struct ofproto *ofproto = port->ofproto; const char *name = netdev_get_name(port->netdev); hmap_remove(&ofproto->ports, &port->hmap_node); shash_delete(&ofproto->port_by_name, shash_find(&ofproto->port_by_name, name)); netdev_close(port->netdev); ofproto->ofproto_class->port_dealloc(port); } static void ofport_destroy(struct ofport *port, bool del) { if (port) { dealloc_ofp_port(port->ofproto, port->ofp_port); port->ofproto->ofproto_class->port_destruct(port, del); ofport_destroy__(port); } } struct ofport * ofproto_get_port(const struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *port; HMAP_FOR_EACH_IN_BUCKET (port, hmap_node, hash_ofp_port(ofp_port), &ofproto->ports) { if (port->ofp_port == ofp_port) { return port; } } return NULL; } static long long int ofport_get_usage(const struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport_usage *usage; HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port), &ofproto->ofport_usage) { if (usage->ofp_port == ofp_port) { return usage->last_used; } } return 0; } static void ofport_set_usage(struct ofproto *ofproto, ofp_port_t ofp_port, long long int last_used) { struct ofport_usage *usage; HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port), &ofproto->ofport_usage) { if (usage->ofp_port == ofp_port) { usage->last_used = last_used; return; } } ovs_assert(last_used == LLONG_MAX); usage = xmalloc(sizeof *usage); usage->ofp_port = ofp_port; usage->last_used = last_used; hmap_insert(&ofproto->ofport_usage, &usage->hmap_node, hash_ofp_port(ofp_port)); } static void ofport_remove_usage(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport_usage *usage; HMAP_FOR_EACH_IN_BUCKET 
(usage, hmap_node, hash_ofp_port(ofp_port), &ofproto->ofport_usage) { if (usage->ofp_port == ofp_port) { hmap_remove(&ofproto->ofport_usage, &usage->hmap_node); free(usage); break; } } } int ofproto_port_get_stats(const struct ofport *port, struct netdev_stats *stats) { struct ofproto *ofproto = port->ofproto; int error; if (ofproto->ofproto_class->port_get_stats) { error = ofproto->ofproto_class->port_get_stats(port, stats); } else { error = EOPNOTSUPP; } return error; } static int update_port(struct ofproto *ofproto, const char *name) { struct ofproto_port ofproto_port; struct ofputil_phy_port pp; struct netdev *netdev; struct ofport *port; int error = 0; COVERAGE_INC(ofproto_update_port); /* Fetch 'name''s location and properties from the datapath. */ netdev = (!ofproto_port_query_by_name(ofproto, name, &ofproto_port) ? ofport_open(ofproto, &ofproto_port, &pp) : NULL); if (netdev) { port = ofproto_get_port(ofproto, ofproto_port.ofp_port); if (port && !strcmp(netdev_get_name(port->netdev), name)) { struct netdev *old_netdev = port->netdev; /* 'name' hasn't changed location. Any properties changed? */ if (!ofport_equal(&port->pp, &pp)) { ofport_modified(port, &pp); } update_mtu(ofproto, port); /* Install the newly opened netdev in case it has changed. * Don't close the old netdev yet in case port_modified has to * remove a retained reference to it.*/ port->netdev = netdev; port->change_seq = netdev_get_change_seq(netdev); if (port->ofproto->ofproto_class->port_modified) { port->ofproto->ofproto_class->port_modified(port); } netdev_close(old_netdev); } else { /* If 'port' is nonnull then its name differs from 'name' and thus * we should delete it. If we think there's a port named 'name' * then its port number must be wrong now so delete it too. */ if (port) { ofport_remove(port); } ofport_remove_with_name(ofproto, name); error = ofport_install(ofproto, netdev, &pp); } } else { /* Any port named 'name' is gone now. */ ofport_remove_with_name(ofproto, name); } ofproto_port_destroy(&ofproto_port); return error; } static int init_ports(struct ofproto *p) { struct ofproto_port_dump dump; struct ofproto_port ofproto_port; struct shash_node *node, *next; OFPROTO_PORT_FOR_EACH (&ofproto_port, &dump, p) { const char *name = ofproto_port.name; if (shash_find(&p->port_by_name, name)) { VLOG_WARN_RL(&rl, "%s: ignoring duplicate device %s in datapath", p->name, name); } else { struct ofputil_phy_port pp; struct netdev *netdev; /* Check if an OpenFlow port number had been requested. 
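         * (Such requests are recorded as iface hints in 'init_ofp_ports'
         * before the ofproto itself exists; see ofproto_init().)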
*/ node = shash_find(&init_ofp_ports, name); if (node) { const struct iface_hint *iface_hint = node->data; simap_put(&p->ofp_requests, name, ofp_to_u16(iface_hint->ofp_port)); } netdev = ofport_open(p, &ofproto_port, &pp); if (netdev) { ofport_install(p, netdev, &pp); if (ofp_to_u16(ofproto_port.ofp_port) < p->max_ports) { p->alloc_port_no = MAX(p->alloc_port_no, ofp_to_u16(ofproto_port.ofp_port)); } } } } SHASH_FOR_EACH_SAFE(node, next, &init_ofp_ports) { struct iface_hint *iface_hint = node->data; if (!strcmp(iface_hint->br_name, p->name)) { free(iface_hint->br_name); free(iface_hint->br_type); free(iface_hint); shash_delete(&init_ofp_ports, node); } } return 0; } static bool ofport_is_internal_or_patch(const struct ofproto *p, const struct ofport *port) { return !strcmp(netdev_get_type(port->netdev), ofproto_port_open_type(p->type, "internal")) || !strcmp(netdev_get_type(port->netdev), ofproto_port_open_type(p->type, "patch")); } /* If 'port' is internal or patch and if the user didn't explicitly specify an * mtu through the database, we have to override it. */ static bool ofport_is_mtu_overridden(const struct ofproto *p, const struct ofport *port) { return ofport_is_internal_or_patch(p, port) && !netdev_mtu_is_user_config(port->netdev); } /* Find the minimum MTU of all non-overridden devices attached to 'p'. * Returns ETH_PAYLOAD_MAX or the minimum of the ports. */ static int find_min_mtu(struct ofproto *p) { struct ofport *ofport; int mtu = 0; HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { struct netdev *netdev = ofport->netdev; int dev_mtu; /* Skip any overridden port, since that's what we're trying to set. */ if (ofport_is_mtu_overridden(p, ofport)) { continue; } if (netdev_get_mtu(netdev, &dev_mtu)) { continue; } if (!mtu || dev_mtu < mtu) { mtu = dev_mtu; } } return mtu ? mtu: ETH_PAYLOAD_MAX; } /* Update MTU of all overridden devices on 'p' to the minimum of the * non-overridden ports in event of 'port' added or changed. */ static void update_mtu(struct ofproto *p, struct ofport *port) { struct netdev *netdev = port->netdev; int dev_mtu; if (netdev_get_mtu(netdev, &dev_mtu)) { port->mtu = 0; return; } if (ofport_is_mtu_overridden(p, port)) { if (dev_mtu > p->min_mtu) { if (!netdev_set_mtu(port->netdev, p->min_mtu)) { dev_mtu = p->min_mtu; } } port->mtu = dev_mtu; return; } port->mtu = dev_mtu; /* For non-overridden port find new min mtu. */ update_mtu_ofproto(p); } static void update_mtu_ofproto(struct ofproto *p) { struct ofport *ofport; int old_min = p->min_mtu; p->min_mtu = find_min_mtu(p); if (p->min_mtu == old_min) { return; } HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { struct netdev *netdev = ofport->netdev; if (ofport_is_mtu_overridden(p, ofport)) { if (!netdev_set_mtu(netdev, p->min_mtu)) { ofport->mtu = p->min_mtu; } } } } static void ofproto_rule_destroy__(struct rule *rule) OVS_NO_THREAD_SAFETY_ANALYSIS { cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr)); rule_actions_destroy(rule_get_actions(rule)); ovs_mutex_destroy(&rule->mutex); rule->ofproto->ofproto_class->rule_dealloc(rule); } static void rule_destroy_cb(struct rule *rule) OVS_NO_THREAD_SAFETY_ANALYSIS { /* Send rule removed if needed. 
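     * (No message is sent when 'removed_reason' is still OVS_OFPRR_NONE,
     * which is the case for rules torn down outside of a normal OpenFlow
     * removal, nor for hidden rules, which controllers never see.)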
 */
    if (rule->flags & OFPUTIL_FF_SEND_FLOW_REM
        && rule->removed_reason != OVS_OFPRR_NONE
        && !rule_is_hidden(rule)) {
        ofproto_rule_send_removed(rule);
    }
    rule->ofproto->ofproto_class->rule_destruct(rule);
    mf_vl_mff_unref(&rule->ofproto->vl_mff_map, rule->match_tlv_bitmap);
    mf_vl_mff_unref(&rule->ofproto->vl_mff_map, rule->ofpacts_tlv_bitmap);
    ofproto_rule_destroy__(rule);
}

void
ofproto_rule_ref(struct rule *rule)
{
    if (rule) {
        ovs_refcount_ref(&rule->ref_count);
    }
}

bool
ofproto_rule_try_ref(struct rule *rule)
{
    if (rule) {
        return ovs_refcount_try_ref_rcu(&rule->ref_count);
    }
    return false;
}

/* Decrements 'rule''s ref_count and schedules 'rule' to be destroyed if the
 * ref_count reaches 0.
 *
 * Use of RCU allows short term use (between RCU quiescent periods) without
 * keeping a reference.  A reference must be taken if the rule needs to
 * stay around across the RCU quiescent periods. */
void
ofproto_rule_unref(struct rule *rule)
{
    if (rule && ovs_refcount_unref_relaxed(&rule->ref_count) == 1) {
        ovs_assert(rule->state != RULE_INSERTED);
        ovsrcu_postpone(rule_destroy_cb, rule);
    }
}

static void
remove_rule_rcu__(struct rule *rule)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofproto *ofproto = rule->ofproto;
    struct oftable *table = &ofproto->tables[rule->table_id];

    ovs_assert(!cls_rule_visible_in_version(&rule->cr, OVS_VERSION_MAX));
    if (!classifier_remove(&table->cls, &rule->cr)) {
        OVS_NOT_REACHED();
    }
    if (ofproto->ofproto_class->rule_delete) {
        ofproto->ofproto_class->rule_delete(rule);
    }
    ofproto_rule_unref(rule);
}

static void
remove_rule_rcu(struct rule *rule)
    OVS_EXCLUDED(ofproto_mutex)
{
    ovs_mutex_lock(&ofproto_mutex);
    remove_rule_rcu__(rule);
    ovs_mutex_unlock(&ofproto_mutex);
}

/* Removes and deletes rules from a NULL-terminated array of rule pointers. */
static void
remove_rules_rcu(struct rule **rules)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct rule **orig_rules = rules;

    if (*rules) {
        struct ofproto *ofproto = rules[0]->ofproto;
        unsigned long tables[BITMAP_N_LONGS(256)];
        struct rule *rule;
        size_t table_id;

        memset(tables, 0, sizeof tables);

        ovs_mutex_lock(&ofproto_mutex);
        while ((rule = *rules++)) {
            /* Defer once for each new table.  This defers the subtable
             * cleanup until later, so that when removing a large number of
             * flows the operation is faster.
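             *
             * A minimal sketch of the batching pattern used here (names as
             * in this file; the three removals are hypothetical):
             *
             *     classifier_defer(cls);            // start batching
             *     classifier_remove(cls, &r1->cr);
             *     classifier_remove(cls, &r2->cr);
             *     classifier_remove(cls, &r3->cr);
             *     classifier_publish(cls);          // one deferred cleanup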
             */
            if (!bitmap_is_set(tables, rule->table_id)) {
                struct classifier *cls = &ofproto->tables[rule->table_id].cls;

                bitmap_set1(tables, rule->table_id);
                classifier_defer(cls);
            }
            remove_rule_rcu__(rule);
        }

        BITMAP_FOR_EACH_1(table_id, 256, tables) {
            struct classifier *cls = &ofproto->tables[table_id].cls;

            classifier_publish(cls);
        }
        ovs_mutex_unlock(&ofproto_mutex);
    }

    free(orig_rules);
}

void
ofproto_group_ref(struct ofgroup *group)
{
    if (group) {
        ovs_refcount_ref(&group->ref_count);
    }
}

bool
ofproto_group_try_ref(struct ofgroup *group)
{
    if (group) {
        return ovs_refcount_try_ref_rcu(&group->ref_count);
    }
    return false;
}

static void
group_destroy_cb(struct ofgroup *group)
{
    group->ofproto->ofproto_class->group_destruct(group);
    ofputil_group_properties_destroy(CONST_CAST(struct ofputil_group_props *,
                                                &group->props));
    ofputil_bucket_list_destroy(CONST_CAST(struct ovs_list *,
                                           &group->buckets));
    group->ofproto->ofproto_class->group_dealloc(group);
}

void
ofproto_group_unref(struct ofgroup *group)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    if (group && ovs_refcount_unref_relaxed(&group->ref_count) == 1) {
        ovs_assert(rule_collection_n(&group->rules) == 0);
        ovsrcu_postpone(group_destroy_cb, group);
    }
}

static void
remove_group_rcu__(struct ofgroup *group)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofproto *ofproto = group->ofproto;

    ovs_assert(!versions_visible_in_version(&group->versions,
                                            OVS_VERSION_MAX));

    cmap_remove(&ofproto->groups, &group->cmap_node,
                hash_int(group->group_id, 0));
    ofproto_group_unref(group);
}

static void
remove_group_rcu(struct ofgroup *group)
    OVS_EXCLUDED(ofproto_mutex)
{
    ovs_mutex_lock(&ofproto_mutex);
    remove_group_rcu__(group);
    ovs_mutex_unlock(&ofproto_mutex);
}

/* Removes and deletes groups from a NULL-terminated array of group
 * pointers. */
static void
remove_groups_rcu(struct ofgroup **groups)
    OVS_EXCLUDED(ofproto_mutex)
{
    ovs_mutex_lock(&ofproto_mutex);
    for (struct ofgroup **g = groups; *g; g++) {
        remove_group_rcu__(*g);
    }
    ovs_mutex_unlock(&ofproto_mutex);
    free(groups);
}

static uint32_t get_provider_meter_id(const struct ofproto *,
                                      uint32_t of_meter_id);

/* Creates and returns a new 'struct rule_actions', whose actions are a copy
 * of the 'ofpacts_len' bytes of 'ofpacts'. */
const struct rule_actions *
rule_actions_create(const struct ofpact *ofpacts, size_t ofpacts_len)
{
    struct rule_actions *actions;

    actions = xmalloc(sizeof *actions + ofpacts_len);
    actions->ofpacts_len = ofpacts_len;
    memcpy(actions->ofpacts, ofpacts, ofpacts_len);
    actions->has_meter = ofpacts_get_meter(ofpacts, ofpacts_len) != 0;
    actions->has_groups
        = (ofpact_find_type_flattened(ofpacts, OFPACT_GROUP,
                                      ofpact_end(ofpacts, ofpacts_len))
           != NULL);
    actions->has_learn_with_delete = (next_learn_with_delete(actions, NULL)
                                      != NULL);

    return actions;
}

/* Free the actions after the RCU quiescent period is reached. */
void
rule_actions_destroy(const struct rule_actions *actions)
{
    if (actions) {
        ovsrcu_postpone(free, CONST_CAST(struct rule_actions *, actions));
    }
}

/* Returns true if 'rule' has an OpenFlow OFPAT_OUTPUT or OFPAT_ENQUEUE action
 * that outputs to 'port' (output to OFPP_FLOOD and OFPP_ALL doesn't count). */
bool
ofproto_rule_has_out_port(const struct rule *rule, ofp_port_t port)
    OVS_REQUIRES(ofproto_mutex)
{
    if (port == OFPP_ANY) {
        return true;
    } else {
        const struct rule_actions *actions = rule_get_actions(rule);
        return ofpacts_output_to_port(actions->ofpacts,
                                      actions->ofpacts_len, port);
    }
}

/* Returns true if 'rule' has a group action whose group ID equals 'group_id',
 * or if 'group_id' is OFPG_ANY.
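 *
 * For example (hypothetical): with 'group_id' == 5, a rule whose actions
 * include "group:5" matches, a rule with only "output:1" does not, and
 * every rule matches when 'group_id' is OFPG_ANY.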
*/ static bool ofproto_rule_has_out_group(const struct rule *rule, uint32_t group_id) OVS_REQUIRES(ofproto_mutex) { if (group_id == OFPG_ANY) { return true; } else { const struct rule_actions *actions = rule_get_actions(rule); return ofpacts_output_to_group(actions->ofpacts, actions->ofpacts_len, group_id); } } static bool rule_is_readonly(const struct rule *rule) { const struct oftable *table = &rule->ofproto->tables[rule->table_id]; return (table->flags & OFTABLE_READONLY) != 0; } static uint32_t hash_learned_cookie(ovs_be64 cookie_, uint8_t table_id) { uint64_t cookie = (OVS_FORCE uint64_t) cookie_; return hash_3words(cookie, cookie >> 32, table_id); } static void learned_cookies_update_one__(struct ofproto *ofproto, const struct ofpact_learn *learn, int delta, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { uint32_t hash = hash_learned_cookie(learn->cookie, learn->table_id); struct learned_cookie *c; HMAP_FOR_EACH_WITH_HASH (c, u.hmap_node, hash, &ofproto->learned_cookies) { if (c->cookie == learn->cookie && c->table_id == learn->table_id) { c->n += delta; ovs_assert(c->n >= 0); if (!c->n) { hmap_remove(&ofproto->learned_cookies, &c->u.hmap_node); ovs_list_push_back(dead_cookies, &c->u.list_node); } return; } } ovs_assert(delta > 0); c = xmalloc(sizeof *c); hmap_insert(&ofproto->learned_cookies, &c->u.hmap_node, hash); c->cookie = learn->cookie; c->table_id = learn->table_id; c->n = delta; } static const struct ofpact_learn * next_learn_with_delete(const struct rule_actions *actions, const struct ofpact_learn *start) { const struct ofpact *pos; for (pos = start ? ofpact_next(&start->ofpact) : actions->ofpacts; pos < ofpact_end(actions->ofpacts, actions->ofpacts_len); pos = ofpact_next(pos)) { if (pos->type == OFPACT_LEARN) { const struct ofpact_learn *learn = ofpact_get_LEARN(pos); if (learn->flags & NX_LEARN_F_DELETE_LEARNED) { return learn; } } } return NULL; } static void learned_cookies_update__(struct ofproto *ofproto, const struct rule_actions *actions, int delta, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { if (actions->has_learn_with_delete) { const struct ofpact_learn *learn; for (learn = next_learn_with_delete(actions, NULL); learn; learn = next_learn_with_delete(actions, learn)) { learned_cookies_update_one__(ofproto, learn, delta, dead_cookies); } } } static void learned_cookies_inc(struct ofproto *ofproto, const struct rule_actions *actions) OVS_REQUIRES(ofproto_mutex) { learned_cookies_update__(ofproto, actions, +1, NULL); } static void learned_cookies_dec(struct ofproto *ofproto, const struct rule_actions *actions, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { learned_cookies_update__(ofproto, actions, -1, dead_cookies); } static void learned_cookies_flush(struct ofproto *ofproto, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { struct learned_cookie *c; LIST_FOR_EACH_POP (c, u.list_node, dead_cookies) { struct rule_criteria criteria; struct rule_collection rules; struct match match; match_init_catchall(&match); rule_criteria_init(&criteria, c->table_id, &match, 0, OVS_VERSION_MAX, c->cookie, OVS_BE64_MAX, OFPP_ANY, OFPG_ANY); rule_criteria_require_rw(&criteria, false); collect_rules_loose(ofproto, &criteria, &rules); rule_criteria_destroy(&criteria); delete_flows__(&rules, OFPRR_DELETE, NULL); free(c); } } static enum ofperr handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh) { ofconn_send_reply(ofconn, make_echo_reply(oh)); return 0; } static void query_tables(struct ofproto *ofproto, struct 
ofputil_table_features **featuresp, struct ofputil_table_stats **statsp) { struct mf_bitmap rw_fields = oxm_writable_fields(); struct mf_bitmap match = oxm_matchable_fields(); struct mf_bitmap mask = oxm_maskable_fields(); struct ofputil_table_features *features; struct ofputil_table_stats *stats; int i; features = *featuresp = xcalloc(ofproto->n_tables, sizeof *features); for (i = 0; i < ofproto->n_tables; i++) { struct ofputil_table_features *f = &features[i]; f->table_id = i; sprintf(f->name, "table%d", i); f->metadata_match = OVS_BE64_MAX; f->metadata_write = OVS_BE64_MAX; atomic_read_relaxed(&ofproto->tables[i].miss_config, &f->miss_config); f->max_entries = 1000000; bool more_tables = false; for (int j = i + 1; j < ofproto->n_tables; j++) { if (!(ofproto->tables[j].flags & OFTABLE_HIDDEN)) { bitmap_set1(f->nonmiss.next, j); more_tables = true; } } f->nonmiss.instructions = (1u << N_OVS_INSTRUCTIONS) - 1; if (!more_tables) { f->nonmiss.instructions &= ~(1u << OVSINST_OFPIT11_GOTO_TABLE); } f->nonmiss.write.ofpacts = (UINT64_C(1) << N_OFPACTS) - 1; f->nonmiss.write.set_fields = rw_fields; f->nonmiss.apply = f->nonmiss.write; f->miss = f->nonmiss; f->match = match; f->mask = mask; f->wildcard = match; } if (statsp) { stats = *statsp = xcalloc(ofproto->n_tables, sizeof *stats); for (i = 0; i < ofproto->n_tables; i++) { struct ofputil_table_stats *s = &stats[i]; s->table_id = i; s->active_count = ofproto->tables[i].n_flows; if (i == 0) { s->active_count -= connmgr_count_hidden_rules( ofproto->connmgr); } } } else { stats = NULL; } ofproto->ofproto_class->query_tables(ofproto, features, stats); for (i = 0; i < ofproto->n_tables; i++) { const struct oftable *table = &ofproto->tables[i]; struct ofputil_table_features *f = &features[i]; if (table->name) { ovs_strzcpy(f->name, table->name, sizeof f->name); } if (table->max_flows < f->max_entries) { f->max_entries = table->max_flows; } } } static void query_switch_features(struct ofproto *ofproto, bool *arp_match_ip, uint64_t *ofpacts) { struct ofputil_table_features *features, *f; *arp_match_ip = false; *ofpacts = 0; query_tables(ofproto, &features, NULL); for (f = features; f < &features[ofproto->n_tables]; f++) { *ofpacts |= f->nonmiss.apply.ofpacts | f->miss.apply.ofpacts; if (bitmap_is_set(f->match.bm, MFF_ARP_SPA) || bitmap_is_set(f->match.bm, MFF_ARP_TPA)) { *arp_match_ip = true; } } free(features); /* Sanity check. 
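     * (Every OpenFlow switch must support the output action, so the table
     * features reported by query_tables() always include OFPACT_OUTPUT.)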
*/ ovs_assert(*ofpacts & (UINT64_C(1) << OFPACT_OUTPUT)); } static enum ofperr handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_switch_features features; struct ofport *port; bool arp_match_ip; struct ofpbuf *b; query_switch_features(ofproto, &arp_match_ip, &features.ofpacts); features.datapath_id = ofproto->datapath_id; features.n_buffers = 0; features.n_tables = ofproto_get_n_visible_tables(ofproto); features.capabilities = (OFPUTIL_C_FLOW_STATS | OFPUTIL_C_TABLE_STATS | OFPUTIL_C_PORT_STATS | OFPUTIL_C_QUEUE_STATS | OFPUTIL_C_GROUP_STATS | OFPUTIL_C_BUNDLES); if (arp_match_ip) { features.capabilities |= OFPUTIL_C_ARP_MATCH_IP; } /* FIXME: Fill in proper features.auxiliary_id for auxiliary connections */ features.auxiliary_id = 0; b = ofputil_encode_switch_features(&features, ofconn_get_protocol(ofconn), oh->xid); HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) { ofputil_put_switch_features_port(&port->pp, b); } ofconn_send_reply(ofconn, b); return 0; } static enum ofperr handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_switch_config config; config.frag = ofconn_get_ofproto(ofconn)->frag_handling; config.invalid_ttl_to_controller = ofconn_get_invalid_ttl_to_controller(ofconn); config.miss_send_len = ofconn_get_miss_send_len(ofconn); ofconn_send_reply(ofconn, ofputil_encode_get_config_reply(oh, &config)); return 0; } static enum ofperr handle_set_config(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_switch_config config; enum ofperr error; error = ofputil_decode_set_config(oh, &config); if (error) { return error; } if (ofconn_get_type(ofconn) != OFCONN_PRIMARY || ofconn_get_role(ofconn) != OFPCR12_ROLE_SLAVE) { enum ofputil_frag_handling cur = ofproto->frag_handling; enum ofputil_frag_handling next = config.frag; if (cur != next) { if (ofproto->ofproto_class->set_frag_handling(ofproto, next)) { ofproto->frag_handling = next; } else { VLOG_WARN_RL(&rl, "%s: unsupported fragment handling mode %s", ofproto->name, ofputil_frag_handling_to_string(next)); } } } if (config.invalid_ttl_to_controller >= 0) { ofconn_set_invalid_ttl_to_controller(ofconn, config.invalid_ttl_to_controller); } ofconn_set_miss_send_len(ofconn, config.miss_send_len); return 0; } /* Checks whether 'ofconn' is a slave controller. If so, returns an OpenFlow * error message code for the caller to propagate upward. Otherwise, returns * 0. * * The log message mentions 'msg_type'. */ static enum ofperr reject_slave_controller(struct ofconn *ofconn) { if (ofconn_get_type(ofconn) == OFCONN_PRIMARY && ofconn_get_role(ofconn) == OFPCR12_ROLE_SLAVE) { return OFPERR_OFPBRC_IS_SLAVE; } else { return 0; } } /* Checks that the 'ofpacts_len' bytes of action in 'ofpacts' are appropriate * for 'ofproto': * * - If they use a meter, then 'ofproto' has that meter configured. * * - If they use any groups, then 'ofproto' has that group configured. * * Returns 0 if successful, otherwise an OpenFlow error. Caller must hold * 'ofproto_mutex' for the result to be valid also after this function * returns. 
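 *
 * A minimal caller sketch (hypothetical):
 *
 *     ovs_mutex_lock(&ofproto_mutex);
 *     enum ofperr error = ofproto_check_ofpacts(ofproto, acts, acts_len);
 *     if (!error) {
 *         ...install or execute the actions while still holding the mutex...
 *     }
 *     ovs_mutex_unlock(&ofproto_mutex);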
 */
enum ofperr
ofproto_check_ofpacts(struct ofproto *ofproto,
                      const struct ofpact ofpacts[], size_t ofpacts_len)
    OVS_REQUIRES(ofproto_mutex)
{
    uint32_t mid;

    mid = ofpacts_get_meter(ofpacts, ofpacts_len);
    if (mid && get_provider_meter_id(ofproto, mid) == UINT32_MAX) {
        return OFPERR_OFPMMFC_INVALID_METER;
    }

    const struct ofpact_group *a;
    OFPACT_FOR_EACH_TYPE_FLATTENED (a, GROUP, ofpacts, ofpacts_len) {
        if (!ofproto_group_exists(ofproto, a->group_id)) {
            return OFPERR_OFPBAC_BAD_OUT_GROUP;
        }
    }

    return 0;
}

void
ofproto_packet_out_uninit(struct ofproto_packet_out *opo)
{
    dp_packet_delete(opo->packet);
    opo->packet = NULL;
    free(opo->flow);
    opo->flow = NULL;
    free(opo->ofpacts);
    opo->ofpacts = NULL;
    opo->ofpacts_len = 0;
    ovs_assert(!opo->aux);
}

/* Takes ownership of po->ofpacts, which must have been malloc'ed. */
static enum ofperr
ofproto_packet_out_init(struct ofproto *ofproto,
                        struct ofconn *ofconn,
                        struct ofproto_packet_out *opo,
                        const struct ofputil_packet_out *po)
{
    enum ofperr error;

    if (ofp_to_u16(po->in_port) >= ofproto->max_ports
        && ofp_to_u16(po->in_port) < ofp_to_u16(OFPP_MAX)) {
        return OFPERR_OFPBRC_BAD_PORT;
    }

    /* Get payload. */
    if (po->buffer_id != UINT32_MAX) {
        return OFPERR_OFPBRC_BUFFER_UNKNOWN;
    }

    /* Ensure that the L3 header is 32-bit aligned. */
    opo->packet = dp_packet_clone_data_with_headroom(po->packet,
                                                     po->packet_len, 2);
    /* Store struct flow. */
    opo->flow = xmalloc(sizeof *opo->flow);
    flow_extract(opo->packet, opo->flow);
    opo->flow->in_port.ofp_port = po->in_port;

    /* Check actions like for flow mods.  We pass a 'table_id' of 0 to
     * ofpacts_check_consistency(), which isn't strictly correct because
     * these actions aren't in any table.  This is OK as 'table_id' is only
     * used to check instructions (e.g., goto-table), which can't appear on
     * the action list of a packet-out. */
    error = ofpacts_check_consistency(po->ofpacts, po->ofpacts_len,
                                      opo->flow,
                                      u16_to_ofp(ofproto->max_ports), 0,
                                      ofproto->n_tables,
                                      ofconn_get_protocol(ofconn));
    if (error) {
        dp_packet_delete(opo->packet);
        free(opo->flow);
        return error;
    }

    opo->ofpacts = po->ofpacts;
    opo->ofpacts_len = po->ofpacts_len;

    opo->aux = NULL;
    return 0;
}

static enum ofperr
ofproto_packet_out_start(struct ofproto *ofproto,
                         struct ofproto_packet_out *opo)
    OVS_REQUIRES(ofproto_mutex)
{
    enum ofperr error;

    error = ofproto_check_ofpacts(ofproto, opo->ofpacts, opo->ofpacts_len);
    if (error) {
        return error;
    }

    return ofproto->ofproto_class->packet_xlate(ofproto, opo);
}

static void
ofproto_packet_out_revert(struct ofproto *ofproto,
                          struct ofproto_packet_out *opo)
    OVS_REQUIRES(ofproto_mutex)
{
    ofproto->ofproto_class->packet_xlate_revert(ofproto, opo);
}

static void
ofproto_packet_out_finish(struct ofproto *ofproto,
                          struct ofproto_packet_out *opo)
    OVS_REQUIRES(ofproto_mutex)
{
    ofproto->ofproto_class->packet_execute(ofproto, opo);
}

static enum ofperr
handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *p = ofconn_get_ofproto(ofconn);
    struct ofputil_packet_out po;
    struct ofproto_packet_out opo;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    enum ofperr error;

    COVERAGE_INC(ofproto_packet_out);

    error = reject_slave_controller(ofconn);
    if (error) {
        return error;
    }

    /* Decode message. */
    ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    error = ofputil_decode_packet_out(&po, oh, &ofpacts);
    if (error) {
        ofpbuf_uninit(&ofpacts);
        return error;
    }

    po.ofpacts = ofpbuf_steal_data(&ofpacts); /* Move to heap.
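     * Ownership of the heap-allocated actions then passes to 'opo' in
     * ofproto_packet_out_init() below, or they are freed on error.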
*/ error = ofproto_packet_out_init(p, ofconn, &opo, &po); if (error) { free(po.ofpacts); return error; } ovs_mutex_lock(&ofproto_mutex); opo.version = p->tables_version; error = ofproto_packet_out_start(p, &opo); if (!error) { ofproto_packet_out_finish(p, &opo); } ovs_mutex_unlock(&ofproto_mutex); ofproto_packet_out_uninit(&opo); return error; } static enum ofperr handle_nxt_resume(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_packet_in_private pin; enum ofperr error; error = ofputil_decode_packet_in_private(oh, false, ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map, &pin, NULL, NULL); if (error) { return error; } error = (ofproto->ofproto_class->nxt_resume ? ofproto->ofproto_class->nxt_resume(ofproto, &pin) : OFPERR_NXR_NOT_SUPPORTED); ofputil_packet_in_private_destroy(&pin); return error; } static void update_port_config(struct ofconn *ofconn, struct ofport *port, enum ofputil_port_config config, enum ofputil_port_config mask) { enum ofputil_port_config toggle = (config ^ port->pp.config) & mask; if (toggle & OFPUTIL_PC_PORT_DOWN && (config & OFPUTIL_PC_PORT_DOWN ? netdev_turn_flags_off(port->netdev, NETDEV_UP, NULL) : netdev_turn_flags_on(port->netdev, NETDEV_UP, NULL))) { /* We tried to bring the port up or down, but it failed, so don't * update the "down" bit. */ toggle &= ~OFPUTIL_PC_PORT_DOWN; } if (toggle) { enum ofputil_port_config old_config = port->pp.config; port->pp.config ^= toggle; port->ofproto->ofproto_class->port_reconfigured(port, old_config); connmgr_send_port_status(port->ofproto->connmgr, ofconn, &port->pp, OFPPR_MODIFY); } } static enum ofperr port_mod_start(struct ofconn *ofconn, struct ofputil_port_mod *pm, struct ofport **port) { struct ofproto *p = ofconn_get_ofproto(ofconn); *port = ofproto_get_port(p, pm->port_no); if (!*port) { return OFPERR_OFPPMFC_BAD_PORT; } if (!eth_addr_equals((*port)->pp.hw_addr, pm->hw_addr)) { return OFPERR_OFPPMFC_BAD_HW_ADDR; } return 0; } static void port_mod_finish(struct ofconn *ofconn, struct ofputil_port_mod *pm, struct ofport *port) { update_port_config(ofconn, port, pm->config, pm->mask); if (pm->advertise) { netdev_set_advertisements(port->netdev, pm->advertise); } } static enum ofperr handle_port_mod(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_port_mod pm; struct ofport *port; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_port_mod(oh, &pm, false); if (error) { return error; } error = port_mod_start(ofconn, &pm, &port); if (!error) { port_mod_finish(ofconn, &pm, port); } return error; } static enum ofperr handle_desc_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { static const char *default_mfr_desc = "Nicira, Inc."; static const char *default_hw_desc = "Open vSwitch"; static const char *default_sw_desc = VERSION; static const char *default_serial_desc = "None"; static const char *default_dp_desc = "None"; struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofp_desc_stats *ods; struct ofpbuf *msg; msg = ofpraw_alloc_stats_reply(request, 0); ods = ofpbuf_put_zeros(msg, sizeof *ods); ovs_strlcpy(ods->mfr_desc, p->mfr_desc ? p->mfr_desc : default_mfr_desc, sizeof ods->mfr_desc); ovs_strlcpy(ods->hw_desc, p->hw_desc ? p->hw_desc : default_hw_desc, sizeof ods->hw_desc); ovs_strlcpy(ods->sw_desc, p->sw_desc ? p->sw_desc : default_sw_desc, sizeof ods->sw_desc); ovs_strlcpy(ods->serial_num, p->serial_desc ? 
p->serial_desc : default_serial_desc, sizeof ods->serial_num); ovs_strlcpy(ods->dp_desc, p->dp_desc ? p->dp_desc : default_dp_desc, sizeof ods->dp_desc); ofconn_send_reply(ofconn, msg); return 0; } static enum ofperr handle_table_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_table_features *features; struct ofputil_table_stats *stats; struct ofpbuf *reply; size_t i; query_tables(ofproto, &features, &stats); reply = ofputil_encode_table_stats_reply(request); for (i = 0; i < ofproto->n_tables; i++) { if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) { ofputil_append_table_stats_reply(reply, &stats[i], &features[i]); } } ofconn_send_reply(ofconn, reply); free(features); free(stats); return 0; } static enum ofperr handle_table_features_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofpbuf msg = ofpbuf_const_initializer(request, ntohs(request->length)); ofpraw_pull_assert(&msg); if (msg.size || ofpmp_more(request)) { return OFPERR_OFPTFFC_EPERM; } struct ofputil_table_features *features; query_tables(ofproto, &features, NULL); struct ovs_list replies; ofpmp_init(&replies, request); for (size_t i = 0; i < ofproto->n_tables; i++) { if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) { ofputil_append_table_features_reply(&features[i], &replies); } } ofconn_send_replies(ofconn, &replies); free(features); return 0; } /* Returns the vacancy of 'oftable', a number that ranges from 0 (if the table * is full) to 100 (if the table is empty). * * A table without a limit on flows is considered to be empty. */ static uint8_t oftable_vacancy(const struct oftable *t) { return (!t->max_flows ? 100 : t->n_flows >= t->max_flows ? 0 : (t->max_flows - t->n_flows) * 100.0 / t->max_flows); } static void query_table_desc__(struct ofputil_table_desc *td, struct ofproto *ofproto, uint8_t table_id) { const struct oftable *t = &ofproto->tables[table_id]; td->table_id = table_id; td->eviction = (t->eviction & EVICTION_OPENFLOW ? OFPUTIL_TABLE_EVICTION_ON : OFPUTIL_TABLE_EVICTION_OFF); td->eviction_flags = OFPROTO_EVICTION_FLAGS; td->vacancy = (t->vacancy_event ? OFPUTIL_TABLE_VACANCY_ON : OFPUTIL_TABLE_VACANCY_OFF); td->table_vacancy.vacancy_down = t->vacancy_down; td->table_vacancy.vacancy_up = t->vacancy_up; td->table_vacancy.vacancy = oftable_vacancy(t); } /* This function queries the database for dumping table-desc. */ static void query_tables_desc(struct ofproto *ofproto, struct ofputil_table_desc **descp) { struct ofputil_table_desc *table_desc; size_t i; table_desc = *descp = xcalloc(ofproto->n_tables, sizeof *table_desc); for (i = 0; i < ofproto->n_tables; i++) { struct ofputil_table_desc *td = &table_desc[i]; query_table_desc__(td, ofproto, i); } } /* Function to handle dump-table-desc request. 
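 *
 * Like the other multipart handlers here, it accumulates one reply per
 * visible table and sends the whole list at once.  A condensed sketch of
 * the pattern:
 *
 *     struct ovs_list replies;
 *     ofpmp_init(&replies, request);
 *     ...append one ofputil_table_desc reply per visible table...
 *     ofconn_send_replies(ofconn, &replies);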
*/ static enum ofperr handle_table_desc_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_table_desc *table_desc; struct ovs_list replies; size_t i; query_tables_desc(ofproto, &table_desc); ofpmp_init(&replies, request); for (i = 0; i < ofproto->n_tables; i++) { if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) { ofputil_append_table_desc_reply(&table_desc[i], &replies, request->version); } } ofconn_send_replies(ofconn, &replies); free(table_desc); return 0; } /* This function determines and sends the vacancy event, based on the value * of current vacancy and threshold vacancy. If the current vacancy is less * than or equal to vacancy_down, vacancy up events must be enabled, and when * the current vacancy is greater or equal to vacancy_up, vacancy down events * must be enabled. */ static void send_table_status(struct ofproto *ofproto, uint8_t table_id) { struct oftable *t = &ofproto->tables[table_id]; if (!t->vacancy_event) { return; } uint8_t vacancy = oftable_vacancy(t); enum ofp14_table_reason event; if (vacancy < t->vacancy_down) { event = OFPTR_VACANCY_DOWN; } else if (vacancy > t->vacancy_up) { event = OFPTR_VACANCY_UP; } else { return; } if (event == t->vacancy_event) { struct ofputil_table_desc td; query_table_desc__(&td, ofproto, table_id); connmgr_send_table_status(ofproto->connmgr, &td, event); t->vacancy_event = (event == OFPTR_VACANCY_DOWN ? OFPTR_VACANCY_UP : OFPTR_VACANCY_DOWN); } } static void append_port_stat(struct ofport *port, struct ovs_list *replies) { struct ofputil_port_stats ops = { .port_no = port->pp.port_no }; calc_duration(port->created, time_msec(), &ops.duration_sec, &ops.duration_nsec); /* Intentionally ignore return value, since errors will set * 'stats' to all-1s, which is correct for OpenFlow, and * netdev_get_stats() will log errors. 
*/ ofproto_port_get_stats(port, &ops.stats); ofputil_append_port_stat(replies, &ops); } static void handle_port_request(struct ofconn *ofconn, const struct ofp_header *request, ofp_port_t port_no, void (*cb)(struct ofport *, struct ovs_list *replies)) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofport *port; struct ovs_list replies; ofpmp_init(&replies, request); if (port_no != OFPP_ANY) { port = ofproto_get_port(ofproto, port_no); if (port) { cb(port, &replies); } } else { HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) { cb(port, &replies); } } ofconn_send_replies(ofconn, &replies); } static enum ofperr handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { ofp_port_t port_no; enum ofperr error; error = ofputil_decode_port_stats_request(request, &port_no); if (!error) { handle_port_request(ofconn, request, port_no, append_port_stat); } return error; } static void append_port_desc(struct ofport *port, struct ovs_list *replies) { ofputil_append_port_desc_stats_reply(&port->pp, replies); } static enum ofperr handle_port_desc_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { ofp_port_t port_no; enum ofperr error; error = ofputil_decode_port_desc_stats_request(request, &port_no); if (!error) { handle_port_request(ofconn, request, port_no, append_port_desc); } return error; } static uint32_t hash_cookie(ovs_be64 cookie) { return hash_uint64((OVS_FORCE uint64_t)cookie); } static void cookies_insert(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { hindex_insert(&ofproto->cookies, &rule->cookie_node, hash_cookie(rule->flow_cookie)); } static void cookies_remove(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { hindex_remove(&ofproto->cookies, &rule->cookie_node); } static void calc_duration(long long int start, long long int now, uint32_t *sec, uint32_t *nsec) { long long int msecs = now - start; *sec = msecs / 1000; *nsec = (msecs % 1000) * (1000 * 1000); } /* Checks whether 'table_id' is 0xff or a valid table ID in 'ofproto'. Returns * true if 'table_id' is OK, false otherwise. */ static bool check_table_id(const struct ofproto *ofproto, uint8_t table_id) { return table_id == OFPTT_ALL || table_id < ofproto->n_tables; } static struct oftable * next_visible_table(const struct ofproto *ofproto, uint8_t table_id) { struct oftable *table; for (table = &ofproto->tables[table_id]; table < &ofproto->tables[ofproto->n_tables]; table++) { if (!(table->flags & OFTABLE_HIDDEN)) { return table; } } return NULL; } static struct oftable * first_matching_table(const struct ofproto *ofproto, uint8_t table_id) { if (table_id == 0xff) { return next_visible_table(ofproto, 0); } else if (table_id < ofproto->n_tables) { return &ofproto->tables[table_id]; } else { return NULL; } } static struct oftable * next_matching_table(const struct ofproto *ofproto, const struct oftable *table, uint8_t table_id) { return (table_id == 0xff ? next_visible_table(ofproto, (table - ofproto->tables) + 1) : NULL); } /* Assigns TABLE to each oftable, in turn, that matches TABLE_ID in OFPROTO: * * - If TABLE_ID is 0xff, this iterates over every classifier table in * OFPROTO, skipping tables marked OFTABLE_HIDDEN. * * - If TABLE_ID is the number of a table in OFPROTO, then the loop iterates * only once, for that table. (This can be used to access tables marked * OFTABLE_HIDDEN.) * * - Otherwise, TABLE_ID isn't valid for OFPROTO, so the loop won't be * entered at all. 
(Perhaps you should
 *     have validated TABLE_ID with check_table_id().)
 *
 * All parameters are evaluated multiple times. */
#define FOR_EACH_MATCHING_TABLE(TABLE, TABLE_ID, OFPROTO)         \
    for ((TABLE) = first_matching_table(OFPROTO, TABLE_ID);       \
         (TABLE) != NULL;                                         \
         (TABLE) = next_matching_table(OFPROTO, TABLE, TABLE_ID))

/* Initializes 'criteria' in a straightforward way based on the other
 * parameters.
 *
 * By default, the criteria include flows that are read-only, on the
 * assumption that the collected flows won't be modified.  Call
 * rule_criteria_require_rw() if flows will be modified.
 *
 * For "loose" matching, the 'priority' parameter is unimportant and may be
 * supplied as 0. */
static void
rule_criteria_init(struct rule_criteria *criteria, uint8_t table_id,
                   const struct match *match, int priority,
                   ovs_version_t version, ovs_be64 cookie,
                   ovs_be64 cookie_mask, ofp_port_t out_port,
                   uint32_t out_group)
{
    criteria->table_id = table_id;
    cls_rule_init(&criteria->cr, match, priority);
    criteria->version = version;
    criteria->cookie = cookie;
    criteria->cookie_mask = cookie_mask;
    criteria->out_port = out_port;
    criteria->out_group = out_group;

    /* We ordinarily want to skip hidden rules, but there has to be a way for
     * code internal to OVS to modify and delete them, so if the criteria
     * specify a priority that can only be for a hidden flow, then allow
     * hidden rules to be selected.  (This doesn't allow OpenFlow clients to
     * meddle with hidden flows because OpenFlow uses only a 16-bit field to
     * specify priority.) */
    criteria->include_hidden = priority > UINT16_MAX;

    /* We assume that the criteria are being used to collect flows for
     * reading but not modification.  Thus, we should collect read-only
     * flows. */
    criteria->include_readonly = true;
}

/* By default, criteria initialized by rule_criteria_init() will match flows
 * that are read-only, on the assumption that the collected flows won't be
 * modified.  Call this function to match only flows that are modifiable.
 *
 * Specify 'can_write_readonly' as false in ordinary circumstances, true if
 * the caller has special privileges that allow it to modify even "read-only"
 * flows. */
static void
rule_criteria_require_rw(struct rule_criteria *criteria,
                         bool can_write_readonly)
{
    criteria->include_readonly = can_write_readonly;
}

static void
rule_criteria_destroy(struct rule_criteria *criteria)
{
    cls_rule_destroy(&criteria->cr);
    criteria->version = OVS_VERSION_NOT_REMOVED; /* Mark as destroyed. */
}

/* Schedules postponed removal of rules, destroys 'rules'. */
static void
remove_rules_postponed(struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    if (rule_collection_n(rules) > 0) {
        if (rule_collection_n(rules) == 1) {
            ovsrcu_postpone(remove_rule_rcu,
                            rule_collection_rules(rules)[0]);
            rule_collection_init(rules);
        } else {
            ovsrcu_postpone(remove_rules_rcu, rule_collection_detach(rules));
        }
    }
}

/* Schedules postponed removal of groups, destroys 'groups'. */
static void
remove_groups_postponed(struct group_collection *groups)
    OVS_REQUIRES(ofproto_mutex)
{
    if (group_collection_n(groups) > 0) {
        if (group_collection_n(groups) == 1) {
            ovsrcu_postpone(remove_group_rcu,
                            group_collection_groups(groups)[0]);
            group_collection_init(groups);
        } else {
            ovsrcu_postpone(remove_groups_rcu,
                            group_collection_detach(groups));
        }
    }
}

/* Checks whether 'rule' matches 'c' and, if so, adds it to 'rules'.  This
 * function verifies most of the criteria in 'c' itself, but the caller must
 * check 'c->cr' itself.
 *
 * Rules that have already been marked for removal are not collected.
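 * (A rule that has been marked for removal is no longer visible in
 * 'c->version', so the cls_rule_visible_in_version() check below filters
 * it out.)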
 *
 * Increments '*n_readonly' if 'rule' wasn't added because it's read-only
 * (and 'c' only includes modifiable rules). */
static void
collect_rule(struct rule *rule, const struct rule_criteria *c,
             struct rule_collection *rules, size_t *n_readonly)
    OVS_REQUIRES(ofproto_mutex)
{
    if ((c->table_id == rule->table_id || c->table_id == 0xff)
        && ofproto_rule_has_out_port(rule, c->out_port)
        && ofproto_rule_has_out_group(rule, c->out_group)
        && !((rule->flow_cookie ^ c->cookie) & c->cookie_mask)
        && (!rule_is_hidden(rule) || c->include_hidden)
        && cls_rule_visible_in_version(&rule->cr, c->version)) {
        /* Rule matches all the criteria... */
        if (!rule_is_readonly(rule) || c->include_readonly) {
            /* ...add it. */
            rule_collection_add(rules, rule);
        } else {
            /* ...except it's read-only. */
            ++*n_readonly;
        }
    }
}

/* Searches 'ofproto' for rules that match the criteria in 'criteria'.
 * Matches on classifier rules are done in the "loose" way required for
 * OpenFlow OFPFC_MODIFY and OFPFC_DELETE requests.  Puts the selected rules
 * on list 'rules'.
 *
 * Returns 0 on success, otherwise an OpenFlow error code. */
static enum ofperr
collect_rules_loose(struct ofproto *ofproto,
                    const struct rule_criteria *criteria,
                    struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    struct oftable *table;
    enum ofperr error = 0;
    size_t n_readonly = 0;

    rule_collection_init(rules);

    if (!check_table_id(ofproto, criteria->table_id)) {
        error = OFPERR_OFPBRC_BAD_TABLE_ID;
        goto exit;
    }

    if (criteria->cookie_mask == OVS_BE64_MAX) {
        struct rule *rule;

        HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node,
                                   hash_cookie(criteria->cookie),
                                   &ofproto->cookies) {
            if (cls_rule_is_loose_match(&rule->cr, &criteria->cr.match)) {
                collect_rule(rule, criteria, rules, &n_readonly);
            }
        }
    } else {
        FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
            struct rule *rule;

            CLS_FOR_EACH_TARGET (rule, cr, &table->cls, &criteria->cr,
                                 criteria->version) {
                collect_rule(rule, criteria, rules, &n_readonly);
            }
        }
    }

exit:
    if (!error && !rule_collection_n(rules) && n_readonly) {
        /* We didn't find any rules to modify.  We did find some read-only
         * rules that we're not allowed to modify, so report that. */
        error = OFPERR_OFPBRC_EPERM;
    }
    if (error) {
        rule_collection_destroy(rules);
    }
    return error;
}

/* Searches 'ofproto' for rules that match the criteria in 'criteria'.
 * Matches on classifier rules are done in the "strict" way required for
 * OpenFlow OFPFC_MODIFY_STRICT and OFPFC_DELETE_STRICT requests.  Puts the
 * selected rules on list 'rules'.
 *
 * Returns 0 on success, otherwise an OpenFlow error code.
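 *
 * For example (hypothetical flows): with a target match of "tcp" at
 * priority 100, strict matching collects only a rule whose match is exactly
 * "tcp" and whose priority is exactly 100, whereas loose matching would
 * also collect a more specific rule such as "tcp,tp_dst=80".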
*/ static enum ofperr collect_rules_strict(struct ofproto *ofproto, const struct rule_criteria *criteria, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { struct oftable *table; size_t n_readonly = 0; enum ofperr error = 0; rule_collection_init(rules); if (!check_table_id(ofproto, criteria->table_id)) { error = OFPERR_OFPBRC_BAD_TABLE_ID; goto exit; } if (criteria->cookie_mask == OVS_BE64_MAX) { struct rule *rule; HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node, hash_cookie(criteria->cookie), &ofproto->cookies) { if (cls_rule_equal(&rule->cr, &criteria->cr)) { collect_rule(rule, criteria, rules, &n_readonly); } } } else { FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) { struct rule *rule; rule = rule_from_cls_rule(classifier_find_rule_exactly( &table->cls, &criteria->cr, criteria->version)); if (rule) { collect_rule(rule, criteria, rules, &n_readonly); } } } exit: if (!error && !rule_collection_n(rules) && n_readonly) { /* We didn't find any rules to modify. We did find some read-only * rules that we're not allowed to modify, so report that. */ error = OFPERR_OFPBRC_EPERM; } if (error) { rule_collection_destroy(rules); } return error; } /* Returns 'age_ms' (a duration in milliseconds), converted to seconds and * forced into the range of a uint16_t. */ static int age_secs(long long int age_ms) { return (age_ms < 0 ? 0 : age_ms >= UINT16_MAX * 1000 ? UINT16_MAX : (unsigned int) age_ms / 1000); } static enum ofperr handle_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *request) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_flow_stats_request fsr; struct rule_criteria criteria; struct rule_collection rules; struct ovs_list replies; enum ofperr error; error = ofputil_decode_flow_stats_request(&fsr, request, ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map); if (error) { return error; } rule_criteria_init(&criteria, fsr.table_id, &fsr.match, 0, OVS_VERSION_MAX, fsr.cookie, fsr.cookie_mask, fsr.out_port, fsr.out_group); ovs_mutex_lock(&ofproto_mutex); error = collect_rules_loose(ofproto, &criteria, &rules); rule_criteria_destroy(&criteria); if (!error) { rule_collection_ref(&rules); } ovs_mutex_unlock(&ofproto_mutex); if (error) { return error; } ofpmp_init(&replies, request); struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, &rules) { long long int now = time_msec(); struct ofputil_flow_stats fs; long long int created, used, modified; const struct rule_actions *actions; enum ofputil_flow_mod_flags flags; ovs_mutex_lock(&rule->mutex); fs.cookie = rule->flow_cookie; fs.idle_timeout = rule->idle_timeout; fs.hard_timeout = rule->hard_timeout; fs.importance = rule->importance; created = rule->created; modified = rule->modified; actions = rule_get_actions(rule); flags = rule->flags; ovs_mutex_unlock(&rule->mutex); ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count, &fs.byte_count, &used); minimatch_expand(&rule->cr.match, &fs.match); fs.table_id = rule->table_id; calc_duration(created, now, &fs.duration_sec, &fs.duration_nsec); fs.priority = rule->cr.priority; fs.idle_age = age_secs(now - used); fs.hard_age = age_secs(now - modified); fs.ofpacts = actions->ofpacts; fs.ofpacts_len = actions->ofpacts_len; fs.flags = flags; ofputil_append_flow_stats_reply(&fs, &replies, ofproto_get_tun_tab(ofproto)); } rule_collection_unref(&rules); rule_collection_destroy(&rules); ofconn_send_replies(ofconn, &replies); return 0; } static void flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds 
*results) { uint64_t packet_count, byte_count; const struct rule_actions *actions; long long int created, used; rule->ofproto->ofproto_class->rule_get_stats(rule, &packet_count, &byte_count, &used); ovs_mutex_lock(&rule->mutex); actions = rule_get_actions(rule); created = rule->created; ovs_mutex_unlock(&rule->mutex); if (rule->table_id != 0) { ds_put_format(results, "table_id=%"PRIu8", ", rule->table_id); } ds_put_format(results, "duration=%llds, ", (time_msec() - created) / 1000); ds_put_format(results, "n_packets=%"PRIu64", ", packet_count); ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count); cls_rule_format(&rule->cr, ofproto_get_tun_tab(ofproto), results); ds_put_char(results, ','); ds_put_cstr(results, "actions="); ofpacts_format(actions->ofpacts, actions->ofpacts_len, results); ds_put_cstr(results, "\n"); } /* Adds a pretty-printed description of all flows to 'results', including * hidden flows (e.g., set up by in-band control). */ void ofproto_get_all_flows(struct ofproto *p, struct ds *results) { struct oftable *table; OFPROTO_FOR_EACH_TABLE (table, p) { struct rule *rule; CLS_FOR_EACH (rule, cr, &table->cls) { flow_stats_ds(p, rule, results); } } } /* Obtains the NetFlow engine type and engine ID for 'ofproto' into * '*engine_type' and '*engine_id', respectively. */ void ofproto_get_netflow_ids(const struct ofproto *ofproto, uint8_t *engine_type, uint8_t *engine_id) { ofproto->ofproto_class->get_netflow_ids(ofproto, engine_type, engine_id); } /* Checks the status change of CFM on 'ofport'. * * Returns true if 'ofproto_class' does not support 'cfm_status_changed'. */ bool ofproto_port_cfm_status_changed(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->cfm_status_changed ? ofproto->ofproto_class->cfm_status_changed(ofport) : true); } /* Checks the status of CFM configured on 'ofp_port' within 'ofproto'. * Returns 0 if the port's CFM status was successfully stored into * '*status'. Returns positive errno if the port did not have CFM * configured. * * The caller must provide and own '*status', and must free 'status->rmps'. * '*status' is indeterminate if the return value is non-zero. */ int ofproto_port_get_cfm_status(const struct ofproto *ofproto, ofp_port_t ofp_port, struct cfm_status *status) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->get_cfm_status ? 
ofproto->ofproto_class->get_cfm_status(ofport, status) : EOPNOTSUPP); } static enum ofperr handle_aggregate_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_flow_stats_request request; struct ofputil_aggregate_stats stats; bool unknown_packets, unknown_bytes; struct rule_criteria criteria; struct rule_collection rules; struct ofpbuf *reply; enum ofperr error; error = ofputil_decode_flow_stats_request(&request, oh, ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map); if (error) { return error; } rule_criteria_init(&criteria, request.table_id, &request.match, 0, OVS_VERSION_MAX, request.cookie, request.cookie_mask, request.out_port, request.out_group); ovs_mutex_lock(&ofproto_mutex); error = collect_rules_loose(ofproto, &criteria, &rules); rule_criteria_destroy(&criteria); if (!error) { rule_collection_ref(&rules); } ovs_mutex_unlock(&ofproto_mutex); if (error) { return error; } memset(&stats, 0, sizeof stats); unknown_packets = unknown_bytes = false; struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, &rules) { uint64_t packet_count; uint64_t byte_count; long long int used; ofproto->ofproto_class->rule_get_stats(rule, &packet_count, &byte_count, &used); if (packet_count == UINT64_MAX) { unknown_packets = true; } else { stats.packet_count += packet_count; } if (byte_count == UINT64_MAX) { unknown_bytes = true; } else { stats.byte_count += byte_count; } stats.flow_count++; } if (unknown_packets) { stats.packet_count = UINT64_MAX; } if (unknown_bytes) { stats.byte_count = UINT64_MAX; } rule_collection_unref(&rules); rule_collection_destroy(&rules); reply = ofputil_encode_aggregate_stats_reply(&stats, oh); ofconn_send_reply(ofconn, reply); return 0; } struct queue_stats_cbdata { struct ofport *ofport; struct ovs_list replies; long long int now; }; static void put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id, const struct netdev_queue_stats *stats) { struct ofputil_queue_stats oqs; oqs.port_no = cbdata->ofport->pp.port_no; oqs.queue_id = queue_id; oqs.tx_bytes = stats->tx_bytes; oqs.tx_packets = stats->tx_packets; oqs.tx_errors = stats->tx_errors; if (stats->created != LLONG_MIN) { calc_duration(stats->created, cbdata->now, &oqs.duration_sec, &oqs.duration_nsec); } else { oqs.duration_sec = oqs.duration_nsec = UINT32_MAX; } ofputil_append_queue_stat(&cbdata->replies, &oqs); } static void handle_queue_stats_dump_cb(uint32_t queue_id, struct netdev_queue_stats *stats, void *cbdata_) { struct queue_stats_cbdata *cbdata = cbdata_; put_queue_stats(cbdata, queue_id, stats); } static enum ofperr handle_queue_stats_for_port(struct ofport *port, uint32_t queue_id, struct queue_stats_cbdata *cbdata) { cbdata->ofport = port; if (queue_id == OFPQ_ALL) { netdev_dump_queue_stats(port->netdev, handle_queue_stats_dump_cb, cbdata); } else { struct netdev_queue_stats stats; if (!netdev_get_queue_stats(port->netdev, queue_id, &stats)) { put_queue_stats(cbdata, queue_id, &stats); } else { return OFPERR_OFPQOFC_BAD_QUEUE; } } return 0; } static enum ofperr handle_queue_stats_request(struct ofconn *ofconn, const struct ofp_header *rq) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct queue_stats_cbdata cbdata; struct ofport *port; enum ofperr error; struct ofputil_queue_stats_request oqsr; COVERAGE_INC(ofproto_queue_req); ofpmp_init(&cbdata.replies, rq); cbdata.now = time_msec(); error = ofputil_decode_queue_stats_request(rq, &oqsr); if (error) { return error; } if 
(oqsr.port_no == OFPP_ANY) { error = OFPERR_OFPQOFC_BAD_QUEUE; HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) { if (!handle_queue_stats_for_port(port, oqsr.queue_id, &cbdata)) { error = 0; } } } else { port = ofproto_get_port(ofproto, oqsr.port_no); error = (port ? handle_queue_stats_for_port(port, oqsr.queue_id, &cbdata) : OFPERR_OFPQOFC_BAD_PORT); } if (!error) { ofconn_send_replies(ofconn, &cbdata.replies); } else { ofpbuf_list_delete(&cbdata.replies); } return error; } static enum ofperr evict_rules_from_table(struct oftable *table) OVS_REQUIRES(ofproto_mutex) { enum ofperr error = 0; struct rule_collection rules; unsigned int count = table->n_flows; unsigned int max_flows = table->max_flows; rule_collection_init(&rules); while (count-- > max_flows) { struct rule *rule; if (!choose_rule_to_evict(table, &rule)) { error = OFPERR_OFPFMFC_TABLE_FULL; break; } else { eviction_group_remove_rule(rule); rule_collection_add(&rules, rule); } } delete_flows__(&rules, OFPRR_EVICTION, NULL); return error; } static void get_conjunctions(const struct ofputil_flow_mod *fm, struct cls_conjunction **conjsp, size_t *n_conjsp) { struct cls_conjunction *conjs = NULL; int n_conjs = 0; const struct ofpact *ofpact; OFPACT_FOR_EACH (ofpact, fm->ofpacts, fm->ofpacts_len) { if (ofpact->type == OFPACT_CONJUNCTION) { n_conjs++; } else if (ofpact->type != OFPACT_NOTE) { /* "conjunction" may appear with "note" actions but not with any * other type of actions. */ ovs_assert(!n_conjs); break; } } if (n_conjs) { int i = 0; conjs = xzalloc(n_conjs * sizeof *conjs); OFPACT_FOR_EACH (ofpact, fm->ofpacts, fm->ofpacts_len) { if (ofpact->type == OFPACT_CONJUNCTION) { struct ofpact_conjunction *oc = ofpact_get_CONJUNCTION(ofpact); conjs[i].clause = oc->clause; conjs[i].n_clauses = oc->n_clauses; conjs[i].id = oc->id; i++; } } } *conjsp = conjs; *n_conjsp = n_conjs; } /* add_flow_init(), add_flow_start(), add_flow_revert(), and add_flow_finish() * implement OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT * in which no matching flow already exists in the flow table. * * add_flow_init() creates a new flow according to 'fm' and stores it to 'ofm' * for later reference. If the flow replaces other flow, it will be updated to * match modify semantics later by add_flow_start() (by calling * replace_rule_start()). * * Returns 0 on success, or an OpenFlow error code on failure. * * On successful return the caller must complete the operation by calling * add_flow_start(), and if that succeeds, then either add_flow_finish(), or * add_flow_revert() if the operation needs to be reverted due to a later * failure. */ static enum ofperr add_flow_init(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { struct oftable *table; struct cls_rule cr; uint8_t table_id; enum ofperr error; if (!check_table_id(ofproto, fm->table_id)) { return OFPERR_OFPBRC_BAD_TABLE_ID; } /* Pick table. 
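     * (A 'table_id' of 0xff (OFPTT_ALL) lets the provider pick a table via
     * rule_choose_table() when it implements that hook; otherwise table 0
     * is used.)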
*/ if (fm->table_id == 0xff) { if (ofproto->ofproto_class->rule_choose_table) { error = ofproto->ofproto_class->rule_choose_table(ofproto, &fm->match, &table_id); if (error) { return error; } ovs_assert(table_id < ofproto->n_tables); } else { table_id = 0; } } else if (fm->table_id < ofproto->n_tables) { table_id = fm->table_id; } else { return OFPERR_OFPBRC_BAD_TABLE_ID; } table = &ofproto->tables[table_id]; if (table->flags & OFTABLE_READONLY && !(fm->flags & OFPUTIL_FF_NO_READONLY)) { return OFPERR_OFPBRC_EPERM; } if (!(fm->flags & OFPUTIL_FF_HIDDEN_FIELDS) && !match_has_default_hidden_fields(&fm->match)) { VLOG_WARN_RL(&rl, "%s: (add_flow) only internal flows can set " "non-default values to hidden fields", ofproto->name); return OFPERR_OFPBRC_EPERM; } if (!ofm->temp_rule) { cls_rule_init(&cr, &fm->match, fm->priority); /* Allocate new rule. Destroys 'cr'. */ error = ofproto_rule_create(ofproto, &cr, table - ofproto->tables, fm->new_cookie, fm->idle_timeout, fm->hard_timeout, fm->flags, fm->importance, fm->ofpacts, fm->ofpacts_len, fm->match.flow.tunnel.metadata.present.map, fm->ofpacts_tlv_bitmap, &ofm->temp_rule); if (error) { return error; } get_conjunctions(fm, &ofm->conjs, &ofm->n_conjs); } return 0; } /* ofm->temp_rule is consumed only in the successful case. */ static enum ofperr add_flow_start(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule *old_rule = NULL; struct rule *new_rule = ofm->temp_rule; const struct rule_actions *actions = rule_get_actions(new_rule); struct oftable *table = &ofproto->tables[new_rule->table_id]; enum ofperr error; /* Must check actions while holding ofproto_mutex to avoid a race. */ error = ofproto_check_ofpacts(ofproto, actions->ofpacts, actions->ofpacts_len); if (error) { return error; } /* Check for the existence of an identical rule. * This will not return rules earlier marked for removal. */ old_rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &new_rule->cr, ofm->version)); if (!old_rule) { /* Check for overlap, if requested. */ if (new_rule->flags & OFPUTIL_FF_CHECK_OVERLAP && classifier_rule_overlaps(&table->cls, &new_rule->cr, ofm->version)) { return OFPERR_OFPFMFC_OVERLAP; } /* If necessary, evict an existing rule to clear out space. */ if (table->n_flows >= table->max_flows) { if (!choose_rule_to_evict(table, &old_rule)) { return OFPERR_OFPFMFC_TABLE_FULL; } eviction_group_remove_rule(old_rule); /* Marks 'old_rule' as an evicted rule rather than replaced rule. */ old_rule->removed_reason = OFPRR_EVICTION; } } else { ofm->modify_cookie = true; } if (old_rule) { rule_collection_add(&ofm->old_rules, old_rule); } /* Take ownership of the temp_rule. */ rule_collection_add(&ofm->new_rules, new_rule); ofm->temp_rule = NULL; replace_rule_start(ofproto, ofm, old_rule, new_rule); return 0; } /* Revert the effects of add_flow_start(). */ static void add_flow_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule *old_rule = rule_collection_n(&ofm->old_rules) ? rule_collection_rules(&ofm->old_rules)[0] : NULL; struct rule *new_rule = rule_collection_rules(&ofm->new_rules)[0]; replace_rule_revert(ofproto, old_rule, new_rule); } /* To be called after version bump. */ static void add_flow_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { struct rule *old_rule = rule_collection_n(&ofm->old_rules) ? 
rule_collection_rules(&ofm->old_rules)[0] : NULL; struct rule *new_rule = rule_collection_rules(&ofm->new_rules)[0]; struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies); replace_rule_finish(ofproto, ofm, req, old_rule, new_rule, &dead_cookies); learned_cookies_flush(ofproto, &dead_cookies); if (old_rule) { ovsrcu_postpone(remove_rule_rcu, old_rule); } else { ofmonitor_report(ofproto->connmgr, new_rule, NXFME_ADDED, 0, req ? req->ofconn : NULL, req ? req->request->xid : 0, NULL); /* Send Vacancy Events for OF1.4+. */ send_table_status(ofproto, new_rule->table_id); } } /* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */ /* Create a new rule. Note that the rule is NOT inserted into any data * structures yet. Takes ownership of 'cr'. Only assigns '*new_rule' if * successful. */ static enum ofperr ofproto_rule_create(struct ofproto *ofproto, struct cls_rule *cr, uint8_t table_id, ovs_be64 new_cookie, uint16_t idle_timeout, uint16_t hard_timeout, enum ofputil_flow_mod_flags flags, uint16_t importance, const struct ofpact *ofpacts, size_t ofpacts_len, uint64_t match_tlv_bitmap, uint64_t ofpacts_tlv_bitmap, struct rule **new_rule) OVS_NO_THREAD_SAFETY_ANALYSIS { struct rule *rule; enum ofperr error; /* Allocate new rule. */ rule = ofproto->ofproto_class->rule_alloc(); if (!rule) { cls_rule_destroy(cr); VLOG_WARN_RL(&rl, "%s: failed to allocate a rule.", ofproto->name); return OFPERR_OFPFMFC_UNKNOWN; } /* Initialize base state. */ *CONST_CAST(struct ofproto **, &rule->ofproto) = ofproto; cls_rule_move(CONST_CAST(struct cls_rule *, &rule->cr), cr); ovs_refcount_init(&rule->ref_count); ovs_mutex_init(&rule->mutex); ovs_mutex_lock(&rule->mutex); *CONST_CAST(ovs_be64 *, &rule->flow_cookie) = new_cookie; rule->created = rule->modified = time_msec(); rule->idle_timeout = idle_timeout; rule->hard_timeout = hard_timeout; *CONST_CAST(uint16_t *, &rule->importance) = importance; rule->removed_reason = OVS_OFPRR_NONE; *CONST_CAST(uint8_t *, &rule->table_id) = table_id; rule->flags = flags & OFPUTIL_FF_STATE; *CONST_CAST(const struct rule_actions **, &rule->actions) = rule_actions_create(ofpacts, ofpacts_len); ovs_list_init(&rule->meter_list_node); rule->eviction_group = NULL; rule->monitor_flags = 0; rule->add_seqno = 0; rule->modify_seqno = 0; ovs_list_init(&rule->expirable); ovs_mutex_unlock(&rule->mutex); /* Construct rule, initializing derived state. */ error = ofproto->ofproto_class->rule_construct(rule); if (error) { ofproto_rule_destroy__(rule); return error; } rule->state = RULE_INITIALIZED; rule->match_tlv_bitmap = match_tlv_bitmap; rule->ofpacts_tlv_bitmap = ofpacts_tlv_bitmap; mf_vl_mff_ref(&rule->ofproto->vl_mff_map, match_tlv_bitmap); mf_vl_mff_ref(&rule->ofproto->vl_mff_map, ofpacts_tlv_bitmap); *new_rule = rule; return 0; } /* Initialize 'ofm' for a learn action. If the rule already existed, a reference * to that rule is taken, otherwise a new rule is created. 'ofm' keeps the * rule reference in both cases. This does not take the global 'ofproto_mutex'. */ enum ofperr ofproto_flow_mod_init_for_learn(struct ofproto *ofproto, const struct ofputil_flow_mod *fm, struct ofproto_flow_mod *ofm) OVS_EXCLUDED(ofproto_mutex) { /* Reject flow mods that do not look like they were generated by a learn * action. */ if (fm->command != OFPFC_MODIFY_STRICT || fm->table_id == OFPTT_ALL || fm->flags & OFPUTIL_FF_RESET_COUNTS || fm->buffer_id != UINT32_MAX) { return OFPERR_OFPFMFC_UNKNOWN; } /* Check if the rule already exists, and we can get a reference to it. 
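Taking the reference may fail if another thread is concurrently destroying the rule: in that case ofproto_rule_try_ref() below returns false and we carry on exactly as if the rule did not exist. 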
*/ struct oftable *table = &ofproto->tables[fm->table_id]; struct rule *rule; rule = rule_from_cls_rule(classifier_find_match_exactly( &table->cls, &fm->match, fm->priority, OVS_VERSION_MAX)); if (rule) { /* Check if the rule's attributes match as well. */ const struct rule_actions *actions; ovs_mutex_lock(&rule->mutex); actions = rule_get_actions(rule); if (rule->idle_timeout == fm->idle_timeout && rule->hard_timeout == fm->hard_timeout && rule->importance == fm->importance && rule->flags == (fm->flags & OFPUTIL_FF_STATE) && (!fm->modify_cookie || (fm->new_cookie == rule->flow_cookie)) && ofpacts_equal(fm->ofpacts, fm->ofpacts_len, actions->ofpacts, actions->ofpacts_len)) { /* Rule already exists and need not change, except for the modified * timestamp. Get a reference to the existing rule. */ ovs_mutex_unlock(&rule->mutex); if (!ofproto_rule_try_ref(rule)) { rule = NULL; /* Pretend it did not exist. */ } } else { ovs_mutex_unlock(&rule->mutex); rule = NULL; } } return ofproto_flow_mod_init(ofproto, ofm, fm, rule); } enum ofperr ofproto_flow_mod_learn_refresh(struct ofproto_flow_mod *ofm) { enum ofperr error = 0; /* ofm->temp_rule is our reference to the learned rule. We have a * reference to an existing rule, if it already was in the classifier, * otherwise we may have a fresh rule that we need to insert. */ struct rule *rule = ofm->temp_rule; if (!rule) { return OFPERR_OFPFMFC_UNKNOWN; } /* Create a new rule if the current one has been removed from the * classifier. We need to do this since RCU does not allow a current rule * to be reinserted before all threads have quiesced. * * It is possible that the rule is removed asynchronously, e.g., right * after we have read the 'rule->state' below. In this case the next time * this function is executed the rule will be reinstated. */ if (rule->state == RULE_REMOVED) { struct cls_rule cr; cls_rule_clone(&cr, &rule->cr); ovs_mutex_lock(&rule->mutex); error = ofproto_rule_create(rule->ofproto, &cr, rule->table_id, rule->flow_cookie, rule->idle_timeout, rule->hard_timeout, rule->flags, rule->importance, rule->actions->ofpacts, rule->actions->ofpacts_len, rule->match_tlv_bitmap, rule->ofpacts_tlv_bitmap, &ofm->temp_rule); ovs_mutex_unlock(&rule->mutex); if (!error) { ofproto_rule_unref(rule); /* Release old reference. */ } } else { /* Refresh the existing rule. */ ovs_mutex_lock(&rule->mutex); rule->modified = time_msec(); ovs_mutex_unlock(&rule->mutex); } return error; } enum ofperr ofproto_flow_mod_learn_start(struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule *rule = ofm->temp_rule; /* ofproto_flow_mod_start() consumes the reference, so we * take a new one. */ ofproto_rule_ref(rule); enum ofperr error = ofproto_flow_mod_start(rule->ofproto, ofm); ofm->temp_rule = rule; return error; } void ofproto_flow_mod_learn_revert(struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule *rule = rule_collection_rules(&ofm->new_rules)[0]; ofproto_flow_mod_revert(rule->ofproto, ofm); } void ofproto_flow_mod_learn_finish(struct ofproto_flow_mod *ofm, struct ofproto *orig_ofproto) OVS_REQUIRES(ofproto_mutex) { struct rule *rule = rule_collection_rules(&ofm->new_rules)[0]; /* If learning on a different bridge, must bump its version * number and flush connmgr afterwards. 
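Bumping the other bridge's tables version is what makes a rule added in 'ofm->version' visible to lookups there, and flushing its connmgr pushes out any flow monitor updates queued by the operation; a caller that is already committing changes on its own bridge performs both steps itself instead. 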
*/ if (rule->ofproto != orig_ofproto) { ofproto_bump_tables_version(rule->ofproto); } ofproto_flow_mod_finish(rule->ofproto, ofm, NULL); if (rule->ofproto != orig_ofproto) { ofmonitor_flush(rule->ofproto->connmgr); } } /* Refresh 'ofm->temp_rule', for which the caller holds a reference, if already * in the classifier, insert it otherwise. If the rule has already been * removed from the classifier, a new rule is created using 'ofm->temp_rule' as * a template and the reference to the old 'ofm->temp_rule' is freed. If * 'keep_ref' is true, then a reference to the current rule is held, otherwise * it is released and 'ofm->temp_rule' is set to NULL. * * Caller needs to be the exclusive owner of 'ofm' as it is being manipulated * during the call. */ enum ofperr ofproto_flow_mod_learn(struct ofproto_flow_mod *ofm, bool keep_ref) OVS_EXCLUDED(ofproto_mutex) { enum ofperr error = ofproto_flow_mod_learn_refresh(ofm); struct rule *rule = ofm->temp_rule; /* Do we need to insert the rule? */ if (!error && rule->state == RULE_INITIALIZED) { ovs_mutex_lock(&ofproto_mutex); ofm->version = rule->ofproto->tables_version + 1; error = ofproto_flow_mod_learn_start(ofm); if (!error) { ofproto_flow_mod_learn_finish(ofm, NULL); } ovs_mutex_unlock(&ofproto_mutex); } if (!keep_ref) { ofproto_rule_unref(rule); ofm->temp_rule = NULL; } return error; } static void replace_rule_start(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, struct rule *old_rule, struct rule *new_rule) { struct oftable *table = &ofproto->tables[new_rule->table_id]; /* 'old_rule' may be either an evicted rule or replaced rule. */ if (old_rule) { /* Copy values from old rule for modify semantics. */ if (old_rule->removed_reason != OFPRR_EVICTION) { bool change_cookie = (ofm->modify_cookie && new_rule->flow_cookie != OVS_BE64_MAX && new_rule->flow_cookie != old_rule->flow_cookie); ovs_mutex_lock(&new_rule->mutex); ovs_mutex_lock(&old_rule->mutex); if (ofm->command != OFPFC_ADD) { new_rule->idle_timeout = old_rule->idle_timeout; new_rule->hard_timeout = old_rule->hard_timeout; *CONST_CAST(uint16_t *, &new_rule->importance) = old_rule->importance; new_rule->flags = old_rule->flags; new_rule->created = old_rule->created; } if (!change_cookie) { *CONST_CAST(ovs_be64 *, &new_rule->flow_cookie) = old_rule->flow_cookie; } ovs_mutex_unlock(&old_rule->mutex); ovs_mutex_unlock(&new_rule->mutex); } /* Mark the old rule for removal in the next version. */ cls_rule_make_invisible_in_version(&old_rule->cr, ofm->version); /* Remove the old rule from data structures. */ ofproto_rule_remove__(ofproto, old_rule); } else { table->n_flows++; } /* Insert flow to ofproto data structures, so that later flow_mods may * relate to it. This is reversible, in case later errors require this to * be reverted. */ ofproto_rule_insert__(ofproto, new_rule); /* Make the new rule visible for classifier lookups only from the next * version. */ classifier_insert(&table->cls, &new_rule->cr, ofm->version, ofm->conjs, ofm->n_conjs); } static void replace_rule_revert(struct ofproto *ofproto, struct rule *old_rule, struct rule *new_rule) { struct oftable *table = &ofproto->tables[new_rule->table_id]; if (old_rule) { if (old_rule->removed_reason == OFPRR_EVICTION) { /* Revert the eviction. */ eviction_group_add_rule(old_rule); } /* Restore the old rule to data structures. */ ofproto_rule_insert__(ofproto, old_rule); /* Restore the original visibility of the old rule. */ cls_rule_restore_visibility(&old_rule->cr); } else { /* Restore table's rule count. 
*/ table->n_flows--; } /* Remove the new rule immediately. It was never visible to lookups. */ if (!classifier_remove(&table->cls, &new_rule->cr)) { OVS_NOT_REACHED(); } ofproto_rule_remove__(ofproto, new_rule); ofproto_rule_unref(new_rule); } /* Adds the 'new_rule', replacing the 'old_rule'. */ static void replace_rule_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req, struct rule *old_rule, struct rule *new_rule, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { struct rule *replaced_rule; replaced_rule = (old_rule && old_rule->removed_reason != OFPRR_EVICTION) ? old_rule : NULL; /* Insert the new flow to the ofproto provider. A non-NULL 'replaced_rule' * is a duplicate rule the 'new_rule' is replacing. The provider should * link the packet and byte counts from the old rule to the new one if * 'modify_keep_counts' is 'true'. The 'replaced_rule' will be deleted * right after this call. */ ofproto->ofproto_class->rule_insert(new_rule, replaced_rule, ofm->modify_keep_counts); learned_cookies_inc(ofproto, rule_get_actions(new_rule)); if (old_rule) { const struct rule_actions *old_actions = rule_get_actions(old_rule); const struct rule_actions *new_actions = rule_get_actions(new_rule); learned_cookies_dec(ofproto, old_actions, dead_cookies); if (replaced_rule) { enum nx_flow_update_event event = ofm->command == OFPFC_ADD ? NXFME_ADDED : NXFME_MODIFIED; bool changed_cookie = (new_rule->flow_cookie != old_rule->flow_cookie); bool changed_actions = !ofpacts_equal(new_actions->ofpacts, new_actions->ofpacts_len, old_actions->ofpacts, old_actions->ofpacts_len); if (event != NXFME_MODIFIED || changed_actions || changed_cookie) { ofmonitor_report(ofproto->connmgr, new_rule, event, 0, req ? req->ofconn : NULL, req ? req->request->xid : 0, changed_actions ? old_actions : NULL); } } else { /* XXX: This is slight duplication with delete_flows_finish__() */ ofmonitor_report(ofproto->connmgr, old_rule, NXFME_DELETED, OFPRR_EVICTION, req ? req->ofconn : NULL, req ? req->request->xid : 0, NULL); } } } /* ofm->temp_rule is consumed only in the successful case. */ static enum ofperr modify_flows_start__(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; struct rule_collection *new_rules = &ofm->new_rules; enum ofperr error; if (rule_collection_n(old_rules) > 0) { /* Create a new 'modified' rule for each old rule. */ struct rule *old_rule, *new_rule; const struct rule_actions *actions = rule_get_actions(ofm->temp_rule); /* Must check actions while holding ofproto_mutex to avoid a race. */ error = ofproto_check_ofpacts(ofproto, actions->ofpacts, actions->ofpacts_len); if (error) { return error; } /* Use the temp rule as the first new rule, and as the template for * the rest. */ struct rule *temp = ofm->temp_rule; ofm->temp_rule = NULL; /* We consume the template. */ bool first = true; RULE_COLLECTION_FOR_EACH (old_rule, old_rules) { if (first) { /* The template rule's match is possibly a loose one, so it * must be replaced with the old rule's match so that the new * rule actually replaces the old one. 
*/ cls_rule_destroy(CONST_CAST(struct cls_rule *, &temp->cr)); cls_rule_clone(CONST_CAST(struct cls_rule *, &temp->cr), &old_rule->cr); if (temp->match_tlv_bitmap != old_rule->match_tlv_bitmap) { mf_vl_mff_unref(&temp->ofproto->vl_mff_map, temp->match_tlv_bitmap); temp->match_tlv_bitmap = old_rule->match_tlv_bitmap; mf_vl_mff_ref(&temp->ofproto->vl_mff_map, temp->match_tlv_bitmap); } *CONST_CAST(uint8_t *, &temp->table_id) = old_rule->table_id; rule_collection_add(new_rules, temp); first = false; } else { struct cls_rule cr; cls_rule_clone(&cr, &old_rule->cr); error = ofproto_rule_create(ofproto, &cr, old_rule->table_id, temp->flow_cookie, temp->idle_timeout, temp->hard_timeout, temp->flags, temp->importance, temp->actions->ofpacts, temp->actions->ofpacts_len, old_rule->match_tlv_bitmap, temp->ofpacts_tlv_bitmap, &new_rule); if (!error) { rule_collection_add(new_rules, new_rule); } else { /* Return the template rule in place in the error case. */ ofm->temp_rule = temp; rule_collection_rules(new_rules)[0] = NULL; rule_collection_unref(new_rules); rule_collection_destroy(new_rules); return error; } } } ovs_assert(rule_collection_n(new_rules) == rule_collection_n(old_rules)); RULE_COLLECTIONS_FOR_EACH (old_rule, new_rule, old_rules, new_rules) { replace_rule_start(ofproto, ofm, old_rule, new_rule); } } else if (ofm->modify_may_add_flow) { /* No match, add a new flow, consumes 'temp'. */ error = add_flow_start(ofproto, ofm); } else { /* No flow to modify and may not add a flow. */ ofproto_rule_unref(ofm->temp_rule); ofm->temp_rule = NULL; /* We consume the template. */ error = 0; } return error; } static enum ofperr modify_flows_init_loose(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, 0, OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, OFPP_ANY, OFPG_ANY); rule_criteria_require_rw(&ofm->criteria, (fm->flags & OFPUTIL_FF_NO_READONLY) != 0); /* Must create a new flow in advance for the case that no matches are * found. Also used for template for multiple modified flows. */ add_flow_init(ofproto, ofm, fm); return 0; } /* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code on * failure. */ static enum ofperr modify_flows_start_loose(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; enum ofperr error; error = collect_rules_loose(ofproto, &ofm->criteria, old_rules); if (!error) { error = modify_flows_start__(ofproto, ofm); } if (error) { rule_collection_destroy(old_rules); } return error; } static void modify_flows_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; struct rule_collection *new_rules = &ofm->new_rules; /* Old rules were not changed yet, only need to revert new rules. 
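Each old rule was merely hidden from 'ofm->version' onward and unlinked by replace_rule_start(), so replace_rule_revert() below both restores it and removes the paired new rule, which never became visible to lookups. 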
*/ if (rule_collection_n(old_rules) > 0) { struct rule *old_rule, *new_rule; RULE_COLLECTIONS_FOR_EACH (old_rule, new_rule, old_rules, new_rules) { replace_rule_revert(ofproto, old_rule, new_rule); } rule_collection_destroy(new_rules); rule_collection_destroy(old_rules); } } static void modify_flows_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; struct rule_collection *new_rules = &ofm->new_rules; if (rule_collection_n(old_rules) == 0 && rule_collection_n(new_rules) == 1) { add_flow_finish(ofproto, ofm, req); } else if (rule_collection_n(old_rules) > 0) { struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies); ovs_assert(rule_collection_n(new_rules) == rule_collection_n(old_rules)); struct rule *old_rule, *new_rule; RULE_COLLECTIONS_FOR_EACH (old_rule, new_rule, old_rules, new_rules) { replace_rule_finish(ofproto, ofm, req, old_rule, new_rule, &dead_cookies); } learned_cookies_flush(ofproto, &dead_cookies); remove_rules_postponed(old_rules); } } static enum ofperr modify_flow_init_strict(struct ofproto *ofproto OVS_UNUSED, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, fm->priority, OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, OFPP_ANY, OFPG_ANY); rule_criteria_require_rw(&ofm->criteria, (fm->flags & OFPUTIL_FF_NO_READONLY) != 0); /* Must create a new flow in advance for the case that no matches are * found. Also used for template for multiple modified flows. */ add_flow_init(ofproto, ofm, fm); return 0; } /* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error * code on failure. */ static enum ofperr modify_flow_start_strict(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; enum ofperr error; error = collect_rules_strict(ofproto, &ofm->criteria, old_rules); if (!error) { /* collect_rules_strict() can return max 1 rule. */ error = modify_flows_start__(ofproto, ofm); } return error; } /* OFPFC_DELETE implementation. */ static void delete_flows_start__(struct ofproto *ofproto, ovs_version_t version, const struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, rules) { struct oftable *table = &ofproto->tables[rule->table_id]; table->n_flows--; cls_rule_make_invisible_in_version(&rule->cr, version); /* Remove rule from ofproto data structures. */ ofproto_rule_remove__(ofproto, rule); } } static void delete_flows_revert__(struct ofproto *ofproto, const struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, rules) { struct oftable *table = &ofproto->tables[rule->table_id]; /* Add rule back to ofproto data structures. */ ofproto_rule_insert__(ofproto, rule); /* Restore table's rule count. */ table->n_flows++; /* Restore the original visibility of the rule. 
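Making a rule invisible in a version only marks it; until the tables version is actually bumped, lookups still run against the old version and keep seeing the rule, which is why deletion can be reverted simply by clearing that mark. 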
*/ cls_rule_restore_visibility(&rule->cr); } } static void delete_flows_finish__(struct ofproto *ofproto, struct rule_collection *rules, enum ofp_flow_removed_reason reason, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { if (rule_collection_n(rules)) { struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies); struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, rules) { /* This value will be used to send the flow removed message right * before the rule is actually destroyed. */ rule->removed_reason = reason; ofmonitor_report(ofproto->connmgr, rule, NXFME_DELETED, reason, req ? req->ofconn : NULL, req ? req->request->xid : 0, NULL); /* Send Vacancy Event for OF1.4+. */ send_table_status(ofproto, rule->table_id); learned_cookies_dec(ofproto, rule_get_actions(rule), &dead_cookies); } remove_rules_postponed(rules); learned_cookies_flush(ofproto, &dead_cookies); } } /* Deletes the rules listed in 'rules'. * The deleted rules will become invisible to the lookups in the next version. * Destroys 'rules'. */ static void delete_flows__(struct rule_collection *rules, enum ofp_flow_removed_reason reason, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { if (rule_collection_n(rules)) { struct ofproto *ofproto = rule_collection_rules(rules)[0]->ofproto; delete_flows_start__(ofproto, ofproto->tables_version + 1, rules); ofproto_bump_tables_version(ofproto); delete_flows_finish__(ofproto, rules, reason, req); ofmonitor_flush(ofproto->connmgr); } } static enum ofperr delete_flows_init_loose(struct ofproto *ofproto OVS_UNUSED, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, 0, OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, fm->out_port, fm->out_group); rule_criteria_require_rw(&ofm->criteria, (fm->flags & OFPUTIL_FF_NO_READONLY) != 0); return 0; } /* Implements OFPFC_DELETE. */ static enum ofperr delete_flows_start_loose(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *rules = &ofm->old_rules; enum ofperr error; error = collect_rules_loose(ofproto, &ofm->criteria, rules); if (!error) { delete_flows_start__(ofproto, ofm->version, rules); } return error; } static void delete_flows_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { delete_flows_revert__(ofproto, &ofm->old_rules); } static void delete_flows_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { delete_flows_finish__(ofproto, &ofm->old_rules, OFPRR_DELETE, req); } static enum ofperr delete_flows_init_strict(struct ofproto *ofproto OVS_UNUSED, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, fm->priority, OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, fm->out_port, fm->out_group); rule_criteria_require_rw(&ofm->criteria, (fm->flags & OFPUTIL_FF_NO_READONLY) != 0); return 0; } /* Implements OFPFC_DELETE_STRICT. */ static enum ofperr delete_flow_start_strict(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *rules = &ofm->old_rules; enum ofperr error; error = collect_rules_strict(ofproto, &ofm->criteria, rules); if (!error) { delete_flows_start__(ofproto, ofm->version, rules); } return error; } /* This may only be called by rule_destroy_cb()! 
*/ static void ofproto_rule_send_removed(struct rule *rule) OVS_EXCLUDED(ofproto_mutex) { struct ofputil_flow_removed fr; long long int used; minimatch_expand(&rule->cr.match, &fr.match); fr.priority = rule->cr.priority; /* Synchronize with connmgr_destroy() calls to prevent connmgr disappearing * while we use it. */ ovs_mutex_lock(&ofproto_mutex); struct connmgr *connmgr = rule->ofproto->connmgr; if (!connmgr) { ovs_mutex_unlock(&ofproto_mutex); return; } fr.cookie = rule->flow_cookie; fr.reason = rule->removed_reason; fr.table_id = rule->table_id; calc_duration(rule->created, time_msec(), &fr.duration_sec, &fr.duration_nsec); ovs_mutex_lock(&rule->mutex); fr.idle_timeout = rule->idle_timeout; fr.hard_timeout = rule->hard_timeout; ovs_mutex_unlock(&rule->mutex); rule->ofproto->ofproto_class->rule_get_stats(rule, &fr.packet_count, &fr.byte_count, &used); connmgr_send_flow_removed(connmgr, &fr); ovs_mutex_unlock(&ofproto_mutex); } /* Sends an OpenFlow "flow removed" message with the given 'reason' (either * OFPRR_HARD_TIMEOUT or OFPRR_IDLE_TIMEOUT), and then removes 'rule' from its * ofproto. * * ofproto implementation ->run() functions should use this function to expire * OpenFlow flows. */ void ofproto_rule_expire(struct rule *rule, uint8_t reason) OVS_REQUIRES(ofproto_mutex) { struct rule_collection rules; rule_collection_init(&rules); rule_collection_add(&rules, rule); delete_flows__(&rules, reason, NULL); } /* Reduces '*timeout' to no more than 'max'. A value of zero in either case * means "infinite". */ static void reduce_timeout(uint16_t max, uint16_t *timeout) { if (max && (!*timeout || *timeout > max)) { *timeout = max; } } /* If 'idle_timeout' is nonzero, and 'rule' has no idle timeout or an idle * timeout greater than 'idle_timeout', lowers 'rule''s idle timeout to * 'idle_timeout' seconds. Similarly for 'hard_timeout'. * * Suitable for implementing OFPACT_FIN_TIMEOUT. 
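(For example, a flow carrying the action fin_timeout(idle_timeout=1) has its idle timeout reduced to one second once a TCP FIN or RST passes through it; a hypothetical caller in that path would simply do * * ofproto_rule_reduce_timeouts(rule, 1, 0); * * where the zero leaves the hard timeout untouched, since a 'max' of zero makes reduce_timeout() a no-op.) 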
*/ void ofproto_rule_reduce_timeouts__(struct rule *rule, uint16_t idle_timeout, uint16_t hard_timeout) OVS_REQUIRES(ofproto_mutex) OVS_EXCLUDED(rule->mutex) { if (!idle_timeout && !hard_timeout) { return; } if (ovs_list_is_empty(&rule->expirable)) { ovs_list_insert(&rule->ofproto->expirable, &rule->expirable); } ovs_mutex_lock(&rule->mutex); reduce_timeout(idle_timeout, &rule->idle_timeout); reduce_timeout(hard_timeout, &rule->hard_timeout); ovs_mutex_unlock(&rule->mutex); } void ofproto_rule_reduce_timeouts(struct rule *rule, uint16_t idle_timeout, uint16_t hard_timeout) OVS_EXCLUDED(ofproto_mutex, rule->mutex) { if (!idle_timeout && !hard_timeout) { return; } ovs_mutex_lock(&ofproto_mutex); if (ovs_list_is_empty(&rule->expirable)) { ovs_list_insert(&rule->ofproto->expirable, &rule->expirable); } ovs_mutex_unlock(&ofproto_mutex); ovs_mutex_lock(&rule->mutex); reduce_timeout(idle_timeout, &rule->idle_timeout); reduce_timeout(hard_timeout, &rule->hard_timeout); ovs_mutex_unlock(&rule->mutex); } static enum ofperr handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_flow_mod fm; uint64_t ofpacts_stub[1024 / 8]; struct ofpbuf ofpacts; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); error = ofputil_decode_flow_mod(&fm, oh, ofconn_get_protocol(ofconn), ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map, &ofpacts, u16_to_ofp(ofproto->max_ports), ofproto->n_tables); if (!error) { struct openflow_mod_requester req = { ofconn, oh }; error = handle_flow_mod__(ofproto, &fm, &req); } ofpbuf_uninit(&ofpacts); return error; } static enum ofperr handle_flow_mod__(struct ofproto *ofproto, const struct ofputil_flow_mod *fm, const struct openflow_mod_requester *req) OVS_EXCLUDED(ofproto_mutex) { struct ofproto_flow_mod ofm; enum ofperr error; error = ofproto_flow_mod_init(ofproto, &ofm, fm, NULL); if (error) { return error; } ovs_mutex_lock(&ofproto_mutex); ofm.version = ofproto->tables_version + 1; error = ofproto_flow_mod_start(ofproto, &ofm); if (!error) { ofproto_bump_tables_version(ofproto); ofproto_flow_mod_finish(ofproto, &ofm, req); ofmonitor_flush(ofproto->connmgr); } ovs_mutex_unlock(&ofproto_mutex); return error; } static enum ofperr handle_role_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_role_request request; struct ofputil_role_request reply; struct ofpbuf *buf; enum ofperr error; error = ofputil_decode_role_message(oh, &request); if (error) { return error; } if (request.role != OFPCR12_ROLE_NOCHANGE) { if (request.role != OFPCR12_ROLE_EQUAL && request.have_generation_id && !ofconn_set_master_election_id(ofconn, request.generation_id)) { return OFPERR_OFPRRFC_STALE; } ofconn_set_role(ofconn, request.role); } reply.role = ofconn_get_role(ofconn); reply.have_generation_id = ofconn_get_master_election_id( ofconn, &reply.generation_id); buf = ofputil_encode_role_reply(oh, &reply); ofconn_send_reply(ofconn, buf); return 0; } static enum ofperr handle_nxt_flow_mod_table_id(struct ofconn *ofconn, const struct ofp_header *oh) { const struct nx_flow_mod_table_id *msg = ofpmsg_body(oh); enum ofputil_protocol cur, next; cur = ofconn_get_protocol(ofconn); next = ofputil_protocol_set_tid(cur, msg->set != 0); ofconn_set_protocol(ofconn, next); return 0; } static enum ofperr handle_nxt_set_flow_format(struct ofconn *ofconn, const struct ofp_header *oh) { const 
struct nx_set_flow_format *msg = ofpmsg_body(oh); enum ofputil_protocol cur, next; enum ofputil_protocol next_base; next_base = ofputil_nx_flow_format_to_protocol(ntohl(msg->format)); if (!next_base) { return OFPERR_OFPBRC_EPERM; } cur = ofconn_get_protocol(ofconn); next = ofputil_protocol_set_base(cur, next_base); ofconn_set_protocol(ofconn, next); return 0; } static enum ofperr handle_nxt_set_packet_in_format(struct ofconn *ofconn, const struct ofp_header *oh) { const struct nx_set_packet_in_format *msg = ofpmsg_body(oh); uint32_t format; format = ntohl(msg->format); if (!ofputil_packet_in_format_is_valid(format)) { return OFPERR_OFPBRC_EPERM; } ofconn_set_packet_in_format(ofconn, format); return 0; } static enum ofperr handle_nxt_set_async_config(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_async_cfg basis = ofconn_get_async_config(ofconn); struct ofputil_async_cfg ac; enum ofperr error; error = ofputil_decode_set_async_config(oh, false, &basis, &ac); if (error) { return error; } ofconn_set_async_config(ofconn, &ac); if (ofconn_get_type(ofconn) == OFCONN_SERVICE && !ofconn_get_miss_send_len(ofconn)) { ofconn_set_miss_send_len(ofconn, OFP_DEFAULT_MISS_SEND_LEN); } return 0; } static enum ofperr handle_nxt_get_async_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn); ofconn_send_reply(ofconn, ofputil_encode_get_async_reply(oh, &ac)); return 0; } static enum ofperr handle_nxt_set_controller_id(struct ofconn *ofconn, const struct ofp_header *oh) { const struct nx_controller_id *nci = ofpmsg_body(oh); if (!is_all_zeros(nci->zero, sizeof nci->zero)) { return OFPERR_NXBRC_MUST_BE_ZERO; } ofconn_set_controller_id(ofconn, ntohs(nci->controller_id)); return 0; } static enum ofperr handle_barrier_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofpbuf *buf; buf = ofpraw_alloc_reply((oh->version == OFP10_VERSION ? OFPRAW_OFPT10_BARRIER_REPLY : OFPRAW_OFPT11_BARRIER_REPLY), oh, 0); ofconn_send_reply(ofconn, buf); return 0; } static void ofproto_compose_flow_refresh_update(const struct rule *rule, enum nx_flow_monitor_flags flags, struct ovs_list *msgs, const struct tun_table *tun_table) OVS_REQUIRES(ofproto_mutex) { const struct rule_actions *actions; struct ofputil_flow_update fu; fu.event = (flags & (NXFMF_INITIAL | NXFMF_ADD) ? NXFME_ADDED : NXFME_MODIFIED); fu.reason = 0; ovs_mutex_lock(&rule->mutex); fu.idle_timeout = rule->idle_timeout; fu.hard_timeout = rule->hard_timeout; ovs_mutex_unlock(&rule->mutex); fu.table_id = rule->table_id; fu.cookie = rule->flow_cookie; minimatch_expand(&rule->cr.match, &fu.match); fu.priority = rule->cr.priority; actions = flags & NXFMF_ACTIONS ? rule_get_actions(rule) : NULL; fu.ofpacts = actions ? actions->ofpacts : NULL; fu.ofpacts_len = actions ? 
actions->ofpacts_len : 0; if (ovs_list_is_empty(msgs)) { ofputil_start_flow_update(msgs); } ofputil_append_flow_update(&fu, msgs, tun_table); } void ofmonitor_compose_refresh_updates(struct rule_collection *rules, struct ovs_list *msgs) OVS_REQUIRES(ofproto_mutex) { struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, rules) { enum nx_flow_monitor_flags flags = rule->monitor_flags; rule->monitor_flags = 0; ofproto_compose_flow_refresh_update(rule, flags, msgs, ofproto_get_tun_tab(rule->ofproto)); } } static void ofproto_collect_ofmonitor_refresh_rule(const struct ofmonitor *m, struct rule *rule, uint64_t seqno, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { enum nx_flow_monitor_flags update; if (rule_is_hidden(rule)) { return; } if (!ofproto_rule_has_out_port(rule, m->out_port)) { return; } if (seqno) { if (rule->add_seqno > seqno) { update = NXFMF_ADD | NXFMF_MODIFY; } else if (rule->modify_seqno > seqno) { update = NXFMF_MODIFY; } else { return; } if (!(m->flags & update)) { return; } } else { update = NXFMF_INITIAL; } if (!rule->monitor_flags) { rule_collection_add(rules, rule); } rule->monitor_flags |= update | (m->flags & NXFMF_ACTIONS); } static void ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m, uint64_t seqno, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { const struct ofproto *ofproto = ofconn_get_ofproto(m->ofconn); const struct oftable *table; struct cls_rule target; cls_rule_init_from_minimatch(&target, &m->match, 0); FOR_EACH_MATCHING_TABLE (table, m->table_id, ofproto) { struct rule *rule; CLS_FOR_EACH_TARGET (rule, cr, &table->cls, &target, OVS_VERSION_MAX) { ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules); } } cls_rule_destroy(&target); } static void ofproto_collect_ofmonitor_initial_rules(struct ofmonitor *m, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { if (m->flags & NXFMF_INITIAL) { ofproto_collect_ofmonitor_refresh_rules(m, 0, rules); } } void ofmonitor_collect_resume_rules(struct ofmonitor *m, uint64_t seqno, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { ofproto_collect_ofmonitor_refresh_rules(m, seqno, rules); } static enum ofperr flow_monitor_delete(struct ofconn *ofconn, uint32_t id) OVS_REQUIRES(ofproto_mutex) { struct ofmonitor *m; enum ofperr error; m = ofmonitor_lookup(ofconn, id); if (m) { ofmonitor_destroy(m); error = 0; } else { error = OFPERR_OFPMOFC_UNKNOWN_MONITOR; } return error; } static enum ofperr handle_flow_monitor_request(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); struct ofmonitor **monitors = NULL; size_t allocated_monitors = 0; size_t n_monitors = 0; enum ofperr error; ovs_mutex_lock(&ofproto_mutex); for (;;) { struct ofputil_flow_monitor_request request; struct ofmonitor *m; int retval; retval = ofputil_decode_flow_monitor_request(&request, &b); if (retval == EOF) { break; } else if (retval) { error = retval; goto error; } if (request.table_id != 0xff && request.table_id >= ofproto->n_tables) { error = OFPERR_OFPBRC_BAD_TABLE_ID; goto error; } error = ofmonitor_create(&request, ofconn, &m); if (error) { goto error; } if (n_monitors >= allocated_monitors) { monitors = x2nrealloc(monitors, &allocated_monitors, sizeof *monitors); } monitors[n_monitors++] = m; } struct rule_collection rules; rule_collection_init(&rules); for (size_t i = 0; i < n_monitors; i++) { 
ofproto_collect_ofmonitor_initial_rules(monitors[i], &rules); } struct ovs_list replies; ofpmp_init(&replies, oh); ofmonitor_compose_refresh_updates(&rules, &replies); ovs_mutex_unlock(&ofproto_mutex); rule_collection_destroy(&rules); ofconn_send_replies(ofconn, &replies); free(monitors); return 0; error: for (size_t i = 0; i < n_monitors; i++) { ofmonitor_destroy(monitors[i]); } free(monitors); ovs_mutex_unlock(&ofproto_mutex); return error; } static enum ofperr handle_flow_monitor_cancel(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { enum ofperr error; uint32_t id; id = ofputil_decode_flow_monitor_cancel(oh); ovs_mutex_lock(&ofproto_mutex); error = flow_monitor_delete(ofconn, id); ovs_mutex_unlock(&ofproto_mutex); return error; } /* Meters implementation. * * Meter table entry, indexed by the OpenFlow meter_id. * 'created' is used to compute the duration for meter stats. * 'list rules' is needed so that we can delete the dependent rules when the * meter table entry is deleted. * 'provider_meter_id' is for the provider's private use. */ struct meter { long long int created; /* Time created. */ struct ovs_list rules; /* List of "struct rule_dpif"s. */ ofproto_meter_id provider_meter_id; uint16_t flags; /* Meter flags. */ uint16_t n_bands; /* Number of meter bands. */ struct ofputil_meter_band *bands; }; /* * This is used in instruction validation at flow set-up time, * as flows may not use non-existing meters. * Return value of UINT32_MAX signifies an invalid meter. */ static uint32_t get_provider_meter_id(const struct ofproto *ofproto, uint32_t of_meter_id) { if (of_meter_id && of_meter_id <= ofproto->meter_features.max_meters) { const struct meter *meter = ofproto->meters[of_meter_id]; if (meter) { return meter->provider_meter_id.uint32; } } return UINT32_MAX; } /* Finds the meter invoked by 'rule''s actions and adds 'rule' to the meter's * list of rules. */ static void meter_insert_rule(struct rule *rule) { const struct rule_actions *a = rule_get_actions(rule); uint32_t meter_id = ofpacts_get_meter(a->ofpacts, a->ofpacts_len); struct meter *meter = rule->ofproto->meters[meter_id]; ovs_list_insert(&meter->rules, &rule->meter_list_node); } static void meter_update(struct meter *meter, const struct ofputil_meter_config *config) { free(meter->bands); meter->flags = config->flags; meter->n_bands = config->n_bands; meter->bands = xmemdup(config->bands, config->n_bands * sizeof *meter->bands); } static struct meter * meter_create(const struct ofputil_meter_config *config, ofproto_meter_id provider_meter_id) { struct meter *meter; meter = xzalloc(sizeof *meter); meter->provider_meter_id = provider_meter_id; meter->created = time_msec(); ovs_list_init(&meter->rules); meter_update(meter, config); return meter; } static void meter_delete(struct ofproto *ofproto, uint32_t first, uint32_t last) OVS_REQUIRES(ofproto_mutex) { for (uint32_t mid = first; mid <= last; ++mid) { struct meter *meter = ofproto->meters[mid]; if (meter) { /* First delete the rules that use this meter. 
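They go through the normal flow deletion path with reason OFPRR_METER_DELETE, so a controller that asked for flow removed messages can tell why its flows disappeared. 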
*/ if (!ovs_list_is_empty(&meter->rules)) { struct rule_collection rules; struct rule *rule; rule_collection_init(&rules); LIST_FOR_EACH (rule, meter_list_node, &meter->rules) { rule_collection_add(&rules, rule); } delete_flows__(&rules, OFPRR_METER_DELETE, NULL); } ofproto->meters[mid] = NULL; ofproto->ofproto_class->meter_del(ofproto, meter->provider_meter_id); free(meter->bands); free(meter); } } } static enum ofperr handle_add_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm) { ofproto_meter_id provider_meter_id = { UINT32_MAX }; struct meter **meterp = &ofproto->meters[mm->meter.meter_id]; enum ofperr error; if (*meterp) { return OFPERR_OFPMMFC_METER_EXISTS; } error = ofproto->ofproto_class->meter_set(ofproto, &provider_meter_id, &mm->meter); if (!error) { ovs_assert(provider_meter_id.uint32 != UINT32_MAX); *meterp = meter_create(&mm->meter, provider_meter_id); } return error; } static enum ofperr handle_modify_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm) { struct meter *meter = ofproto->meters[mm->meter.meter_id]; enum ofperr error; uint32_t provider_meter_id; if (!meter) { return OFPERR_OFPMMFC_UNKNOWN_METER; } provider_meter_id = meter->provider_meter_id.uint32; error = ofproto->ofproto_class->meter_set(ofproto, &meter->provider_meter_id, &mm->meter); ovs_assert(meter->provider_meter_id.uint32 == provider_meter_id); if (!error) { meter_update(meter, &mm->meter); } return error; } static enum ofperr handle_delete_meter(struct ofconn *ofconn, struct ofputil_meter_mod *mm) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); uint32_t meter_id = mm->meter.meter_id; enum ofperr error = 0; uint32_t first, last; if (meter_id == OFPM13_ALL) { first = 1; last = ofproto->meter_features.max_meters; } else { if (!meter_id || meter_id > ofproto->meter_features.max_meters) { return 0; } first = last = meter_id; } /* Delete the meters. */ ovs_mutex_lock(&ofproto_mutex); meter_delete(ofproto, first, last); ovs_mutex_unlock(&ofproto_mutex); return error; } static enum ofperr handle_meter_mod(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_meter_mod mm; uint64_t bands_stub[256 / 8]; struct ofpbuf bands; uint32_t meter_id; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } ofpbuf_use_stub(&bands, bands_stub, sizeof bands_stub); error = ofputil_decode_meter_mod(oh, &mm, &bands); if (error) { goto exit_free_bands; } meter_id = mm.meter.meter_id; if (mm.command != OFPMC13_DELETE) { /* Fails also when meters are not implemented by the provider. 
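The checks below follow the OpenFlow 1.3 numbering: ids 1 through OFPM13_MAX name ordinary meters, values above OFPM13_MAX are virtual meters such as OFPM13_ALL, and ids beyond the provider's advertised meter_features.max_meters, which is zero when meters are unsupported, are rejected as out of meters. 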
*/ if (meter_id == 0 || meter_id > OFPM13_MAX) { error = OFPERR_OFPMMFC_INVALID_METER; goto exit_free_bands; } else if (meter_id > ofproto->meter_features.max_meters) { error = OFPERR_OFPMMFC_OUT_OF_METERS; goto exit_free_bands; } if (mm.meter.n_bands > ofproto->meter_features.max_bands) { error = OFPERR_OFPMMFC_OUT_OF_BANDS; goto exit_free_bands; } } switch (mm.command) { case OFPMC13_ADD: error = handle_add_meter(ofproto, &mm); break; case OFPMC13_MODIFY: error = handle_modify_meter(ofproto, &mm); break; case OFPMC13_DELETE: error = handle_delete_meter(ofconn, &mm); break; default: error = OFPERR_OFPMMFC_BAD_COMMAND; break; } if (!error) { struct ofputil_requestforward rf; rf.xid = oh->xid; rf.reason = OFPRFR_METER_MOD; rf.meter_mod = &mm; connmgr_send_requestforward(ofproto->connmgr, ofconn, &rf); } exit_free_bands: ofpbuf_uninit(&bands); return error; } static enum ofperr handle_meter_features_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_meter_features features; struct ofpbuf *b; if (ofproto->ofproto_class->meter_get_features) { ofproto->ofproto_class->meter_get_features(ofproto, &features); } else { memset(&features, 0, sizeof features); } b = ofputil_encode_meter_features_reply(&features, request); ofconn_send_reply(ofconn, b); return 0; } static enum ofperr handle_meter_request(struct ofconn *ofconn, const struct ofp_header *request, enum ofptype type) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ovs_list replies; uint64_t bands_stub[256 / 8]; struct ofpbuf bands; uint32_t meter_id, first, last; ofputil_decode_meter_request(request, &meter_id); if (meter_id == OFPM13_ALL) { first = 1; last = ofproto->meter_features.max_meters; } else { if (!meter_id || meter_id > ofproto->meter_features.max_meters || !ofproto->meters[meter_id]) { return OFPERR_OFPMMFC_UNKNOWN_METER; } first = last = meter_id; } ofpbuf_use_stub(&bands, bands_stub, sizeof bands_stub); ofpmp_init(&replies, request); for (meter_id = first; meter_id <= last; ++meter_id) { struct meter *meter = ofproto->meters[meter_id]; if (!meter) { continue; /* Skip non-existing meters. */ } if (type == OFPTYPE_METER_STATS_REQUEST) { struct ofputil_meter_stats stats; stats.meter_id = meter_id; /* Provider sets the packet and byte counts, we do the rest. */ stats.flow_count = ovs_list_size(&meter->rules); calc_duration(meter->created, time_msec(), &stats.duration_sec, &stats.duration_nsec); stats.n_bands = meter->n_bands; ofpbuf_clear(&bands); stats.bands = ofpbuf_put_uninit(&bands, meter->n_bands * sizeof *stats.bands); if (!ofproto->ofproto_class->meter_get(ofproto, meter->provider_meter_id, &stats)) { ofputil_append_meter_stats(&replies, &stats); } } else { /* type == OFPTYPE_METER_CONFIG_REQUEST */ struct ofputil_meter_config config; config.meter_id = meter_id; config.flags = meter->flags; config.n_bands = meter->n_bands; config.bands = meter->bands; ofputil_append_meter_config(&replies, &config); } } ofconn_send_replies(ofconn, &replies); ofpbuf_uninit(&bands); return 0; } /* Returned group is RCU protected. 
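In other words the pointer remains valid only until the calling thread quiesces; a caller that needs the group for longer must take a real reference, as ofproto_group_lookup() below does when 'take_ref' is set, and release it later. A hypothetical caller: * * struct ofgroup *g = ofproto_group_lookup(ofproto, id, OVS_VERSION_MAX, true); * if (g) { * ...use 'g', possibly across quiescent states... * ofproto_group_unref(g); * } 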
*/ static struct ofgroup * ofproto_group_lookup__(const struct ofproto *ofproto, uint32_t group_id, ovs_version_t version) { struct ofgroup *group; CMAP_FOR_EACH_WITH_HASH (group, cmap_node, hash_int(group_id, 0), &ofproto->groups) { if (group->group_id == group_id && versions_visible_in_version(&group->versions, version)) { return group; } } return NULL; } /* If the group exists, this function increments the group's reference count. * * Make sure to call ofproto_group_unref() after no longer needing to maintain * a reference to the group. */ struct ofgroup * ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id, ovs_version_t version, bool take_ref) { struct ofgroup *group; group = ofproto_group_lookup__(ofproto, group_id, version); if (group && take_ref) { /* Not holding a lock, so it is possible that another thread releases * the last reference just before we manage to get one. */ return ofproto_group_try_ref(group) ? group : NULL; } return group; } /* Caller should hold 'ofproto_mutex' if it is important that the * group is not removed by someone else. */ static bool ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id) { return ofproto_group_lookup__(ofproto, group_id, OVS_VERSION_MAX) != NULL; } static void group_add_rule(struct ofgroup *group, struct rule *rule) { rule_collection_add(&group->rules, rule); } static void group_remove_rule(struct ofgroup *group, struct rule *rule) { rule_collection_remove(&group->rules, rule); } static void append_group_stats(struct ofgroup *group, struct ovs_list *replies) OVS_REQUIRES(ofproto_mutex) { struct ofputil_group_stats ogs; const struct ofproto *ofproto = group->ofproto; long long int now = time_msec(); int error; ogs.bucket_stats = xmalloc(group->n_buckets * sizeof *ogs.bucket_stats); /* Provider sets the packet and byte counts, we do the rest. */ ogs.ref_count = rule_collection_n(&group->rules); ogs.n_buckets = group->n_buckets; error = (ofproto->ofproto_class->group_get_stats ? ofproto->ofproto_class->group_get_stats(group, &ogs) : EOPNOTSUPP); if (error) { ogs.packet_count = UINT64_MAX; ogs.byte_count = UINT64_MAX; memset(ogs.bucket_stats, 0xff, ogs.n_buckets * sizeof *ogs.bucket_stats); } ogs.group_id = group->group_id; calc_duration(group->created, now, &ogs.duration_sec, &ogs.duration_nsec); ofputil_append_group_stats(replies, &ogs); free(ogs.bucket_stats); } static void handle_group_request(struct ofconn *ofconn, const struct ofp_header *request, uint32_t group_id, void (*cb)(struct ofgroup *, struct ovs_list *replies)) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofgroup *group; struct ovs_list replies; ofpmp_init(&replies, request); /* Must exclude modifications to guarantee iterating groups. 
*/ ovs_mutex_lock(&ofproto_mutex); if (group_id == OFPG_ALL) { CMAP_FOR_EACH (group, cmap_node, &ofproto->groups) { if (versions_visible_in_version(&group->versions, OVS_VERSION_MAX)) { cb(group, &replies); } } } else { group = ofproto_group_lookup__(ofproto, group_id, OVS_VERSION_MAX); if (group) { cb(group, &replies); } } ovs_mutex_unlock(&ofproto_mutex); ofconn_send_replies(ofconn, &replies); } static enum ofperr handle_group_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { uint32_t group_id; enum ofperr error; error = ofputil_decode_group_stats_request(request, &group_id); if (error) { return error; } handle_group_request(ofconn, request, group_id, append_group_stats); return 0; } static void append_group_desc(struct ofgroup *group, struct ovs_list *replies) { struct ofputil_group_desc gds; gds.group_id = group->group_id; gds.type = group->type; gds.props = group->props; ofputil_append_group_desc_reply(&gds, &group->buckets, replies); } static enum ofperr handle_group_desc_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { handle_group_request(ofconn, request, ofputil_decode_group_desc_request(request), append_group_desc); return 0; } static enum ofperr handle_group_features_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofpbuf *msg; msg = ofputil_encode_group_features_reply(&p->ogf, request); if (msg) { ofconn_send_reply(ofconn, msg); } return 0; } static void put_queue_get_config_reply(struct ofport *port, uint32_t queue, struct ovs_list *replies) { struct ofputil_queue_config qc; /* None of the existing queues have compatible properties, so we hard-code * omitting min_rate and max_rate. */ qc.port = port->ofp_port; qc.queue = queue; qc.min_rate = UINT16_MAX; qc.max_rate = UINT16_MAX; ofputil_append_queue_get_config_reply(&qc, replies); } static int handle_queue_get_config_request_for_port(struct ofport *port, uint32_t queue, struct ovs_list *replies) { struct smap details = SMAP_INITIALIZER(&details); if (queue != OFPQ_ALL) { int error = netdev_get_queue(port->netdev, queue, &details); switch (error) { case 0: put_queue_get_config_reply(port, queue, replies); break; case EOPNOTSUPP: case EINVAL: return OFPERR_OFPQOFC_BAD_QUEUE; default: return OFPERR_NXQOFC_QUEUE_ERROR; } } else { struct netdev_queue_dump queue_dump; uint32_t queue_id; NETDEV_QUEUE_FOR_EACH (&queue_id, &details, &queue_dump, port->netdev) { put_queue_get_config_reply(port, queue_id, replies); } } smap_destroy(&details); return 0; } static enum ofperr handle_queue_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ovs_list replies; struct ofport *port; ofp_port_t req_port; uint32_t req_queue; enum ofperr error; error = ofputil_decode_queue_get_config_request(oh, &req_port, &req_queue); if (error) { return error; } ofputil_start_queue_get_config_reply(oh, &replies); if (req_port == OFPP_ANY) { error = OFPERR_OFPQOFC_BAD_QUEUE; HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) { if (!handle_queue_get_config_request_for_port(port, req_queue, &replies)) { error = 0; } } } else { port = ofproto_get_port(ofproto, req_port); error = (port ? 
handle_queue_get_config_request_for_port(port, req_queue, &replies) : OFPERR_OFPQOFC_BAD_PORT); } if (!error) { ofconn_send_replies(ofconn, &replies); } else { ofpbuf_list_delete(&replies); } return error; } /* Allocates, initializes, and constructs a new group in 'ofproto', obtaining * all the attributes for it from 'gm', and stores a pointer to it in * '*ofgroup'. Makes the new group visible from the flow table starting from * 'version'. * * Returns 0 if successful, otherwise an error code. If there is an error then * '*ofgroup' is indeterminate upon return. */ static enum ofperr init_group(struct ofproto *ofproto, const struct ofputil_group_mod *gm, ovs_version_t version, struct ofgroup **ofgroup) { enum ofperr error; const long long int now = time_msec(); if (gm->group_id > OFPG_MAX) { return OFPERR_OFPGMFC_INVALID_GROUP; } if (gm->type > OFPGT11_FF) { return OFPERR_OFPGMFC_BAD_TYPE; } *ofgroup = ofproto->ofproto_class->group_alloc(); if (!*ofgroup) { VLOG_WARN_RL(&rl, "%s: failed to allocate group", ofproto->name); return OFPERR_OFPGMFC_OUT_OF_GROUPS; } *CONST_CAST(struct ofproto **, &(*ofgroup)->ofproto) = ofproto; *CONST_CAST(uint32_t *, &((*ofgroup)->group_id)) = gm->group_id; *CONST_CAST(enum ofp11_group_type *, &(*ofgroup)->type) = gm->type; *CONST_CAST(long long int *, &((*ofgroup)->created)) = now; *CONST_CAST(long long int *, &((*ofgroup)->modified)) = now; ovs_refcount_init(&(*ofgroup)->ref_count); (*ofgroup)->being_deleted = false; ovs_list_init(CONST_CAST(struct ovs_list *, &(*ofgroup)->buckets)); ofputil_bucket_clone_list(CONST_CAST(struct ovs_list *, &(*ofgroup)->buckets), &gm->buckets, NULL); *CONST_CAST(uint32_t *, &(*ofgroup)->n_buckets) = ovs_list_size(&(*ofgroup)->buckets); ofputil_group_properties_copy(CONST_CAST(struct ofputil_group_props *, &(*ofgroup)->props), &gm->props); rule_collection_init(&(*ofgroup)->rules); /* Make group visible from 'version'. */ (*ofgroup)->versions = VERSIONS_INITIALIZER(version, OVS_VERSION_NOT_REMOVED); /* Construct called BEFORE any locks are held. */ error = ofproto->ofproto_class->group_construct(*ofgroup); if (error) { ofputil_group_properties_destroy(CONST_CAST(struct ofputil_group_props *, &(*ofgroup)->props)); ofputil_bucket_list_destroy(CONST_CAST(struct ovs_list *, &(*ofgroup)->buckets)); ofproto->ofproto_class->group_dealloc(*ofgroup); } return error; } /* Implements the OFPGC11_ADD operation specified by 'gm', adding a group to * 'ofproto''s group table. Returns 0 on success or an OpenFlow error code on * failure. */ static enum ofperr add_group_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { enum ofperr error; if (ofproto_group_exists(ofproto, ogm->gm.group_id)) { return OFPERR_OFPGMFC_GROUP_EXISTS; } if (ofproto->n_groups[ogm->gm.type] >= ofproto->ogf.max_groups[ogm->gm.type]) { return OFPERR_OFPGMFC_OUT_OF_GROUPS; } /* Allocate new group and initialize it. */ error = init_group(ofproto, &ogm->gm, ogm->version, &ogm->new_group); if (!error) { /* Insert new group. */ cmap_insert(&ofproto->groups, &ogm->new_group->cmap_node, hash_int(ogm->new_group->group_id, 0)); ofproto->n_groups[ogm->new_group->type]++; } return error; } /* Adds all of the buckets from 'ofgroup' to 'new_ofgroup'. The buckets * already in 'new_ofgroup' will be placed just after the (copy of the) bucket * in 'ofgroup' with bucket ID 'command_bucket_id'. Special * 'command_bucket_id' values OFPG15_BUCKET_FIRST and OFPG15_BUCKET_LAST are * also honored. 
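For example, with illustrative bucket ids: if 'ofgroup' holds buckets 10, 20, 30 and the group mod supplied new buckets 21, 22, then 'command_bucket_id' 20 yields 10, 20, 21, 22, 30; OFPG15_BUCKET_FIRST yields 21, 22, 10, 20, 30; and OFPG15_BUCKET_LAST yields 10, 20, 30, 21, 22. 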
*/ static enum ofperr copy_buckets_for_insert_bucket(const struct ofgroup *ofgroup, struct ofgroup *new_ofgroup, uint32_t command_bucket_id) { struct ofputil_bucket *last = NULL; if (command_bucket_id <= OFPG15_BUCKET_MAX) { /* Check here to ensure that a bucket corresponding to * command_bucket_id exists in the old bucket list. * * The subsequent search below of 'new_ofgroup' covers * both buckets in the old bucket list and buckets added * by the insert-buckets group mod message that this function processes. */ if (!ofputil_bucket_find(&ofgroup->buckets, command_bucket_id)) { return OFPERR_OFPGMFC_UNKNOWN_BUCKET; } if (!ovs_list_is_empty(&new_ofgroup->buckets)) { last = ofputil_bucket_list_back(&new_ofgroup->buckets); } } ofputil_bucket_clone_list(CONST_CAST(struct ovs_list *, &new_ofgroup->buckets), &ofgroup->buckets, NULL); if (ofputil_bucket_check_duplicate_id(&new_ofgroup->buckets)) { VLOG_INFO_RL(&rl, "Duplicate bucket id"); return OFPERR_OFPGMFC_BUCKET_EXISTS; } /* Rearrange list according to command_bucket_id */ if (command_bucket_id == OFPG15_BUCKET_LAST) { if (!ovs_list_is_empty(&ofgroup->buckets)) { struct ofputil_bucket *new_first; const struct ofputil_bucket *first; first = ofputil_bucket_list_front(&ofgroup->buckets); new_first = ofputil_bucket_find(&new_ofgroup->buckets, first->bucket_id); ovs_list_splice(new_ofgroup->buckets.next, &new_first->list_node, CONST_CAST(struct ovs_list *, &new_ofgroup->buckets)); } } else if (command_bucket_id <= OFPG15_BUCKET_MAX && last) { struct ofputil_bucket *after; /* Presence of the bucket is checked above, so 'after' should never be NULL. */ after = ofputil_bucket_find(&new_ofgroup->buckets, command_bucket_id); ovs_list_splice(after->list_node.next, new_ofgroup->buckets.next, last->list_node.next); } return 0; } /* Appends a copy of all the buckets from 'ofgroup' to 'new_ofgroup' * with the exception of the bucket whose bucket id is 'command_bucket_id'. * Special 'command_bucket_id' values OFPG15_BUCKET_FIRST, OFPG15_BUCKET_LAST * and OFPG15_BUCKET_ALL are also honored. */ static enum ofperr copy_buckets_for_remove_bucket(const struct ofgroup *ofgroup, struct ofgroup *new_ofgroup, uint32_t command_bucket_id) { const struct ofputil_bucket *skip = NULL; if (command_bucket_id == OFPG15_BUCKET_ALL) { return 0; } if (command_bucket_id == OFPG15_BUCKET_FIRST) { if (!ovs_list_is_empty(&ofgroup->buckets)) { skip = ofputil_bucket_list_front(&ofgroup->buckets); } } else if (command_bucket_id == OFPG15_BUCKET_LAST) { if (!ovs_list_is_empty(&ofgroup->buckets)) { skip = ofputil_bucket_list_back(&ofgroup->buckets); } } else { skip = ofputil_bucket_find(&ofgroup->buckets, command_bucket_id); if (!skip) { return OFPERR_OFPGMFC_UNKNOWN_BUCKET; } } ofputil_bucket_clone_list(CONST_CAST(struct ovs_list *, &new_ofgroup->buckets), &ofgroup->buckets, skip); return 0; } /* Implements OFPGC11_MODIFY, OFPGC15_INSERT_BUCKET and * OFPGC15_REMOVE_BUCKET. Returns 0 on success or an OpenFlow error code * on failure. * * Note that the group is re-created and then replaces the old group in * ofproto's ofgroup hash map. Thus, the group is never altered while users of * the xlate module hold a pointer to the group. */ static enum ofperr modify_group_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { struct ofgroup *old_group; /* Modified group. 
*/ struct ofgroup *new_group; enum ofperr error; old_group = ofproto_group_lookup__(ofproto, ogm->gm.group_id, OVS_VERSION_MAX); if (!old_group) { return OFPERR_OFPGMFC_UNKNOWN_GROUP; } /* Inserting or deleting a bucket should not change the group's type or * properties, so change the group mod so that these aspects match the old * group. (See EXT-570.) */ if (ogm->gm.command == OFPGC15_INSERT_BUCKET || ogm->gm.command == OFPGC15_REMOVE_BUCKET) { ogm->gm.type = old_group->type; ofputil_group_properties_destroy(&ogm->gm.props); ofputil_group_properties_copy(&ogm->gm.props, &old_group->props); } if (old_group->type != ogm->gm.type && (ofproto->n_groups[ogm->gm.type] >= ofproto->ogf.max_groups[ogm->gm.type])) { return OFPERR_OFPGMFC_OUT_OF_GROUPS; } error = init_group(ofproto, &ogm->gm, ogm->version, &ogm->new_group); if (error) { return error; } new_group = ogm->new_group; /* Manipulate bucket list for bucket commands */ if (ogm->gm.command == OFPGC15_INSERT_BUCKET) { error = copy_buckets_for_insert_bucket(old_group, new_group, ogm->gm.command_bucket_id); } else if (ogm->gm.command == OFPGC15_REMOVE_BUCKET) { error = copy_buckets_for_remove_bucket(old_group, new_group, ogm->gm.command_bucket_id); } if (error) { goto out; } /* The group creation time does not change during modification. */ *CONST_CAST(long long int *, &(new_group->created)) = old_group->created; *CONST_CAST(long long int *, &(new_group->modified)) = time_msec(); group_collection_add(&ogm->old_groups, old_group); /* Mark the old group for deletion. */ versions_set_remove_version(&old_group->versions, ogm->version); /* Insert replacement group. */ cmap_insert(&ofproto->groups, &new_group->cmap_node, hash_int(new_group->group_id, 0)); /* Transfer rules. */ rule_collection_move(&new_group->rules, &old_group->rules); if (old_group->type != new_group->type) { ofproto->n_groups[old_group->type]--; ofproto->n_groups[new_group->type]++; } return 0; out: ofproto_group_unref(new_group); return error; } /* Implements the OFPGC11_ADD_OR_MOD command which creates the group when it does not * exist yet and modifies it otherwise */ static enum ofperr add_or_modify_group_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { enum ofperr error; if (!ofproto_group_exists(ofproto, ogm->gm.group_id)) { error = add_group_start(ofproto, ogm); } else { error = modify_group_start(ofproto, ogm); } return error; } static void delete_group_start(struct ofproto *ofproto, ovs_version_t version, struct group_collection *groups, struct ofgroup *group) OVS_REQUIRES(ofproto_mutex) { /* Makes flow deletion code leave the rule pointers in 'group->rules' * intact, so that we can later refer to the rules deleted due to the group * deletion. Rule pointers will be removed from all other groups, if any, * so we will never try to delete the same rule twice. */ group->being_deleted = true; /* Mark all the referring groups for deletion. */ delete_flows_start__(ofproto, version, &group->rules); group_collection_add(groups, group); versions_set_remove_version(&group->versions, version); ofproto->n_groups[group->type]--; } static void delete_group_finish(struct ofproto *ofproto, struct ofgroup *group) OVS_REQUIRES(ofproto_mutex) { /* Finish deletion of all flow entries containing this group in a group * action. */ delete_flows_finish__(ofproto, &group->rules, OFPRR_GROUP_DELETE, NULL); /* Group removal is postponed by the caller. */ } /* Implements OFPGC11_DELETE. 
*/ static void delete_groups_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { struct ofgroup *group; if (ogm->gm.group_id == OFPG_ALL) { CMAP_FOR_EACH (group, cmap_node, &ofproto->groups) { if (versions_visible_in_version(&group->versions, ogm->version)) { delete_group_start(ofproto, ogm->version, &ogm->old_groups, group); } } } else { group = ofproto_group_lookup__(ofproto, ogm->gm.group_id, ogm->version); if (group) { delete_group_start(ofproto, ogm->version, &ogm->old_groups, group); } } } static enum ofperr ofproto_group_mod_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { enum ofperr error; ogm->new_group = NULL; group_collection_init(&ogm->old_groups); switch (ogm->gm.command) { case OFPGC11_ADD: error = add_group_start(ofproto, ogm); break; case OFPGC11_MODIFY: error = modify_group_start(ofproto, ogm); break; case OFPGC11_ADD_OR_MOD: error = add_or_modify_group_start(ofproto, ogm); break; case OFPGC11_DELETE: delete_groups_start(ofproto, ogm); error = 0; break; case OFPGC15_INSERT_BUCKET: error = modify_group_start(ofproto, ogm); break; case OFPGC15_REMOVE_BUCKET: error = modify_group_start(ofproto, ogm); break; default: if (ogm->gm.command > OFPGC11_DELETE) { VLOG_INFO_RL(&rl, "%s: Invalid group_mod command type %d", ofproto->name, ogm->gm.command); } error = OFPERR_OFPGMFC_BAD_COMMAND; break; } return error; } static void ofproto_group_mod_revert(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { struct ofgroup *new_group = ogm->new_group; struct ofgroup *old_group; /* Restore replaced or deleted groups. */ GROUP_COLLECTION_FOR_EACH (old_group, &ogm->old_groups) { ofproto->n_groups[old_group->type]++; if (new_group) { ovs_assert(group_collection_n(&ogm->old_groups) == 1); /* Transfer rules back. */ rule_collection_move(&old_group->rules, &new_group->rules); } else { old_group->being_deleted = false; /* Revert rule deletion. */ delete_flows_revert__(ofproto, &old_group->rules); } /* Restore visibility. */ versions_set_remove_version(&old_group->versions, OVS_VERSION_NOT_REMOVED); } if (new_group) { /* Remove the new group immediately. It was never visible to * lookups. */ cmap_remove(&ofproto->groups, &new_group->cmap_node, hash_int(new_group->group_id, 0)); ofproto->n_groups[new_group->type]--; ofproto_group_unref(new_group); } } static void ofproto_group_mod_finish(struct ofproto *ofproto, struct ofproto_group_mod *ogm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { struct ofgroup *new_group = ogm->new_group; struct ofgroup *old_group; if (new_group && group_collection_n(&ogm->old_groups) && ofproto->ofproto_class->group_modify) { /* Modify a group. */ ovs_assert(group_collection_n(&ogm->old_groups) == 1); /* XXX: OK to lose old group's stats? */ ofproto->ofproto_class->group_modify(new_group); } /* Delete old groups. */ GROUP_COLLECTION_FOR_EACH(old_group, &ogm->old_groups) { delete_group_finish(ofproto, old_group); } remove_groups_postponed(&ogm->old_groups); if (req) { struct ofputil_requestforward rf; rf.xid = req->request->xid; rf.reason = OFPRFR_GROUP_MOD; rf.group_mod = &ogm->gm; connmgr_send_requestforward(ofproto->connmgr, req->ofconn, &rf); } } /* Delete all groups from 'ofproto'. * * This is intended for use within an ofproto provider's 'destruct' * function. 
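 * * A minimal usage sketch (hypothetical provider code, added for illustration; the real 'destruct' signature comes from the ofproto provider interface): static void my_provider_destruct(struct ofproto *ofproto) { ofproto_group_delete_all(ofproto); ... then release provider-private state ... }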
*/ void ofproto_group_delete_all(struct ofproto *ofproto) OVS_EXCLUDED(ofproto_mutex) { struct ofproto_group_mod ogm; ogm.gm.command = OFPGC11_DELETE; ogm.gm.group_id = OFPG_ALL; ovs_mutex_lock(&ofproto_mutex); ogm.version = ofproto->tables_version + 1; ofproto_group_mod_start(ofproto, &ogm); ofproto_bump_tables_version(ofproto); ofproto_group_mod_finish(ofproto, &ogm, NULL); ovs_mutex_unlock(&ofproto_mutex); } static enum ofperr handle_group_mod(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofproto_group_mod ogm; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_group_mod(oh, &ogm.gm); if (error) { return error; } ovs_mutex_lock(&ofproto_mutex); ogm.version = ofproto->tables_version + 1; error = ofproto_group_mod_start(ofproto, &ogm); if (!error) { struct openflow_mod_requester req = { ofconn, oh }; ofproto_bump_tables_version(ofproto); ofproto_group_mod_finish(ofproto, &ogm, &req); ofmonitor_flush(ofproto->connmgr); } ovs_mutex_unlock(&ofproto_mutex); ofputil_uninit_group_mod(&ogm.gm); return error; } enum ofputil_table_miss ofproto_table_get_miss_config(const struct ofproto *ofproto, uint8_t table_id) { enum ofputil_table_miss miss; atomic_read_relaxed(&ofproto->tables[table_id].miss_config, &miss); return miss; } static void table_mod__(struct oftable *oftable, const struct ofputil_table_mod *tm) { if (tm->miss == OFPUTIL_TABLE_MISS_DEFAULT) { /* This is how an OFPT_TABLE_MOD decodes if it doesn't specify any * table-miss configuration (because the protocol used doesn't have * such a concept), so there's nothing to do. */ } else { atomic_store_relaxed(&oftable->miss_config, tm->miss); } unsigned int new_eviction = oftable->eviction; if (tm->eviction == OFPUTIL_TABLE_EVICTION_ON) { new_eviction |= EVICTION_OPENFLOW; } else if (tm->eviction == OFPUTIL_TABLE_EVICTION_OFF) { new_eviction &= ~EVICTION_OPENFLOW; } if (new_eviction != oftable->eviction) { ovs_mutex_lock(&ofproto_mutex); oftable_configure_eviction(oftable, new_eviction, oftable->eviction_fields, oftable->n_eviction_fields); ovs_mutex_unlock(&ofproto_mutex); } if (tm->vacancy != OFPUTIL_TABLE_VACANCY_DEFAULT) { ovs_mutex_lock(&ofproto_mutex); oftable->vacancy_down = tm->table_vacancy.vacancy_down; oftable->vacancy_up = tm->table_vacancy.vacancy_up; if (tm->vacancy == OFPUTIL_TABLE_VACANCY_OFF) { oftable->vacancy_event = 0; } else if (!oftable->vacancy_event) { uint8_t vacancy = oftable_vacancy(oftable); oftable->vacancy_event = (vacancy < oftable->vacancy_up ? OFPTR_VACANCY_UP : OFPTR_VACANCY_DOWN); } ovs_mutex_unlock(&ofproto_mutex); } } static enum ofperr table_mod(struct ofproto *ofproto, const struct ofputil_table_mod *tm) { if (!check_table_id(ofproto, tm->table_id)) { return OFPERR_OFPTMFC_BAD_TABLE; } /* Don't allow the eviction flags to be changed (except to the only fixed * value that OVS supports). OF1.4 says this is normal: "The * OFPTMPT_EVICTION property usually cannot be modified using a * OFP_TABLE_MOD request, because the eviction mechanism is switch * defined". 
*/ if (tm->eviction_flags != UINT32_MAX && tm->eviction_flags != OFPROTO_EVICTION_FLAGS) { return OFPERR_OFPTMFC_BAD_CONFIG; } if (tm->table_id == OFPTT_ALL) { struct oftable *oftable; OFPROTO_FOR_EACH_TABLE (oftable, ofproto) { if (!(oftable->flags & (OFTABLE_HIDDEN | OFTABLE_READONLY))) { table_mod__(oftable, tm); } } } else { struct oftable *oftable = &ofproto->tables[tm->table_id]; if (oftable->flags & OFTABLE_READONLY) { return OFPERR_OFPTMFC_EPERM; } table_mod__(oftable, tm); } return 0; } static enum ofperr handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_table_mod tm; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_table_mod(oh, &tm); if (error) { return error; } return table_mod(ofproto, &tm); } /* Free resources that may be allocated by ofproto_flow_mod_init(). */ void ofproto_flow_mod_uninit(struct ofproto_flow_mod *ofm) { if (ofm->temp_rule) { ofproto_rule_unref(ofm->temp_rule); ofm->temp_rule = NULL; } if (ofm->criteria.version != OVS_VERSION_NOT_REMOVED) { rule_criteria_destroy(&ofm->criteria); } if (ofm->conjs) { free(ofm->conjs); ofm->conjs = NULL; ofm->n_conjs = 0; } } /* Initializes 'ofm' with 'ofproto', 'fm', and 'rule'. 'rule' may be null, but * if it is nonnull then the caller must own a reference to it, which on * success is transferred to 'ofm' and on failure is unreffed. */ static enum ofperr ofproto_flow_mod_init(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm, struct rule *rule) OVS_EXCLUDED(ofproto_mutex) { enum ofperr error; /* Forward flow mod fields we need later. */ ofm->command = fm->command; ofm->modify_cookie = fm->modify_cookie; ofm->modify_may_add_flow = (fm->new_cookie != OVS_BE64_MAX && fm->cookie_mask == htonll(0)); /* Old flags must be kept when modifying a flow, but we still must * honor the reset counts flag if present in the flow mod. */ ofm->modify_keep_counts = !(fm->flags & OFPUTIL_FF_RESET_COUNTS); /* Initialize state needed by ofproto_flow_mod_uninit(). 
*/ ofm->temp_rule = rule; ofm->criteria.version = OVS_VERSION_NOT_REMOVED; ofm->conjs = NULL; ofm->n_conjs = 0; bool check_buffer_id = false; switch (ofm->command) { case OFPFC_ADD: check_buffer_id = true; error = add_flow_init(ofproto, ofm, fm); break; case OFPFC_MODIFY: check_buffer_id = true; error = modify_flows_init_loose(ofproto, ofm, fm); break; case OFPFC_MODIFY_STRICT: check_buffer_id = true; error = modify_flow_init_strict(ofproto, ofm, fm); break; case OFPFC_DELETE: error = delete_flows_init_loose(ofproto, ofm, fm); break; case OFPFC_DELETE_STRICT: error = delete_flows_init_strict(ofproto, ofm, fm); break; default: error = OFPERR_OFPFMFC_BAD_COMMAND; break; } if (!error && check_buffer_id && fm->buffer_id != UINT32_MAX) { error = OFPERR_OFPBRC_BUFFER_UNKNOWN; } if (error) { ofproto_flow_mod_uninit(ofm); } return error; } static enum ofperr ofproto_flow_mod_start(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { enum ofperr error; rule_collection_init(&ofm->old_rules); rule_collection_init(&ofm->new_rules); switch (ofm->command) { case OFPFC_ADD: error = add_flow_start(ofproto, ofm); break; case OFPFC_MODIFY: error = modify_flows_start_loose(ofproto, ofm); break; case OFPFC_MODIFY_STRICT: error = modify_flow_start_strict(ofproto, ofm); break; case OFPFC_DELETE: error = delete_flows_start_loose(ofproto, ofm); break; case OFPFC_DELETE_STRICT: error = delete_flow_start_strict(ofproto, ofm); break; default: OVS_NOT_REACHED(); } /* Release resources not needed after start. */ ofproto_flow_mod_uninit(ofm); if (error) { rule_collection_destroy(&ofm->old_rules); rule_collection_destroy(&ofm->new_rules); } return error; } static void ofproto_flow_mod_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { switch (ofm->command) { case OFPFC_ADD: add_flow_revert(ofproto, ofm); break; case OFPFC_MODIFY: case OFPFC_MODIFY_STRICT: modify_flows_revert(ofproto, ofm); break; case OFPFC_DELETE: case OFPFC_DELETE_STRICT: delete_flows_revert(ofproto, ofm); break; default: break; } rule_collection_destroy(&ofm->old_rules); rule_collection_destroy(&ofm->new_rules); } static void ofproto_flow_mod_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { switch (ofm->command) { case OFPFC_ADD: add_flow_finish(ofproto, ofm, req); break; case OFPFC_MODIFY: case OFPFC_MODIFY_STRICT: modify_flows_finish(ofproto, ofm, req); break; case OFPFC_DELETE: case OFPFC_DELETE_STRICT: delete_flows_finish(ofproto, ofm, req); break; default: break; } rule_collection_destroy(&ofm->old_rules); rule_collection_destroy(&ofm->new_rules); if (req) { ofconn_report_flow_mod(req->ofconn, ofm->command); } } /* Commit phases (all while locking ofproto_mutex): * * 1. Begin: Gather resources and make changes visible in the next version. * - Mark affected rules for removal in the next version. * - Create new replacement rules, make visible in the next * version. * - Do not send any events or notifications. * * 2. Revert: Fail if any errors are found. After this point no errors are * possible. No visible changes were made, so rollback is minimal (remove * added invisible rules, restore visibility of rules marked for removal). * * 3. Finish: Make the changes visible for lookups. Insert replacement rules to * the ofproto provider. Remove replaced and deleted rules from ofproto data * structures, and Schedule postponed removal of deleted rules from the * classifier. 
Send notifications, buffered packets, etc. */ static enum ofperr do_bundle_commit(struct ofconn *ofconn, uint32_t id, uint16_t flags) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); ovs_version_t version = ofproto->tables_version + 1; struct ofp_bundle *bundle; struct ofp_bundle_entry *be; enum ofperr error; bundle = ofconn_get_bundle(ofconn, id); if (!bundle) { return OFPERR_OFPBFC_BAD_ID; } if (bundle->flags != flags) { error = OFPERR_OFPBFC_BAD_FLAGS; } else { bool prev_is_port_mod = false; error = 0; ovs_mutex_lock(&ofproto_mutex); /* 1. Begin. */ LIST_FOR_EACH (be, node, &bundle->msg_list) { if (be->type == OFPTYPE_PORT_MOD) { /* Our port mods are not atomic. */ if (flags & OFPBF_ATOMIC) { error = OFPERR_OFPBFC_MSG_FAILED; } else { prev_is_port_mod = true; error = port_mod_start(ofconn, &be->opm.pm, &be->opm.port); } } else { /* Flow & group mods between port mods are applied as a single * version, but the versions are published only after we know * the commit is successful. */ if (prev_is_port_mod) { prev_is_port_mod = false; ++version; } if (be->type == OFPTYPE_FLOW_MOD) { /* Store the version in which the changes should take * effect. */ be->ofm.version = version; error = ofproto_flow_mod_start(ofproto, &be->ofm); } else if (be->type == OFPTYPE_GROUP_MOD) { /* Store the version in which the changes should take * effect. */ be->ogm.version = version; error = ofproto_group_mod_start(ofproto, &be->ogm); } else if (be->type == OFPTYPE_PACKET_OUT) { be->opo.version = version; error = ofproto_packet_out_start(ofproto, &be->opo); } else { OVS_NOT_REACHED(); } } if (error) { break; } } if (error) { /* Send error referring to the original message. */ ofconn_send_error(ofconn, &be->ofp_msg, error); error = OFPERR_OFPBFC_MSG_FAILED; /* 2. Revert. Undo all the changes made above. */ LIST_FOR_EACH_REVERSE_CONTINUE(be, node, &bundle->msg_list) { if (be->type == OFPTYPE_FLOW_MOD) { ofproto_flow_mod_revert(ofproto, &be->ofm); } else if (be->type == OFPTYPE_GROUP_MOD) { ofproto_group_mod_revert(ofproto, &be->ogm); } else if (be->type == OFPTYPE_PACKET_OUT) { ofproto_packet_out_revert(ofproto, &be->opo); } /* Nothing needs to be reverted for a port mod. */ } } else { /* 3. Finish. */ LIST_FOR_EACH (be, node, &bundle->msg_list) { if (be->type == OFPTYPE_PORT_MOD) { /* Perform the actual port mod. This is not atomic, i.e., * the effects will be immediately seen by upcall * processing regardless of the lookup version. It should * be noted that port configuration changes can originate * also from OVSDB changes asynchronously to all upcall * processing. */ port_mod_finish(ofconn, &be->opm.pm, be->opm.port); } else { version = (be->type == OFPTYPE_FLOW_MOD) ? be->ofm.version : (be->type == OFPTYPE_GROUP_MOD) ? be->ogm.version : (be->type == OFPTYPE_PACKET_OUT) ? be->opo.version : version; /* Bump the lookup version to the one of the current * message. This makes all the changes in the bundle at * this version visible to lookups at once. 
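 * (Example added for illustration, not original text: if the bundle staged flow mods at versions 5 and 6, separated by a port mod, readers keep seeing version 4 until the assignment below publishes 5, and later 6, so each version's changes appear atomically.)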
*/ if (ofproto->tables_version < version) { ofproto->tables_version = version; ofproto->ofproto_class->set_tables_version( ofproto, ofproto->tables_version); } struct openflow_mod_requester req = { ofconn, &be->ofp_msg }; if (be->type == OFPTYPE_FLOW_MOD) { ofproto_flow_mod_finish(ofproto, &be->ofm, &req); } else if (be->type == OFPTYPE_GROUP_MOD) { ofproto_group_mod_finish(ofproto, &be->ogm, &req); } else if (be->type == OFPTYPE_PACKET_OUT) { ofproto_packet_out_finish(ofproto, &be->opo); } } } } ofmonitor_flush(ofproto->connmgr); ovs_mutex_unlock(&ofproto_mutex); } /* The bundle is discarded regardless of the outcome. */ ofp_bundle_remove__(ofconn, bundle); return error; } static enum ofperr handle_bundle_control(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_bundle_ctrl_msg bctrl; struct ofputil_bundle_ctrl_msg reply; struct ofpbuf *buf; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_bundle_ctrl(oh, &bctrl); if (error) { return error; } reply.flags = 0; reply.bundle_id = bctrl.bundle_id; switch (bctrl.type) { case OFPBCT_OPEN_REQUEST: error = ofp_bundle_open(ofconn, bctrl.bundle_id, bctrl.flags, oh); reply.type = OFPBCT_OPEN_REPLY; break; case OFPBCT_CLOSE_REQUEST: error = ofp_bundle_close(ofconn, bctrl.bundle_id, bctrl.flags); reply.type = OFPBCT_CLOSE_REPLY; break; case OFPBCT_COMMIT_REQUEST: error = do_bundle_commit(ofconn, bctrl.bundle_id, bctrl.flags); reply.type = OFPBCT_COMMIT_REPLY; break; case OFPBCT_DISCARD_REQUEST: error = ofp_bundle_discard(ofconn, bctrl.bundle_id); reply.type = OFPBCT_DISCARD_REPLY; break; case OFPBCT_OPEN_REPLY: case OFPBCT_CLOSE_REPLY: case OFPBCT_COMMIT_REPLY: case OFPBCT_DISCARD_REPLY: return OFPERR_OFPBFC_BAD_TYPE; break; } if (!error) { buf = ofputil_encode_bundle_ctrl_reply(oh, &reply); ofconn_send_reply(ofconn, buf); } return error; } static enum ofperr handle_bundle_add(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); enum ofperr error; struct ofputil_bundle_add_msg badd; struct ofp_bundle_entry *bmsg; enum ofptype type; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_bundle_add(oh, &badd, &type); if (error) { return error; } bmsg = ofp_bundle_entry_alloc(type, badd.msg); struct ofpbuf ofpacts; uint64_t ofpacts_stub[1024 / 8]; ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); if (type == OFPTYPE_PORT_MOD) { error = ofputil_decode_port_mod(badd.msg, &bmsg->opm.pm, false); } else if (type == OFPTYPE_FLOW_MOD) { struct ofputil_flow_mod fm; error = ofputil_decode_flow_mod(&fm, badd.msg, ofconn_get_protocol(ofconn), ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map, &ofpacts, u16_to_ofp(ofproto->max_ports), ofproto->n_tables); if (!error) { error = ofproto_flow_mod_init(ofproto, &bmsg->ofm, &fm, NULL); } } else if (type == OFPTYPE_GROUP_MOD) { error = ofputil_decode_group_mod(badd.msg, &bmsg->ogm.gm); } else if (type == OFPTYPE_PACKET_OUT) { struct ofputil_packet_out po; COVERAGE_INC(ofproto_packet_out); /* Decode message. */ error = ofputil_decode_packet_out(&po, badd.msg, &ofpacts); if (!error) { po.ofpacts = ofpbuf_steal_data(&ofpacts); /* Move to heap. 
*/ error = ofproto_packet_out_init(ofproto, ofconn, &bmsg->opo, &po); } } else { OVS_NOT_REACHED(); } ofpbuf_uninit(&ofpacts); if (!error) { error = ofp_bundle_add_message(ofconn, badd.bundle_id, badd.flags, bmsg, oh); } if (error) { ofp_bundle_entry_free(bmsg); } return error; } static enum ofperr handle_tlv_table_mod(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct tun_table *old_tab, *new_tab; struct ofputil_tlv_table_mod ttm; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_tlv_table_mod(oh, &ttm); if (error) { return error; } old_tab = ovsrcu_get_protected(struct tun_table *, &ofproto->metadata_tab); error = tun_metadata_table_mod(&ttm, old_tab, &new_tab); if (!error) { ovs_mutex_lock(&ofproto->vl_mff_map.mutex); error = mf_vl_mff_map_mod_from_tun_metadata(&ofproto->vl_mff_map, &ttm); ovs_mutex_unlock(&ofproto->vl_mff_map.mutex); if (!error) { ovsrcu_set(&ofproto->metadata_tab, new_tab); tun_metadata_postpone_free(old_tab); } } ofputil_uninit_tlv_table(&ttm.mappings); return error; } static enum ofperr handle_tlv_table_request(struct ofconn *ofconn, const struct ofp_header *oh) { const struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_tlv_table_reply ttr; struct ofpbuf *b; tun_metadata_table_request(ofproto_get_tun_tab(ofproto), &ttr); b = ofputil_encode_tlv_table_reply(oh, &ttr); ofputil_uninit_tlv_table(&ttr.mappings); ofconn_send_reply(ofconn, b); return 0; } static enum ofperr handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) OVS_EXCLUDED(ofproto_mutex) { const struct ofp_header *oh = msg->data; enum ofptype type; enum ofperr error; error = ofptype_decode(&type, oh); if (error) { return error; } if (oh->version >= OFP13_VERSION && ofpmsg_is_stat_request(oh) && ofpmp_more(oh)) { /* We have no buffer implementation for multipart requests. * Report overflow for requests which consist of multiple * messages. */ return OFPERR_OFPBRC_MULTIPART_BUFFER_OVERFLOW; } switch (type) { /* OpenFlow requests. */ case OFPTYPE_ECHO_REQUEST: return handle_echo_request(ofconn, oh); case OFPTYPE_FEATURES_REQUEST: return handle_features_request(ofconn, oh); case OFPTYPE_GET_CONFIG_REQUEST: return handle_get_config_request(ofconn, oh); case OFPTYPE_SET_CONFIG: return handle_set_config(ofconn, oh); case OFPTYPE_PACKET_OUT: return handle_packet_out(ofconn, oh); case OFPTYPE_PORT_MOD: return handle_port_mod(ofconn, oh); case OFPTYPE_FLOW_MOD: return handle_flow_mod(ofconn, oh); case OFPTYPE_GROUP_MOD: return handle_group_mod(ofconn, oh); case OFPTYPE_TABLE_MOD: return handle_table_mod(ofconn, oh); case OFPTYPE_METER_MOD: return handle_meter_mod(ofconn, oh); case OFPTYPE_BARRIER_REQUEST: return handle_barrier_request(ofconn, oh); case OFPTYPE_ROLE_REQUEST: return handle_role_request(ofconn, oh); /* OpenFlow replies. */ case OFPTYPE_ECHO_REPLY: return 0; /* Nicira extension requests. */ case OFPTYPE_FLOW_MOD_TABLE_ID: return handle_nxt_flow_mod_table_id(ofconn, oh); case OFPTYPE_SET_FLOW_FORMAT: return handle_nxt_set_flow_format(ofconn, oh); case OFPTYPE_SET_PACKET_IN_FORMAT: return handle_nxt_set_packet_in_format(ofconn, oh); case OFPTYPE_SET_CONTROLLER_ID: return handle_nxt_set_controller_id(ofconn, oh); case OFPTYPE_FLOW_AGE: /* Nothing to do. 
*/ return 0; case OFPTYPE_FLOW_MONITOR_CANCEL: return handle_flow_monitor_cancel(ofconn, oh); case OFPTYPE_SET_ASYNC_CONFIG: return handle_nxt_set_async_config(ofconn, oh); case OFPTYPE_GET_ASYNC_REQUEST: return handle_nxt_get_async_request(ofconn, oh); case OFPTYPE_NXT_RESUME: return handle_nxt_resume(ofconn, oh); /* Statistics requests. */ case OFPTYPE_DESC_STATS_REQUEST: return handle_desc_stats_request(ofconn, oh); case OFPTYPE_FLOW_STATS_REQUEST: return handle_flow_stats_request(ofconn, oh); case OFPTYPE_AGGREGATE_STATS_REQUEST: return handle_aggregate_stats_request(ofconn, oh); case OFPTYPE_TABLE_STATS_REQUEST: return handle_table_stats_request(ofconn, oh); case OFPTYPE_TABLE_FEATURES_STATS_REQUEST: return handle_table_features_request(ofconn, oh); case OFPTYPE_TABLE_DESC_REQUEST: return handle_table_desc_request(ofconn, oh); case OFPTYPE_PORT_STATS_REQUEST: return handle_port_stats_request(ofconn, oh); case OFPTYPE_QUEUE_STATS_REQUEST: return handle_queue_stats_request(ofconn, oh); case OFPTYPE_PORT_DESC_STATS_REQUEST: return handle_port_desc_stats_request(ofconn, oh); case OFPTYPE_FLOW_MONITOR_STATS_REQUEST: return handle_flow_monitor_request(ofconn, oh); case OFPTYPE_METER_STATS_REQUEST: case OFPTYPE_METER_CONFIG_STATS_REQUEST: return handle_meter_request(ofconn, oh, type); case OFPTYPE_METER_FEATURES_STATS_REQUEST: return handle_meter_features_request(ofconn, oh); case OFPTYPE_GROUP_STATS_REQUEST: return handle_group_stats_request(ofconn, oh); case OFPTYPE_GROUP_DESC_STATS_REQUEST: return handle_group_desc_stats_request(ofconn, oh); case OFPTYPE_GROUP_FEATURES_STATS_REQUEST: return handle_group_features_stats_request(ofconn, oh); case OFPTYPE_QUEUE_GET_CONFIG_REQUEST: return handle_queue_get_config_request(ofconn, oh); case OFPTYPE_BUNDLE_CONTROL: return handle_bundle_control(ofconn, oh); case OFPTYPE_BUNDLE_ADD_MESSAGE: return handle_bundle_add(ofconn, oh); case OFPTYPE_NXT_TLV_TABLE_MOD: return handle_tlv_table_mod(ofconn, oh); case OFPTYPE_NXT_TLV_TABLE_REQUEST: return handle_tlv_table_request(ofconn, oh); case OFPTYPE_IPFIX_BRIDGE_STATS_REQUEST: return handle_ipfix_bridge_stats_request(ofconn, oh); case OFPTYPE_IPFIX_FLOW_STATS_REQUEST: return handle_ipfix_flow_stats_request(ofconn, oh); case OFPTYPE_CT_FLUSH_ZONE: return handle_nxt_ct_flush_zone(ofconn, oh); case OFPTYPE_HELLO: case OFPTYPE_ERROR: case OFPTYPE_FEATURES_REPLY: case OFPTYPE_GET_CONFIG_REPLY: case OFPTYPE_PACKET_IN: case OFPTYPE_FLOW_REMOVED: case OFPTYPE_PORT_STATUS: case OFPTYPE_BARRIER_REPLY: case OFPTYPE_QUEUE_GET_CONFIG_REPLY: case OFPTYPE_DESC_STATS_REPLY: case OFPTYPE_FLOW_STATS_REPLY: case OFPTYPE_QUEUE_STATS_REPLY: case OFPTYPE_PORT_STATS_REPLY: case OFPTYPE_TABLE_STATS_REPLY: case OFPTYPE_AGGREGATE_STATS_REPLY: case OFPTYPE_PORT_DESC_STATS_REPLY: case OFPTYPE_ROLE_REPLY: case OFPTYPE_FLOW_MONITOR_PAUSED: case OFPTYPE_FLOW_MONITOR_RESUMED: case OFPTYPE_FLOW_MONITOR_STATS_REPLY: case OFPTYPE_GET_ASYNC_REPLY: case OFPTYPE_GROUP_STATS_REPLY: case OFPTYPE_GROUP_DESC_STATS_REPLY: case OFPTYPE_GROUP_FEATURES_STATS_REPLY: case OFPTYPE_METER_STATS_REPLY: case OFPTYPE_METER_CONFIG_STATS_REPLY: case OFPTYPE_METER_FEATURES_STATS_REPLY: case OFPTYPE_TABLE_FEATURES_STATS_REPLY: case OFPTYPE_TABLE_DESC_REPLY: case OFPTYPE_ROLE_STATUS: case OFPTYPE_REQUESTFORWARD: case OFPTYPE_TABLE_STATUS: case OFPTYPE_NXT_TLV_TABLE_REPLY: case OFPTYPE_IPFIX_BRIDGE_STATS_REPLY: case OFPTYPE_IPFIX_FLOW_STATS_REPLY: default: if (ofpmsg_is_stat_request(oh)) { return OFPERR_OFPBRC_BAD_STAT; } else { return OFPERR_OFPBRC_BAD_TYPE; } } } 
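/* Note added for illustration (not in the original file): the multipart check near the top of handle_openflow__() means that a statistics request whose header sets the "more" flag (ofpmp_more() returns true) is answered with OFPERR_OFPBRC_MULTIPART_BUFFER_OVERFLOW instead of being buffered, so only single-message requests ever reach the per-type handlers in the switch above. */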
static void handle_openflow(struct ofconn *ofconn, const struct ofpbuf *ofp_msg) OVS_EXCLUDED(ofproto_mutex) { enum ofperr error = handle_openflow__(ofconn, ofp_msg); if (error) { ofconn_send_error(ofconn, ofp_msg->data, error); } COVERAGE_INC(ofproto_recv_openflow); } static uint64_t pick_datapath_id(const struct ofproto *ofproto) { const struct ofport *port; port = ofproto_get_port(ofproto, OFPP_LOCAL); if (port) { struct eth_addr ea; int error; error = netdev_get_etheraddr(port->netdev, &ea); if (!error) { return eth_addr_to_uint64(ea); } VLOG_WARN("%s: could not get MAC address for %s (%s)", ofproto->name, netdev_get_name(port->netdev), ovs_strerror(error)); } return ofproto->fallback_dpid; } static uint64_t pick_fallback_dpid(void) { struct eth_addr ea; eth_addr_nicira_random(&ea); return eth_addr_to_uint64(ea); } /* Table overflow policy. */ /* Chooses and updates 'rulep' with a rule to evict from 'table'. Sets 'rulep' * to NULL if the table is not configured to evict rules or if the table * contains no evictable rules. (Rules with a readlock on their evict rwlock, * or with no timeouts are not evictable.) */ static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep) OVS_REQUIRES(ofproto_mutex) { struct eviction_group *evg; *rulep = NULL; if (!table->eviction) { return false; } /* In the common case, the outer and inner loops here will each be entered * exactly once: * * - The inner loop normally "return"s in its first iteration. If the * eviction group has any evictable rules, then it always returns in * some iteration. * * - The outer loop only iterates more than once if the largest eviction * group has no evictable rules. * * - The outer loop can exit only if table's 'max_flows' is all filled up * by unevictable rules. */ HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) { struct rule *rule; HEAP_FOR_EACH (rule, evg_node, &evg->rules) { *rulep = rule; return true; } } return false; } /* Eviction groups. */ /* Returns the priority to use for an eviction_group that contains 'n_rules' * rules. The priority contains low-order random bits to ensure that eviction * groups with the same number of rules are prioritized randomly. */ static uint32_t eviction_group_priority(size_t n_rules) { uint16_t size = MIN(UINT16_MAX, n_rules); return (size << 16) | random_uint16(); } /* Updates 'evg', an eviction_group within 'table', following a change that * adds or removes rules in 'evg'. */ static void eviction_group_resized(struct oftable *table, struct eviction_group *evg) OVS_REQUIRES(ofproto_mutex) { heap_change(&table->eviction_groups_by_size, &evg->size_node, eviction_group_priority(heap_count(&evg->rules))); } /* Destroys 'evg', an eviction_group within 'table': * * - Removes all the rules, if any, from 'evg'. (It doesn't destroy the * rules themselves, just removes them from the eviction group.) * * - Removes 'evg' from 'table'. * * - Frees 'evg'. */ static void eviction_group_destroy(struct oftable *table, struct eviction_group *evg) OVS_REQUIRES(ofproto_mutex) { while (!heap_is_empty(&evg->rules)) { struct rule *rule; rule = CONTAINER_OF(heap_pop(&evg->rules), struct rule, evg_node); rule->eviction_group = NULL; } hmap_remove(&table->eviction_groups_by_id, &evg->id_node); heap_remove(&table->eviction_groups_by_size, &evg->size_node); heap_destroy(&evg->rules); free(evg); } /* Removes 'rule' from its eviction group, if any. 
*/ static void eviction_group_remove_rule(struct rule *rule) OVS_REQUIRES(ofproto_mutex) { if (rule->eviction_group) { struct oftable *table = &rule->ofproto->tables[rule->table_id]; struct eviction_group *evg = rule->eviction_group; rule->eviction_group = NULL; heap_remove(&evg->rules, &rule->evg_node); if (heap_is_empty(&evg->rules)) { eviction_group_destroy(table, evg); } else { eviction_group_resized(table, evg); } } } /* Hashes the 'rule''s values for the eviction_fields of 'rule''s table, and * returns the hash value. */ static uint32_t eviction_group_hash_rule(struct rule *rule) OVS_REQUIRES(ofproto_mutex) { struct oftable *table = &rule->ofproto->tables[rule->table_id]; const struct mf_subfield *sf; struct flow flow; uint32_t hash; hash = table->eviction_group_id_basis; miniflow_expand(rule->cr.match.flow, &flow); for (sf = table->eviction_fields; sf < &table->eviction_fields[table->n_eviction_fields]; sf++) { if (mf_are_prereqs_ok(sf->field, &flow, NULL)) { union mf_value value; mf_get_value(sf->field, &flow, &value); if (sf->ofs) { bitwise_zero(&value, sf->field->n_bytes, 0, sf->ofs); } if (sf->ofs + sf->n_bits < sf->field->n_bytes * 8) { unsigned int start = sf->ofs + sf->n_bits; bitwise_zero(&value, sf->field->n_bytes, start, sf->field->n_bytes * 8 - start); } hash = hash_bytes(&value, sf->field->n_bytes, hash); } else { hash = hash_int(hash, 0); } } return hash; } /* Returns an eviction group within 'table' with the given 'id', creating one * if necessary. */ static struct eviction_group * eviction_group_find(struct oftable *table, uint32_t id) OVS_REQUIRES(ofproto_mutex) { struct eviction_group *evg; HMAP_FOR_EACH_WITH_HASH (evg, id_node, id, &table->eviction_groups_by_id) { return evg; } evg = xmalloc(sizeof *evg); hmap_insert(&table->eviction_groups_by_id, &evg->id_node, id); heap_insert(&table->eviction_groups_by_size, &evg->size_node, eviction_group_priority(0)); heap_init(&evg->rules); return evg; } /* Returns an eviction priority for 'rule'. The return value should be * interpreted so that higher priorities make a rule a more attractive * candidate for eviction. */ static uint64_t rule_eviction_priority(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { /* Calculate absolute time when this flow will expire. If it will never * expire, then return 0 to make it unevictable. */ long long int expiration = LLONG_MAX; if (rule->hard_timeout) { /* 'modified' needs protection even when we hold 'ofproto_mutex'. */ ovs_mutex_lock(&rule->mutex); long long int modified = rule->modified; ovs_mutex_unlock(&rule->mutex); expiration = modified + rule->hard_timeout * 1000; } if (rule->idle_timeout) { uint64_t packets, bytes; long long int used; long long int idle_expiration; ofproto->ofproto_class->rule_get_stats(rule, &packets, &bytes, &used); idle_expiration = used + rule->idle_timeout * 1000; expiration = MIN(expiration, idle_expiration); } if (expiration == LLONG_MAX) { return 0; } /* Calculate the time of expiration as a number of (approximate) seconds * after program startup. * * This should work OK for program runs that last UINT32_MAX seconds or * less. Therefore, please restart OVS at least once every 136 years. */ uint32_t expiration_ofs = (expiration >> 10) - (time_boot_msec() >> 10); /* Combine expiration time with OpenFlow "importance" to form a single * priority value. 
We want flows with relatively low "importance" to be * evicted before even considering expiration time, so put "importance" in * the most significant bits and expiration time in the least significant * bits. * * Small 'priority' should be evicted before those with large 'priority'. * The caller expects the opposite convention (a large return value being * more attractive for eviction) so we invert it before returning. */ uint64_t priority = ((uint64_t) rule->importance << 32) + expiration_ofs; return UINT64_MAX - priority; } /* Adds 'rule' to an appropriate eviction group for its oftable's * configuration. Does nothing if 'rule''s oftable doesn't have eviction * enabled, or if 'rule' is a permanent rule (one that will never expire on its * own). * * The caller must ensure that 'rule' is not already in an eviction group. */ static void eviction_group_add_rule(struct rule *rule) OVS_REQUIRES(ofproto_mutex) { struct ofproto *ofproto = rule->ofproto; struct oftable *table = &ofproto->tables[rule->table_id]; bool has_timeout; /* Timeouts may be modified only when holding 'ofproto_mutex'. We have it * so no additional protection is needed. */ has_timeout = rule->hard_timeout || rule->idle_timeout; if (table->eviction && has_timeout) { struct eviction_group *evg; evg = eviction_group_find(table, eviction_group_hash_rule(rule)); rule->eviction_group = evg; heap_insert(&evg->rules, &rule->evg_node, rule_eviction_priority(ofproto, rule)); eviction_group_resized(table, evg); } } /* oftables. */ /* Initializes 'table'. */ static void oftable_init(struct oftable *table) { memset(table, 0, sizeof *table); classifier_init(&table->cls, flow_segment_u64s); table->max_flows = UINT_MAX; table->n_flows = 0; hmap_init(&table->eviction_groups_by_id); heap_init(&table->eviction_groups_by_size); atomic_init(&table->miss_config, OFPUTIL_TABLE_MISS_DEFAULT); classifier_set_prefix_fields(&table->cls, default_prefix_fields, ARRAY_SIZE(default_prefix_fields)); atomic_init(&table->n_matched, 0); atomic_init(&table->n_missed, 0); } /* Destroys 'table', including its classifier and eviction groups. * * The caller is responsible for freeing 'table' itself. */ static void oftable_destroy(struct oftable *table) { ovs_assert(classifier_is_empty(&table->cls)); ovs_mutex_lock(&ofproto_mutex); oftable_configure_eviction(table, 0, NULL, 0); ovs_mutex_unlock(&ofproto_mutex); hmap_destroy(&table->eviction_groups_by_id); heap_destroy(&table->eviction_groups_by_size); classifier_destroy(&table->cls); free(table->name); } /* Changes the name of 'table' to 'name'. If 'name' is NULL or the empty * string, then 'table' will use its default name. * * This only affects the name exposed for a table exposed through the OpenFlow * OFPST_TABLE (as printed by "ovs-ofctl dump-tables"). */ static void oftable_set_name(struct oftable *table, const char *name) { if (name && name[0]) { int len = strnlen(name, OFP_MAX_TABLE_NAME_LEN); if (!table->name || strncmp(name, table->name, len)) { free(table->name); table->name = xmemdup0(name, len); } } else { free(table->name); table->name = NULL; } } /* oftables support a choice of two policies when adding a rule would cause the * number of flows in the table to exceed the configured maximum number: either * they can refuse to add the new flow or they can evict some existing flow. * This function configures the latter policy on 'table', with fairness based * on the values of the 'n_fields' fields specified in 'fields'. (Specifying * 'n_fields' as 0 disables fairness.) 
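 * * Sketch added for illustration (not original text; treat it as an assumption-laden example rather than a drop-in call): balancing eviction across input ports could pass a single subfield, e.g. struct mf_subfield f = { .field = mf_from_id(MFF_IN_PORT), .ofs = 0, .n_bits = 16 }; oftable_configure_eviction(table, EVICTION_OPENFLOW, &f, 1);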
*/ static void oftable_configure_eviction(struct oftable *table, unsigned int eviction, const struct mf_subfield *fields, size_t n_fields) OVS_REQUIRES(ofproto_mutex) { struct rule *rule; if ((table->eviction != 0) == (eviction != 0) && n_fields == table->n_eviction_fields && (!n_fields || !memcmp(fields, table->eviction_fields, n_fields * sizeof *fields))) { /* The set of eviction fields did not change. If 'eviction' changed, * it remains nonzero, so that we can just update table->eviction * without fussing with the eviction groups. */ table->eviction = eviction; return; } /* Destroy existing eviction groups, then destroy and recreate data * structures to recover memory. */ struct eviction_group *evg, *next; HMAP_FOR_EACH_SAFE (evg, next, id_node, &table->eviction_groups_by_id) { eviction_group_destroy(table, evg); } hmap_destroy(&table->eviction_groups_by_id); hmap_init(&table->eviction_groups_by_id); heap_destroy(&table->eviction_groups_by_size); heap_init(&table->eviction_groups_by_size); /* Replace eviction groups by the new ones, if there is a change. Free the * old fields only after allocating the new ones, because 'fields == * table->eviction_fields' is possible. */ struct mf_subfield *old_fields = table->eviction_fields; table->n_eviction_fields = n_fields; table->eviction_fields = (fields ? xmemdup(fields, n_fields * sizeof *fields) : NULL); free(old_fields); /* Add the new eviction groups, if enabled. */ table->eviction = eviction; if (table->eviction) { table->eviction_group_id_basis = random_uint32(); CLS_FOR_EACH (rule, cr, &table->cls) { eviction_group_add_rule(rule); } } } /* Inserts 'rule' into the ofproto data structures BEFORE caller has inserted * it to the classifier. */ static void ofproto_rule_insert__(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { const struct rule_actions *actions = rule_get_actions(rule); /* A rule may not be reinserted. */ ovs_assert(rule->state == RULE_INITIALIZED); if (rule->hard_timeout || rule->idle_timeout) { ovs_list_insert(&ofproto->expirable, &rule->expirable); } cookies_insert(ofproto, rule); eviction_group_add_rule(rule); if (actions->has_meter) { meter_insert_rule(rule); } if (actions->has_groups) { const struct ofpact_group *a; OFPACT_FOR_EACH_TYPE_FLATTENED (a, GROUP, actions->ofpacts, actions->ofpacts_len) { struct ofgroup *group; group = ofproto_group_lookup(ofproto, a->group_id, OVS_VERSION_MAX, false); ovs_assert(group != NULL); group_add_rule(group, rule); } } rule->state = RULE_INSERTED; } /* Removes 'rule' from the ofproto data structures. Caller may have deferred * the removal from the classifier. */ static void ofproto_rule_remove__(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { ovs_assert(rule->state == RULE_INSERTED); cookies_remove(ofproto, rule); eviction_group_remove_rule(rule); if (!ovs_list_is_empty(&rule->expirable)) { ovs_list_remove(&rule->expirable); } if (!ovs_list_is_empty(&rule->meter_list_node)) { ovs_list_remove(&rule->meter_list_node); ovs_list_init(&rule->meter_list_node); } /* Remove the rule from any groups, except from the group that is being * deleted, if any. 
*/ const struct rule_actions *actions = rule_get_actions(rule); if (actions->has_groups) { const struct ofpact_group *a; OFPACT_FOR_EACH_TYPE_FLATTENED(a, GROUP, actions->ofpacts, actions->ofpacts_len) { struct ofgroup *group; group = ofproto_group_lookup(ofproto, a->group_id, OVS_VERSION_MAX, false); ovs_assert(group); /* Leave the rule for the group that is being deleted, if any, * as we still need the list of rules for clean-up. */ if (!group->being_deleted) { group_remove_rule(group, rule); } } } rule->state = RULE_REMOVED; } /* unixctl commands. */ struct ofproto * ofproto_lookup(const char *name) { struct ofproto *ofproto; HMAP_FOR_EACH_WITH_HASH (ofproto, hmap_node, hash_string(name, 0), &all_ofprotos) { if (!strcmp(ofproto->name, name)) { return ofproto; } } return NULL; } static void ofproto_unixctl_list(struct unixctl_conn *conn, int argc OVS_UNUSED, const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED) { struct ofproto *ofproto; struct ds results; ds_init(&results); HMAP_FOR_EACH (ofproto, hmap_node, &all_ofprotos) { ds_put_format(&results, "%s\n", ofproto->name); } unixctl_command_reply(conn, ds_cstr(&results)); ds_destroy(&results); } static void ofproto_unixctl_init(void) { static bool registered; if (registered) { return; } registered = true; unixctl_command_register("ofproto/list", "", 0, 0, ofproto_unixctl_list, NULL); }
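/* Worked example appended for illustration (not part of the original file): in rule_eviction_priority() above, a rule with importance 0 whose expiration lands about 2048 ms after boot gets expiration_ofs == 2, priority == ((uint64_t) 0 << 32) + 2, and a return value of UINT64_MAX - 2, i.e. close to the maximum and therefore a prime eviction candidate; a rule with importance 1 gets priority 2^32 + expiration_ofs and a much smaller return value, so it outlives every importance-0 rule regardless of timeouts. */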
./CrossVul/dataset_final_sorted/CWE-617/c/bad_390_0
crossvul-cpp_data_bad_1771_1
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* We also support FDP which is very similar to CDPv1 */ #include "lldpd.h" #include "frame.h" #if defined (ENABLE_CDP) || defined (ENABLE_FDP) #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> #include <assert.h> static int cdp_send(struct lldpd *global, struct lldpd_hardware *hardware, int version) { const char *platform = "Unknown"; struct lldpd_chassis *chassis; struct lldpd_mgmt *mgmt; struct lldpd_port *port; u_int8_t mcastaddr[] = CDP_MULTICAST_ADDR; u_int8_t llcorg[] = LLC_ORG_CISCO; #ifdef ENABLE_FDP char *capstr; #endif u_int16_t checksum; int length, i; u_int32_t cap; u_int8_t *packet; u_int8_t *pos, *pos_len_eh, *pos_llc, *pos_cdp, *pos_checksum, *tlv, *end; log_debug("cdp", "send CDP frame on %s", hardware->h_ifname); port = &(hardware->h_lport); chassis = port->p_chassis; #ifdef ENABLE_FDP if (version == 0) { /* With FDP, change multicast address and LLC PID */ const u_int8_t fdpmcastaddr[] = FDP_MULTICAST_ADDR; const u_int8_t fdpllcorg[] = LLC_ORG_FOUNDRY; memcpy(mcastaddr, fdpmcastaddr, sizeof(mcastaddr)); memcpy(llcorg, fdpllcorg, sizeof(llcorg)); } #endif length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && POKE_SAVE(pos_len_eh) && /* We compute the len later */ POKE_UINT16(0))) goto toobig; /* LLC */ if (!( POKE_SAVE(pos_llc) && POKE_UINT8(0xaa) && /* SSAP */ POKE_UINT8(0xaa) && /* DSAP */ POKE_UINT8(0x03) && /* Control field */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_UINT16(LLC_PID_CDP))) goto toobig; /* CDP header */ if (!( POKE_SAVE(pos_cdp) && POKE_UINT8((version == 0)?1:version) && POKE_UINT8(chassis->c_ttl) && POKE_SAVE(pos_checksum) && /* Save checksum position */ POKE_UINT16(0))) goto toobig; /* Chassis ID */ if (!( POKE_START_CDP_TLV(CDP_TLV_CHASSIS) && (chassis->c_name? POKE_BYTES(chassis->c_name, strlen(chassis->c_name)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Addresses */ /* See: * http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#xtocid12 * * It seems that Cisco implies that CDP supports IPv6 using * 802.2 address format with 0xAAAA03 0x000000 0x0800, but * 0x0800 is the Ethernet protocol type for IPv4. Therefore, * we support only IPv4. 
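 * * Byte-level illustration (added; not original text): for a single IPv4 management address the loop below emits 01 (protocol type: NLPID), 01 (protocol length), CC (CDP_ADDRESS_PROTO_IP), 00 04 (address length), then the four address bytes, e.g. c0 a8 01 01 for 192.168.1.1.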
*/ i = 0; TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) if (mgmt->m_family == LLDPD_AF_IPV4) i++; if (i > 0) { if (!( POKE_START_CDP_TLV(CDP_TLV_ADDRESSES) && POKE_UINT32(i))) goto toobig; TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { switch (mgmt->m_family) { case LLDPD_AF_IPV4: if (!( POKE_UINT8(1) && /* Type: NLPID */ POKE_UINT8(1) && /* Length: 1 */ POKE_UINT8(CDP_ADDRESS_PROTO_IP) && /* IP */ POKE_UINT16(sizeof(struct in_addr)) && /* Address length */ POKE_BYTES(&mgmt->m_addr, sizeof(struct in_addr)))) goto toobig; break; } } if (!(POKE_END_CDP_TLV)) goto toobig; } /* Port ID */ if (!( POKE_START_CDP_TLV(CDP_TLV_PORT) && (hardware->h_lport.p_descr? POKE_BYTES(hardware->h_lport.p_descr, strlen(hardware->h_lport.p_descr)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Capabilities */ if (version != 0) { cap = 0; if (chassis->c_cap_enabled & LLDP_CAP_ROUTER) cap |= CDP_CAP_ROUTER; if (chassis->c_cap_enabled & LLDP_CAP_BRIDGE) cap |= CDP_CAP_SWITCH; cap |= CDP_CAP_HOST; if (!( POKE_START_CDP_TLV(CDP_TLV_CAPABILITIES) && POKE_UINT32(cap) && POKE_END_CDP_TLV)) goto toobig; #ifdef ENABLE_FDP } else { /* With FDP, it seems that a string is used in place of an int */ if (chassis->c_cap_enabled & LLDP_CAP_ROUTER) capstr = "Router"; else if (chassis->c_cap_enabled & LLDP_CAP_BRIDGE) capstr = "Switch"; else if (chassis->c_cap_enabled & LLDP_CAP_REPEATER) capstr = "Bridge"; else capstr = "Host"; if (!( POKE_START_CDP_TLV(CDP_TLV_CAPABILITIES) && POKE_BYTES(capstr, strlen(capstr)) && POKE_END_CDP_TLV)) goto toobig; #endif } /* Native VLAN */ #ifdef ENABLE_DOT1 if (version >=2 && hardware->h_lport.p_pvid != 0) { if (!( POKE_START_CDP_TLV(CDP_TLV_NATIVEVLAN) && POKE_UINT16(hardware->h_lport.p_pvid) && POKE_END_CDP_TLV)) goto toobig; } #endif /* Software version */ if (!( POKE_START_CDP_TLV(CDP_TLV_SOFTWARE) && (chassis->c_descr? POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Platform */ if (global && global->g_config.c_platform) platform = global->g_config.c_platform; if (!( POKE_START_CDP_TLV(CDP_TLV_PLATFORM) && POKE_BYTES(platform, strlen(platform)) && POKE_END_CDP_TLV)) goto toobig; #ifdef ENABLE_LLDPMED /* Power use */ if ((version >= 2) && port->p_med_cap_enabled && (port->p_med_power.source != LLDP_MED_POW_SOURCE_LOCAL) && (port->p_med_power.val > 0) && (port->p_med_power.val <= 655)) { if (!( POKE_START_CDP_TLV(CDP_TLV_POWER_CONSUMPTION) && POKE_UINT16(port->p_med_power.val * 100) && POKE_END_CDP_TLV)) goto toobig; } #endif (void)POKE_SAVE(end); /* Compute len and checksum */ POKE_RESTORE(pos_len_eh); if (!(POKE_UINT16(end - pos_llc))) goto toobig; checksum = frame_checksum(pos_cdp, end - pos_cdp, (version != 0) ? 
1 : 0); POKE_RESTORE(pos_checksum); if (!(POKE_UINT16(checksum))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("cdp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } hardware->h_tx_cnt++; free(packet); return 0; toobig: free(packet); return -1; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_len < (x)) { \ log_warnx("cdp", name " CDP/FDP TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) /* cdp_decode also decodes FDP */ int cdp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; struct lldpd_mgmt *mgmt; struct in_addr addr; #if 0 u_int16_t cksum; #endif u_int8_t *software = NULL, *platform = NULL; int software_len = 0, platform_len = 0, proto, version, nb, caps; const unsigned char cdpaddr[] = CDP_MULTICAST_ADDR; #ifdef ENABLE_FDP const unsigned char fdpaddr[] = CDP_MULTICAST_ADDR; int fdp = 0; #endif u_int8_t *pos, *tlv, *pos_address, *pos_next_address; int length, len_eth, tlv_type, tlv_len, addresses_len, address_len; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan; #endif log_debug("cdp", "decode CDP frame received on %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("cdp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("cdp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) /* Ethernet */ + 8 /* LLC */ + 4 /* CDP header */) { log_warn("cdp", "too short CDP/FDP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(cdpaddr, sizeof(cdpaddr)) != 0) { #ifdef ENABLE_FDP PEEK_RESTORE((u_int8_t*)frame); if (PEEK_CMP(fdpaddr, sizeof(fdpaddr)) != 0) fdp = 1; else { #endif log_info("cdp", "frame not targeted at CDP/FDP multicast address received on %s", hardware->h_ifname); goto malformed; #ifdef ENABLE_FDP } #endif } PEEK_DISCARD(ETHER_ADDR_LEN); /* Don't care of source address */ len_eth = PEEK_UINT16; if (len_eth > length) { log_warnx("cdp", "incorrect 802.3 frame size reported on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(6); /* Skip beginning of LLC */ proto = PEEK_UINT16; if (proto != LLC_PID_CDP) { if ((proto != LLC_PID_DRIP) && (proto != LLC_PID_PAGP) && (proto != LLC_PID_PVSTP) && (proto != LLC_PID_UDLD) && (proto != LLC_PID_VTP) && (proto != LLC_PID_DTP) && (proto != LLC_PID_STP)) log_debug("cdp", "incorrect LLC protocol ID received on %s", hardware->h_ifname); goto malformed; } #if 0 /* Check checksum */ cksum = frame_checksum(pos, len_eth - 8, #ifdef ENABLE_FDP !fdp /* fdp = 0 -> cisco checksum */ #else 1 /* cisco checksum */ #endif ); if (cksum != 0) { log_info("cdp", "incorrect CDP/FDP checksum for frame received on %s (%d)", hardware->h_ifname, cksum); goto malformed; } #endif /* Check version */ version = PEEK_UINT8; if ((version != 1) && (version != 2)) { log_warnx("cdp", "incorrect CDP/FDP version (%d) for frame received on %s", version, hardware->h_ifname); goto malformed; } chassis->c_ttl = PEEK_UINT8; /* TTL */ PEEK_DISCARD_UINT16; /* Checksum, already checked */ while (length) { if (length < 4) { log_warnx("cdp", "CDP/FDP TLV header is too 
large for " "frame received on %s", hardware->h_ifname); goto malformed; } tlv_type = PEEK_UINT16; tlv_len = PEEK_UINT16 - 4; (void)PEEK_SAVE(tlv); if ((tlv_len < 0) || (length < tlv_len)) { log_warnx("cdp", "incorrect size in CDP/FDP TLV header for frame " "received on %s", hardware->h_ifname); goto malformed; } switch (tlv_type) { case CDP_TLV_CHASSIS: if ((chassis->c_name = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis name"); goto malformed; } PEEK_BYTES(chassis->c_name, tlv_len); chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LOCAL; if ((chassis->c_id = (char *)malloc(tlv_len)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis ID"); goto malformed; } memcpy(chassis->c_id, chassis->c_name, tlv_len); chassis->c_id_len = tlv_len; break; case CDP_TLV_ADDRESSES: CHECK_TLV_SIZE(4, "Address"); addresses_len = tlv_len - 4; for (nb = PEEK_UINT32; nb > 0; nb--) { (void)PEEK_SAVE(pos_address); /* We first try to get the real length of the packet */ if (addresses_len < 2) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD_UINT8; addresses_len--; address_len = PEEK_UINT8; addresses_len--; if (addresses_len < address_len + 2) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(address_len); addresses_len -= address_len; address_len = PEEK_UINT16; addresses_len -= 2; if (addresses_len < address_len) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(address_len); (void)PEEK_SAVE(pos_next_address); /* Next, we go back and try to extract IPv4 address */ PEEK_RESTORE(pos_address); if ((PEEK_UINT8 == 1) && (PEEK_UINT8 == 1) && (PEEK_UINT8 == CDP_ADDRESS_PROTO_IP) && (PEEK_UINT16 == sizeof(struct in_addr))) { PEEK_BYTES(&addr, sizeof(struct in_addr)); mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &addr, sizeof(struct in_addr), 0); if (mgmt == NULL) { assert(errno == ENOMEM); log_warn("cdp", "unable to allocate memory for management address"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); } /* Go to the end of the address */ PEEK_RESTORE(pos_next_address); } break; case CDP_TLV_PORT: if (tlv_len == 0) { log_warn("cdp", "too short port description received"); goto malformed; } if ((port->p_descr = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for port description"); goto malformed; } PEEK_BYTES(port->p_descr, tlv_len); port->p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME; if ((port->p_id = (char *)calloc(1, tlv_len)) == NULL) { log_warn("cdp", "unable to allocate memory for port ID"); goto malformed; } memcpy(port->p_id, port->p_descr, tlv_len); port->p_id_len = tlv_len; break; case CDP_TLV_CAPABILITIES: #ifdef ENABLE_FDP if (fdp) { /* Capabilities are string with FDP */ if (!strncmp("Router", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_ROUTER; else if (!strncmp("Switch", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_BRIDGE; else if (!strncmp("Bridge", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_REPEATER; else chassis->c_cap_enabled = LLDP_CAP_STATION; chassis->c_cap_available = chassis->c_cap_enabled; break; } #endif CHECK_TLV_SIZE(4, "Capabilities"); caps = PEEK_UINT32; if (caps & CDP_CAP_ROUTER) chassis->c_cap_enabled |= LLDP_CAP_ROUTER; if (caps & 0x0e) chassis->c_cap_enabled |= LLDP_CAP_BRIDGE; if (chassis->c_cap_enabled == 0) chassis->c_cap_enabled = 
LLDP_CAP_STATION; chassis->c_cap_available = chassis->c_cap_enabled; break; case CDP_TLV_SOFTWARE: software_len = tlv_len; (void)PEEK_SAVE(software); break; case CDP_TLV_PLATFORM: platform_len = tlv_len; (void)PEEK_SAVE(platform); break; #ifdef ENABLE_DOT1 case CDP_TLV_NATIVEVLAN: CHECK_TLV_SIZE(2, "Native VLAN"); if ((vlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("cdp", "unable to alloc vlan " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } vlan->v_vid = port->p_pvid = PEEK_UINT16; if (asprintf(&vlan->v_name, "VLAN #%d", vlan->v_vid) == -1) { log_warn("cdp", "unable to alloc VLAN name for " "TLV received on %s", hardware->h_ifname); free(vlan); goto malformed; } TAILQ_INSERT_TAIL(&port->p_vlans, vlan, v_entries); break; #endif default: log_debug("cdp", "unknown CDP/FDP TLV type (%d) received on %s", ntohs(tlv_type), hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } PEEK_DISCARD(tlv + tlv_len - pos); } if (!software && platform) { if ((chassis->c_descr = (char *)calloc(1, platform_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, platform, platform_len); } else if (software && !platform) { if ((chassis->c_descr = (char *)calloc(1, software_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, software, software_len); } else if (software && platform) { #define CONCAT_PLATFORM " running on\n" if ((chassis->c_descr = (char *)calloc(1, software_len + platform_len + strlen(CONCAT_PLATFORM) + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, platform, platform_len); memcpy(chassis->c_descr + platform_len, CONCAT_PLATFORM, strlen(CONCAT_PLATFORM)); memcpy(chassis->c_descr + platform_len + strlen(CONCAT_PLATFORM), software, software_len); } if ((chassis->c_id == NULL) || (port->p_id == NULL) || (chassis->c_name == NULL) || (chassis->c_descr == NULL) || (port->p_descr == NULL) || (chassis->c_ttl == 0) || (chassis->c_cap_enabled == 0)) { log_warnx("cdp", "some mandatory CDP/FDP tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #ifdef ENABLE_CDP int cdpv1_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 1); } int cdpv2_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 2); } #endif #ifdef ENABLE_FDP int fdp_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 0); } #endif #ifdef ENABLE_CDP static int cdp_guess(char *pos, int length, int version) { const u_int8_t mcastaddr[] = CDP_MULTICAST_ADDR; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) /* Ethernet */ + 8 /* LLC */ + 4 /* CDP header */) return 0; if (PEEK_CMP(mcastaddr, ETHER_ADDR_LEN) != 0) return 0; PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; /* Ethernet */ PEEK_DISCARD(8); /* LLC */ return (PEEK_UINT8 == version); } int cdpv1_guess(char *frame, int len) { return cdp_guess(frame, len, 1); } int cdpv2_guess(char *frame, int len) { return cdp_guess(frame, len, 2); } #endif #endif /* defined (ENABLE_CDP) || defined (ENABLE_FDP) */
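/* Sketch appended for illustration (hypothetical TLV, not part of the original file): cdp_decode() guards every fixed-size field with CHECK_TLV_SIZE before PEEK-ing, so a new handler would follow the same pattern: case CDP_TLV_SOMETHING: CHECK_TLV_SIZE(6, "Something"); value32 = PEEK_UINT32; value16 = PEEK_UINT16; break; a truncated frame then jumps to the 'malformed' cleanup path instead of reading past the received buffer. */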
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % M M AAA TTTTT L AAA BBBB % % MM MM A A T L A A B B % % M M M AAAAA T L AAAAA BBBB % % M M A A T L A A B B % % M M A A T LLLLL A A BBBB % % % % % % Read MATLAB Image Format % % % % Software Design % % Jaroslav Fojtik % % 2001-2008 % % % % % % Permission is hereby granted, free of charge, to any person obtaining a % % copy of this software and associated documentation files ("ImageMagick"), % % to deal in ImageMagick without restriction, including without limitation % % the rights to use, copy, modify, merge, publish, distribute, sublicense, % % and/or sell copies of ImageMagick, and to permit persons to whom the % % ImageMagick is furnished to do so, subject to the following conditions: % % % % The above copyright notice and this permission notice shall be included in % % all copies or substantial portions of ImageMagick. % % % % The software is provided "as is", without warranty of any kind, express or % % implied, including but not limited to the warranties of merchantability, % % fitness for a particular purpose and noninfringement. In no event shall % % ImageMagick Studio be liable for any claim, damages or other liability, % % whether in an action of contract, tort or otherwise, arising from, out of % % or in connection with ImageMagick or the use or other dealings in % % ImageMagick. % % % % Except as contained in this notice, the name of the ImageMagick Studio % % shall not be used in advertising or otherwise to promote the sale, use or % % other dealings in ImageMagick without prior written authorization from the % % ImageMagick Studio. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace-private.h" #include "magick/distort.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/module.h" #include "magick/transform.h" #include "magick/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Forward declaration. 
*/ static MagickBooleanType WriteMATImage(const ImageInfo *,Image *); /* Auto coloring method, sorry this creates some artefact inside data MinReal+j*MaxComplex = red MaxReal+j*MaxComplex = black MinReal+j*0 = white MaxReal+j*0 = black MinReal+j*MinComplex = blue MaxReal+j*MinComplex = black */ typedef struct { char identific[124]; unsigned short Version; char EndianIndicator[2]; unsigned long DataType; unsigned int ObjectSize; unsigned long unknown1; unsigned long unknown2; unsigned short unknown5; unsigned char StructureFlag; unsigned char StructureClass; unsigned long unknown3; unsigned long unknown4; unsigned long DimFlag; unsigned long SizeX; unsigned long SizeY; unsigned short Flag1; unsigned short NameFlag; } MATHeader; static const char *MonthsTab[12]={"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"}; static const char *DayOfWTab[7]={"Sun","Mon","Tue","Wed","Thu","Fri","Sat"}; static const char *OsDesc= #if defined(MAGICKCORE_WINDOWS_SUPPORT) "PCWIN"; #else #ifdef __APPLE__ "MAC"; #else "LNX86"; #endif #endif typedef enum { miINT8 = 1, /* 8 bit signed */ miUINT8, /* 8 bit unsigned */ miINT16, /* 16 bit signed */ miUINT16, /* 16 bit unsigned */ miINT32, /* 32 bit signed */ miUINT32, /* 32 bit unsigned */ miSINGLE, /* IEEE 754 single precision float */ miRESERVE1, miDOUBLE, /* IEEE 754 double precision float */ miRESERVE2, miRESERVE3, miINT64, /* 64 bit signed */ miUINT64, /* 64 bit unsigned */ miMATRIX, /* MATLAB array */ miCOMPRESSED, /* Compressed Data */ miUTF8, /* Unicode UTF-8 Encoded Character Data */ miUTF16, /* Unicode UTF-16 Encoded Character Data */ miUTF32 /* Unicode UTF-32 Encoded Character Data */ } mat5_data_type; typedef enum { mxCELL_CLASS=1, /* cell array */ mxSTRUCT_CLASS, /* structure */ mxOBJECT_CLASS, /* object */ mxCHAR_CLASS, /* character array */ mxSPARSE_CLASS, /* sparse array */ mxDOUBLE_CLASS, /* double precision array */ mxSINGLE_CLASS, /* single precision floating point */ mxINT8_CLASS, /* 8 bit signed integer */ mxUINT8_CLASS, /* 8 bit unsigned integer */ mxINT16_CLASS, /* 16 bit signed integer */ mxUINT16_CLASS, /* 16 bit unsigned integer */ mxINT32_CLASS, /* 32 bit signed integer */ mxUINT32_CLASS, /* 32 bit unsigned integer */ mxINT64_CLASS, /* 64 bit signed integer */ mxUINT64_CLASS, /* 64 bit unsigned integer */ mxFUNCTION_CLASS /* Function handle */ } arrayclasstype; #define FLAG_COMPLEX 0x8 #define FLAG_GLOBAL 0x4 #define FLAG_LOGICAL 0x2 static const QuantumType z2qtype[4] = {GrayQuantum, BlueQuantum, GreenQuantum, RedQuantum}; static void InsertComplexDoubleRow(double *p, int y, Image * image, double MinVal, double MaxVal) { ExceptionInfo *exception; double f; int x; register PixelPacket *q; if (MinVal == 0) MinVal = -1; if (MaxVal == 0) MaxVal = 1; exception=(&image->exception); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) return; for (x = 0; x < (ssize_t) image->columns; x++) { if (*p > 0) { f = (*p / MaxVal) * (QuantumRange - GetPixelRed(q)); if (f + GetPixelRed(q) > QuantumRange) SetPixelRed(q,QuantumRange); else SetPixelRed(q,GetPixelRed(q)+(int) f); if ((int) f / 2.0 > GetPixelGreen(q)) { SetPixelGreen(q,0); SetPixelBlue(q,0); } else { SetPixelBlue(q,GetPixelBlue(q)-(int) (f/2.0)); SetPixelGreen(q,GetPixelBlue(q)); } } if (*p < 0) { f = (*p / MaxVal) * (QuantumRange - GetPixelBlue(q)); if (f + GetPixelBlue(q) > QuantumRange) SetPixelBlue(q,QuantumRange); else SetPixelBlue(q,GetPixelBlue(q)+(int) f); if ((int) f / 2.0 > q->green) { SetPixelRed(q,0); SetPixelGreen(q,0); 
} else { SetPixelRed(q,GetPixelRed(q)-(int) (f/2.0)); SetPixelGreen(q,GetPixelRed(q)); } } p++; q++; } if (!SyncAuthenticPixels(image,exception)) return; return; } static void InsertComplexFloatRow(float *p, int y, Image * image, double MinVal, double MaxVal) { ExceptionInfo *exception; double f; int x; register PixelPacket *q; if (MinVal == 0) MinVal = -1; if (MaxVal == 0) MaxVal = 1; exception=(&image->exception); q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (PixelPacket *) NULL) return; for (x = 0; x < (ssize_t) image->columns; x++) { if (*p > 0) { f = (*p / MaxVal) * (QuantumRange - GetPixelRed(q)); if (f + GetPixelRed(q) > QuantumRange) SetPixelRed(q,QuantumRange); else SetPixelRed(q,GetPixelRed(q)+(int) f); if ((int) f / 2.0 > GetPixelGreen(q)) { SetPixelGreen(q,0); SetPixelBlue(q,0); } else { SetPixelBlue(q,GetPixelBlue(q)-(int) (f/2.0)); SetPixelGreen(q,GetPixelBlue(q)); } } if (*p < 0) { f = (*p / MaxVal) * (QuantumRange - GetPixelBlue(q)); if (f + GetPixelBlue(q) > QuantumRange) SetPixelBlue(q,QuantumRange); else SetPixelBlue(q,GetPixelBlue(q)+(int) f); if ((int) f / 2.0 > q->green) { SetPixelGreen(q,0); SetPixelRed(q,0); } else { SetPixelRed(q,GetPixelRed(q)-(int) (f/2.0)); SetPixelGreen(q,GetPixelRed(q)); } } p++; q++; } if (!SyncAuthenticPixels(image,exception)) return; return; } /************** READERS ******************/ /* This function reads one block of floats*/ static void ReadBlobFloatsLSB(Image * image, size_t len, float *data) { while (len >= 4) { *data++ = ReadBlobFloat(image); len -= sizeof(float); } if (len > 0) (void) SeekBlob(image, len, SEEK_CUR); } static void ReadBlobFloatsMSB(Image * image, size_t len, float *data) { while (len >= 4) { *data++ = ReadBlobFloat(image); len -= sizeof(float); } if (len > 0) (void) SeekBlob(image, len, SEEK_CUR); } /* This function reads one block of doubles*/ static void ReadBlobDoublesLSB(Image * image, size_t len, double *data) { while (len >= 8) { *data++ = ReadBlobDouble(image); len -= sizeof(double); } if (len > 0) (void) SeekBlob(image, len, SEEK_CUR); } static void ReadBlobDoublesMSB(Image * image, size_t len, double *data) { while (len >= 8) { *data++ = ReadBlobDouble(image); len -= sizeof(double); } if (len > 0) (void) SeekBlob(image, len, SEEK_CUR); } /* Calculate minimum and maximum from a given block of data */ static void CalcMinMax(Image *image, int endian_indicator, int SizeX, int SizeY, size_t CellType, unsigned ldblk, void *BImgBuff, double *Min, double *Max) { MagickOffsetType filepos; int i, x; void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data); void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data); double *dblrow; float *fltrow; if (endian_indicator == LSBEndian) { ReadBlobDoublesXXX = ReadBlobDoublesLSB; ReadBlobFloatsXXX = ReadBlobFloatsLSB; } else /* MI */ { ReadBlobDoublesXXX = ReadBlobDoublesMSB; ReadBlobFloatsXXX = ReadBlobFloatsMSB; } filepos = TellBlob(image); /* Please note that file seeking occurs only in the case of doubles */ for (i = 0; i < SizeY; i++) { if (CellType==miDOUBLE) { ReadBlobDoublesXXX(image, ldblk, (double *)BImgBuff); dblrow = (double *)BImgBuff; if (i == 0) { *Min = *Max = *dblrow; } for (x = 0; x < SizeX; x++) { if (*Min > *dblrow) *Min = *dblrow; if (*Max < *dblrow) *Max = *dblrow; dblrow++; } } if (CellType==miSINGLE) { ReadBlobFloatsXXX(image, ldblk, (float *)BImgBuff); fltrow = (float *)BImgBuff; if (i == 0) { *Min = *Max = *fltrow; } for (x = 0; x < (ssize_t) SizeX; x++) { if (*Min > *fltrow) *Min = *fltrow; if 
(*Max < *fltrow) *Max = *fltrow; fltrow++; } } } (void) SeekBlob(image, filepos, SEEK_SET); } static void FixSignedValues(PixelPacket *q, int y) { while(y-->0) { /* Please note that negative values will overflow Q=8; QuantumRange=255: <0;127> + 127+1 = <128; 255> <-1;-128> + 127+1 = <0; 127> */ SetPixelRed(q,GetPixelRed(q)+QuantumRange/2+1); SetPixelGreen(q,GetPixelGreen(q)+QuantumRange/2+1); SetPixelBlue(q,GetPixelBlue(q)+QuantumRange/2+1); q++; } } /** Fix whole row of logical/binary data. It means pack it. */ static void FixLogical(unsigned char *Buff,int ldblk) { unsigned char mask=128; unsigned char *BuffL = Buff; unsigned char val = 0; while(ldblk-->0) { if(*Buff++ != 0) val |= mask; mask >>= 1; if(mask==0) { *BuffL++ = val; val = 0; mask = 128; } } *BuffL = val; } #if defined(MAGICKCORE_ZLIB_DELEGATE) static voidpf AcquireZIPMemory(voidpf context,unsigned int items, unsigned int size) { (void) context; return((voidpf) AcquireQuantumMemory(items,size)); } static void RelinquishZIPMemory(voidpf context,voidpf memory) { (void) context; memory=RelinquishMagickMemory(memory); } #endif #if defined(MAGICKCORE_ZLIB_DELEGATE) /** This procedure decompreses an image block for a new MATLAB format. */ static Image *decompress_block(Image *orig, unsigned int *Size, ImageInfo *clone_info, ExceptionInfo *exception) { Image *image2; void *cache_block, *decompress_block; z_stream zip_info; FILE *mat_file; size_t magick_size; size_t extent; int file; int status; int zip_status; ssize_t TotalSize = 0; if(clone_info==NULL) return NULL; if(clone_info->file) /* Close file opened from previous transaction. */ { fclose(clone_info->file); clone_info->file = NULL; (void) remove_utf8(clone_info->filename); } cache_block = AcquireQuantumMemory((size_t)(*Size< 16384) ? *Size: 16384,sizeof(unsigned char *)); if(cache_block==NULL) return NULL; decompress_block = AcquireQuantumMemory((size_t)(4096),sizeof(unsigned char *)); if(decompress_block==NULL) { RelinquishMagickMemory(cache_block); return NULL; } mat_file=0; file = AcquireUniqueFileResource(clone_info->filename); if (file != -1) mat_file = fdopen(file,"w"); if(!mat_file) { RelinquishMagickMemory(cache_block); RelinquishMagickMemory(decompress_block); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Cannot create file stream for decompressed image"); return NULL; } zip_info.zalloc=AcquireZIPMemory; zip_info.zfree=RelinquishZIPMemory; zip_info.opaque = (voidpf) NULL; zip_status = inflateInit(&zip_info); if (zip_status != Z_OK) { RelinquishMagickMemory(cache_block); RelinquishMagickMemory(decompress_block); (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "UnableToUncompressImage","`%s'",clone_info->filename); (void) fclose(mat_file); RelinquishUniqueFileResource(clone_info->filename); return NULL; } /* zip_info.next_out = 8*4;*/ zip_info.avail_in = 0; zip_info.total_out = 0; while(*Size>0 && !EOFBlob(orig)) { magick_size = ReadBlob(orig, (*Size < 16384) ? 
*Size : 16384, (unsigned char *) cache_block); zip_info.next_in = (Bytef *) cache_block; zip_info.avail_in = (uInt) magick_size; while(zip_info.avail_in>0) { zip_info.avail_out = 4096; zip_info.next_out = (Bytef *) decompress_block; zip_status = inflate(&zip_info,Z_NO_FLUSH); if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END)) break; extent=fwrite(decompress_block, 4096-zip_info.avail_out, 1, mat_file); (void) extent; TotalSize += 4096-zip_info.avail_out; if(zip_status == Z_STREAM_END) goto DblBreak; } if ((zip_status != Z_OK) && (zip_status != Z_STREAM_END)) break; *Size -= magick_size; } DblBreak: inflateEnd(&zip_info); (void)fclose(mat_file); RelinquishMagickMemory(cache_block); RelinquishMagickMemory(decompress_block); *Size = TotalSize; if((clone_info->file=fopen(clone_info->filename,"rb"))==NULL) goto UnlinkFile; if( (image2 = AcquireImage(clone_info))==NULL ) goto EraseFile; status = OpenBlob(clone_info,image2,ReadBinaryBlobMode,exception); if (status == MagickFalse) { DeleteImageFromList(&image2); EraseFile: fclose(clone_info->file); clone_info->file = NULL; UnlinkFile: RelinquishUniqueFileResource(clone_info->filename); return NULL; } return image2; } #endif static Image *ReadMATImageV4(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { typedef struct { unsigned char Type[4]; unsigned int nRows; unsigned int nCols; unsigned int imagf; unsigned int nameLen; } MAT4_HDR; long ldblk; EndianType endian; Image *rotate_image; MagickBooleanType status; MAT4_HDR HDR; QuantumInfo *quantum_info; QuantumFormatType format_type; register ssize_t i; ssize_t count, y; unsigned char *pixels; unsigned int depth; (void) SeekBlob(image,0,SEEK_SET); while (EOFBlob(image) != MagickFalse) { /* Object parser. */ ldblk=ReadBlobLSBLong(image); if (EOFBlob(image) != MagickFalse) break; if ((ldblk > 9999) || (ldblk < 0)) break; HDR.Type[3]=ldblk % 10; ldblk /= 10; /* T digit */ HDR.Type[2]=ldblk % 10; ldblk /= 10; /* P digit */ HDR.Type[1]=ldblk % 10; ldblk /= 10; /* O digit */ HDR.Type[0]=ldblk; /* M digit */ if (HDR.Type[3] != 0) break; /* Data format */ if (HDR.Type[2] != 0) break; /* Always 0 */ if (HDR.Type[0] == 0) { HDR.nRows=ReadBlobLSBLong(image); HDR.nCols=ReadBlobLSBLong(image); HDR.imagf=ReadBlobLSBLong(image); HDR.nameLen=ReadBlobLSBLong(image); endian=LSBEndian; } else { HDR.nRows=ReadBlobMSBLong(image); HDR.nCols=ReadBlobMSBLong(image); HDR.imagf=ReadBlobMSBLong(image); HDR.nameLen=ReadBlobMSBLong(image); endian=MSBEndian; } if ((HDR.imagf !=0) && (HDR.imagf !=1)) break; if (HDR.nameLen > 0xFFFF) break; for (i=0; i < (ssize_t) HDR.nameLen; i++) { int byte; /* Skip matrix name. 
*/ byte=ReadBlobByte(image); if (byte == EOF) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } } image->columns=(size_t) HDR.nRows; image->rows=(size_t) HDR.nCols; SetImageColorspace(image,GRAYColorspace); if (image_info->ping != MagickFalse) { Swap(image->columns,image->rows); return(image); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) return((Image *) NULL); quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) return((Image *) NULL); switch(HDR.Type[1]) { case 0: format_type=FloatingPointQuantumFormat; depth=64; break; case 1: format_type=FloatingPointQuantumFormat; depth=32; break; case 2: format_type=UnsignedQuantumFormat; depth=16; break; case 3: format_type=SignedQuantumFormat; depth=16; break; case 4: format_type=UnsignedQuantumFormat; depth=8; break; default: format_type=UnsignedQuantumFormat; depth=8; break; } image->depth=depth; if (HDR.Type[0] != 0) SetQuantumEndian(image,quantum_info,MSBEndian); status=SetQuantumFormat(image,quantum_info,format_type); status=SetQuantumDepth(image,quantum_info,depth); status=SetQuantumEndian(image,quantum_info,endian); SetQuantumScale(quantum_info,1.0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; count=ReadBlob(image,depth/8*image->columns,(unsigned char *) pixels); if (count == -1) break; q=QueueAuthenticPixels(image,0,image->rows-y-1,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayQuantum,pixels,exception); if ((HDR.Type[1] == 2) || (HDR.Type[1] == 3)) FixSignedValues(q,image->columns); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if (HDR.imagf == 1) for (y=0; y < (ssize_t) image->rows; y++) { /* Read complex pixels. */ count=ReadBlob(image,depth/8*image->columns,(unsigned char *) pixels); if (count == -1) break; if (HDR.Type[1] == 0) InsertComplexDoubleRow((double *) pixels,y,image,0,0); else InsertComplexFloatRow((float *) pixels,y,image,0,0); } quantum_info=DestroyQuantumInfo(quantum_info); rotate_image=RotateImage(image,90.0,exception); if (rotate_image != (Image *) NULL) { image=DestroyImage(image); image=rotate_image; } if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d M A T L A B i m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadMATImage() reads an MAT X image file and returns it. 
It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadMATImage method is: % % Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Method ReadMATImage returns a pointer to the image after % reading. A null image is returned if there is a memory shortage or if % the image cannot be read. % % o image_info: Specifies a pointer to a ImageInfo structure. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image, *image2=NULL, *rotated_image; PixelPacket *q; unsigned int status; MATHeader MATLAB_HDR; size_t size; size_t CellType; QuantumInfo *quantum_info; ImageInfo *clone_info; int i; ssize_t ldblk; unsigned char *BImgBuff = NULL; double MinVal, MaxVal; size_t Unknown6; unsigned z, z2; unsigned Frames; int logging; int sample_size; MagickOffsetType filepos=0x80; BlobInfo *blob; size_t one; unsigned int (*ReadBlobXXXLong)(Image *image); unsigned short (*ReadBlobXXXShort)(Image *image); void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data); void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data); assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter"); /* Open image file. */ quantum_info=(QuantumInfo *) NULL; image = AcquireImage(image_info); status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read MATLAB image. 
*/
  clone_info=(ImageInfo *) NULL;
  if (ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (strncmp(MATLAB_HDR.identific,"MATLAB",6) != 0)
    {
      image2=ReadMATImageV4(image_info,image,exception);
      if (image2 == NULL)
        goto MATLAB_KO;
      image=image2;
      goto END_OF_READING;
    }
  MATLAB_HDR.Version = ReadBlobLSBShort(image);
  if (ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (logging)
    (void) LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c",
      MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]);
  if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2))
  {
    ReadBlobXXXLong = ReadBlobLSBLong;
    ReadBlobXXXShort = ReadBlobLSBShort;
    ReadBlobDoublesXXX = ReadBlobDoublesLSB;
    ReadBlobFloatsXXX = ReadBlobFloatsLSB;
    image->endian = LSBEndian;
  }
  else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2))
  {
    ReadBlobXXXLong = ReadBlobMSBLong;
    ReadBlobXXXShort = ReadBlobMSBShort;
    ReadBlobDoublesXXX = ReadBlobDoublesMSB;
    ReadBlobFloatsXXX = ReadBlobFloatsMSB;
    image->endian = MSBEndian;
  }
  else
    goto MATLAB_KO;    /* unsupported endian */

  if (strncmp(MATLAB_HDR.identific, "MATLAB", 6))
  {
MATLAB_KO:
    if (clone_info != (ImageInfo *) NULL)
      clone_info=DestroyImageInfo(clone_info);
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  }

  filepos = TellBlob(image);
  while (!EOFBlob(image)) /* object parser loop */
  {
    Frames = 1;
    (void) SeekBlob(image,filepos,SEEK_SET);
    /* printf("pos=%X\n",TellBlob(image)); */

    MATLAB_HDR.DataType = ReadBlobXXXLong(image);
    if (EOFBlob(image))
      break;
    MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image);
    if (EOFBlob(image))
      break;
    if ((MagickSizeType) (MATLAB_HDR.ObjectSize+filepos) > GetBlobSize(image))
      goto MATLAB_KO;
    filepos += MATLAB_HDR.ObjectSize + 4 + 4;

    clone_info=CloneImageInfo(image_info);
    image2 = image;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
    if (MATLAB_HDR.DataType == miCOMPRESSED)
    {
      image2 = decompress_block(image,&MATLAB_HDR.ObjectSize,clone_info,exception);
      if (image2==NULL)
        continue;
      MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */
    }
#endif

    if (MATLAB_HDR.DataType != miMATRIX)
      continue;  /* skip other objects. */

    MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2);
    MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2);

    MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2);
    MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF;
    MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF;

    MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2);
    if (image != image2)
      MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* ??? don't understand why ??
*/ MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2); MATLAB_HDR.SizeX = ReadBlobXXXLong(image2); MATLAB_HDR.SizeY = ReadBlobXXXLong(image2); switch(MATLAB_HDR.DimFlag) { case 8: z2=z=1; break; /* 2D matrix*/ case 12: z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB*/ Unknown6 = ReadBlobXXXLong(image2); (void) Unknown6; if(z!=3) ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported"); break; case 16: z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */ if(z!=3 && z!=1) ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported"); Frames = ReadBlobXXXLong(image2); if (Frames == 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); break; default: ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported"); } MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2); MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2); if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), "MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass); if (MATLAB_HDR.StructureClass != mxCHAR_CLASS && MATLAB_HDR.StructureClass != mxSINGLE_CLASS && /* float + complex float */ MATLAB_HDR.StructureClass != mxDOUBLE_CLASS && /* double + complex double */ MATLAB_HDR.StructureClass != mxINT8_CLASS && MATLAB_HDR.StructureClass != mxUINT8_CLASS && /* uint8 + uint8 3D */ MATLAB_HDR.StructureClass != mxINT16_CLASS && MATLAB_HDR.StructureClass != mxUINT16_CLASS && /* uint16 + uint16 3D */ MATLAB_HDR.StructureClass != mxINT32_CLASS && MATLAB_HDR.StructureClass != mxUINT32_CLASS && /* uint32 + uint32 3D */ MATLAB_HDR.StructureClass != mxINT64_CLASS && MATLAB_HDR.StructureClass != mxUINT64_CLASS) /* uint64 + uint64 3D */ ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix"); switch (MATLAB_HDR.NameFlag) { case 0: size = ReadBlobXXXLong(image2); /* Object name string size */ size = 4 * (ssize_t) ((size + 3 + 1) / 4); (void) SeekBlob(image2, size, SEEK_CUR); break; case 1: case 2: case 3: case 4: (void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */ break; default: goto MATLAB_KO; } CellType = ReadBlobXXXLong(image2); /* Additional object type */ if (logging) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "MATLAB_HDR.CellType: %.20g",(double) CellType); (void) ReadBlob(image2, 4, (unsigned char *) &size); /* data size */ NEXT_FRAME: switch (CellType) { case miINT8: case miUINT8: sample_size = 8; if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL) image->depth = 1; else image->depth = 8; /* Byte type cell */ ldblk = (ssize_t) MATLAB_HDR.SizeX; break; case miINT16: case miUINT16: sample_size = 16; image->depth = 16; /* Word type cell */ ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX); break; case miINT32: case miUINT32: sample_size = 32; image->depth = 32; /* Dword type cell */ ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX); break; case miINT64: case miUINT64: sample_size = 64; image->depth = 64; /* Qword type cell */ ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX); break; case miSINGLE: sample_size = 32; image->depth = 32; /* double type cell */ (void) SetImageOption(clone_info,"quantum:format","floating-point"); if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX) { /* complex float type cell */ } ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX); break; case miDOUBLE: sample_size = 64; image->depth = 64; /* double type cell */ (void) SetImageOption(clone_info,"quantum:format","floating-point"); DisableMSCWarning(4127) if (sizeof(double) != 8) RestoreMSCWarning ThrowReaderException(CoderError, "IncompatibleSizeOfDouble"); if 
(MATLAB_HDR.StructureFlag & FLAG_COMPLEX) { /* complex double type cell */ } ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX); break; default: if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); if (clone_info) clone_info=DestroyImageInfo(clone_info); ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix"); } (void) sample_size; image->columns = MATLAB_HDR.SizeX; image->rows = MATLAB_HDR.SizeY; one=1; image->colors = one << image->depth; if (image->columns == 0 || image->rows == 0) goto MATLAB_KO; if((unsigned long)ldblk*MATLAB_HDR.SizeY > MATLAB_HDR.ObjectSize) goto MATLAB_KO; /* Image is gray when no complex flag is set and 2D Matrix */ if ((MATLAB_HDR.DimFlag == 8) && ((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0)) { SetImageColorspace(image,GRAYColorspace); image->type=GrayscaleType; } /* If ping is true, then only set image size and colors without reading any image data. */ if (image_info->ping) { size_t temp = image->columns; image->columns = image->rows; image->rows = temp; goto done_reading; /* !!!!!! BAD !!!! */ } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } quantum_info=AcquireQuantumInfo(clone_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* ----- Load raster data ----- */ BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */ if (BImgBuff == NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(BImgBuff,0,ldblk*sizeof(double)); MinVal = 0; MaxVal = 0; if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */ { CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum); } /* Main loop for reading all scanlines */ if(z==1) z=0; /* read grey scanlines */ /* else read color scanlines */ do { for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++) { q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception); if (q == (PixelPacket *) NULL) { if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1)); goto done_reading; /* Skip image rotation, when cannot set image pixels */ } if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk) { if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1)); goto ExitLoop; } if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL)) { FixLogical((unsigned char *)BImgBuff,ldblk); if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0) { ImportQuantumPixelsFailed: if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1)); break; } } else { if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0) goto ImportQuantumPixelsFailed; if (z<=1 && /* fix only during a last pass z==0 || z==1 */ (CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64)) FixSignedValues(q,MATLAB_HDR.SizeX); } if (!SyncAuthenticPixels(image,exception)) { if (logging) 
(void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1)); goto ExitLoop; } } } while(z-- >= 2); ExitLoop: /* Read complex part of numbers here */ if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX) { /* Find Min and Max Values for complex parts of floats */ CellType = ReadBlobXXXLong(image2); /* Additional object type */ i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/ if (CellType==miDOUBLE || CellType==miSINGLE) { CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal); } if (CellType==miDOUBLE) for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++) { ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff); InsertComplexDoubleRow((double *)BImgBuff, i, image, MinVal, MaxVal); } if (CellType==miSINGLE) for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++) { ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff); InsertComplexFloatRow((float *)BImgBuff, i, image, MinVal, MaxVal); } } /* Image is gray when no complex flag is set and 2D Matrix AGAIN!!! */ if ((MATLAB_HDR.DimFlag == 8) && ((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0)) image->type=GrayscaleType; if (image->depth == 1) image->type=BilevelType; if(image2==image) image2 = NULL; /* Remove shadow copy to an image before rotation. */ /* Rotate image. */ rotated_image = RotateImage(image, 90.0, exception); if (rotated_image != (Image *) NULL) { /* Remove page offsets added by RotateImage */ rotated_image->page.x=0; rotated_image->page.y=0; blob = rotated_image->blob; rotated_image->blob = image->blob; rotated_image->colors = image->colors; image->blob = blob; AppendImageToList(&image,rotated_image); DeleteImageFromList(&image); } done_reading: if(image2!=NULL) if(image2!=image) { DeleteImageFromList(&image2); if(clone_info) { if(clone_info->file) { fclose(clone_info->file); clone_info->file = NULL; (void) remove_utf8(clone_info->filename); } } } /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (image->next == (Image *) NULL) break; image=SyncNextImageInList(image); image->columns=image->rows=0; image->colors=0; /* row scan buffer is no longer needed */ RelinquishMagickMemory(BImgBuff); BImgBuff = NULL; if(--Frames>0) { z = z2; if(image2==NULL) image2 = image; goto NEXT_FRAME; } if(image2!=NULL) if(image2!=image) /* Does shadow temporary decompressed image exist? */ { /* CloseBlob(image2); */ DeleteImageFromList(&image2); if(clone_info) { if(clone_info->file) { fclose(clone_info->file); clone_info->file = NULL; (void) unlink(clone_info->filename); } } } } RelinquishMagickMemory(BImgBuff); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); END_OF_READING: if (clone_info) clone_info=DestroyImageInfo(clone_info); CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. 
*/ p=image; image=NULL; while (p != (Image *) NULL) { Image *tmp=p; if ((p->rows == 0) || (p->columns == 0)) { p=p->previous; DeleteImageFromList(&tmp); } else { image=p; p=p->previous; } } /* Fix scene numbers */ for (p=image; p != (Image *) NULL; p=p->next) p->scene=scene++; } if(clone_info != NULL) /* cleanup garbage file from compression */ { if(clone_info->file) { fclose(clone_info->file); clone_info->file = NULL; (void) remove_utf8(clone_info->filename); } DestroyImageInfo(clone_info); clone_info = NULL; } if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return"); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); if(image==NULL) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); return (image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r M A T I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method RegisterMATImage adds attributes for the MAT image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterMATImage method is: % % size_t RegisterMATImage(void) % */ ModuleExport size_t RegisterMATImage(void) { MagickInfo *entry; entry=SetMagickInfo("MAT"); entry->decoder=(DecodeImageHandler *) ReadMATImage; entry->encoder=(EncodeImageHandler *) WriteMATImage; entry->blob_support=MagickFalse; entry->seekable_stream=MagickTrue; entry->description=AcquireString("MATLAB level 5 image format"); entry->module=AcquireString("MAT"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r M A T I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Method UnregisterMATImage removes format registrations made by the % MAT module from the list of supported formats. % % The format of the UnregisterMATImage method is: % % UnregisterMATImage(void) % */ ModuleExport void UnregisterMATImage(void) { (void) UnregisterMagickInfo("MAT"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e M A T L A B I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Function WriteMATImage writes an Matlab matrix to a file. % % The format of the WriteMATImage method is: % % unsigned int WriteMATImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o status: Function WriteMATImage return True if the image is written. % False is returned is there is a memory shortage or if the image file % fails to write. % % o image_info: Specifies a pointer to a ImageInfo structure. % % o image: A pointer to an Image structure. % */ static MagickBooleanType WriteMATImage(const ImageInfo *image_info,Image *image) { char MATLAB_HDR[0x80]; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType scene; struct tm local_time; time_t current_time; /* Open output image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"enter MAT"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(MagickFalse); image->depth=8; current_time=time((time_t *) NULL); #if defined(MAGICKCORE_HAVE_LOCALTIME_R) (void) localtime_r(&current_time,&local_time); #else (void) memcpy(&local_time,localtime(&current_time),sizeof(local_time)); #endif (void) memset(MATLAB_HDR,' ',MagickMin(sizeof(MATLAB_HDR),124)); FormatLocaleString(MATLAB_HDR,sizeof(MATLAB_HDR), "MATLAB 5.0 MAT-file, Platform: %s, Created on: %s %s %2d %2d:%2d:%2d %d", OsDesc,DayOfWTab[local_time.tm_wday],MonthsTab[local_time.tm_mon], local_time.tm_mday,local_time.tm_hour,local_time.tm_min, local_time.tm_sec,local_time.tm_year+1900); MATLAB_HDR[0x7C]=0; MATLAB_HDR[0x7D]=1; MATLAB_HDR[0x7E]='I'; MATLAB_HDR[0x7F]='M'; (void) WriteBlob(image,sizeof(MATLAB_HDR),(unsigned char *) MATLAB_HDR); scene=0; do { char padding; MagickBooleanType is_gray; QuantumInfo *quantum_info; size_t data_size; unsigned char *pixels; unsigned int z; (void) TransformImageColorspace(image,sRGBColorspace); is_gray=SetImageGray(image,&image->exception); z=(is_gray != MagickFalse) ? 0 : 3; /* Store MAT header. */ data_size=image->rows*image->columns; if (is_gray == MagickFalse) data_size*=3; padding=((unsigned char)(data_size-1) & 0x7) ^ 0x7; (void) WriteBlobLSBLong(image,miMATRIX); (void) WriteBlobLSBLong(image,(unsigned int) data_size+padding+ ((is_gray != MagickFalse) ? 48 : 56)); (void) WriteBlobLSBLong(image,0x6); /* 0x88 */ (void) WriteBlobLSBLong(image,0x8); /* 0x8C */ (void) WriteBlobLSBLong(image,0x6); /* 0x90 */ (void) WriteBlobLSBLong(image,0); (void) WriteBlobLSBLong(image,0x5); /* 0x98 */ (void) WriteBlobLSBLong(image,(is_gray != MagickFalse) ? 0x8 : 0xC); /* 0x9C - DimFlag */ (void) WriteBlobLSBLong(image,(unsigned int) image->rows); /* x: 0xA0 */ (void) WriteBlobLSBLong(image,(unsigned int) image->columns); /* y: 0xA4 */ if (is_gray == MagickFalse) { (void) WriteBlobLSBLong(image,3); /* z: 0xA8 */ (void) WriteBlobLSBLong(image,0); } (void) WriteBlobLSBShort(image,1); /* 0xB0 */ (void) WriteBlobLSBShort(image,1); /* 0xB2 */ (void) WriteBlobLSBLong(image,'M'); /* 0xB4 */ (void) WriteBlobLSBLong(image,0x2); /* 0xB8 */ (void) WriteBlobLSBLong(image,(unsigned int) data_size); /* 0xBC */ /* Store image data. 
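   A note on the padding computed above: ((data_size - 1) & 0x7) ^ 0x7 is
   the distance from data_size to the next multiple of 8.  For example,
   data_size = 5 gives (4 & 7) ^ 7 = 3, so three zero bytes follow the
   pixels; data_size = 8 gives (7 & 7) ^ 7 = 0, so none are needed.  MAT
   data elements are 8-byte aligned, which is what this accounts for.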
*/ exception=(&image->exception); quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); pixels=GetQuantumPixels(quantum_info); do { const PixelPacket *p; ssize_t y; for (y=0; y < (ssize_t)image->columns; y++) { p=GetVirtualPixels(image,y,0,1,image->rows,&image->exception); if (p == (const PixelPacket *) NULL) break; (void) ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info, z2qtype[z],pixels,exception); (void) WriteBlob(image,image->rows,pixels); } if (!SyncAuthenticPixels(image,exception)) break; } while (z-- >= 2); while (padding-- > 0) (void) WriteBlobByte(image,0); quantum_info=DestroyQuantumInfo(quantum_info); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++, GetImageListLength(image)); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); (void) CloseBlob(image); return(status); }
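/*
 * As a worked illustration of the ReadMATImageV4() header decoding earlier
 * in this file: the first 32-bit word of a V4 header is a four-digit
 * decimal code (named M, O, P and T in MATLAB's V4 documentation, from the
 * most to the least significant digit), and the reader peels the digits
 * with successive modulo/divide steps.  The stand-alone program below is a
 * sketch of that arithmetic only; it is not part of this coder and makes
 * no claim about which digit the coder maps to which meaning.
 */
#if 0  /* compile stand-alone: cc mopt.c && ./a.out */
#include <stdio.h>

int main(void)
{
  long code = 10;          /* example V4 type code */
  int Type[4];
  int i;

  for (i = 3; i > 0; i--)
  {                        /* Type[3], Type[2], Type[1] get the low digits */
    Type[i] = (int) (code % 10);
    code /= 10;
  }
  Type[0] = (int) code;    /* most significant digit */
  printf("digits: %d %d %d %d\n", Type[0], Type[1], Type[2], Type[3]);
  /* prints "digits: 0 0 1 0" for code 10 */
  return 0;
}
#endif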
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 Intel Corp. * Copyright (c) 2001-2002 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * These functions interface with the sockets layer to implement the * SCTP Extensions for the Sockets API. * * Note that the descriptions from the specification are USER level * functions--this file is the functions which populate the struct proto * for SCTP which is the BOTTOM of the sockets interface. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Narasimha Budihal <narsi@refcode.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <samudrala@us.ibm.com> * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> * Ardelle Fan <ardelle.fan@intel.com> * Ryan Layer <rmlayer@us.ibm.com> * Anup Pemmaiah <pemmaiah@cc.usu.edu> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/hash.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/ip.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/compat.h> #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <net/busy_poll.h> #include <linux/socket.h> /* for sa_family_t */ #include <linux/export.h> #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Forward declarations for internal helper functions. 
*/ static int sctp_writeable(struct sock *sk); static void sctp_wfree(struct sk_buff *skb); static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, size_t msg_len); static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static int sctp_wait_for_accept(struct sock *sk, long timeo); static void sctp_wait_for_close(struct sock *sk, long timeo); static void sctp_destruct_sock(struct sock *sk); static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len); static int sctp_bindx_add(struct sock *, struct sockaddr *, int); static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk); static int sctp_do_bind(struct sock *, union sctp_addr *, int); static int sctp_autobind(struct sock *sk); static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t); static int sctp_memory_pressure; static atomic_long_t sctp_memory_allocated; struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) { sctp_memory_pressure = 1; } /* Get the sndbuf space available at the time on the association. */ static inline int sctp_wspace(struct sctp_association *asoc) { int amt; if (asoc->ep->sndbuf_policy) amt = asoc->sndbuf_used; else amt = sk_wmem_alloc_get(asoc->base.sk); if (amt >= asoc->base.sk->sk_sndbuf) { if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) amt = 0; else { amt = sk_stream_wspace(asoc->base.sk); if (amt < 0) amt = 0; } } else { amt = asoc->base.sk->sk_sndbuf - amt; } return amt; } /* Increment the used sndbuf space count of the corresponding association by * the size of the outgoing data chunk. * Also, set the skb destructor for sndbuf accounting later. * * Since it is always 1-1 between chunk and skb, and also a new skb is always * allocated for chunk bundling in sctp_packet_transmit(), we can use the * destructor in the data chunk skb for the purpose of the sndbuf space * tracking. */ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) { struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; /* The sndbuf space is tracked per association. */ sctp_association_hold(asoc); skb_set_owner_w(chunk->skb, sk); chunk->skb->destructor = sctp_wfree; /* Save the chunk pointer in skb for sctp_wfree to use later. */ skb_shinfo(chunk->skb)->destructor_arg = chunk; asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); sk->sk_wmem_queued += chunk->skb->truesize; sk_mem_charge(sk, chunk->skb->truesize); } /* Verify that this is a valid address. */ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len) { struct sctp_af *af; /* Verify basic sockaddr. */ af = sctp_sockaddr_af(sctp_sk(sk), addr, len); if (!af) return -EINVAL; /* Is this a valid SCTP address? */ if (!af->addr_valid(addr, sctp_sk(sk), NULL)) return -EINVAL; if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) return -EINVAL; return 0; } /* Look up the association by its id. If this is not a UDP-style * socket, the ID field is always ignored. 
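 *
 * An illustrative call pattern (a sketch, not verbatim code from this
 * file) in a setsockopt handler looks like:
 *
 *	asoc = sctp_id2assoc(sk, params.sasoc_assoc_id);
 *	if (!asoc && params.sasoc_assoc_id && sctp_style(sk, UDP))
 *		return -EINVAL;
 *
 * i.e. a nonzero id that no longer resolves on a UDP-style socket is an
 * error, while a TCP-style socket simply ignores the id.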
*/ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) { struct sctp_association *asoc = NULL; /* If this is not a UDP-style socket, assoc id should be ignored. */ if (!sctp_style(sk, UDP)) { /* Return NULL if the socket state is not ESTABLISHED. It * could be a TCP-style listening socket or a socket which * hasn't yet called connect() to establish an association. */ if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING)) return NULL; /* Get the first and the only association from the list. */ if (!list_empty(&sctp_sk(sk)->ep->asocs)) asoc = list_entry(sctp_sk(sk)->ep->asocs.next, struct sctp_association, asocs); return asoc; } /* Otherwise this is a UDP-style socket. */ if (!id || (id == (sctp_assoc_t)-1)) return NULL; spin_lock_bh(&sctp_assocs_id_lock); asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); spin_unlock_bh(&sctp_assocs_id_lock); if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) return NULL; return asoc; } /* Look up the transport from an address and an assoc id. If both address and * id are specified, the associations matching the address and the id should be * the same. */ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, struct sockaddr_storage *addr, sctp_assoc_t id) { struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; struct sctp_af *af = sctp_get_af_specific(addr->ss_family); union sctp_addr *laddr = (union sctp_addr *)addr; struct sctp_transport *transport; if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) return NULL; addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, laddr, &transport); if (!addr_asoc) return NULL; id_asoc = sctp_id2assoc(sk, id); if (id_asoc && (id_asoc != addr_asoc)) return NULL; sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), (union sctp_addr *)addr); return transport; } /* API 3.1.2 bind() - UDP Style Syntax * The syntax of bind() is, * * ret = bind(int sd, struct sockaddr *addr, int addrlen); * * sd - the socket descriptor returned by socket(). * addr - the address structure (struct sockaddr_in or struct * sockaddr_in6 [RFC 2553]), * addr_len - the size of the address structure. */ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) { int retval = 0; lock_sock(sk); pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Disallow binding twice. */ if (!sctp_sk(sk)->ep->base.bind_addr.port) retval = sctp_do_bind(sk, (union sctp_addr *)addr, addr_len); else retval = -EINVAL; release_sock(sk); return retval; } static long sctp_get_port_local(struct sock *, union sctp_addr *); /* Verify this is a valid sockaddr. */ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len) { struct sctp_af *af; /* Check minimum size. */ if (len < sizeof (struct sockaddr)) return NULL; /* V4 mapped address are really of AF_INET family */ if (addr->sa.sa_family == AF_INET6 && ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { if (!opt->pf->af_supported(AF_INET, opt)) return NULL; } else { /* Does this PF support this AF? */ if (!opt->pf->af_supported(addr->sa.sa_family, opt)) return NULL; } /* If we get this far, af is valid. */ af = sctp_get_af_specific(addr->sa.sa_family); if (len < af->sockaddr_len) return NULL; return af; } /* Bind a local address either to an endpoint or to an association. 
*/ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { struct net *net = sock_net(sk); struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; struct sctp_bind_addr *bp = &ep->base.bind_addr; struct sctp_af *af; unsigned short snum; int ret = 0; /* Common sockaddr verification. */ af = sctp_sockaddr_af(sp, addr, len); if (!af) { pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", __func__, sk, addr, len); return -EINVAL; } snum = ntohs(addr->v4.sin_port); pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", __func__, sk, &addr->sa, bp->port, snum, len); /* PF specific bind() address verification. */ if (!sp->pf->bind_verify(sp, addr)) return -EADDRNOTAVAIL; /* We must either be unbound, or bind to the same port. * It's OK to allow 0 ports if we are already bound. * We'll just inhert an already bound port in this case */ if (bp->port) { if (!snum) snum = bp->port; else if (snum != bp->port) { pr_debug("%s: new port %d doesn't match existing port " "%d\n", __func__, snum, bp->port); return -EINVAL; } } if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; /* See if the address matches any of the addresses we may have * already bound before checking against other endpoints. */ if (sctp_bind_addr_match(bp, addr, sp)) return -EINVAL; /* Make sure we are allowed to bind here. * The function sctp_get_port_local() does duplicate address * detection. */ addr->v4.sin_port = htons(snum); if ((ret = sctp_get_port_local(sk, addr))) { return -EADDRINUSE; } /* Refresh ephemeral port. */ if (!bp->port) bp->port = inet_sk(sk)->inet_num; /* Add the address to the bind address list. * Use GFP_ATOMIC since BHs will be disabled. */ ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len, SCTP_ADDR_SRC, GFP_ATOMIC); /* Copy back into socket for getsockname() use. */ if (!ret) { inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); sp->pf->to_sk_saddr(addr, sk); } return ret; } /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks * * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged * at any one time. If a sender, after sending an ASCONF chunk, decides * it needs to transfer another ASCONF Chunk, it MUST wait until the * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a * subsequent ASCONF. Note this restriction binds each side, so at any * time two ASCONF may be in-transit on any given association (one sent * from each endpoint). */ static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk) { struct net *net = sock_net(asoc->base.sk); int retval = 0; /* If there is an outstanding ASCONF chunk, queue it for later * transmission. */ if (asoc->addip_last_asconf) { list_add_tail(&chunk->list, &asoc->addip_chunk_list); goto out; } /* Hold the chunk until an ASCONF_ACK is received. */ sctp_chunk_hold(chunk); retval = sctp_primitive_ASCONF(net, asoc, chunk); if (retval) sctp_chunk_free(chunk); else asoc->addip_last_asconf = chunk; out: return retval; } /* Add a list of addresses as bind addresses to local endpoint or * association. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_do_bind() on it. * * If any of them fails, then the operation will be reversed and the * ones that were added will be removed. * * Only sctp_setsockopt_bindx() is supposed to call this function. 
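 *
 * As an illustration (a hypothetical fragment, not code from this file),
 * stepping through the packed array the caller hands in works like this:
 *
 *	void *buf = addrs;
 *	for (cnt = 0; cnt < addrcnt; cnt++) {
 *		struct sockaddr *sa = buf;
 *		struct sctp_af *af = sctp_get_af_specific(sa->sa_family);
 *
 *		if (!af)
 *			return -EINVAL;
 *		buf += af->sockaddr_len;
 *	}
 *
 * Because entries are variable-length (sockaddr_len is 16 bytes for IPv4
 * and 28 for IPv6), entry N can only be found by stepping over entries
 * 0..N-1 by their family-specific lengths.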
*/ static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) { int cnt; int retval = 0; void *addr_buf; struct sockaddr *sa_addr; struct sctp_af *af; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* The list may contain either IPv4 or IPv6 address; * determine the address length for walking thru the list. */ sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); if (!af) { retval = -EINVAL; goto err_bindx_add; } retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, af->sockaddr_len); addr_buf += af->sockaddr_len; err_bindx_add: if (retval < 0) { /* Failed. Cleanup the ones that have been added */ if (cnt > 0) sctp_bindx_rem(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Add IP address parameters to all the peers of the * associations that are part of the endpoint indicating that a list of local * addresses are added to the endpoint. * * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_send_asconf_add_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; struct sctp_sockaddr_entry *laddr; union sctp_addr *addr; union sctp_addr saveaddr; void *addr_buf; struct sctp_af *af; struct list_head *p; int i; int retval = 0; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * in the bind address list of the association. If so, * do not send the asconf chunk to its peer, but continue with * other associations. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (sctp_assoc_lookup_laddr(asoc, addr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Use the first valid address in bind addr list of * association as Address Parameter of ASCONF CHUNK. */ bp = &asoc->base.bind_addr; p = bp->address_list.next; laddr = list_entry(p, struct sctp_sockaddr_entry, list); chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, addrcnt, SCTP_PARAM_ADD_IP); if (!chunk) { retval = -ENOMEM; goto out; } /* Add the new addresses to the bind address list with * use_as_src set to 0. 
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); memcpy(&saveaddr, addr, af->sockaddr_len); retval = sctp_add_bind_addr(bp, &saveaddr, sizeof(saveaddr), SCTP_ADDR_NEW, GFP_ATOMIC); addr_buf += af->sockaddr_len; } if (asoc->src_out_of_asoc_ok) { struct sctp_transport *trans; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { /* Clear the source and route cache */ dst_release(trans->dst); trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); trans->ssthresh = asoc->peer.i.a_rwnd; trans->rto = asoc->rto_initial; sctp_max_rto(asoc, trans); trans->rtt = trans->srtt = trans->rttvar = 0; sctp_transport_route(trans, NULL, sctp_sk(asoc->base.sk)); } } retval = sctp_send_asconf(asoc, chunk); } out: return retval; } /* Remove a list of addresses from bind addresses list. Do not remove the * last address. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_del_bind() on it. * * If any of them fails, then the operation will be reversed and the * ones that were removed will be added back. * * At least one address has to be left; if only one address is * available, the operation will return -EBUSY. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; int cnt; struct sctp_bind_addr *bp = &ep->base.bind_addr; int retval = 0; void *addr_buf; union sctp_addr *sa_addr; struct sctp_af *af; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* If the bind address list is empty or if there is only one * bind address, there is nothing more to be removed (we need * at least one address here). */ if (list_empty(&bp->address_list) || (sctp_list_single_entry(&bp->address_list))) { retval = -EBUSY; goto err_bindx_rem; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); if (!af) { retval = -EINVAL; goto err_bindx_rem; } if (!af->addr_valid(sa_addr, sp, NULL)) { retval = -EADDRNOTAVAIL; goto err_bindx_rem; } if (sa_addr->v4.sin_port && sa_addr->v4.sin_port != htons(bp->port)) { retval = -EINVAL; goto err_bindx_rem; } if (!sa_addr->v4.sin_port) sa_addr->v4.sin_port = htons(bp->port); /* FIXME - There is probably a need to check if sk->sk_saddr and * sk->sk_rcv_addr are currently set to one of the addresses to * be removed. This is something which needs to be looked into * when we are fixing the outstanding issues with multi-homing * socket routing and failover schemes. Refer to comments in * sctp_do_bind(). -daisy */ retval = sctp_del_bind_addr(bp, sa_addr); addr_buf += af->sockaddr_len; err_bindx_rem: if (retval < 0) { /* Failed. Add the ones that has been removed back */ if (cnt > 0) sctp_bindx_add(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Delete IP address parameters to all the peers of * the associations that are part of the endpoint indicating that a list of * local addresses are removed from the endpoint. * * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. 
*/ static int sctp_send_asconf_del_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_transport *transport; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; union sctp_addr *laddr; void *addr_buf; struct sctp_af *af; struct sctp_sockaddr_entry *saddr; int i; int retval = 0; int stored = 0; chunk = NULL; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * not present in the bind address list of the association. * If so, do not send the asconf chunk to its peer, but * continue with other associations. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (!sctp_assoc_lookup_laddr(asoc, laddr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Find one address in the association's bind address list * that is not in the packed array of addresses. This is to * make sure that we do not delete all the addresses in the * association. */ bp = &asoc->base.bind_addr; laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, addrcnt, sp); if ((laddr == NULL) && (addrcnt == 1)) { if (asoc->asconf_addr_del_pending) continue; asoc->asconf_addr_del_pending = kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); if (asoc->asconf_addr_del_pending == NULL) { retval = -ENOMEM; goto out; } asoc->asconf_addr_del_pending->sa.sa_family = addrs->sa_family; asoc->asconf_addr_del_pending->v4.sin_port = htons(bp->port); if (addrs->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addrs; asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; } else if (addrs->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addrs; asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; } pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", __func__, asoc, &asoc->asconf_addr_del_pending->sa, asoc->asconf_addr_del_pending); asoc->src_out_of_asoc_ok = 1; stored = 1; goto skip_mkasconf; } if (laddr == NULL) return -EINVAL; /* We do not need RCU protection throughout this loop * because this is done under a socket lock from the * setsockopt call. */ chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, SCTP_PARAM_DEL_IP); if (!chunk) { retval = -ENOMEM; goto out; } skip_mkasconf: /* Reset use_as_src flag for the addresses in the bind address * list that are to be deleted. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, laddr)) saddr->state = SCTP_ADDR_DEL; } addr_buf += af->sockaddr_len; } /* Update the route and saddr entries for all the transports * as some of the addresses in the bind address list are * about to be deleted and cannot be used as source addresses. 
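	 * Dropping the cached dst and re-routing with a NULL source below
	 * forces a fresh source-address selection for every path.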
	 */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
				transports) {
		dst_release(transport->dst);
		sctp_transport_route(transport, NULL,
				     sctp_sk(asoc->base.sk));
	}

	if (stored)
		/* We don't need to transmit ASCONF */
		continue;
	retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association. The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind(). Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with, so that no newly accepted association will
 * be associated with those addresses. If the endpoint supports dynamic
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
 * may cause the endpoint to send the appropriate message to the peer to
 * change the peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality. Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copy the addresses from user land to kernel
 * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through
 * sctp_setsockopt() from userspace.
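 *
 * A minimal user-space sketch of the call being tunneled here (error
 * handling omitted; 'sd' is a hypothetical one-to-many SCTP socket,
 * the addresses are illustrative, and sctp_bindx() is the lksctp-tools
 * helper):
 *
 *    struct sockaddr_in a[2];
 *    memset(a, 0, sizeof(a));
 *    a[0].sin_family = AF_INET;
 *    a[0].sin_port   = htons(5000);     // same port in every entry
 *    inet_pton(AF_INET, "192.0.2.1", &a[0].sin_addr);
 *    a[1] = a[0];
 *    inet_pton(AF_INET, "192.0.2.2", &a[1].sin_addr);
 *    sctp_bindx(sd, (struct sockaddr *)a, 2, SCTP_BINDX_ADD_ADDR);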
* * We don't use copy_from_user() for optimization: we first do the * sanity checks (buffer size -fast- and access check-healthy * pointer); if all of those succeed, then we can alloc the memory * (expensive operation) needed to copy the data to kernel. Then we do * the copying without checking the user space area * (__copy_from_user()). * * On exit there is no need to do sockfd_put(), sys_setsockopt() does * it. * * sk The sk of the socket * addrs The pointer to the addresses in user land * addrssize Size of the addrs buffer * op Operation to perform (add or remove, see the flags of * sctp_bindx) * * Returns 0 if ok, <0 errno code on error. */ static int sctp_setsockopt_bindx(struct sock *sk, struct sockaddr __user *addrs, int addrs_size, int op) { struct sockaddr *kaddrs; int err; int addrcnt = 0; int walk_size = 0; struct sockaddr *sa_addr; void *addr_buf; struct sctp_af *af; pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", __func__, sk, addrs, addrs_size, op); if (unlikely(addrs_size <= 0)) return -EINVAL; /* Check the user passed a healthy pointer. */ if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) return -EFAULT; /* Alloc space for the address array in kernel memory. */ kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN); if (unlikely(!kaddrs)) return -ENOMEM; if (__copy_from_user(kaddrs, addrs, addrs_size)) { kfree(kaddrs); return -EFAULT; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { if (walk_size + sizeof(sa_family_t) > addrs_size) { kfree(kaddrs); return -EINVAL; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { kfree(kaddrs); return -EINVAL; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* Do the work. */ switch (op) { case SCTP_BINDX_ADD_ADDR: err = sctp_bindx_add(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); break; case SCTP_BINDX_REM_ADDR: err = sctp_bindx_rem(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); break; default: err = -EINVAL; break; } out: kfree(kaddrs); return err; } /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) * * Common routine for handling connect() and sctp_connectx(). * Connect will come in with just a single address. */ static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size, sctp_assoc_t *assoc_id) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc = NULL; struct sctp_association *asoc2; struct sctp_transport *transport; union sctp_addr to; sctp_scope_t scope; long timeo; int err = 0; int addrcnt = 0; int walk_size = 0; union sctp_addr *sa_addr = NULL; void *addr_buf; unsigned short port; unsigned int f_flags = 0; sp = sctp_sk(sk); ep = sp->ep; /* connect() cannot be done on a socket that is already in ESTABLISHED * state - UDP-style peeled off socket or a TCP-style socket that * is already connected. * It cannot be done even on a TCP-style listening socket. */ if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) || (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { err = -EISCONN; goto out_free; } /* Walk through the addrs buffer and count the number of addresses. 
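	 * The buffer is a "packed array": entries of different sizes laid
	 * out back to back, e.g. [sockaddr_in][sockaddr_in6][sockaddr_in].
	 * Each entry's length is derived from its sa_family via
	 * sctp_get_af_specific(), so one unknown family aborts the walk.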
*/ addr_buf = kaddrs; while (walk_size < addrs_size) { struct sctp_af *af; if (walk_size + sizeof(sa_family_t) > addrs_size) { err = -EINVAL; goto out_free; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { err = -EINVAL; goto out_free; } port = ntohs(sa_addr->v4.sin_port); /* Save current address so we can work with it */ memcpy(&to, sa_addr, af->sockaddr_len); err = sctp_verify_addr(sk, &to, af->sockaddr_len); if (err) goto out_free; /* Make sure the destination port is correctly set * in all addresses. */ if (asoc && asoc->peer.port && asoc->peer.port != port) { err = -EINVAL; goto out_free; } /* Check if there already is a matching association on the * endpoint (other than the one created here). */ asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); if (asoc2 && asoc2 != asoc) { if (asoc2->state >= SCTP_STATE_ESTABLISHED) err = -EISCONN; else err = -EALREADY; goto out_free; } /* If we could not find a matching association on the endpoint, * make sure that there is no peeled-off association matching * the peer address even on another socket. */ if (sctp_endpoint_is_peeled_off(ep, &to)) { err = -EADDRNOTAVAIL; goto out_free; } if (!asoc) { /* If a bind() or sctp_bindx() is not called prior to * an sctp_connectx() call, the system picks an * ephemeral port and will choose an address set * equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_free; } } else { /* * If an unprivileged user inherits a 1-many * style socket with open associations on a * privileged port, it MAY be permitted to * accept new associations, but it SHOULD NOT * be permitted to open new associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_free; } } scope = sctp_scope(&to); asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!asoc) { err = -ENOMEM; goto out_free; } err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { goto out_free; } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* In case the user of sctp_connectx() wants an association * id back, assign one now. */ if (assoc_id) { err = sctp_assoc_set_id(asoc, GFP_KERNEL); if (err < 0) goto out_free; } err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) { goto out_free; } /* Initialize sk's dport and daddr for getpeername() */ inet_sk(sk)->inet_dport = htons(asoc->peer.port); sp->pf->to_sk_daddr(sa_addr, sk); sk->sk_err = 0; /* in-kernel sockets don't generally have a file allocated to them * if all they do is call sock_create_kern(). */ if (sk->sk_socket->file) f_flags = sk->sk_socket->file->f_flags; timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); if (assoc_id) *assoc_id = asoc->assoc_id; err = sctp_wait_for_connect(asoc, &timeo); /* Note: the asoc may be freed after the return of * sctp_wait_for_connect. */ /* Don't free association on exit. 
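	 * The association is now owned by the endpoint and is torn down
	 * through the normal state machine; the out_free path below must
	 * only free associations that never got that far.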
	 */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table; try to unhash it, just in case.  It's
		 * a no-op if it wasn't hashed, so we're safe.
		 */
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 *                   sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses. Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses). The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copy the addresses from user land to kernel
 * land and invoke sctp_connectx().  This is used for tunneling the
 * sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size, which is fast, and an access check on
 * the user pointer); if all of those succeed, then we can alloc the
 * memory (an expensive operation) needed to copy the data to kernel.
 * Then we do the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk        The sk of the socket
 * addrs     The pointer to the addresses in user land
 * addrssize Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	struct sockaddr *kaddrs;
	gfp_t gfp = GFP_KERNEL;
	int err = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer.
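	 * (Same access_ok() + __copy_from_user() split as in
	 * sctp_setsockopt_bindx(); see the comment above that function.)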
	 */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory.  */
	if (sk->sk_socket->file)
		gfp = GFP_USER | __GFP_NOWARN;
	kaddrs = kmalloc(addrs_size, gfp);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a
 * return indication to the call.  Error is always negative and
 * association id is always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space
 * library can avoid any unnecessary allocations.  The only difference
 * is that we store the actual length of the address buffer into the
 * addrs_num structure member.  That way we can re-use the existing
 * code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
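 *
 * A minimal user-space sketch of the per-association graceful shutdown
 * described above ('sd' and 'assoc_id' are hypothetical, error handling
 * omitted; sctp_send() is the lksctp-tools helper that wraps sendmsg()
 * with the flag carried in the ancillary data):
 *
 *    struct sctp_sndrcvinfo sinfo = { 0 };
 *    sinfo.sinfo_flags = SCTP_EOF;       // graceful SHUTDOWN, not ABORT
 *    sinfo.sinfo_assoc_id = assoc_id;
 *    sctp_send(sd, NULL, 0, &sinfo, 0);  // no user data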
* * After an application calls close() on a socket descriptor, no further * socket operations will succeed on that descriptor. * * API 7.1.4 SO_LINGER * * An application using the TCP-style socket can use this option to * perform the SCTP ABORT primitive. The linger option structure is: * * struct linger { * int l_onoff; // option on/off * int l_linger; // linger time * }; * * To enable the option, set l_onoff to 1. If the l_linger value is set * to 0, calling close() is the same as the ABORT primitive. If the * value is set to a negative value, the setsockopt() call will return * an error. If the value is set to a positive value linger_time, the * close() can be blocked for at most linger_time ms. If the graceful * shutdown phase does not finish during this period, close() will * return but the graceful shutdown phase continues in the system. */ static void sctp_close(struct sock *sk, long timeout) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; struct sctp_association *asoc; struct list_head *pos, *temp; unsigned int data_was_unread; pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_state = SCTP_SS_CLOSING; ep = sctp_sk(sk)->ep; /* Clean up any skbs sitting on the receive queue. */ data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); /* Walk all associations on an endpoint. */ list_for_each_safe(pos, temp, &ep->asocs) { asoc = list_entry(pos, struct sctp_association, asocs); if (sctp_style(sk, TCP)) { /* A closed association can still be in the list if * it belongs to a TCP-style listening socket that is * not yet accepted. If so, free it. If not, send an * ABORT or SHUTDOWN based on the linger options. */ if (sctp_state(asoc, CLOSED)) { sctp_association_free(asoc); continue; } } if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || !skb_queue_empty(&asoc->ulpq.reasm) || (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { struct sctp_chunk *chunk; chunk = sctp_make_abort_user(asoc, NULL, 0); sctp_primitive_ABORT(net, asoc, chunk); } else sctp_primitive_SHUTDOWN(net, asoc, NULL); } /* On a TCP-style socket, block for at most linger_time if set. */ if (sctp_style(sk, TCP) && timeout) sctp_wait_for_close(sk, timeout); /* This will run the backlog queue. */ release_sock(sk); /* Supposedly, no process has access to the socket, but * the net layers still may. * Also, sctp_destroy_sock() needs to be called with addr_wq_lock * held and that should be grabbed before socket lock. */ spin_lock_bh(&net->sctp.addr_wq_lock); bh_lock_sock(sk); /* Hold the sock, since sk_common_release() will put sock_put() * and we have just a little more cleanup. */ sock_hold(sk); sk_common_release(sk); bh_unlock_sock(sk); spin_unlock_bh(&net->sctp.addr_wq_lock); sock_put(sk); SCTP_DBG_OBJCNT_DEC(sock); } /* Handle EPIPE error. */ static int sctp_error(struct sock *sk, int flags, int err) { if (err == -EPIPE) err = sock_error(sk) ? : -EPIPE; if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); return err; } /* API 3.1.3 sendmsg() - UDP Style Syntax * * An application uses sendmsg() and recvmsg() calls to transmit data to * and receive data from its peer. * * ssize_t sendmsg(int socket, const struct msghdr *message, * int flags); * * socket - the socket descriptor of the endpoint. * message - pointer to the msghdr structure which contains a single * user message and possibly some ancillary data. 
* * See Section 5 for complete description of the data * structures. * * flags - flags sent or received with the user message, see Section * 5 for complete description of the flags. * * Note: This function could use a rewrite especially when explicit * connect support comes in. */ /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */ static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *new_asoc = NULL, *asoc = NULL; struct sctp_transport *transport, *chunk_tp; struct sctp_chunk *chunk; union sctp_addr to; struct sockaddr *msg_name = NULL; struct sctp_sndrcvinfo default_sinfo; struct sctp_sndrcvinfo *sinfo; struct sctp_initmsg *sinit; sctp_assoc_t associd = 0; sctp_cmsgs_t cmsgs = { NULL }; sctp_scope_t scope; bool fill_sinfo_ttl = false, wait_connect = false; struct sctp_datamsg *datamsg; int msg_flags = msg->msg_flags; __u16 sinfo_flags = 0; long timeo; int err; err = 0; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, msg, msg_len, ep); /* We cannot send a message over a TCP-style listening socket. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { err = -EPIPE; goto out_nounlock; } /* Parse out the SCTP CMSGs. */ err = sctp_msghdr_parse(msg, &cmsgs); if (err) { pr_debug("%s: msghdr parse err:%x\n", __func__, err); goto out_nounlock; } /* Fetch the destination address for this packet. This * address only selects the association--it is not necessarily * the address we will send to. * For a peeled-off socket, msg_name is ignored. */ if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { int msg_namelen = msg->msg_namelen; err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, msg_namelen); if (err) return err; if (msg_namelen > sizeof(to)) msg_namelen = sizeof(to); memcpy(&to, msg->msg_name, msg_namelen); msg_name = msg->msg_name; } sinit = cmsgs.init; if (cmsgs.sinfo != NULL) { memset(&default_sinfo, 0, sizeof(default_sinfo)); default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid; default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags; default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid; default_sinfo.sinfo_context = cmsgs.sinfo->snd_context; default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id; sinfo = &default_sinfo; fill_sinfo_ttl = true; } else { sinfo = cmsgs.srinfo; } /* Did the user specify SNDINFO/SNDRCVINFO? */ if (sinfo) { sinfo_flags = sinfo->sinfo_flags; associd = sinfo->sinfo_assoc_id; } pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, msg_len, sinfo_flags); /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { err = -EINVAL; goto out_nounlock; } /* If SCTP_EOF is set, no data can be sent. Disallow sending zero * length messages when SCTP_EOF|SCTP_ABORT is not set. * If SCTP_ABORT is set, the message length could be non zero with * the msg_iov set to the user abort reason. */ if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { err = -EINVAL; goto out_nounlock; } /* If SCTP_ADDR_OVER is set, there must be an address * specified in msg_name. 
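	 * (In the UDP model, msg_name normally just selects the
	 * association; SCTP_ADDR_OVER additionally forces this one message
	 * to the given address instead of the current primary path.)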
*/ if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { err = -EINVAL; goto out_nounlock; } transport = NULL; pr_debug("%s: about to look up association\n", __func__); lock_sock(sk); /* If a msg_name has been specified, assume this is to be used. */ if (msg_name) { /* Look for a matching association on the endpoint. */ asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); /* If we could not find a matching association on the * endpoint, make sure that it is not a TCP-style * socket that already has an association or there is * no peeled-off association on another socket. */ if (!asoc && ((sctp_style(sk, TCP) && (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING))) || sctp_endpoint_is_peeled_off(ep, &to))) { err = -EADDRNOTAVAIL; goto out_unlock; } } else { asoc = sctp_id2assoc(sk, associd); if (!asoc) { err = -EPIPE; goto out_unlock; } } if (asoc) { pr_debug("%s: just looked up association:%p\n", __func__, asoc); /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED * socket that has an association in CLOSED state. This can * happen when an accepted socket has an association that is * already CLOSED. */ if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { err = -EPIPE; goto out_unlock; } if (sinfo_flags & SCTP_EOF) { pr_debug("%s: shutting down association:%p\n", __func__, asoc); sctp_primitive_SHUTDOWN(net, asoc, NULL); err = 0; goto out_unlock; } if (sinfo_flags & SCTP_ABORT) { chunk = sctp_make_abort_user(asoc, msg, msg_len); if (!chunk) { err = -ENOMEM; goto out_unlock; } pr_debug("%s: aborting association:%p\n", __func__, asoc); sctp_primitive_ABORT(net, asoc, chunk); err = 0; goto out_unlock; } } /* Do we need to create the association? */ if (!asoc) { pr_debug("%s: there is no association yet\n", __func__); if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { err = -EINVAL; goto out_unlock; } /* Check for invalid stream against the stream counts, * either the default or the user specified stream counts. */ if (sinfo) { if (!sinit || !sinit->sinit_num_ostreams) { /* Check against the defaults. */ if (sinfo->sinfo_stream >= sp->initmsg.sinit_num_ostreams) { err = -EINVAL; goto out_unlock; } } else { /* Check against the requested. */ if (sinfo->sinfo_stream >= sinit->sinit_num_ostreams) { err = -EINVAL; goto out_unlock; } } } /* * API 3.1.2 bind() - UDP Style Syntax * If a bind() or sctp_bindx() is not called prior to a * sendmsg() call that initiates a new association, the * system picks an ephemeral port and will choose an address * set equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_unlock; } } else { /* * If an unprivileged user inherits a one-to-many * style socket with open associations on a privileged * port, it MAY be permitted to accept new associations, * but it SHOULD NOT be permitted to open new * associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_unlock; } } scope = sctp_scope(&to); new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!new_asoc) { err = -ENOMEM; goto out_unlock; } asoc = new_asoc; err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { err = -ENOMEM; goto out_free; } /* If the SCTP_INIT ancillary data is specified, set all * the association init values accordingly. 
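	 *
	 * A minimal user-space sketch (illustrative values) of how the
	 * SCTP_INIT ancillary data parsed into 'sinit' here is supplied
	 * with the first sendmsg() on a one-to-many socket:
	 *
	 *    char cbuf[CMSG_SPACE(sizeof(struct sctp_initmsg))];
	 *    struct sctp_initmsg im = { .sinit_num_ostreams = 5,
	 *                               .sinit_max_attempts = 4 };
	 *    struct msghdr mh = { 0 };
	 *    struct cmsghdr *cm;
	 *    mh.msg_control = cbuf;
	 *    mh.msg_controllen = sizeof(cbuf);
	 *    cm = CMSG_FIRSTHDR(&mh);
	 *    cm->cmsg_level = IPPROTO_SCTP;
	 *    cm->cmsg_type  = SCTP_INIT;
	 *    cm->cmsg_len   = CMSG_LEN(sizeof(im));
	 *    memcpy(CMSG_DATA(cm), &im, sizeof(im));
	 *    // ...then fill msg_name/msg_iov and call sendmsg(sd, &mh, 0)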
*/ if (sinit) { if (sinit->sinit_num_ostreams) { asoc->c.sinit_num_ostreams = sinit->sinit_num_ostreams; } if (sinit->sinit_max_instreams) { asoc->c.sinit_max_instreams = sinit->sinit_max_instreams; } if (sinit->sinit_max_attempts) { asoc->max_init_attempts = sinit->sinit_max_attempts; } if (sinit->sinit_max_init_timeo) { asoc->max_init_timeo = msecs_to_jiffies(sinit->sinit_max_init_timeo); } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } } /* ASSERT: we have a valid association at this point. */ pr_debug("%s: we have a valid association\n", __func__); if (!sinfo) { /* If the user didn't specify SNDINFO/SNDRCVINFO, make up * one with some defaults. */ memset(&default_sinfo, 0, sizeof(default_sinfo)); default_sinfo.sinfo_stream = asoc->default_stream; default_sinfo.sinfo_flags = asoc->default_flags; default_sinfo.sinfo_ppid = asoc->default_ppid; default_sinfo.sinfo_context = asoc->default_context; default_sinfo.sinfo_timetolive = asoc->default_timetolive; default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); sinfo = &default_sinfo; } else if (fill_sinfo_ttl) { /* In case SNDINFO was specified, we still need to fill * it with a default ttl from the assoc here. */ sinfo->sinfo_timetolive = asoc->default_timetolive; } /* API 7.1.7, the sndbuf size per association bounds the * maximum size of data that can be sent in a single send call. */ if (msg_len > sk->sk_sndbuf) { err = -EMSGSIZE; goto out_free; } if (asoc->pmtu_pending) sctp_assoc_pending_pmtu(sk, asoc); /* If fragmentation is disabled and the message length exceeds the * association fragmentation point, return EMSGSIZE. The I-D * does not specify what this error is, but this looks like * a great fit. */ if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { err = -EMSGSIZE; goto out_free; } /* Check for invalid stream. */ if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { err = -EINVAL; goto out_free; } if (sctp_wspace(asoc) < msg_len) sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc)); timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); if (!sctp_wspace(asoc)) { err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); if (err) goto out_free; } /* If an address is passed with the sendto/sendmsg call, it is used * to override the primary destination address in the TCP model, or * when SCTP_ADDR_OVER flag is set in the UDP model. */ if ((sctp_style(sk, TCP) && msg_name) || (sinfo_flags & SCTP_ADDR_OVER)) { chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); if (!chunk_tp) { err = -EINVAL; goto out_free; } } else chunk_tp = NULL; /* Auto-connect, if we aren't connected already. */ if (sctp_state(asoc, CLOSED)) { err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) goto out_free; wait_connect = true; pr_debug("%s: we associated primitively\n", __func__); } /* Break the message into multiple chunks of maximum size. */ datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); if (IS_ERR(datamsg)) { err = PTR_ERR(datamsg); goto out_free; } /* Now send the (possibly) fragmented message. */ list_for_each_entry(chunk, &datamsg->chunks, frag_list) { sctp_chunk_hold(chunk); /* Do accounting for the write space. */ sctp_set_owner_w(chunk); chunk->transport = chunk_tp; } /* Send it to the lower layers. Note: all chunks * must either fail or succeed. The lower layer * works that way today. Keep it that way or this * breaks. 
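	 * If the primitive rejects the message, sctp_datamsg_free() below
	 * releases every fragment, so no partial message stays queued.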
	 */
	err = sctp_primitive_SEND(net, asoc, datamsg);
	/* Did the lower layer accept the chunk? */
	if (err) {
		sctp_datamsg_free(datamsg);
		goto out_free;
	}

	pr_debug("%s: we sent primitively\n", __func__);

	sctp_datamsg_put(datamsg);
	err = msg_len;

	if (unlikely(wait_connect)) {
		timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
		sctp_wait_for_connect(asoc, &timeo);
	}

	/* If we are already past ASSOCIATE, the lower
	 * layers are responsible for association cleanup.
	 */
	goto out_unlock;

out_free:
	if (new_asoc)
		sctp_association_free(asoc);
out_unlock:
	release_sock(sk);

out_nounlock:
	return sctp_error(sk, msg_flags, err);
}

/* This is an extended version of skb_pull() that removes the data from the
 * start of a skb even when data is spread across the list of skb's in the
 * frag_list. len specifies the total amount of data that needs to be removed.
 * When 'len' bytes could be removed from the skb, it returns 0.
 * If 'len' exceeds the total skb length, it returns the number of bytes that
 * could not be removed.
 */
static int sctp_skb_pull(struct sk_buff *skb, int len)
{
	struct sk_buff *list;
	int skb_len = skb_headlen(skb);
	int rlen;

	if (len <= skb_len) {
		__skb_pull(skb, len);
		return 0;
	}
	len -= skb_len;
	__skb_pull(skb, skb_len);

	skb_walk_frags(skb, list) {
		rlen = sctp_skb_pull(list, len);
		skb->len -= (len-rlen);
		skb->data_len -= (len-rlen);

		if (!rlen)
			return 0;

		len = rlen;
	}

	return len;
}

/* API 3.1.3  recvmsg() - UDP Style Syntax
 *
 *  ssize_t recvmsg(int socket, struct msghdr *message,
 *                    int flags);
 *
 *  socket  - the socket descriptor of the endpoint.
 *  message - pointer to the msghdr structure which contains a single
 *            user message and possibly some ancillary data.
 *
 *            See Section 5 for complete description of the data
 *            structures.
 *
 *  flags   - flags sent or received with the user message, see Section
 *            5 for complete description of the flags.
 */
static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			int noblock, int flags, int *addr_len)
{
	struct sctp_ulpevent *event = NULL;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff *skb, *head_skb;
	int copied;
	int err = 0;
	int skb_len;

	pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
		 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
		 addr_len);

	lock_sock(sk);

	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
	    !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
		err = -ENOTCONN;
		goto out;
	}

	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	/* Get the total length of the skb including any skb's in the
	 * frag_list.
	 */
	skb_len = skb->len;

	copied = skb_len;
	if (copied > len)
		copied = len;

	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	event = sctp_skb2event(skb);

	if (err)
		goto out_free;

	if (event->chunk && event->chunk->head_skb)
		head_skb = event->chunk->head_skb;
	else
		head_skb = skb;
	sock_recv_ts_and_drops(msg, sk, head_skb);
	if (sctp_ulpevent_is_notification(event)) {
		msg->msg_flags |= MSG_NOTIFICATION;
		sp->pf->event_msgname(event, msg->msg_name, addr_len);
	} else {
		sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
	}

	/* Check if we allow SCTP_NXTINFO. */
	if (sp->recvnxtinfo)
		sctp_ulpevent_read_nxtinfo(event, msg, sk);
	/* Check if we allow SCTP_RCVINFO. */
	if (sp->recvrcvinfo)
		sctp_ulpevent_read_rcvinfo(event, msg);
	/* Check if we allow SCTP_SNDRCVINFO.
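	 * (Delivered only if the application subscribed to data I/O events
	 * via the SCTP_EVENTS option, i.e. sctp_data_io_event is set in
	 * struct sctp_event_subscribe -- see sctp_setsockopt_events().)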
	 */
	if (sp->subscribe.sctp_data_io_event)
		sctp_ulpevent_read_sndrcvinfo(event, msg);

	err = copied;

	/* If skb's length exceeds the user's buffer, update the skb and
	 * push it back to the receive_queue so that the next call to
	 * recvmsg() will return the remaining data.  Don't set MSG_EOR.
	 */
	if (skb_len > copied) {
		msg->msg_flags &= ~MSG_EOR;
		if (flags & MSG_PEEK)
			goto out_free;
		sctp_skb_pull(skb, copied);
		skb_queue_head(&sk->sk_receive_queue, skb);

		/* When only partial message is copied to the user, increase
		 * rwnd by that amount. If all the data in the skb is read,
		 * rwnd is updated when the event is freed.
		 */
		if (!sctp_ulpevent_is_notification(event))
			sctp_assoc_rwnd_increase(event->asoc, copied);
		goto out;
	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
		   (event->msg_flags & MSG_EOR))
		msg->msg_flags |= MSG_EOR;
	else
		msg->msg_flags &= ~MSG_EOR;

out_free:
	if (flags & MSG_PEEK) {
		/* Release the skb reference acquired after peeking the skb in
		 * sctp_skb_recv_datagram().
		 */
		kfree_skb(skb);
	} else {
		/* Free the event which includes releasing the reference to
		 * the owner of the skb, freeing the skb and updating the
		 * rwnd.
		 */
		sctp_ulpevent_free(event);
	}
out:
	release_sock(sk);
	return err;
}

/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag.  If enabled, no SCTP message
 * fragmentation will be performed.  Instead, if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * an error will be indicated to the user.
 */
static int sctp_setsockopt_disable_fragments(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;

	return 0;
}

static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
				  unsigned int optlen)
{
	struct sctp_association *asoc;
	struct sctp_ulpevent *event;

	if (optlen > sizeof(struct sctp_event_subscribe))
		return -EINVAL;
	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
		return -EFAULT;

	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
	 * if there is no data to be sent or retransmit, the stack will
	 * immediately send up this notification.
	 */
	if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
				       &sctp_sk(sk)->subscribe)) {
		asoc = sctp_id2assoc(sk, 0);

		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
			event = sctp_ulpevent_make_sender_dry_event(asoc,
					GFP_ATOMIC);
			if (!event)
				return -ENOMEM;

			sctp_ulpq_tail_event(&asoc->ulpq, event);
		}
	}

	return 0;
}

/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set, it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any association should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
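 *
 * A minimal user-space sketch (illustrative timeout; 'sd' is a
 * hypothetical one-to-many SCTP socket) of the option described above:
 *
 *    int secs = 30;   // idle associations close after 30 seconds
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));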
 */
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
				     unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct net *net = sock_net(sk);

	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (optlen != sizeof(int))
		return -EINVAL;

	if (copy_from_user(&sp->autoclose, optval, optlen))
		return -EFAULT;

	if (sp->autoclose > net->sctp.max_autoclose)
		sp->autoclose = net->sctp.max_autoclose;

	return 0;
}

/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 * };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in by
 *                     the application, and identifies the association
 *                     for this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for.  This value will apply to all addresses of an
 *                     association if the spp_address field is empty.  Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association.  The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE  - Enable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled.  Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified.  Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *                     heartbeat delay is to be set to the value of 0
 *                     milliseconds.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address.
 *                     Note that if the address field is empty then all
 *                     addresses on the association are affected.  Note
 *                     also that SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE
 *                     are mutually exclusive.  Enabling both will have
 *                     undetermined results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack.  The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address.  Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack.  If the spp_address field is blank then
 *                     delayed sack is disabled for the entire association.  Note
 *                     also that this field is mutually exclusive to
 *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                     results.
 */
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
				       struct sctp_transport   *trans,
				       struct sctp_association *asoc,
				       struct sctp_sock        *sp,
				       int                      hb_change,
				       int                      pmtud_change,
				       int                      sackdelay_change)
{
	int error;

	if (params->spp_flags & SPP_HB_DEMAND && trans) {
		struct net *net = sock_net(trans->asoc->base.sk);

		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
		if (error)
			return error;
	}

	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
	 * this field is ignored.  Note also that a value of zero indicates
	 * the current setting should be left unchanged.
	 */
	if (params->spp_flags & SPP_HB_ENABLE) {

		/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
		 * set.  This lets us use 0 value when this flag
		 * is set.
		 */
		if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
			params->spp_hbinterval = 0;

		if (params->spp_hbinterval ||
		    (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
			if (trans) {
				trans->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else if (asoc) {
				asoc->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else {
				sp->hbinterval = params->spp_hbinterval;
			}
		}
	}

	if (hb_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_HB) | hb_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_HB) | hb_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_HB) | hb_change;
		}
	}

	/* When Path MTU discovery is disabled the value specified here will
	 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
	 * include the flag SPP_PMTUD_DISABLE for this field to have any
	 * effect).
	 */
	if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
		if (trans) {
			trans->pathmtu = params->spp_pathmtu;
			sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
		} else if (asoc) {
			asoc->pathmtu = params->spp_pathmtu;
			sctp_frag_point(asoc, params->spp_pathmtu);
		} else {
			sp->pathmtu = params->spp_pathmtu;
		}
	}

	if (pmtud_change) {
		if (trans) {
			int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
				(params->spp_flags & SPP_PMTUD_ENABLE);
			trans->param_flags =
				(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
			if (update) {
				sctp_transport_pmtu(trans, sctp_opt2sk(sp));
				sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
			}
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
		}
	}

	/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
	 * value of this field is ignored.  Note also that a value of zero
	 * indicates the current setting should be left unchanged.
	 */
	if ((params->spp_flags & SPP_SACKDELAY_ENABLE) &&
	    params->spp_sackdelay) {
		if (trans) {
			trans->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else if (asoc) {
			asoc->sackdelay =
				msecs_to_jiffies(params->spp_sackdelay);
		} else {
			sp->sackdelay = params->spp_sackdelay;
		}
	}

	if (sackdelay_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_SACKDELAY) |
				sackdelay_change;
		}
	}

	/* Note that a value of zero indicates the current setting should be
	 * left unchanged.
	 */
	if (params->spp_pathmaxrxt) {
		if (trans) {
			trans->pathmaxrxt = params->spp_pathmaxrxt;
		} else if (asoc) {
			asoc->pathmaxrxt = params->spp_pathmaxrxt;
		} else {
			sp->pathmaxrxt = params->spp_pathmaxrxt;
		}
	}

	return 0;
}

static int sctp_setsockopt_peer_addr_params(struct sock *sk,
					    char __user *optval,
					    unsigned int optlen)
{
	struct sctp_paddrparams  params;
	struct sctp_transport   *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);
	int error;
	int hb_change, pmtud_change, sackdelay_change;

	if (optlen != sizeof(struct sctp_paddrparams))
		return -EINVAL;

	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	/* Validate flags and value parameters. */
	hb_change        = params.spp_flags & SPP_HB;
	pmtud_change     = params.spp_flags & SPP_PMTUD;
	sackdelay_change = params.spp_flags & SPP_SACKDELAY;

	if (hb_change        == SPP_HB ||
	    pmtud_change     == SPP_PMTUD ||
	    sackdelay_change == SPP_SACKDELAY ||
	    params.spp_sackdelay > 500 ||
	    (params.spp_pathmtu &&
	     params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
		return -EINVAL;

	/* If an address other than INADDR_ANY is specified, and
	 * no transport is found, then the request is invalid.
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans)
			return -EINVAL;
	}

	/* Get association, if assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Heartbeat demand can only be sent on a transport or
	 * association, but not a socket.
	 */
	if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
		return -EINVAL;

	/* Process parameters. */
	error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
					    hb_change, pmtud_change,
					    sackdelay_change);

	if (error)
		return error;

	/* If changes are for association, also apply parameters to each
	 * transport.
	 */
	if (!trans && asoc) {
		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
				transports) {
			sctp_apply_peer_addr_params(&params, trans, asoc, sp,
						    hb_change, pmtud_change,
						    sackdelay_change);
		}
	}

	return 0;
}

static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
}

static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
{
	return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
}

/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will affect the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.  If
 * the assoc_id is 0, then this sets or gets the endpoint's default
 * values.
 * If the assoc_id field is non-zero, then the set or get
 * affects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t            sack_assoc_id;
 *     uint32_t                sack_delay;
 *     uint32_t                sack_freq;
 * };
 *
 * sack_assoc_id -  This parameter indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoint's default value is changed (affecting future
 *    associations only).
 *
 * sack_delay -  This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq -  This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2, setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
static int sctp_setsockopt_delayed_ack(struct sock *sk,
				       char __user *optval, unsigned int optlen)
{
	struct sctp_sack_info    params;
	struct sctp_transport   *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	if (optlen == sizeof(struct sctp_sack_info)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;

		if (params.sack_delay == 0 && params.sack_freq == 0)
			return 0;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
				    "Use struct sctp_sack_info instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;

		if (params.sack_delay == 0)
			params.sack_freq = 1;
		else
			params.sack_freq = 0;
	} else
		return -EINVAL;

	/* Validate value parameter. */
	if (params.sack_delay > 500)
		return -EINVAL;

	/* Get association, if sack_assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
	if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (params.sack_delay) {
		if (asoc) {
			asoc->sackdelay =
				msecs_to_jiffies(params.sack_delay);
			asoc->param_flags =
				sctp_spp_sackdelay_enable(asoc->param_flags);
		} else {
			sp->sackdelay = params.sack_delay;
			sp->param_flags =
				sctp_spp_sackdelay_enable(sp->param_flags);
		}
	}

	if (params.sack_freq == 1) {
		if (asoc) {
			asoc->param_flags =
				sctp_spp_sackdelay_disable(asoc->param_flags);
		} else {
			sp->param_flags =
				sctp_spp_sackdelay_disable(sp->param_flags);
		}
	} else if (params.sack_freq > 1) {
		if (asoc) {
			asoc->sackfreq = params.sack_freq;
			asoc->param_flags =
				sctp_spp_sackdelay_enable(asoc->param_flags);
		} else {
			sp->sackfreq = params.sack_freq;
			sp->param_flags =
				sctp_spp_sackdelay_enable(sp->param_flags);
		}
	}

	/* If change is for association, also apply to each transport.
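	 * Each transport keeps its own copy of sackdelay, sackfreq and
	 * param_flags, so updating only the association would leave the
	 * per-path values stale.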
	 */
	if (asoc) {
		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
				transports) {
			if (params.sack_delay) {
				trans->sackdelay =
					msecs_to_jiffies(params.sack_delay);
				trans->param_flags =
					sctp_spp_sackdelay_enable(trans->param_flags);
			}
			if (params.sack_freq == 1) {
				trans->param_flags =
					sctp_spp_sackdelay_disable(trans->param_flags);
			} else if (params.sack_freq > 1) {
				trans->sackfreq = params.sack_freq;
				trans->param_flags =
					sctp_spp_sackdelay_enable(trans->param_flags);
			}
		}
	}

	return 0;
}

/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization.  The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets, only future associations are affected
 * by the change).  With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	struct sctp_initmsg sinit;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof(struct sctp_initmsg))
		return -EINVAL;
	if (copy_from_user(&sinit, optval, optlen))
		return -EFAULT;

	if (sinit.sinit_num_ostreams)
		sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
	if (sinit.sinit_max_instreams)
		sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
	if (sinit.sinit_max_attempts)
		sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
	if (sinit.sinit_max_init_timeo)
		sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;

	return 0;
}

/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 * Applications that wish to use the sendto() system call may wish to
 * specify a default set of parameters that would normally be supplied
 * through the inclusion of ancillary data.  This socket option allows
 * such an application to set the default sctp_sndrcvinfo structure.
 * The application that wishes to use this socket option simply passes
 * in to this call the sctp_sndrcvinfo structure defined in Section
 * 5.2.2.  The input parameters accepted by this call include
 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 * sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
 * to this call if the caller is using the UDP model.
 */
static int sctp_setsockopt_default_send_param(struct sock *sk,
					      char __user *optval,
					      unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndrcvinfo info;

	if (optlen != sizeof(info))
		return -EINVAL;
	if (copy_from_user(&info, optval, optlen))
		return -EFAULT;
	if (info.sinfo_flags &
	    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
	      SCTP_ABORT | SCTP_EOF))
		return -EINVAL;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;
	if (asoc) {
		asoc->default_stream = info.sinfo_stream;
		asoc->default_flags = info.sinfo_flags;
		asoc->default_ppid = info.sinfo_ppid;
		asoc->default_context = info.sinfo_context;
		asoc->default_timetolive = info.sinfo_timetolive;
	} else {
		sp->default_stream = info.sinfo_stream;
		sp->default_flags = info.sinfo_flags;
		sp->default_ppid = info.sinfo_ppid;
		sp->default_context = info.sinfo_context;
		sp->default_timetolive = info.sinfo_timetolive;
	}

	return 0;
}

/* RFC6458, Section 8.1.31.
Set/get Default Send Parameters * (SCTP_DEFAULT_SNDINFO) */ static int sctp_setsockopt_default_sndinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndinfo info; if (optlen != sizeof(info)) return -EINVAL; if (copy_from_user(&info, optval, optlen)) return -EFAULT; if (info.snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; asoc = sctp_id2assoc(sk, info.snd_assoc_id); if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { asoc->default_stream = info.snd_sid; asoc->default_flags = info.snd_flags; asoc->default_ppid = info.snd_ppid; asoc->default_context = info.snd_context; } else { sp->default_stream = info.snd_sid; sp->default_flags = info.snd_flags; sp->default_ppid = info.snd_ppid; sp->default_context = info.snd_context; } return 0; } /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) * * Requests that the local SCTP stack use the enclosed peer address as * the association primary. The enclosed address must be one of the * association peer's addresses. */ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_prim prim; struct sctp_transport *trans; if (optlen != sizeof(struct sctp_prim)) return -EINVAL; if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) return -EFAULT; trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); if (!trans) return -EINVAL; sctp_assoc_set_primary(trans->asoc, trans); return 0; } /* * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. */ static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. * */ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; unsigned long rto_min, rto_max; struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof (struct sctp_rtoinfo)) return -EINVAL; if (copy_from_user(&rtoinfo, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); /* Set the values to the specific association */ if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; rto_max = rtoinfo.srto_max; rto_min = rtoinfo.srto_min; if (rto_max) rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; else rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; if (rto_min) rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; else rto_min = asoc ? 
asoc->rto_min : sp->rtoinfo.srto_min; if (rto_min > rto_max) return -EINVAL; if (asoc) { if (rtoinfo.srto_initial != 0) asoc->rto_initial = msecs_to_jiffies(rtoinfo.srto_initial); asoc->rto_max = rto_max; asoc->rto_min = rto_min; } else { /* If there is no association or the association-id = 0 * set the values to the endpoint. */ if (rtoinfo.srto_initial != 0) sp->rtoinfo.srto_initial = rtoinfo.srto_initial; sp->rtoinfo.srto_max = rto_max; sp->rtoinfo.srto_min = rto_min; } return 0; } /* * * 7.1.2 SCTP_ASSOCINFO * * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. * See [SCTP] for more information. * */ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assocparams assocparams; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assocparams)) return -EINVAL; if (copy_from_user(&assocparams, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Set the values to the specific association */ if (asoc) { if (assocparams.sasoc_asocmaxrxt != 0) { __u32 path_sum = 0; int paths = 0; struct sctp_transport *peer_addr; list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, transports) { path_sum += peer_addr->pathmaxrxt; paths++; } /* Only validate asocmaxrxt if we have more than * one path/transport. We do this because path * retransmissions are only counted when we have more * then one path. */ if (paths > 1 && assocparams.sasoc_asocmaxrxt > path_sum) return -EINVAL; asoc->max_retrans = assocparams.sasoc_asocmaxrxt; } if (assocparams.sasoc_cookie_life != 0) asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); } else { /* Set the values to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); if (assocparams.sasoc_asocmaxrxt != 0) sp->assocparams.sasoc_asocmaxrxt = assocparams.sasoc_asocmaxrxt; if (assocparams.sasoc_cookie_life != 0) sp->assocparams.sasoc_cookie_life = assocparams.sasoc_cookie_life; } return 0; } /* * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) * * This socket option is a boolean flag which turns on or off mapped V4 * addresses. If this option is turned on and the socket is type * PF_INET6, then IPv4 addresses will be mapped to V6 representation. * If this option is turned off, then no mapping will be done of V4 * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. */ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (val) sp->v4mapped = 1; else sp->v4mapped = 0; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will effect * SCTP's choice of DATA chunk size. 
Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). * * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoints default value is * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. */ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); int val; if (optlen == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&val, optval, optlen)) return -EFAULT; params.assoc_id = 0; } else if (optlen == sizeof(struct sctp_assoc_value)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; val = params.assoc_value; } else return -EINVAL; if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { if (val == 0) { val = asoc->pathmtu; val -= sp->pf->af->net_header_len; val -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk); } asoc->user_frag = val; asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); } else { sp->user_frag = val; } return 0; } /* * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) * * Requests that the peer mark the enclosed address as the association * primary. The enclosed address must be one of the association's * locally bound addresses. 
The sctp_setpeerprim structure is used to make a
 * set primary request.
 */
static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
					     unsigned int optlen)
{
	struct net *net = sock_net(sk);
	struct sctp_sock	*sp;
	struct sctp_association *asoc = NULL;
	struct sctp_setpeerprim	prim;
	struct sctp_chunk	*chunk;
	struct sctp_af		*af;
	int 			err;

	sp = sctp_sk(sk);

	if (!net->sctp.addip_enable)
		return -EPERM;

	if (optlen != sizeof(struct sctp_setpeerprim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.asconf_capable)
		return -EPERM;

	if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
		return -EPERM;

	if (!sctp_state(asoc, ESTABLISHED))
		return -ENOTCONN;

	af = sctp_get_af_specific(prim.sspp_addr.ss_family);
	if (!af)
		return -EINVAL;

	if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
		return -EADDRNOTAVAIL;

	if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
		return -EADDRNOTAVAIL;

	/* Create an ASCONF chunk with SET_PRIMARY parameter */
	chunk = sctp_make_asconf_set_prim(asoc,
					  (union sctp_addr *)&prim.sspp_addr);
	if (!chunk)
		return -ENOMEM;

	err = sctp_send_asconf(asoc, chunk);

	pr_debug("%s: we set peer primary addr primitively\n", __func__);

	return err;
}

static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
					    unsigned int optlen)
{
	struct sctp_setadaptation adaptation;

	if (optlen != sizeof(struct sctp_setadaptation))
		return -EINVAL;
	if (copy_from_user(&adaptation, optval, optlen))
		return -EFAULT;

	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;

	return 0;
}

/*
 * 7.1.29.  Set or Get the default context (SCTP_CONTEXT)
 *
 * The context field in the sctp_sndrcvinfo structure is normally only
 * used when a failed message is retrieved holding the value that was
 * sent down on the actual send call.  This option allows the setting of
 * a default context on an association basis that will be received on
 * reading messages from the peer.  This is especially helpful in the
 * one-2-many model for an application to keep some reference to an
 * internal state machine that is processing messages on the
 * association.  Note that the setting of this value only affects
 * received messages from the peer and does not affect the value that is
 * saved with outbound messages.
 */
static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (optlen != sizeof(struct sctp_assoc_value))
		return -EINVAL;
	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		asoc->default_rcv_context = params.assoc_value;
	} else {
		sp->default_rcv_context = params.assoc_value;
	}

	return 0;
}

/*
 * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 *
 * This option will at a minimum specify if the implementation is doing
 * fragmented interleave.  Fragmented interleave, for a one to many
 * socket, is when subsequent calls to receive a message may return
 * parts of messages from different associations.  Some implementations
 * may allow you to turn this value on or off.  If so, when turned off,
 * no fragment interleave will occur (which will cause a head of line
 * blocking amongst multiple associations sharing the same one to many
 * socket).
 * When this option is turned on, then each receive call may
 * come from a different association (thus the user must receive data
 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
 * association each receive belongs to).
 *
 * This option takes a boolean value.  A non-zero value indicates that
 * fragmented interleave is on.  A value of zero indicates that
 * fragmented interleave is off.
 *
 * Note that it is important that an implementation that allows this
 * option to be turned on have it off by default.  Otherwise an unaware
 * application using the one to many model may become confused and act
 * incorrectly.
 */
static int sctp_setsockopt_fragment_interleave(struct sock *sk,
					       char __user *optval,
					       unsigned int optlen)
{
	int val;

	if (optlen != sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;

	return 0;
}

/*
 * 8.1.21.  Set or Get the SCTP Partial Delivery Point
 *       (SCTP_PARTIAL_DELIVERY_POINT)
 *
 * This option will set or get the SCTP partial delivery point.  This
 * point is the size of a message where the partial delivery API will be
 * invoked to help free up rwnd space for the peer.  Setting this to a
 * lower value will cause partial deliveries to happen more often.  The
 * calls argument is an integer that sets or gets the partial delivery
 * point.  Note also that the call will fail if the user attempts to set
 * this value larger than the socket receive buffer size.
 *
 * Note that any single message having a length smaller than or equal to
 * the SCTP partial delivery point will be delivered in one single read
 * call as long as the user provided buffer is large enough to hold the
 * message.
 */
static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
						  char __user *optval,
						  unsigned int optlen)
{
	u32 val;

	if (optlen != sizeof(u32))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	/* Note: We double the receive buffer from what the user sets
	 * it to be, also initial rwnd is based on rcvbuf/2.
	 */
	if (val > (sk->sk_rcvbuf >> 1))
		return -EINVAL;

	sctp_sk(sk)->pd_point = val;

	return 0;
}

/*
 * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
 *
 * This option will allow a user to change the maximum burst of packets
 * that can be emitted by this association.  Note that the default value
 * is 4, and some implementations may restrict this setting so that it
 * can only be lowered.
 *
 * NOTE: This text doesn't seem right.  Do this on a socket basis with
 * future associations inheriting the socket value.
 */
static int sctp_setsockopt_maxburst(struct sock *sk, char __user *optval,
				    unsigned int optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;
	int val;
	int assoc_id = 0;

	if (optlen == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in max_burst socket option deprecated.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&val, optval, optlen))
			return -EFAULT;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;
		val = params.assoc_value;
		assoc_id = params.assoc_id;
	} else
		return -EINVAL;

	sp = sctp_sk(sk);

	if (assoc_id != 0) {
		asoc = sctp_id2assoc(sk, assoc_id);
		if (!asoc)
			return -EINVAL;
		asoc->max_burst = val;
	} else
		sp->max_burst = val;

	return 0;
}

/*
 * 7.1.18.
Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) * * This set option adds a chunk type that the user is requesting to be * received only in an authenticated way. Changes to the list of chunks * will only effect future associations on the socket. */ static int sctp_setsockopt_auth_chunk(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunk val; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authchunk)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; switch (val.sauth_chunk) { case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: case SCTP_CID_SHUTDOWN_COMPLETE: case SCTP_CID_AUTH: return -EINVAL; } /* add this chunk id to the endpoint */ return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); } /* * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) * * This option gets or sets the list of HMAC algorithms that the local * endpoint requires the peer to use. */ static int sctp_setsockopt_hmac_ident(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_hmacalgo *hmacs; u32 idents; int err; if (!ep->auth_enable) return -EACCES; if (optlen < sizeof(struct sctp_hmacalgo)) return -EINVAL; hmacs = memdup_user(optval, optlen); if (IS_ERR(hmacs)) return PTR_ERR(hmacs); idents = hmacs->shmac_num_idents; if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { err = -EINVAL; goto out; } err = sctp_auth_ep_set_hmacs(ep, hmacs); out: kfree(hmacs); return err; } /* * 7.1.20. Set a shared key (SCTP_AUTH_KEY) * * This option will set a shared secret key which is used to build an * association shared key. */ static int sctp_setsockopt_auth_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkey *authkey; struct sctp_association *asoc; int ret; if (!ep->auth_enable) return -EACCES; if (optlen <= sizeof(struct sctp_authkey)) return -EINVAL; authkey = memdup_user(optval, optlen); if (IS_ERR(authkey)) return PTR_ERR(authkey); if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { ret = -EINVAL; goto out; } asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { ret = -EINVAL; goto out; } ret = sctp_auth_set_key(ep, asoc, authkey); out: kzfree(authkey); return ret; } /* * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) * * This option will get or set the active shared key to be used to build * the association shared key. */ static int sctp_setsockopt_active_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); } /* * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) * * This set option will delete a shared secret key from use. 
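 *
 * A minimal userspace sketch, for illustration only (the socket "sd",
 * the key number, and the zero assoc_id are assumptions, and error
 * handling is omitted):
 *
 *	struct sctp_authkeyid key;
 *
 *	memset(&key, 0, sizeof(key));
 *	key.scact_assoc_id = 0;
 *	key.scact_keynumber = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_DELETE_KEY,
 *		   &key, sizeof(key));
 *
 * On a one-to-many socket a zero scact_assoc_id deletes the key from
 * the endpoint; the key number 1 shown here is purely hypothetical.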
*/ static int sctp_setsockopt_del_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); } /* * 8.1.23 SCTP_AUTO_ASCONF * * This option will enable or disable the use of the automatic generation of * ASCONF chunks to add and delete addresses to an existing association. Note * that this option has two caveats namely: a) it only affects sockets that * are bound to all addresses available to the SCTP stack, and b) the system * administrator may have an overriding control that turns the ASCONF feature * off no matter what setting the socket option may have. * This option expects an integer boolean flag, where a non-zero value turns on * the option, and a zero value turns off the option. * Note. In this implementation, socket operation overrides default parameter * being set by sysctl as well as FreeBSD implementation */ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (!sctp_is_ep_boundall(sk) && val) return -EINVAL; if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) return 0; spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); if (val == 0 && sp->do_auto_asconf) { list_del(&sp->auto_asconf_list); sp->do_auto_asconf = 0; } else if (val && !sp->do_auto_asconf) { list_add_tail(&sp->auto_asconf_list, &sock_net(sk)->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; } spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to alter the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_setsockopt_paddr_thresholds(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (optlen < sizeof(struct sctp_paddrthlds)) return -EINVAL; if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, sizeof(struct sctp_paddrthlds))) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { if (val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } if (val.spt_pathmaxrxt) asoc->pathmaxrxt = val.spt_pathmaxrxt; asoc->pf_retrans = val.spt_pathpfthld; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; if (val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } return 0; } static int sctp_setsockopt_recvrcvinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvrcvinfo = (val == 0) ? 
0 : 1; return 0; } static int sctp_setsockopt_recvnxtinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; return 0; } static int sctp_setsockopt_pr_supported(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(params)) goto out; if (copy_from_user(&params, optval, optlen)) { retval = -EFAULT; goto out; } asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { asoc->prsctp_enable = !!params.assoc_value; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); sp->ep->prsctp_enable = !!params.assoc_value; } else { goto out; } retval = 0; out: return retval; } static int sctp_setsockopt_default_prinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_default_prinfo info; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(info)) goto out; if (copy_from_user(&info, optval, sizeof(info))) { retval = -EFAULT; goto out; } if (info.pr_policy & ~SCTP_PR_SCTP_MASK) goto out; if (info.pr_policy == SCTP_PR_SCTP_NONE) info.pr_value = 0; asoc = sctp_id2assoc(sk, info.pr_assoc_id); if (asoc) { SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); asoc->default_timetolive = info.pr_value; } else if (!info.pr_assoc_id) { struct sctp_sock *sp = sctp_sk(sk); SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); sp->default_timetolive = info.pr_value; } else { goto out; } retval = 0; out: return retval; } /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve * socket options. Socket options are used to change the default * behavior of sockets calls. They are described in Section 7. * * The syntax is: * * ret = getsockopt(int sd, int level, int optname, void __user *optval, * int __user *optlen); * ret = setsockopt(int sd, int level, int optname, const void __user *optval, * int optlen); * * sd - the socket descript. * level - set to IPPROTO_SCTP for all SCTP options. * optname - the option name. * optval - the buffer to store the value of the option. * optlen - the size of the buffer. */ static int sctp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { int retval = 0; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... I am not convinced that the * semantics of setsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->setsockopt(sk, level, optname, optval, optlen); goto out_nounlock; } lock_sock(sk); switch (optname) { case SCTP_SOCKOPT_BINDX_ADD: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_ADD_ADDR); break; case SCTP_SOCKOPT_BINDX_REM: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_REM_ADDR); break; case SCTP_SOCKOPT_CONNECTX_OLD: /* 'optlen' is the size of the addresses buffer. 
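		 * These bindx/connectx options carry a packed array of
		 * sockaddrs, possibly of mixed address families, so
		 * 'optlen' is a total buffer length rather than the size
		 * of one fixed structure.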
*/ retval = sctp_setsockopt_connectx_old(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_SOCKOPT_CONNECTX: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_connectx(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); break; case SCTP_EVENTS: retval = sctp_setsockopt_events(sk, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_setsockopt_autoclose(sk, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); break; case SCTP_INITMSG: retval = sctp_setsockopt_initmsg(sk, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_setsockopt_default_send_param(sk, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_setsockopt_primary_addr(sk, optval, optlen); break; case SCTP_SET_PEER_PRIMARY_ADDR: retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); break; case SCTP_NODELAY: retval = sctp_setsockopt_nodelay(sk, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_setsockopt_associnfo(sk, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_setsockopt_mappedv4(sk, optval, optlen); break; case SCTP_MAXSEG: retval = sctp_setsockopt_maxseg(sk, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_setsockopt_context(sk, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_setsockopt_maxburst(sk, optval, optlen); break; case SCTP_AUTH_CHUNK: retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); break; case SCTP_HMAC_IDENT: retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); break; case SCTP_AUTH_KEY: retval = sctp_setsockopt_auth_key(sk, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_setsockopt_active_key(sk, optval, optlen); break; case SCTP_AUTH_DELETE_KEY: retval = sctp_setsockopt_del_key(sk, optval, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); break; case SCTP_RECVRCVINFO: retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); break; case SCTP_RECVNXTINFO: retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); break; case SCTP_PR_SUPPORTED: retval = sctp_setsockopt_pr_supported(sk, optval, optlen); break; case SCTP_DEFAULT_PRINFO: retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); break; default: retval = -ENOPROTOOPT; break; } release_sock(sk); out_nounlock: return retval; } /* API 3.1.6 connect() - UDP Style Syntax * * An application may use the connect() call in the UDP model to initiate an * association without sending data. * * The syntax is: * * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); * * sd: the socket descriptor to have a new association added to. 
* * nam: the address structure (either struct sockaddr_in or struct * sockaddr_in6 defined in RFC2553 [7]). * * len: the size of the address. */ static int sctp_connect(struct sock *sk, struct sockaddr *addr, int addr_len) { int err = 0; struct sctp_af *af; lock_sock(sk); pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Validate addr_len before calling common connect/connectx routine. */ af = sctp_get_af_specific(addr->sa_family); if (!af || addr_len < af->sockaddr_len) { err = -EINVAL; } else { /* Pass correct addr len to common routine (so it knows there * is only one address being passed. */ err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); } release_sock(sk); return err; } /* FIXME: Write comments. */ static int sctp_disconnect(struct sock *sk, int flags) { return -EOPNOTSUPP; /* STUB */ } /* 4.1.4 accept() - TCP Style Syntax * * Applications use accept() call to remove an established SCTP * association from the accept queue of the endpoint. A new socket * descriptor will be returned from accept() to represent the newly * formed association. */ static struct sock *sctp_accept(struct sock *sk, int flags, int *err) { struct sctp_sock *sp; struct sctp_endpoint *ep; struct sock *newsk = NULL; struct sctp_association *asoc; long timeo; int error = 0; lock_sock(sk); sp = sctp_sk(sk); ep = sp->ep; if (!sctp_style(sk, TCP)) { error = -EOPNOTSUPP; goto out; } if (!sctp_sstate(sk, LISTENING)) { error = -EINVAL; goto out; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); error = sctp_wait_for_accept(sk, timeo); if (error) goto out; /* We treat the list of associations on the endpoint as the accept * queue and pick the first association on the list. */ asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); newsk = sp->pf->create_accept_sk(sk, asoc); if (!newsk) { error = -ENOMEM; goto out; } /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); out: release_sock(sk); *err = error; return newsk; } /* The SCTP ioctl handler. */ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) { int rc = -ENOTCONN; lock_sock(sk); /* * SEQPACKET-style sockets in LISTENING state are valid, for * SCTP, so only discard TCP-style sockets in LISTENING state. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) goto out; switch (cmd) { case SIOCINQ: { struct sk_buff *skb; unsigned int amount = 0; skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { /* * We will only return the amount of this packet since * that is all that will be read. */ amount = skb->len; } rc = put_user(amount, (int __user *)arg); break; } default: rc = -ENOIOCTLCMD; break; } out: release_sock(sk); return rc; } /* This is the function which gets called during socket creation to * initialized the SCTP-specific portion of the sock. * The sock structure should already be zero-filled memory. */ static int sctp_init_sock(struct sock *sk) { struct net *net = sock_net(sk); struct sctp_sock *sp; pr_debug("%s: sk:%p\n", __func__, sk); sp = sctp_sk(sk); /* Initialize the SCTP per socket area. */ switch (sk->sk_type) { case SOCK_SEQPACKET: sp->type = SCTP_SOCKET_UDP; break; case SOCK_STREAM: sp->type = SCTP_SOCKET_TCP; break; default: return -ESOCKTNOSUPPORT; } sk->sk_gso_type = SKB_GSO_SCTP; /* Initialize default send parameters. These parameters can be * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 
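	 *
	 * For illustration only, a userspace application might later
	 * override these defaults with something like the following
	 * sketch ("sd" and the field values are assumptions, error
	 * handling omitted):
	 *
	 *	struct sctp_sndrcvinfo def;
	 *
	 *	memset(&def, 0, sizeof(def));
	 *	def.sinfo_stream = 1;
	 *	def.sinfo_ppid = htonl(42);
	 *	setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
	 *		   &def, sizeof(def));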
*/ sp->default_stream = 0; sp->default_ppid = 0; sp->default_flags = 0; sp->default_context = 0; sp->default_timetolive = 0; sp->default_rcv_context = 0; sp->max_burst = net->sctp.max_burst; sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; /* Initialize default setup parameters. These parameters * can be modified with the SCTP_INITMSG socket option or * overridden by the SCTP_INIT CMSG. */ sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; sp->initmsg.sinit_max_instreams = sctp_max_instreams; sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; /* Initialize default RTO related parameters. These parameters can * be modified for with the SCTP_RTOINFO socket option. */ sp->rtoinfo.srto_initial = net->sctp.rto_initial; sp->rtoinfo.srto_max = net->sctp.rto_max; sp->rtoinfo.srto_min = net->sctp.rto_min; /* Initialize default association related parameters. These parameters * can be modified with the SCTP_ASSOCINFO socket option. */ sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; sp->assocparams.sasoc_number_peer_destinations = 0; sp->assocparams.sasoc_peer_rwnd = 0; sp->assocparams.sasoc_local_rwnd = 0; sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; /* Initialize default event subscriptions. By default, all the * options are off. */ memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); /* Default Peer Address Parameters. These defaults can * be modified via SCTP_PEER_ADDR_PARAMS */ sp->hbinterval = net->sctp.hb_interval; sp->pathmaxrxt = net->sctp.max_retrans_path; sp->pathmtu = 0; /* allow default discovery */ sp->sackdelay = net->sctp.sack_timeout; sp->sackfreq = 2; sp->param_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE | SPP_SACKDELAY_ENABLE; /* If enabled no SCTP message fragmentation will be performed. * Configure through SCTP_DISABLE_FRAGMENTS socket option. */ sp->disable_fragments = 0; /* Enable Nagle algorithm by default. */ sp->nodelay = 0; sp->recvrcvinfo = 0; sp->recvnxtinfo = 0; /* Enable by default. */ sp->v4mapped = 1; /* Auto-close idle associations after the configured * number of seconds. A value of 0 disables this * feature. Configure through the SCTP_AUTOCLOSE socket option, * for UDP-style sockets only. */ sp->autoclose = 0; /* User specified fragmentation limit. */ sp->user_frag = 0; sp->adaptation_ind = 0; sp->pf = sctp_get_pf_specific(sk->sk_family); /* Control variables for partial data delivery. */ atomic_set(&sp->pd_mode, 0); skb_queue_head_init(&sp->pd_lobby); sp->frag_interleave = 0; /* Create a per socket endpoint structure. Even if we * change the data structure relationships, this may still * be useful for storing pre-connect address information. */ sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); if (!sp->ep) return -ENOMEM; sp->hmac = NULL; sk->sk_destruct = sctp_destruct_sock; SCTP_DBG_OBJCNT_INC(sock); local_bh_disable(); percpu_counter_inc(&sctp_sockets_allocated); sock_prot_inuse_add(net, sk->sk_prot, 1); /* Nothing can fail after this block, otherwise * sctp_destroy_sock() will be called without addr_wq_lock held */ if (net->sctp.default_auto_asconf) { spin_lock(&sock_net(sk)->sctp.addr_wq_lock); list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); } else { sp->do_auto_asconf = 0; } local_bh_enable(); return 0; } /* Cleanup any SCTP per socket resources. 
Must be called with * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true */ static void sctp_destroy_sock(struct sock *sk) { struct sctp_sock *sp; pr_debug("%s: sk:%p\n", __func__, sk); /* Release our hold on the endpoint. */ sp = sctp_sk(sk); /* This could happen during socket init, thus we bail out * early, since the rest of the below is not setup either. */ if (sp->ep == NULL) return; if (sp->do_auto_asconf) { sp->do_auto_asconf = 0; list_del(&sp->auto_asconf_list); } sctp_endpoint_free(sp->ep); local_bh_disable(); percpu_counter_dec(&sctp_sockets_allocated); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); } /* Triggered when there are no references on the socket anymore */ static void sctp_destruct_sock(struct sock *sk) { struct sctp_sock *sp = sctp_sk(sk); /* Free up the HMAC transform. */ crypto_free_shash(sp->hmac); inet_sock_destruct(sk); } /* API 4.1.7 shutdown() - TCP Style Syntax * int shutdown(int socket, int how); * * sd - the socket descriptor of the association to be closed. * how - Specifies the type of shutdown. The values are * as follows: * SHUT_RD * Disables further receive operations. No SCTP * protocol action is taken. * SHUT_WR * Disables further send operations, and initiates * the SCTP shutdown sequence. * SHUT_RDWR * Disables further send and receive operations * and initiates the SCTP shutdown sequence. */ static void sctp_shutdown(struct sock *sk, int how) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; if (!sctp_style(sk, TCP)) return; ep = sctp_sk(sk)->ep; if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { struct sctp_association *asoc; sk->sk_state = SCTP_SS_CLOSING; asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); sctp_primitive_SHUTDOWN(net, asoc, NULL); } } int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, struct sctp_info *info) { struct sctp_transport *prim; struct list_head *pos; int mask; memset(info, 0, sizeof(*info)); if (!asoc) { struct sctp_sock *sp = sctp_sk(sk); info->sctpi_s_autoclose = sp->autoclose; info->sctpi_s_adaptation_ind = sp->adaptation_ind; info->sctpi_s_pd_point = sp->pd_point; info->sctpi_s_nodelay = sp->nodelay; info->sctpi_s_disable_fragments = sp->disable_fragments; info->sctpi_s_v4mapped = sp->v4mapped; info->sctpi_s_frag_interleave = sp->frag_interleave; info->sctpi_s_type = sp->type; return 0; } info->sctpi_tag = asoc->c.my_vtag; info->sctpi_state = asoc->state; info->sctpi_rwnd = asoc->a_rwnd; info->sctpi_unackdata = asoc->unack_data; info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); info->sctpi_instrms = asoc->c.sinit_max_instreams; info->sctpi_outstrms = asoc->c.sinit_num_ostreams; list_for_each(pos, &asoc->base.inqueue.in_chunk_list) info->sctpi_inqueue++; list_for_each(pos, &asoc->outqueue.out_chunk_list) info->sctpi_outqueue++; info->sctpi_overall_error = asoc->overall_error_count; info->sctpi_max_burst = asoc->max_burst; info->sctpi_maxseg = asoc->frag_point; info->sctpi_peer_rwnd = asoc->peer.rwnd; info->sctpi_peer_tag = asoc->c.peer_vtag; mask = asoc->peer.ecn_capable << 1; mask = (mask | asoc->peer.ipv4_address) << 1; mask = (mask | asoc->peer.ipv6_address) << 1; mask = (mask | asoc->peer.hostname_address) << 1; mask = (mask | asoc->peer.asconf_capable) << 1; mask = (mask | asoc->peer.prsctp_capable) << 1; mask = (mask | asoc->peer.auth_capable); info->sctpi_peer_capable = mask; mask = asoc->peer.sack_needed << 1; mask = (mask | asoc->peer.sack_generation) << 1; mask = (mask | asoc->peer.zero_window_announced); 
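	/* Resulting bit layout, most significant bit first: sack_needed,
	 * sack_generation, zero_window_announced.
	 */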
info->sctpi_peer_sack = mask; info->sctpi_isacks = asoc->stats.isacks; info->sctpi_osacks = asoc->stats.osacks; info->sctpi_opackets = asoc->stats.opackets; info->sctpi_ipackets = asoc->stats.ipackets; info->sctpi_rtxchunks = asoc->stats.rtxchunks; info->sctpi_outofseqtsns = asoc->stats.outofseqtsns; info->sctpi_idupchunks = asoc->stats.idupchunks; info->sctpi_gapcnt = asoc->stats.gapcnt; info->sctpi_ouodchunks = asoc->stats.ouodchunks; info->sctpi_iuodchunks = asoc->stats.iuodchunks; info->sctpi_oodchunks = asoc->stats.oodchunks; info->sctpi_iodchunks = asoc->stats.iodchunks; info->sctpi_octrlchunks = asoc->stats.octrlchunks; info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; prim = asoc->peer.primary_path; memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(struct sockaddr_storage)); info->sctpi_p_state = prim->state; info->sctpi_p_cwnd = prim->cwnd; info->sctpi_p_srtt = prim->srtt; info->sctpi_p_rto = jiffies_to_msecs(prim->rto); info->sctpi_p_hbinterval = prim->hbinterval; info->sctpi_p_pathmaxrxt = prim->pathmaxrxt; info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay); info->sctpi_p_ssthresh = prim->ssthresh; info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked; info->sctpi_p_flight_size = prim->flight_size; info->sctpi_p_error = prim->error_count; return 0; } EXPORT_SYMBOL_GPL(sctp_get_sctp_info); /* use callback to avoid exporting the core structure */ int sctp_transport_walk_start(struct rhashtable_iter *iter) { int err; rhltable_walk_enter(&sctp_transport_hashtable, iter); err = rhashtable_walk_start(iter); if (err && err != -EAGAIN) { rhashtable_walk_stop(iter); rhashtable_walk_exit(iter); return err; } return 0; } void sctp_transport_walk_stop(struct rhashtable_iter *iter) { rhashtable_walk_stop(iter); rhashtable_walk_exit(iter); } struct sctp_transport *sctp_transport_get_next(struct net *net, struct rhashtable_iter *iter) { struct sctp_transport *t; t = rhashtable_walk_next(iter); for (; t; t = rhashtable_walk_next(iter)) { if (IS_ERR(t)) { if (PTR_ERR(t) == -EAGAIN) continue; break; } if (net_eq(sock_net(t->asoc->base.sk), net) && t->asoc->peer.primary_path == t) break; } return t; } struct sctp_transport *sctp_transport_get_idx(struct net *net, struct rhashtable_iter *iter, int pos) { void *obj = SEQ_START_TOKEN; while (pos && (obj = sctp_transport_get_next(net, iter)) && !IS_ERR(obj)) pos--; return obj; } int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p) { int err = 0; int hash = 0; struct sctp_ep_common *epb; struct sctp_hashbucket *head; for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; hash++, head++) { read_lock(&head->lock); sctp_for_each_hentry(epb, &head->chain) { err = cb(sctp_ep(epb), p); if (err) break; } read_unlock(&head->lock); } return err; } EXPORT_SYMBOL_GPL(sctp_for_each_endpoint); int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr, void *p) { struct sctp_transport *transport; int err; rcu_read_lock(); transport = sctp_addrs_lookup_transport(net, laddr, paddr); rcu_read_unlock(); if (!transport) return -ENOENT; err = cb(transport, p); sctp_transport_put(transport); return err; } EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), struct net *net, int pos, void *p) { struct rhashtable_iter hti; void *obj; int err; err = sctp_transport_walk_start(&hti); if (err) return err; sctp_transport_get_idx(net, &hti, pos); obj = 
sctp_transport_get_next(net, &hti); for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { struct sctp_transport *transport = obj; if (!sctp_transport_hold(transport)) continue; err = cb(transport, p); sctp_transport_put(transport); if (err) break; } sctp_transport_walk_stop(&hti); return err; } EXPORT_SYMBOL_GPL(sctp_for_each_transport); /* 7.2.1 Association Status (SCTP_STATUS) * Applications can retrieve current status information about an * association, including association state, peer receiver window size, * number of unacked data chunks, and number of data chunks pending * receipt. This information is read-only. */ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_status status; struct sctp_association *asoc = NULL; struct sctp_transport *transport; sctp_assoc_t associd; int retval = 0; if (len < sizeof(status)) { retval = -EINVAL; goto out; } len = sizeof(status); if (copy_from_user(&status, optval, len)) { retval = -EFAULT; goto out; } associd = status.sstat_assoc_id; asoc = sctp_id2assoc(sk, associd); if (!asoc) { retval = -EINVAL; goto out; } transport = asoc->peer.primary_path; status.sstat_assoc_id = sctp_assoc2id(asoc); status.sstat_state = sctp_assoc_to_state(asoc); status.sstat_rwnd = asoc->peer.rwnd; status.sstat_unackdata = asoc->unack_data; status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); status.sstat_instrms = asoc->c.sinit_max_instreams; status.sstat_outstrms = asoc->c.sinit_num_ostreams; status.sstat_fragmentation_point = asoc->frag_point; status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, transport->af_specific->sockaddr_len); /* Map ipv4 address into v4-mapped-on-v6 address. */ sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), (union sctp_addr *)&status.sstat_primary.spinfo_address); status.sstat_primary.spinfo_state = transport->state; status.sstat_primary.spinfo_cwnd = transport->cwnd; status.sstat_primary.spinfo_srtt = transport->srtt; status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); status.sstat_primary.spinfo_mtu = transport->pathmtu; if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) status.sstat_primary.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", __func__, len, status.sstat_state, status.sstat_rwnd, status.sstat_assoc_id); if (copy_to_user(optval, &status, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) * * Applications can retrieve information about a specific peer address * of an association, including its reachability state, congestion * window, and retransmission timer values. This information is * read-only. 
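 *
 * A hypothetical query, for illustration only ("sd", "assoc_id",
 * "peer" and "peer_len" are assumptions, and error handling is
 * omitted):
 *
 *	struct sctp_paddrinfo pinfo;
 *	socklen_t len = sizeof(pinfo);
 *
 *	memset(&pinfo, 0, sizeof(pinfo));
 *	pinfo.spinfo_assoc_id = assoc_id;
 *	memcpy(&pinfo.spinfo_address, &peer, peer_len);
 *	getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
 *		   &pinfo, &len);
 *
 * On success the spinfo_state, spinfo_cwnd, spinfo_srtt, spinfo_rto
 * and spinfo_mtu fields describe the queried peer address.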
 */
static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
					  char __user *optval,
					  int __user *optlen)
{
	struct sctp_paddrinfo pinfo;
	struct sctp_transport *transport;
	int retval = 0;

	if (len < sizeof(pinfo)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(pinfo);
	if (copy_from_user(&pinfo, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
					   pinfo.spinfo_assoc_id);
	if (!transport)
		return -EINVAL;

	pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
	pinfo.spinfo_state = transport->state;
	pinfo.spinfo_cwnd = transport->cwnd;
	pinfo.spinfo_srtt = transport->srtt;
	pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
	pinfo.spinfo_mtu = transport->pathmtu;

	if (pinfo.spinfo_state == SCTP_UNKNOWN)
		pinfo.spinfo_state = SCTP_ACTIVE;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, &pinfo, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}

/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag.  If enabled no SCTP message
 * fragmentation will be performed.  Instead, if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * an error will be indicated to the user.
 */
static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
					     char __user *optval,
					     int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->disable_fragments == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
 *
 * This socket option is used to specify various notifications and
 * ancillary data the user wishes to receive.
 */
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
				  int __user *optlen)
{
	if (len == 0)
		return -EINVAL;
	if (len > sizeof(struct sctp_event_subscribe))
		len = sizeof(struct sctp_event_subscribe);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
		return -EFAULT;
	return 0;
}

/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval,
				     int __user *optlen)
{
	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (len < sizeof(int))
		return -EINVAL;
	len = sizeof(int);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
		return -EFAULT;
	return 0;
}

/* Helper routine to branch off an association to a new socket. */
int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
	struct sctp_association *asoc = sctp_id2assoc(sk, id);
	struct sctp_sock *sp = sctp_sk(sk);
	struct socket *sock;
	int err = 0;

	if (!asoc)
		return -EINVAL;

	/* An association cannot be branched off from an already peeled-off
	 * socket, nor is this supported for tcp style sockets.
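	 *
	 * Userspace typically reaches this helper through the
	 * SCTP_SOCKOPT_PEELOFF getsockopt below; a sketch, for
	 * illustration only ("sd" and "assoc_id" are assumptions,
	 * error handling omitted):
	 *
	 *	sctp_peeloff_arg_t arg = { .associd = assoc_id, .sd = 0 };
	 *	socklen_t len = sizeof(arg);
	 *
	 *	getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF,
	 *		   &arg, &len);
	 *
	 * On success arg.sd holds the descriptor of the new one-to-one
	 * socket.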
*/ if (!sctp_style(sk, UDP)) return -EINVAL; /* Create a new socket. */ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); if (err < 0) return err; sctp_copy_sock(sock->sk, sk, asoc); /* Make peeled-off sockets more like 1-1 accepted sockets. * Set the daddr and initialize id to something more random */ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); *sockp = sock; return err; } EXPORT_SYMBOL(sctp_do_peeloff); static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) { sctp_peeloff_arg_t peeloff; struct socket *newsock; struct file *newfile; int retval = 0; if (len < sizeof(sctp_peeloff_arg_t)) return -EINVAL; len = sizeof(sctp_peeloff_arg_t); if (copy_from_user(&peeloff, optval, len)) return -EFAULT; retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); if (retval < 0) goto out; /* Map the socket to an unused fd that can be returned to the user. */ retval = get_unused_fd_flags(0); if (retval < 0) { sock_release(newsock); goto out; } newfile = sock_alloc_file(newsock, 0, NULL); if (IS_ERR(newfile)) { put_unused_fd(retval); sock_release(newsock); return PTR_ERR(newfile); } pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, retval); /* Return the fd mapped to the new socket. */ if (put_user(len, optlen)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } peeloff.sd = retval; if (copy_to_user(optval, &peeloff, len)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } fd_install(retval, newfile); out: return retval; } /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) * * Applications can enable or disable heartbeats for any peer address of * an association, modify an address's heartbeat interval, force a * heartbeat to be sent immediately, and adjust the address's maximum * number of retransmissions sent before an address is considered * unreachable. The following structure is used to access and modify an * address's parameters: * * struct sctp_paddrparams { * sctp_assoc_t spp_assoc_id; * struct sockaddr_storage spp_address; * uint32_t spp_hbinterval; * uint16_t spp_pathmaxrxt; * uint32_t spp_pathmtu; * uint32_t spp_sackdelay; * uint32_t spp_flags; * }; * * spp_assoc_id - (one-to-many style socket) This is filled in the * application, and identifies the association for * this query. * spp_address - This specifies which address is of interest. * spp_hbinterval - This contains the value of the heartbeat interval, * in milliseconds. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmaxrxt - This contains the maximum number of * retransmissions before this address shall be * considered unreachable. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmtu - When Path MTU discovery is disabled the value * specified here will be the "fixed" path mtu. * Note that if the spp_address field is empty * then all associations on this address will * have this fixed path mtu set upon them. * * spp_sackdelay - When delayed sack is enabled, this value specifies * the number of milliseconds that sacks will be delayed * for. This value will apply to all addresses of an * association if the spp_address field is empty. 
Note also that if delayed sack is enabled and this value is set to 0,
 * no change is made to the last recorded delayed sack timer value.
 *
 * spp_flags        - These flags are used to control various features
 *                    on an association.  The flag field may contain
 *                    zero or more of the following options.
 *
 *                    SPP_HB_ENABLE  - Enable heartbeats on the
 *                    specified address.  Note that if the address
 *                    field is empty all addresses for the association
 *                    have heartbeats enabled upon them.
 *
 *                    SPP_HB_DISABLE - Disable heartbeats on the
 *                    specified address.  Note that if the address
 *                    field is empty all addresses for the association
 *                    will have their heartbeats disabled.  Note also
 *                    that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                    mutually exclusive; only one of these two should
 *                    be specified.  Enabling both fields will have
 *                    undetermined results.
 *
 *                    SPP_HB_DEMAND - Request a user initiated heartbeat
 *                    to be made immediately.
 *
 *                    SPP_PMTUD_ENABLE - This field will enable PMTU
 *                    discovery upon the specified address.  Note that
 *                    if the address field is empty then all addresses
 *                    on the association are affected.
 *
 *                    SPP_PMTUD_DISABLE - This field will disable PMTU
 *                    discovery upon the specified address.  Note that
 *                    if the address field is empty then all addresses
 *                    on the association are affected.  Note also that
 *                    SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                    exclusive.  Enabling both will have undetermined
 *                    results.
 *
 *                    SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                    on delayed sack.  The time specified in spp_sackdelay
 *                    is used to specify the sack delay for this address.  Note
 *                    that if spp_address is empty then all addresses will
 *                    enable delayed sack and take on the sack delay
 *                    value specified in spp_sackdelay.
 *                    SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                    off delayed sack.  If the spp_address field is blank then
 *                    delayed sack is disabled for the entire association.  Note
 *                    also that this field is mutually exclusive to
 *                    SPP_SACKDELAY_ENABLE; setting both will have undefined
 *                    results.
 */
static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
					    char __user *optval,
					    int __user *optlen)
{
	struct sctp_paddrparams  params;
	struct sctp_transport   *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_paddrparams))
		return -EINVAL;

	len = sizeof(struct sctp_paddrparams);
	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	/* If an address other than INADDR_ANY is specified, and
	 * no transport is found, then the request is invalid.
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans) {
			pr_debug("%s: failed no transport\n", __func__);
			return -EINVAL;
		}
	}

	/* Get association, if assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
		pr_debug("%s: failed no association\n", __func__);
		return -EINVAL;
	}

	if (trans) {
		/* Fetch transport values. */
		params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
		params.spp_pathmtu    = trans->pathmtu;
		params.spp_pathmaxrxt = trans->pathmaxrxt;
		params.spp_sackdelay  = jiffies_to_msecs(trans->sackdelay);

		/* draft-11 doesn't say what to return in spp_flags */
		params.spp_flags      = trans->param_flags;
	} else if (asoc) {
		/* Fetch association values.
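		 * Heartbeat interval and sack delay are stored internally
		 * in jiffies and are converted back to milliseconds for
		 * userspace here.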
		 */
		params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
		params.spp_pathmtu    = asoc->pathmtu;
		params.spp_pathmaxrxt = asoc->pathmaxrxt;
		params.spp_sackdelay  = jiffies_to_msecs(asoc->sackdelay);

		/* draft-11 doesn't say what to return in spp_flags */
		params.spp_flags      = asoc->param_flags;
	} else {
		/* Fetch socket values. */
		params.spp_hbinterval = sp->hbinterval;
		params.spp_pathmtu    = sp->pathmtu;
		params.spp_sackdelay  = sp->sackdelay;
		params.spp_pathmaxrxt = sp->pathmaxrxt;

		/* draft-11 doesn't say what to return in spp_flags */
		params.spp_flags      = sp->param_flags;
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.23.  Get or set delayed ack timer (SCTP_DELAYED_SACK)
 *
 * This option will affect the way delayed acks are performed.  This
 * option allows you to get or set the delayed ack time, in
 * milliseconds.  It also allows changing the delayed ack frequency.
 * Changing the frequency to 1 disables the delayed sack algorithm.  If
 * the assoc_id is 0, then this sets or gets the endpoint's default
 * values.  If the assoc_id field is non-zero, then the set or get
 * affects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t sack_assoc_id;
 *     uint32_t     sack_delay;
 *     uint32_t     sack_freq;
 * };
 *
 * sack_assoc_id - This parameter indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoint's default value is changed (affecting future
 *    associations only).
 *
 * sack_delay - This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq - This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2; setting this
 *    value to 1 disables the delayed sack algorithm.
 */
static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_sack_info    params;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	if (len >= sizeof(struct sctp_sack_info)) {
		len = sizeof(struct sctp_sack_info);

		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else if (len == sizeof(struct sctp_assoc_value)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
				    "Use struct sctp_sack_info instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	/* Get association, if sack_assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
	if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		/* Fetch association values. */
		if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay = jiffies_to_msecs(asoc->sackdelay);
			params.sack_freq = asoc->sackfreq;
		} else {
			params.sack_delay = 0;
			params.sack_freq = 1;
		}
	} else {
		/* Fetch socket values.
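		 * Unlike the per-association case, sp->sackdelay is kept
		 * in milliseconds already, so no jiffies conversion is
		 * needed here.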
		 */
		if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
			params.sack_delay = sp->sackdelay;
			params.sack_freq = sp->sackfreq;
		} else {
			params.sack_delay = 0;
			params.sack_freq = 1;
		}
	}

	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	if (put_user(len, optlen))
		return -EFAULT;

	return 0;
}

/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization. The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are affected
 * by the change). With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
static int sctp_getsockopt_initmsg(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	if (len < sizeof(struct sctp_initmsg))
		return -EINVAL;
	len = sizeof(struct sctp_initmsg);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
		return -EFAULT;
	return 0;
}


static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_transport *from;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
	size_t space_left;
	int bytes_copied;

	if (len < sizeof(struct sctp_getaddrs))
		return -EINVAL;

	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
		return -EFAULT;

	/* For UDP-style sockets, id specifies the association to query. */
	asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
	if (!asoc)
		return -EINVAL;

	to = optval + offsetof(struct sctp_getaddrs, addrs);
	space_left = len - offsetof(struct sctp_getaddrs, addrs);

	list_for_each_entry(from, &asoc->peer.transport_addr_list,
			    transports) {
		memcpy(&temp, &from->ipaddr, sizeof(temp));
		addrlen = sctp_get_pf_specific(sk->sk_family)
			  ->addr_to_user(sp, &temp);
		if (space_left < addrlen)
			return -ENOMEM;
		if (copy_to_user(to, &temp, addrlen))
			return -EFAULT;
		to += addrlen;
		cnt++;
		space_left -= addrlen;
	}

	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
		return -EFAULT;
	bytes_copied = ((char __user *)to) - optval;
	if (put_user(bytes_copied, optlen))
		return -EFAULT;

	return 0;
}

static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
			    size_t space_left, int *bytes_copied)
{
	struct sctp_sockaddr_entry *addr;
	union sctp_addr temp;
	int cnt = 0;
	int addrlen;
	struct net *net = sock_net(sk);

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
		if (!addr->valid)
			continue;

		if ((PF_INET == sk->sk_family) &&
		    (AF_INET6 == addr->a.sa.sa_family))
			continue;
		if ((PF_INET6 == sk->sk_family) &&
		    inet_v6_ipv6only(sk) &&
		    (AF_INET == addr->a.sa.sa_family))
			continue;
		memcpy(&temp, &addr->a, sizeof(temp));
		if (!temp.v4.sin_port)
			temp.v4.sin_port = htons(port);

		addrlen = sctp_get_pf_specific(sk->sk_family)
			  ->addr_to_user(sctp_sk(sk), &temp);

		if (space_left < addrlen) {
			cnt = -ENOMEM;
			break;
		}
		memcpy(to, &temp, addrlen);

		to += addrlen;
		cnt++;
		space_left -= addrlen;
		*bytes_copied += addrlen;
	}
	rcu_read_unlock();

	return cnt;
}


static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	struct sctp_bind_addr *bp;
	struct sctp_association *asoc;
	int cnt = 0;
	struct sctp_getaddrs getaddrs;
	struct sctp_sockaddr_entry *addr;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;
int err = 0; size_t space_left; int bytes_copied = 0; void *addrs; void *buf; if (len < sizeof(struct sctp_getaddrs)) return -EINVAL; if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) return -EFAULT; /* * For UDP-style sockets, id specifies the association to query. * If the id field is set to the value '0' then the locally bound * addresses are returned without regard to any particular * association. */ if (0 == getaddrs.assoc_id) { bp = &sctp_sk(sk)->ep->base.bind_addr; } else { asoc = sctp_id2assoc(sk, getaddrs.assoc_id); if (!asoc) return -EINVAL; bp = &asoc->base.bind_addr; } to = optval + offsetof(struct sctp_getaddrs, addrs); space_left = len - offsetof(struct sctp_getaddrs, addrs); addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN); if (!addrs) return -ENOMEM; /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid * addresses from the global local address list. */ if (sctp_list_single_entry(&bp->address_list)) { addr = list_entry(bp->address_list.next, struct sctp_sockaddr_entry, list); if (sctp_is_any(sk, &addr->a)) { cnt = sctp_copy_laddrs(sk, bp->port, addrs, space_left, &bytes_copied); if (cnt < 0) { err = cnt; goto out; } goto copy_getaddrs; } } buf = addrs; /* Protection on the bound address list is not needed since * in the socket option context we hold a socket lock and * thus the bound address list can't change. */ list_for_each_entry(addr, &bp->address_list, list) { memcpy(&temp, &addr->a, sizeof(temp)); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sp, &temp); if (space_left < addrlen) { err = -ENOMEM; /*fixme: right error?*/ goto out; } memcpy(buf, &temp, addrlen); buf += addrlen; bytes_copied += addrlen; cnt++; space_left -= addrlen; } copy_getaddrs: if (copy_to_user(to, addrs, bytes_copied)) { err = -EFAULT; goto out; } if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { err = -EFAULT; goto out; } if (put_user(bytes_copied, optlen)) err = -EFAULT; out: kfree(addrs); return err; } /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) * * Requests that the local SCTP stack use the enclosed peer address as * the association primary. The enclosed address must be one of the * association peer's addresses. */ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_prim prim; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(struct sctp_prim)) return -EINVAL; len = sizeof(struct sctp_prim); if (copy_from_user(&prim, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); if (!asoc) return -EINVAL; if (!asoc->peer.primary_path) return -ENOTCONN; memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, asoc->peer.primary_path->af_specific->sockaddr_len); sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, (union sctp_addr *)&prim.ssp_addr); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &prim, len)) return -EFAULT; return 0; } /* * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) * * Requests that the local endpoint set the specified Adaptation Layer * Indication parameter for all future INIT and INIT-ACK exchanges. 
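 *
 * Purely illustrative sketch (not part of the API text above): a
 * userspace reader could fetch the indication roughly as follows,
 * where 'sd' is a hypothetical SCTP socket descriptor:
 *
 *	struct sctp_setadaptation adapt;
 *	socklen_t len = sizeof(adapt);
 *
 *	if (getsockopt(sd, SOL_SCTP, SCTP_ADAPTATION_LAYER,
 *		       &adapt, &len) == 0)
 *		printf("adaptation ind: 0x%x\n", adapt.ssb_adaptation_ind);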
 */
static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
					    char __user *optval,
					    int __user *optlen)
{
	struct sctp_setadaptation adaptation;

	if (len < sizeof(struct sctp_setadaptation))
		return -EINVAL;

	len = sizeof(struct sctp_setadaptation);

	adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &adaptation, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 * Applications that wish to use the sendto() system call may wish to
 * specify a default set of parameters that would normally be supplied
 * through the inclusion of ancillary data. This socket option allows
 * such an application to set the default sctp_sndrcvinfo structure.
 * The application that wishes to use this socket option simply passes
 * in to this call the sctp_sndrcvinfo structure defined in Section
 * 5.2.2. The input parameters accepted by this call include
 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
 * this call if the caller is using the UDP model.
 *
 * For getsockopt, it gets the default sctp_sndrcvinfo structure.
 */
static int sctp_getsockopt_default_send_param(struct sock *sk,
					      int len, char __user *optval,
					      int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndrcvinfo info;

	if (len < sizeof(info))
		return -EINVAL;

	len = sizeof(info);

	if (copy_from_user(&info, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;
	if (asoc) {
		info.sinfo_stream = asoc->default_stream;
		info.sinfo_flags = asoc->default_flags;
		info.sinfo_ppid = asoc->default_ppid;
		info.sinfo_context = asoc->default_context;
		info.sinfo_timetolive = asoc->default_timetolive;
	} else {
		info.sinfo_stream = sp->default_stream;
		info.sinfo_flags = sp->default_flags;
		info.sinfo_ppid = sp->default_ppid;
		info.sinfo_context = sp->default_context;
		info.sinfo_timetolive = sp->default_timetolive;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &info, len))
		return -EFAULT;

	return 0;
}

/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
 * (SCTP_DEFAULT_SNDINFO)
 */
static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndinfo info;

	if (len < sizeof(info))
		return -EINVAL;

	len = sizeof(info);

	if (copy_from_user(&info, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, info.snd_assoc_id);
	if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;
	if (asoc) {
		info.snd_sid = asoc->default_stream;
		info.snd_flags = asoc->default_flags;
		info.snd_ppid = asoc->default_ppid;
		info.snd_context = asoc->default_context;
	} else {
		info.snd_sid = sp->default_stream;
		info.snd_flags = sp->default_flags;
		info.snd_ppid = sp->default_ppid;
		info.snd_context = sp->default_context;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &info, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.5 SCTP_NODELAY
 *
 * Turn on/off any Nagle-like algorithm. This means that packets are
 * generally sent as soon as possible and no unnecessary delays are
 * introduced, at the cost of more packets in the network. Expects an
 * integer boolean flag.
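 *
 * Purely illustrative sketch (not part of the API text above), with
 * 'sd' a hypothetical SCTP socket descriptor:
 *
 *	int on = 1;
 *	socklen_t len = sizeof(on);
 *
 *	setsockopt(sd, SOL_SCTP, SCTP_NODELAY, &on, sizeof(on));
 *	getsockopt(sd, SOL_SCTP, SCTP_NODELAY, &on, &len);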
 */
static int sctp_getsockopt_nodelay(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->nodelay == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

/*
 * 7.1.1 SCTP_RTOINFO
 *
 * The protocol parameters used to initialize and bound retransmission
 * timeout (RTO) are tunable. The sctp_rtoinfo structure is used to
 * access and modify these parameters.
 * All parameters are time values, in milliseconds. A value of 0, when
 * modifying the parameters, indicates that the current value should not
 * be changed.
 */
static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
				   char __user *optval,
				   int __user *optlen)
{
	struct sctp_rtoinfo rtoinfo;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_rtoinfo))
		return -EINVAL;

	len = sizeof(struct sctp_rtoinfo);

	if (copy_from_user(&rtoinfo, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);

	if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Values corresponding to the specific association. */
	if (asoc) {
		rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
		rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
		rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
	} else {
		/* Values corresponding to the endpoint. */
		struct sctp_sock *sp = sctp_sk(sk);

		rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
		rtoinfo.srto_max = sp->rtoinfo.srto_max;
		rtoinfo.srto_min = sp->rtoinfo.srto_min;
	}

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &rtoinfo, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.2 SCTP_ASSOCINFO
 *
 * This option is used to tune the maximum retransmission attempts
 * of the association.
 * Returns an error if the new association retransmission value is
 * greater than the sum of the retransmission value of the peer.
 * See [SCTP] for more information.
 */
static int sctp_getsockopt_associnfo(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_assocparams assocparams;
	struct sctp_association *asoc;
	struct list_head *pos;
	int cnt = 0;

	if (len < sizeof(struct sctp_assocparams))
		return -EINVAL;

	len = sizeof(struct sctp_assocparams);

	if (copy_from_user(&assocparams, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);

	if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	/* Values corresponding to the specific association */
	if (asoc) {
		assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
		assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
		assocparams.sasoc_local_rwnd = asoc->a_rwnd;
		assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);

		list_for_each(pos, &asoc->peer.transport_addr_list) {
			cnt++;
		}

		assocparams.sasoc_number_peer_destinations = cnt;
	} else {
		/* Values corresponding to the endpoint */
		struct sctp_sock *sp = sctp_sk(sk);

		assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
		assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
		assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
		assocparams.sasoc_cookie_life =
					sp->assocparams.sasoc_cookie_life;
		assocparams.sasoc_number_peer_destinations =
			sp->assocparams.sasoc_number_peer_destinations;
	}
	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &assocparams, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
 *
 * This socket option is a boolean flag which turns on or off mapped V4
 * addresses. If this option is turned on and the socket is type
 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
 * If this option is turned off, then no mapping will be done of V4
 * addresses and a user will receive both PF_INET6 and PF_INET type
 * addresses on the socket.
 */
static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = sp->v4mapped;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/*
 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
 * (chapter and verse is quoted at sctp_setsockopt_context())
 */
static int sctp_getsockopt_context(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_assoc_value))
		return -EINVAL;

	len = sizeof(struct sctp_assoc_value);

	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		params.assoc_value = asoc->default_rcv_context;
	} else {
		params.assoc_value = sp->default_rcv_context;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &params, len))
		return -EFAULT;

	return 0;
}

/*
 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
 * This option will get or set the maximum size to put in any outgoing
 * SCTP DATA chunk. If a message is larger than this size it will be
 * fragmented by SCTP into the specified size. Note that the underlying
 * SCTP implementation may fragment into smaller sized chunks when the
 * PMTU of the underlying association is smaller than the value set by
 * the user. The default value for this option is '0' which indicates
 * the user is NOT limiting fragmentation and only the PMTU will affect
 * SCTP's choice of DATA chunk size. Note also that values set larger
 * than the maximum size of an IP datagram will effectively let SCTP
 * control fragmentation (i.e. the same as setting this option to 0).
 *
 * The following structure is used to access and modify this parameter:
 *
 * struct sctp_assoc_value {
 *     sctp_assoc_t            assoc_id;
 *     uint32_t                assoc_value;
 * };
 *
 * assoc_id: This parameter is ignored for one-to-one style sockets.
 *    For one-to-many style sockets this parameter indicates which
 *    association the user is performing an action upon. Note that if
 *    this field's value is zero then the endpoint's default value is
 *    changed (affecting future associations only).
 * assoc_value: This parameter specifies the maximum size in bytes.
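 *
 * Purely illustrative sketch (not part of the API text above): reading
 * the endpoint-wide limit (assoc_id == 0) on a hypothetical socket
 * 'sd' might look like:
 *
 *	struct sctp_assoc_value av = { .assoc_id = 0 };
 *	socklen_t len = sizeof(av);
 *
 *	if (getsockopt(sd, SOL_SCTP, SCTP_MAXSEG, &av, &len) == 0)
 *		printf("maxseg: %u bytes\n", av.assoc_value);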
*/ static int sctp_getsockopt_maxseg(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, sizeof(params))) return -EFAULT; } else return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) params.assoc_value = asoc->frag_point; else params.assoc_value = sctp_sk(sk)->user_frag; if (put_user(len, optlen)) return -EFAULT; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } /* * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) */ static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sctp_sk(sk)->frag_interleave; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.25. Set or Get the sctp partial delivery point * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) */ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, char __user *optval, int __user *optlen) { u32 val; if (len < sizeof(u32)) return -EINVAL; len = sizeof(u32); val = sctp_sk(sk)->pd_point; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.28. 
 *	   Set or Get the maximum burst (SCTP_MAX_BURST)
 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
 */
static int sctp_getsockopt_maxburst(struct sock *sk, int len,
				    char __user *optval,
				    int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (len == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in max_burst socket option.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		params.assoc_id = 0;
	} else if (len >= sizeof(struct sctp_assoc_value)) {
		len = sizeof(struct sctp_assoc_value);
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		params.assoc_value = asoc->max_burst;
	} else
		params.assoc_value = sp->max_burst;

	if (len == sizeof(int)) {
		if (copy_to_user(optval, &params.assoc_value, len))
			return -EFAULT;
	} else {
		if (copy_to_user(optval, &params, len))
			return -EFAULT;
	}

	return 0;

}

static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_hmacalgo  __user *p = (void __user *)optval;
	struct sctp_hmac_algo_param *hmacs;
	__u16 data_len = 0;
	u32 num_idents;
	int i;

	if (!ep->auth_enable)
		return -EACCES;

	hmacs = ep->auth_hmacs_list;
	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);

	if (len < sizeof(struct sctp_hmacalgo) + data_len)
		return -EINVAL;

	len = sizeof(struct sctp_hmacalgo) + data_len;
	num_idents = data_len / sizeof(u16);

	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_idents, &p->shmac_num_idents))
		return -EFAULT;
	for (i = 0; i < num_idents; i++) {
		__u16 hmacid = ntohs(hmacs->hmac_ids[i]);

		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
			return -EFAULT;
	}
	return 0;
}

static int sctp_getsockopt_active_key(struct sock *sk, int len,
				      char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		val.scact_keynumber = asoc->active_key_id;
	else
		val.scact_keynumber = ep->active_key_id;

	len = sizeof(struct sctp_authkeyid);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
					    char __user *optval,
					    int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32    num_chunks = 0;
	char __user *to;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc)
		return -EINVAL;

	ch = asoc->peer.peer_chunks;
	if (!ch)
		goto num;

	/* See if the user provided enough room for all the data; the
	 * reply carries the sctp_authchunks header followed by the
	 * chunk list, as in sctp_getsockopt_local_auth_chunks().
	 */
	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
	if (len < sizeof(struct sctp_authchunks) + num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;
	return 0;
}

static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
					     char __user *optval,
					     int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32    num_chunks = 0;
	char __user *to;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
	else
		ch = ep->auth_chunk_list;

	if (!ch)
		goto num;

	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
	if (len < sizeof(struct sctp_authchunks) + num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;

	return 0;
}

/*
 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
 * This option gets the current number of associations that are attached
 * to a one-to-many style socket. The option value is a uint32_t.
 */
static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
					char __user *optval, int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	u32 val = 0;

	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		val++;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/*
 * 8.1.23 SCTP_AUTO_ASCONF
 * See the corresponding setsockopt entry for a description.
 */
static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
				       char __user *optval, int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

/*
 * 8.2.6. Get the Current Identifiers of Associations
 *        (SCTP_GET_ASSOC_ID_LIST)
 *
 * This option gets the current list of SCTP association identifiers of
 * the SCTP associations handled by a one-to-many style socket.
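 *
 * Purely illustrative sketch (not part of the API text above): because
 * the required buffer size depends on the association count, a caller
 * would typically query SCTP_GET_ASSOC_NUMBER first and size the
 * buffer from that ('sd' is a hypothetical one-to-many socket):
 *
 *	struct sctp_assoc_ids *ids;
 *	uint32_t num;
 *	socklen_t len = sizeof(num);
 *
 *	getsockopt(sd, SOL_SCTP, SCTP_GET_ASSOC_NUMBER, &num, &len);
 *	len = sizeof(*ids) + num * sizeof(sctp_assoc_t);
 *	ids = malloc(len);
 *	getsockopt(sd, SOL_SCTP, SCTP_GET_ASSOC_ID_LIST, ids, &len);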
*/ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_assoc_ids *ids; u32 num = 0; if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(struct sctp_assoc_ids)) return -EINVAL; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { num++; } if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) return -EINVAL; len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; ids = kmalloc(len, GFP_USER | __GFP_NOWARN); if (unlikely(!ids)) return -ENOMEM; ids->gaids_number_of_ids = num; num = 0; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { ids->gaids_assoc_id[num++] = asoc->assoc_id; } if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { kfree(ids); return -EFAULT; } kfree(ids); return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to fetch the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_getsockopt_paddr_thresholds(struct sock *sk, char __user *optval, int len, int __user *optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (len < sizeof(struct sctp_paddrthlds)) return -EINVAL; len = sizeof(struct sctp_paddrthlds); if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; val.spt_pathpfthld = asoc->pf_retrans; val.spt_pathmaxrxt = asoc->pathmaxrxt; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; val.spt_pathmaxrxt = trans->pathmaxrxt; val.spt_pathpfthld = trans->pf_retrans; } if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * SCTP_GET_ASSOC_STATS * * This option retrieves local per endpoint statistics. It is modeled * after OpenSolaris' implementation */ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_stats sas; struct sctp_association *asoc = NULL; /* User must provide at least the assoc id */ if (len < sizeof(sctp_assoc_t)) return -EINVAL; /* Allow the struct to grow and fill in as much as possible */ len = min_t(size_t, len, sizeof(sas)); if (copy_from_user(&sas, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, sas.sas_assoc_id); if (!asoc) return -EINVAL; sas.sas_rtxchunks = asoc->stats.rtxchunks; sas.sas_gapcnt = asoc->stats.gapcnt; sas.sas_outofseqtsns = asoc->stats.outofseqtsns; sas.sas_osacks = asoc->stats.osacks; sas.sas_isacks = asoc->stats.isacks; sas.sas_octrlchunks = asoc->stats.octrlchunks; sas.sas_ictrlchunks = asoc->stats.ictrlchunks; sas.sas_oodchunks = asoc->stats.oodchunks; sas.sas_iodchunks = asoc->stats.iodchunks; sas.sas_ouodchunks = asoc->stats.ouodchunks; sas.sas_iuodchunks = asoc->stats.iuodchunks; sas.sas_idupchunks = asoc->stats.idupchunks; sas.sas_opackets = asoc->stats.opackets; sas.sas_ipackets = asoc->stats.ipackets; /* New high max rto observed, will return 0 if not a single * RTO update took place. 
obs_rto_ipaddr will be bogus * in such a case */ sas.sas_maxrto = asoc->stats.max_obs_rto; memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, sizeof(struct sockaddr_storage)); /* Mark beginning of a new observation period */ asoc->stats.max_obs_rto = asoc->rto_min; if (put_user(len, optlen)) return -EFAULT; pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); if (copy_to_user(optval, &sas, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvrcvinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvnxtinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_pr_supported(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EFAULT; if (len < sizeof(params)) { retval = -EINVAL; goto out; } len = sizeof(params); if (copy_from_user(&params, optval, len)) goto out; asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { params.assoc_value = asoc->prsctp_enable; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); params.assoc_value = sp->ep->prsctp_enable; } else { retval = -EINVAL; goto out; } if (put_user(len, optlen)) goto out; if (copy_to_user(optval, &params, len)) goto out; retval = 0; out: return retval; } static int sctp_getsockopt_default_prinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_default_prinfo info; struct sctp_association *asoc; int retval = -EFAULT; if (len < sizeof(info)) { retval = -EINVAL; goto out; } len = sizeof(info); if (copy_from_user(&info, optval, len)) goto out; asoc = sctp_id2assoc(sk, info.pr_assoc_id); if (asoc) { info.pr_policy = SCTP_PR_POLICY(asoc->default_flags); info.pr_value = asoc->default_timetolive; } else if (!info.pr_assoc_id) { struct sctp_sock *sp = sctp_sk(sk); info.pr_policy = SCTP_PR_POLICY(sp->default_flags); info.pr_value = sp->default_timetolive; } else { retval = -EINVAL; goto out; } if (put_user(len, optlen)) goto out; if (copy_to_user(optval, &info, len)) goto out; retval = 0; out: return retval; } static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_prstatus params; struct sctp_association *asoc; int policy; int retval = -EINVAL; if (len < sizeof(params)) goto out; len = sizeof(params); if (copy_from_user(&params, optval, len)) { retval = -EFAULT; goto out; } policy = params.sprstat_policy; if (policy & ~SCTP_PR_SCTP_MASK) goto out; asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); if (!asoc) goto out; if (policy == SCTP_PR_SCTP_NONE) { params.sprstat_abandoned_unsent = 0; params.sprstat_abandoned_sent = 0; for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { params.sprstat_abandoned_unsent += asoc->abandoned_unsent[policy]; params.sprstat_abandoned_sent += asoc->abandoned_sent[policy]; } } else { params.sprstat_abandoned_unsent = asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)]; params.sprstat_abandoned_sent = asoc->abandoned_sent[__SCTP_PR_INDEX(policy)]; } if 
(put_user(len, optlen)) { retval = -EFAULT; goto out; } if (copy_to_user(optval, &params, len)) { retval = -EFAULT; goto out; } retval = 0; out: return retval; } static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int retval = 0; int len; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... I am not convinced that the * semantics of getsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->getsockopt(sk, level, optname, optval, optlen); return retval; } if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; lock_sock(sk); switch (optname) { case SCTP_STATUS: retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_getsockopt_disable_fragments(sk, len, optval, optlen); break; case SCTP_EVENTS: retval = sctp_getsockopt_events(sk, len, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); break; case SCTP_SOCKOPT_PEELOFF: retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_getsockopt_peer_addr_params(sk, len, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_getsockopt_delayed_ack(sk, len, optval, optlen); break; case SCTP_INITMSG: retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); break; case SCTP_GET_PEER_ADDRS: retval = sctp_getsockopt_peer_addrs(sk, len, optval, optlen); break; case SCTP_GET_LOCAL_ADDRS: retval = sctp_getsockopt_local_addrs(sk, len, optval, optlen); break; case SCTP_SOCKOPT_CONNECTX3: retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_getsockopt_default_send_param(sk, len, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_getsockopt_default_sndinfo(sk, len, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); break; case SCTP_NODELAY: retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); break; case SCTP_MAXSEG: retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); break; case SCTP_GET_PEER_ADDR_INFO: retval = sctp_getsockopt_peer_addr_info(sk, len, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_getsockopt_adaptation_layer(sk, len, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_getsockopt_context(sk, len, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_getsockopt_fragment_interleave(sk, len, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); break; case SCTP_AUTH_KEY: case SCTP_AUTH_CHUNK: case SCTP_AUTH_DELETE_KEY: retval = -EOPNOTSUPP; break; case SCTP_HMAC_IDENT: retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_getsockopt_active_key(sk, len, optval, optlen); break; case 
	     SCTP_PEER_AUTH_CHUNKS:
		retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
							  optlen);
		break;
	case SCTP_LOCAL_AUTH_CHUNKS:
		retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
							   optlen);
		break;
	case SCTP_GET_ASSOC_NUMBER:
		retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
		break;
	case SCTP_GET_ASSOC_ID_LIST:
		retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
		break;
	case SCTP_AUTO_ASCONF:
		retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
		break;
	case SCTP_PEER_ADDR_THLDS:
		retval = sctp_getsockopt_paddr_thresholds(sk, optval, len,
							  optlen);
		break;
	case SCTP_GET_ASSOC_STATS:
		retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
		break;
	case SCTP_RECVRCVINFO:
		retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
		break;
	case SCTP_RECVNXTINFO:
		retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
		break;
	case SCTP_PR_SUPPORTED:
		retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen);
		break;
	case SCTP_DEFAULT_PRINFO:
		retval = sctp_getsockopt_default_prinfo(sk, len, optval,
							optlen);
		break;
	case SCTP_PR_ASSOC_STATUS:
		retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
							optlen);
		break;
	default:
		retval = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return retval;
}

static int sctp_hash(struct sock *sk)
{
	/* STUB */
	return 0;
}

static void sctp_unhash(struct sock *sk)
{
	/* STUB */
}

/* Check if port is acceptable. Possibly find first available port.
 *
 * The port hash table is contained in the 'global' SCTP protocol storage
 * (returned by struct sctp_protocol *sctp_get_protocol()); it is an array
 * of 4096 lists (sctp_bind_hashbucket). Each list (the list number is
 * the port number hashed out, so as you would expect from a hash
 * function, all the ports in a given list have such a number that
 * hashes out to the same list number; you were expecting that, right?);
 * so each list has a set of ports, with a link to the socket (struct
 * sock) that uses it, the port number and a fastreuse flag (FIXME: NPI
 * ipg).
 */
static struct sctp_bind_bucket *sctp_bucket_create(
	struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);

static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
	struct sctp_bind_hashbucket *head; /* hash list */
	struct sctp_bind_bucket *pp;
	unsigned short snum;
	int ret;

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: begins, snum:%d\n", __func__, snum);

	local_bh_disable();

	if (snum == 0) {
		/* Search for an available port. */
		int low, high, remaining, index;
		unsigned int rover;
		struct net *net = sock_net(sk);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;
		rover = prandom_u32() % remaining + low;

		do {
			rover++;
			if ((rover < low) || (rover > high))
				rover = low;
			if (inet_is_local_reserved_port(net, rover))
				continue;
			index = sctp_phashfn(sock_net(sk), rover);
			head = &sctp_port_hashtable[index];
			spin_lock(&head->lock);
			sctp_for_each_hentry(pp, &head->chain)
				if ((pp->port == rover) &&
				    net_eq(sock_net(sk), pp->net))
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
		} while (--remaining > 0);

		/* Exhausted local port range during search? */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use. HEAD (the port
		 * hash table list entry) is non-NULL and we hold its
		 * mutex.
		 */
		snum = rover;
	} else {
		/* We are given a specific port number; we verify
		 * that it is not being used. If it is used, we will
		 * exhaust the search in the hash list corresponding
		 * to the port number (snum) - we detect that with the
		 * port iterator, pp being NULL.
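		 *
		 * (Concretely: the lookup below walks the chain at
		 * sctp_port_hashtable[sctp_phashfn(net, snum)], looking
		 * for a bucket whose (net, port) pair matches ours.)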
		 */
		head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
		spin_lock(&head->lock);
		sctp_for_each_hentry(pp, &head->chain) {
			if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
				goto pp_found;
		}
	}
	pp = NULL;
	goto pp_not_found;
pp_found:
	if (!hlist_empty(&pp->owner)) {
		/* We had a port hash table hit - there is an
		 * available port (pp != NULL) and it is being
		 * used by another socket (pp->owner not empty); that other
		 * socket is going to be sk2.
		 */
		int reuse = sk->sk_reuse;
		struct sock *sk2;

		pr_debug("%s: found a possible match\n", __func__);

		if (pp->fastreuse && sk->sk_reuse &&
		    sk->sk_state != SCTP_SS_LISTENING)
			goto success;

		/* Run through the list of sockets bound to the port
		 * (pp->port) [via the pointers bind_next and
		 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
		 * we get the endpoint they describe and run through
		 * the endpoint's list of IP (v4 or v6) addresses,
		 * comparing each of the addresses with the address of
		 * the socket sk. If we find a match, then that means
		 * that this port/socket (sk) combination is already
		 * in an endpoint.
		 */
		sk_for_each_bound(sk2, &pp->owner) {
			struct sctp_endpoint *ep2;
			ep2 = sctp_sk(sk2)->ep;

			if (sk == sk2 ||
			    (reuse && sk2->sk_reuse &&
			     sk2->sk_state != SCTP_SS_LISTENING))
				continue;

			if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
						 sctp_sk(sk2), sctp_sk(sk))) {
				ret = (long)sk2;
				goto fail_unlock;
			}
		}

		pr_debug("%s: found a match\n", __func__);
	}
pp_not_found:
	/* If there was a hash table miss, create a new port.  */
	ret = 1;
	if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
		goto fail_unlock;

	/* In either case (hit or miss), make sure fastreuse is 1 only
	 * if sk->sk_reuse is too (that is, if the caller requested
	 * SO_REUSEADDR on this socket -sk-).
	 */
	if (hlist_empty(&pp->owner)) {
		if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
			pp->fastreuse = 1;
		else
			pp->fastreuse = 0;
	} else if (pp->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
		pp->fastreuse = 0;

	/* We are set, so fill up all the data in the hash table
	 * entry, tie the socket list information with the rest of the
	 * sockets FIXME: Blurry, NPI (ipg).
	 */
success:
	if (!sctp_sk(sk)->bind_hash) {
		inet_sk(sk)->inet_num = snum;
		sk_add_bind_node(sk, &pp->owner);
		sctp_sk(sk)->bind_hash = pp;
	}
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);

fail:
	local_bh_enable();
	return ret;
}

/* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
 * port is requested.
 */
static int sctp_get_port(struct sock *sk, unsigned short snum)
{
	union sctp_addr addr;
	struct sctp_af *af = sctp_sk(sk)->pf->af;

	/* Set up a dummy address struct from the sk. */
	af->from_sk(&addr, sk);
	addr.v4.sin_port = htons(snum);

	/* Note: sk->sk_num gets filled in if ephemeral port request. */
	return !!sctp_get_port_local(sk, &addr);
}

/*
 * Move a socket to LISTENING state.
 */
static int sctp_listen_start(struct sock *sk, int backlog)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct crypto_shash *tfm = NULL;
	char alg[32];

	/* Allocate HMAC for generating cookie.
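	 * The crypto API transform name is built as "hmac(<alg>)",
	 * e.g. "hmac(sha1)" when sp->sctp_hmac_alg is "sha1".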
	 */
	if (!sp->hmac && sp->sctp_hmac_alg) {
		sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
		tfm = crypto_alloc_shash(alg, 0, 0);
		if (IS_ERR(tfm)) {
			net_info_ratelimited("failed to load transform for %s: %ld\n",
					     sp->sctp_hmac_alg, PTR_ERR(tfm));
			return -ENOSYS;
		}
		sctp_sk(sk)->hmac = tfm;
	}

	/*
	 * If a bind() or sctp_bindx() is not called prior to a listen()
	 * call that allows new associations to be accepted, the system
	 * picks an ephemeral port and will choose an address set equivalent
	 * to binding with a wildcard address.
	 *
	 * This is not currently spelled out in the SCTP sockets
	 * extensions draft, but follows the practice as seen in TCP
	 * sockets.
	 */
	sk->sk_state = SCTP_SS_LISTENING;
	if (!ep->base.bind_addr.port) {
		if (sctp_autobind(sk))
			return -EAGAIN;
	} else {
		if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
			sk->sk_state = SCTP_SS_CLOSED;
			return -EADDRINUSE;
		}
	}

	sk->sk_max_ack_backlog = backlog;
	sctp_hash_endpoint(ep);
	return 0;
}

/*
 * 4.1.3 / 5.1.3 listen()
 *
 * By default, new associations are not accepted for UDP style sockets.
 * An application uses listen() to mark a socket as being able to
 * accept new associations.
 *
 * On TCP style sockets, applications use listen() to ready the SCTP
 * endpoint for accepting inbound associations.
 *
 * On both types of endpoints a backlog of '0' disables listening.
 *
 * Move a socket to LISTENING state.
 */
int sctp_inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	int err = -EINVAL;

	if (unlikely(backlog < 0))
		return err;

	lock_sock(sk);

	/* Peeled-off sockets are not allowed to listen().  */
	if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
		goto out;

	if (sock->state != SS_UNCONNECTED)
		goto out;

	/* If backlog is zero, disable listening. */
	if (!backlog) {
		if (sctp_sstate(sk, CLOSED))
			goto out;

		err = 0;
		sctp_unhash_endpoint(ep);
		sk->sk_state = SCTP_SS_CLOSED;
		if (sk->sk_reuse)
			sctp_sk(sk)->bind_hash->fastreuse = 1;
		goto out;
	}

	/* If we are already listening, just update the backlog */
	if (sctp_sstate(sk, LISTENING))
		sk->sk_max_ack_backlog = backlog;
	else {
		err = sctp_listen_start(sk, backlog);
		if (err)
			goto out;
	}

	err = 0;
out:
	release_sock(sk);
	return err;
}

/*
 * This function is modeled on the current datagram_poll() and
 * tcp_poll(). Note that, based on these implementations, we don't
 * lock the socket in this function, even though it seems that,
 * ideally, locking or some other mechanisms can be used to ensure
 * the integrity of the counters (sndbuf and wmem_alloc) used
 * in this place. We assume that we don't need locks either until proven
 * otherwise.
 *
 * Another thing to note is that we include the Async I/O support
 * here, again, by modeling the current TCP/UDP code. We don't have
 * a good way to test with it yet.
 */
unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct sctp_sock *sp = sctp_sk(sk);
	unsigned int mask;

	poll_wait(file, sk_sleep(sk), wait);

	sock_rps_record_flow(sk);

	/* A TCP-style listening socket becomes readable when the accept queue
	 * is not empty.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		return (!list_empty(&sp->ep->asocs)) ?
			(POLLIN | POLLRDNORM) : 0;

	mask = 0;

	/* Are there any exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* Is it readable?
	 * Reconsider this code with TCP-style support.
	 */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* The association is either gone or not ready. */
	if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
		return mask;

	/* Is it writable? */
	if (sctp_writeable(sk)) {
		mask |= POLLOUT | POLLWRNORM;
	} else {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		/*
		 * Since the socket is not locked, the buffer
		 * might be made available after the writeable check and
		 * before the bit is set. This could cause a lost I/O
		 * signal. tcp_poll() has a race breaker for this race
		 * condition. Based on their implementation, we put
		 * in the following code to cover it as well.
		 */
		if (sctp_writeable(sk))
			mask |= POLLOUT | POLLWRNORM;
	}
	return mask;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

static struct sctp_bind_bucket *sctp_bucket_create(
	struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
{
	struct sctp_bind_bucket *pp;

	pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
	if (pp) {
		SCTP_DBG_OBJCNT_INC(bind_bucket);
		pp->port = snum;
		pp->fastreuse = 0;
		INIT_HLIST_HEAD(&pp->owner);
		pp->net = net;
		hlist_add_head(&pp->node, &head->chain);
	}
	return pp;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
	if (pp && hlist_empty(&pp->owner)) {
		__hlist_del(&pp->node);
		kmem_cache_free(sctp_bucket_cachep, pp);
		SCTP_DBG_OBJCNT_DEC(bind_bucket);
	}
}

/* Release this socket's reference to a local port.  */
static inline void __sctp_put_port(struct sock *sk)
{
	struct sctp_bind_hashbucket *head =
		&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
						  inet_sk(sk)->inet_num)];
	struct sctp_bind_bucket *pp;

	spin_lock(&head->lock);
	pp = sctp_sk(sk)->bind_hash;
	__sk_del_bind_node(sk);
	sctp_sk(sk)->bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	sctp_bucket_destroy(pp);
	spin_unlock(&head->lock);
}

void sctp_put_port(struct sock *sk)
{
	local_bh_disable();
	__sctp_put_port(sk);
	local_bh_enable();
}

/*
 * The system picks an ephemeral port and chooses an address set equivalent
 * to binding with a wildcard address.
 * One of those addresses will be the primary address for the association.
 * This automatically enables the multihoming capability of SCTP.
 */
static int sctp_autobind(struct sock *sk)
{
	union sctp_addr autoaddr;
	struct sctp_af *af;
	__be16 port;

	/* Initialize a local sockaddr structure to INADDR_ANY. */
	af = sctp_sk(sk)->pf->af;

	port = htons(inet_sk(sk)->inet_num);
	af->inaddr_any(&autoaddr, port);

	return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
}

/* Parse out IPPROTO_SCTP CMSG headers.  Perform only minimal validation.
 *
 * From RFC 2292
 * 4.2 The cmsghdr Structure *
 *
 * When ancillary data is sent or received, any number of ancillary data
 * objects can be specified by the msg_control and msg_controllen members of
 * the msghdr structure, because each object is preceded by
 * a cmsghdr structure defining the object's length (the cmsg_len member).
 * Historically Berkeley-derived implementations have passed only one object
 * at a time, but this API allows multiple objects to be
 * passed in a single call to sendmsg() or recvmsg().  The following example
 * shows two ancillary data objects in a control buffer.
 *
 * |<--------------------------- msg_controllen -------------------------->|
 * |                                                                       |
 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
 * |                                   |                                   |
 * |<---------- cmsg_len ---------->|  |<--------- cmsg_len ----------->|  |
 * |<--------- CMSG_LEN() --------->|  |<-------- CMSG_LEN() ---------->|  |
 * |                                |  |                                |  |
 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 * |cmsg_|cmsg_|cmsg_|XX|           |XX|cmsg_|cmsg_|cmsg_|XX|           |XX|
 * |len  |level|type |XX|cmsg_data[]|XX|len  |level|type |XX|cmsg_data[]|XX|
 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
 *  ^
 *  |
 * msg_control
 * points here
 */
static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
{
	struct cmsghdr *cmsg;
	struct msghdr *my_msg = (struct msghdr *)msg;

	for_each_cmsghdr(cmsg, my_msg) {
		if (!CMSG_OK(my_msg, cmsg))
			return -EINVAL;

		/* Should we parse this header or ignore?  */
		if (cmsg->cmsg_level != IPPROTO_SCTP)
			continue;

		/* Strictly check lengths following example in SCM code.  */
		switch (cmsg->cmsg_type) {
		case SCTP_INIT:
			/* SCTP Socket API Extension
			 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
			 *
			 * This cmsghdr structure provides information for
			 * initializing new SCTP associations with sendmsg().
			 * The SCTP_INITMSG socket option uses this same data
			 * structure.  This structure is not used for
			 * recvmsg().
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ----------------------
			 * IPPROTO_SCTP  SCTP_INIT      struct sctp_initmsg
			 */
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
				return -EINVAL;

			cmsgs->init = CMSG_DATA(cmsg);
			break;

		case SCTP_SNDRCV:
			/* SCTP Socket API Extension
			 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
			 *
			 * This cmsghdr structure specifies SCTP options for
			 * sendmsg() and describes SCTP header information
			 * about a received message through recvmsg().
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ----------------------
			 * IPPROTO_SCTP  SCTP_SNDRCV    struct sctp_sndrcvinfo
			 */
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
				return -EINVAL;

			cmsgs->srinfo = CMSG_DATA(cmsg);

			if (cmsgs->srinfo->sinfo_flags &
			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
			      SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
			      SCTP_ABORT | SCTP_EOF))
				return -EINVAL;
			break;

		case SCTP_SNDINFO:
			/* SCTP Socket API Extension
			 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
			 *
			 * This cmsghdr structure specifies SCTP options for
			 * sendmsg(). This structure and SCTP_RCVINFO replaces
			 * SCTP_SNDRCV which has been deprecated.
			 *
			 * cmsg_level    cmsg_type      cmsg_data[]
			 * ------------  ------------   ---------------------
			 * IPPROTO_SCTP  SCTP_SNDINFO   struct sctp_sndinfo
			 */
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
				return -EINVAL;

			cmsgs->sinfo = CMSG_DATA(cmsg);

			if (cmsgs->sinfo->snd_flags &
			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
			      SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
			      SCTP_ABORT | SCTP_EOF))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Wait for a packet.
 * Note: This function is the same function as in core/datagram.c
 * with a few modifications to make lksctp work.
 */
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
	int error;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		goto ready;

	/* Socket shut down?
*/ if (sk->sk_shutdown & RCV_SHUTDOWN) goto out; /* Sequenced packets can come disconnected. If so we report the * problem. */ error = -ENOTCONN; /* Is there a good reason to think that we may receive some data? */ if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) goto out; /* Handle signals. */ if (signal_pending(current)) goto interrupted; /* Let another process have a go. Since we are going to sleep * anyway. Note: This may cause odd behaviors if the message * does not fit in the user's buffer, but this seems to be the * only way to honor MSG_DONTWAIT realistically. */ release_sock(sk); *timeo_p = schedule_timeout(*timeo_p); lock_sock(sk); ready: finish_wait(sk_sleep(sk), &wait); return 0; interrupted: error = sock_intr_errno(*timeo_p); out: finish_wait(sk_sleep(sk), &wait); *err = error; return error; } /* Receive a datagram. * Note: This is pretty much the same routine as in core/datagram.c * with a few changes to make lksctp work. */ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int noblock, int *err) { int error; struct sk_buff *skb; long timeo; timeo = sock_rcvtimeo(sk, noblock); pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, MAX_SCHEDULE_TIMEOUT); do { /* Again only user level code calls this function, * so nothing interrupt level * will suddenly eat the receive_queue. * * Look at current nfs client by the way... * However, this function was correct in any case. 8) */ if (flags & MSG_PEEK) { skb = skb_peek(&sk->sk_receive_queue); if (skb) atomic_inc(&skb->users); } else { skb = __skb_dequeue(&sk->sk_receive_queue); } if (skb) return skb; /* Caller is allowed not to check sk->sk_err before calling. */ error = sock_error(sk); if (error) goto no_packet; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk_can_busy_loop(sk) && sk_busy_loop(sk, noblock)) continue; /* User doesn't want to wait. */ error = -EAGAIN; if (!timeo) goto no_packet; } while (sctp_wait_for_packet(sk, err, &timeo) == 0); return NULL; no_packet: *err = error; return NULL; } /* If sndbuf has changed, wake up per association sndbuf waiters. */ static void __sctp_write_space(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; if (sctp_wspace(asoc) <= 0) return; if (waitqueue_active(&asoc->wait)) wake_up_interruptible(&asoc->wait); if (sctp_writeable(sk)) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq) { if (waitqueue_active(&wq->wait)) wake_up_interruptible(&wq->wait); /* Note that we try to include the Async I/O support * here by modeling from the current TCP/UDP code. * We have not tested with it yet. */ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } } static void sctp_wake_up_waiters(struct sock *sk, struct sctp_association *asoc) { struct sctp_association *tmp = asoc; /* We do accounting for the sndbuf space per association, * so we only need to wake our own association. */ if (asoc->ep->sndbuf_policy) return __sctp_write_space(asoc); /* If association goes down and is just flushing its * outq, then just normally notify others. */ if (asoc->base.dead) return sctp_write_space(sk); /* Accounting for the sndbuf space is per socket, so we * need to wake up others, try to be fair and in case of * other associations, let them have a go first instead * of just doing a sctp_write_space() call. 
* * Note that we reach sctp_wake_up_waiters() only when * associations free up queued chunks, thus we are under * lock and the list of associations on a socket is * guaranteed not to change. */ for (tmp = list_next_entry(tmp, asocs); 1; tmp = list_next_entry(tmp, asocs)) { /* Manually skip the head element. */ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) continue; /* Wake up association. */ __sctp_write_space(tmp); /* We've reached the end. */ if (tmp == asoc) break; } } /* Do accounting for the sndbuf space. * Decrement the used sndbuf space of the corresponding association by the * data size which was just transmitted(freed). */ static void sctp_wfree(struct sk_buff *skb) { struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); /* * This undoes what is done via sctp_set_owner_w and sk_mem_charge */ sk->sk_wmem_queued -= skb->truesize; sk_mem_uncharge(sk, skb->truesize); sock_wfree(skb); sctp_wake_up_waiters(sk, asoc); sctp_association_put(asoc); } /* Do accounting for the receive space on the socket. * Accounting for the association is done in ulpevent.c * We set this as a destructor for the cloned data skbs so that * accounting is done at the correct time. */ void sctp_sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct sctp_ulpevent *event = sctp_skb2event(skb); atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); /* * Mimic the behavior of sock_rfree */ sk_mem_uncharge(sk, event->rmem_len); } /* Helper function to wait for space in the sndbuf. */ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, size_t msg_len) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, *timeo_p, msg_len); /* Increment the association's refcnt. */ sctp_association_hold(asoc); /* Wait on the association specific sndbuf space. */ for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (msg_len <= sctp_wspace(asoc)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); if (sk != asoc->base.sk) goto do_error; lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. */ sctp_association_put(asoc); return err; do_error: err = -EPIPE; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EAGAIN; goto out; } void sctp_data_ready(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } /* If socket sndbuf has changed, wake up all per association waiters. */ void sctp_write_space(struct sock *sk) { struct sctp_association *asoc; /* Wake up the tasks in each wait queue. */ list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { __sctp_write_space(asoc); } } /* Is there any sndbuf space available on the socket? 
* * Note that sk_wmem_alloc is the sum of the send buffers on all of the * associations on the same socket. For a UDP-style socket with * multiple associations, it is possible for it to be "unwriteable" * prematurely. I assume that this is acceptable because * a premature "unwriteable" is better than an accidental "writeable" which * would cause an unwanted block under certain circumstances. For the 1-1 * UDP-style sockets or TCP-style sockets, this code should work. * - Daisy */ static int sctp_writeable(struct sock *sk) { int amt = 0; amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amt < 0) amt = 0; return amt; } /* Wait for an association to go into ESTABLISHED state. If timeout is 0, * returns immediately with EINPROGRESS. */ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); /* Increment the association's refcnt. */ sctp_association_hold(asoc); for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (sctp_state(asoc, ESTABLISHED)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. */ sctp_association_put(asoc); return err; do_error: if (asoc->init_err_counter + 1 > asoc->max_init_attempts) err = -ETIMEDOUT; else err = -ECONNREFUSED; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EINPROGRESS; goto out; } static int sctp_wait_for_accept(struct sock *sk, long timeo) { struct sctp_endpoint *ep; int err = 0; DEFINE_WAIT(wait); ep = sctp_sk(sk)->ep; for (;;) { prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&ep->asocs)) { release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); } err = -EINVAL; if (!sctp_sstate(sk, LISTENING)) break; err = 0; if (!list_empty(&ep->asocs)) break; err = sock_intr_errno(timeo); if (signal_pending(current)) break; err = -EAGAIN; if (!timeo) break; } finish_wait(sk_sleep(sk), &wait); return err; } static void sctp_wait_for_close(struct sock *sk, long timeout) { DEFINE_WAIT(wait); do { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&sctp_sk(sk)->ep->asocs)) break; release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } while (!signal_pending(current) && timeout); finish_wait(sk_sleep(sk), &wait); } static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) { struct sk_buff *frag; if (!skb->data_len) goto done; /* Don't forget the fragments. 
*/ skb_walk_frags(skb, frag) sctp_skb_set_owner_r_frag(frag, sk); done: sctp_skb_set_owner_r(skb, sk); } void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc) { struct inet_sock *inet = inet_sk(sk); struct inet_sock *newinet; newsk->sk_type = sk->sk_type; newsk->sk_bound_dev_if = sk->sk_bound_dev_if; newsk->sk_flags = sk->sk_flags; newsk->sk_tsflags = sk->sk_tsflags; newsk->sk_no_check_tx = sk->sk_no_check_tx; newsk->sk_no_check_rx = sk->sk_no_check_rx; newsk->sk_reuse = sk->sk_reuse; newsk->sk_shutdown = sk->sk_shutdown; newsk->sk_destruct = sctp_destruct_sock; newsk->sk_family = sk->sk_family; newsk->sk_protocol = IPPROTO_SCTP; newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; newsk->sk_sndbuf = sk->sk_sndbuf; newsk->sk_rcvbuf = sk->sk_rcvbuf; newsk->sk_lingertime = sk->sk_lingertime; newsk->sk_rcvtimeo = sk->sk_rcvtimeo; newsk->sk_sndtimeo = sk->sk_sndtimeo; newsk->sk_rxhash = sk->sk_rxhash; newinet = inet_sk(newsk); /* Initialize sk's sport, dport, rcv_saddr and daddr for * getsockname() and getpeername() */ newinet->inet_sport = inet->inet_sport; newinet->inet_saddr = inet->inet_saddr; newinet->inet_rcv_saddr = inet->inet_rcv_saddr; newinet->inet_dport = htons(asoc->peer.port); newinet->pmtudisc = inet->pmtudisc; newinet->inet_id = asoc->next_tsn ^ jiffies; newinet->uc_ttl = inet->uc_ttl; newinet->mc_loop = 1; newinet->mc_ttl = 1; newinet->mc_index = 0; newinet->mc_list = NULL; if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) net_enable_timestamp(); security_sk_clone(sk, newsk); } static inline void sctp_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { int ancestor_size = sizeof(struct inet_sock) + sizeof(struct sctp_sock) - offsetof(struct sctp_sock, auto_asconf_list); if (sk_from->sk_family == PF_INET6) ancestor_size += sizeof(struct ipv6_pinfo); __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); } /* Populate the fields of the newsk from the oldsk and migrate the assoc * and its messages to the newsk. */ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, struct sctp_association *assoc, sctp_socket_type_t type) { struct sctp_sock *oldsp = sctp_sk(oldsk); struct sctp_sock *newsp = sctp_sk(newsk); struct sctp_bind_bucket *pp; /* hash list port iterator */ struct sctp_endpoint *newep = newsp->ep; struct sk_buff *skb, *tmp; struct sctp_ulpevent *event; struct sctp_bind_hashbucket *head; /* Migrate socket buffer sizes and all the socket level options to the * new socket. */ newsk->sk_sndbuf = oldsk->sk_sndbuf; newsk->sk_rcvbuf = oldsk->sk_rcvbuf; /* Brute force copy old sctp opt. */ sctp_copy_descendant(newsk, oldsk); /* Restore the ep value that was overwritten with the above structure * copy. */ newsp->ep = newep; newsp->hmac = NULL; /* Hook this new socket in to the bind_hash list. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), inet_sk(oldsk)->inet_num)]; spin_lock_bh(&head->lock); pp = sctp_sk(oldsk)->bind_hash; sk_add_bind_node(newsk, &pp->owner); sctp_sk(newsk)->bind_hash = pp; inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; spin_unlock_bh(&head->lock); /* Copy the bind_addr list from the original endpoint to the new * endpoint so that we can handle restarts properly */ sctp_bind_addr_dup(&newsp->ep->base.bind_addr, &oldsp->ep->base.bind_addr, GFP_KERNEL); /* Move any messages in the old socket's receive queue that are for the * peeled off association to the new socket's receive queue. 
*/ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsk->sk_receive_queue); __skb_queue_tail(&newsk->sk_receive_queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clean up any messages pending delivery due to partial * delivery. Three cases: * 1) No partial delivery; no work. * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. */ skb_queue_head_init(&newsp->pd_lobby); atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { struct sk_buff_head *queue; /* Decide which queue to move pd_lobby skbs to. */ if (assoc->ulpq.pd_mode) { queue = &newsp->pd_lobby; } else queue = &newsk->sk_receive_queue; /* Walk through the pd_lobby, looking for skbs that * need to be moved to the new socket. */ sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsp->pd_lobby); __skb_queue_tail(queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clear up any skbs waiting for the partial * delivery to finish. */ if (assoc->ulpq.pd_mode) sctp_clear_pd(oldsk, NULL); } sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) sctp_skb_set_owner_r_frag(skb, newsk); sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) sctp_skb_set_owner_r_frag(skb, newsk); /* Set the type of socket to indicate that it is peeled off from the * original UDP-style socket or created with the accept() call on a * TCP-style socket. */ newsp->type = type; /* Mark the new socket "in-use" by the user so that any packets * that may arrive on the association after we've moved it are * queued to the backlog. This prevents a potential race between * backlog processing on the old socket and new-packet processing * on the new socket. * * The caller has just allocated newsk so we can guarantee that other * paths won't try to lock it and then oldsk. */ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); sctp_assoc_migrate(assoc, newsk); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. */ if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) { newsk->sk_state = SCTP_SS_CLOSED; newsk->sk_shutdown |= RCV_SHUTDOWN; } else { newsk->sk_state = SCTP_SS_ESTABLISHED; } release_sock(newsk); } /* This proto struct describes the ULP interface for SCTP.
*/ struct proto sctp_prot = { .name = "SCTP", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #if IS_ENABLED(CONFIG_IPV6) #include <net/transp_v6.h> static void sctp_v6_destroy_sock(struct sock *sk) { sctp_destroy_sock(sk); inet6_destroy_sock(sk); } struct proto sctpv6_prot = { .name = "SCTPv6", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_v6_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp6_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #endif /* IS_ENABLED(CONFIG_IPV6) */
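/*
 * Illustrative sketch (editor's example, not part of the kernel file above):
 * sctp_wfree() reverses a per-chunk sndbuf charge of
 * SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct
 * sctp_chunk).  The standalone userspace program below mirrors that
 * arithmetic with placeholder overhead sizes, to show how sk_sndbuf
 * translates into writeable space in the style of sctp_writeable().
 */
#include <stdio.h>

#define FAKE_SKB_OVERHEAD 232	/* placeholder for sizeof(struct sk_buff) */
#define FAKE_CHUNK_OVERHEAD 96	/* placeholder for sizeof(struct sctp_chunk) */

/* Per-chunk charge, mirroring what sctp_wfree() above subtracts. */
static long example_chunk_charge(long payload)
{
	return payload + FAKE_SKB_OVERHEAD + FAKE_CHUNK_OVERHEAD;
}

/* Available space, clamped at zero exactly like sctp_writeable(). */
static long example_writeable(long sndbuf, long used)
{
	long amt = sndbuf - used;
	return amt < 0 ? 0 : amt;
}

int main(void)
{
	long sndbuf = 212992;	/* assumed send-buffer limit for the example */
	long used = 0;
	int i;

	/* Queue 100 chunks of 1200 payload bytes each. */
	for (i = 0; i < 100; i++)
		used += example_chunk_charge(1200);
	printf("used=%ld writeable=%ld\n", used,
	    example_writeable(sndbuf, used));
	return 0;
}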
./CrossVul/dataset_final_sorted/CWE-617/c/good_3156_0
crossvul-cpp_data_good_1770_2
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #ifdef ENABLE_EDP #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> #include <fnmatch.h> static int seq = 0; int edp_send(struct lldpd *global, struct lldpd_hardware *hardware) { const u_int8_t mcastaddr[] = EDP_MULTICAST_ADDR; const u_int8_t llcorg[] = LLC_ORG_EXTREME; struct lldpd_chassis *chassis; int length, i, v; u_int8_t *packet, *pos, *pos_llc, *pos_len_eh, *pos_len_edp, *pos_edp, *tlv, *end; u_int16_t checksum; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan; unsigned int state = 0; #endif u_int8_t edp_fakeversion[] = {7, 6, 4, 99}; /* The XXX entries below can be replaced by other values. We place them here to keep the position of "" roughly invariant across version changes. */ char *deviceslot[] = { "eth", "veth", "XXX", "XXX", "XXX", "XXX", "XXX", "XXX", "", NULL }; log_debug("edp", "send EDP frame on port %s", hardware->h_ifname); chassis = hardware->h_lport.p_chassis; #ifdef ENABLE_DOT1 while (state != 2) { #endif length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; v = 0; /* Ethernet header */ if (!( POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && POKE_SAVE(pos_len_eh) && /* We compute the len later */ POKE_UINT16(0))) goto toobig; /* LLC */ if (!( POKE_SAVE(pos_llc) && /* We need to save our current position to compute ethernet len */ /* SSAP and DSAP */ POKE_UINT8(0xaa) && POKE_UINT8(0xaa) && /* Control field */ POKE_UINT8(0x03) && /* ORG */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_UINT16(LLC_PID_EDP))) goto toobig; /* EDP header */ if ((chassis->c_id_len != ETHER_ADDR_LEN) || (chassis->c_id_subtype != LLDP_CHASSISID_SUBTYPE_LLADDR)) { log_warnx("edp", "local chassis does not use MAC address as chassis ID!?"); free(packet); return EINVAL; } if (!( POKE_SAVE(pos_edp) && /* Save the start of EDP frame */ POKE_UINT8(1) && POKE_UINT8(0) && POKE_SAVE(pos_len_edp) && /* We compute the len and the checksum later */ POKE_UINT32(0) && /* Len + Checksum */ POKE_UINT16(seq) && POKE_UINT16(0) && POKE_BYTES(chassis->c_id, ETHER_ADDR_LEN))) goto toobig; seq++; #ifdef ENABLE_DOT1 switch (state) { case 0: #endif /* Display TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_DISPLAY) && POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) && POKE_UINT8(0) && /* Add a NULL character for better compatibility */ POKE_END_EDP_TLV)) goto toobig; /* Info TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_INFO))) goto toobig; /* We try to emulate the slot thing */ for (i=0; deviceslot[i] != NULL; i++) { if (strncmp(hardware->h_ifname, deviceslot[i], strlen(deviceslot[i])) == 0) { if (!( POKE_UINT16(i) && POKE_UINT16(atoi(hardware->h_ifname +
strlen(deviceslot[i]))))) goto toobig; break; } } /* If we don't find a "slot", we say that the interface is in slot 8 */ if (deviceslot[i] == NULL) { if (!( POKE_UINT16(8) && POKE_UINT16(hardware->h_ifindex))) goto toobig; } if (!( POKE_UINT16(0) && /* vchassis */ POKE_UINT32(0) && POKE_UINT16(0) && /* Reserved */ /* Version */ POKE_BYTES(edp_fakeversion, sizeof(edp_fakeversion)) && /* Connections, we say that we won't have more interfaces than this mask. */ POKE_UINT32(0xffffffff) && POKE_UINT32(0) && POKE_UINT32(0) && POKE_UINT32(0) && POKE_END_EDP_TLV)) goto toobig; #ifdef ENABLE_DOT1 break; case 1: TAILQ_FOREACH(vlan, &hardware->h_lport.p_vlans, v_entries) { v++; if (!( POKE_START_EDP_TLV(EDP_TLV_VLAN) && POKE_UINT8(0) && /* Flags: no IP address */ POKE_UINT8(0) && /* Reserved */ POKE_UINT16(vlan->v_vid) && POKE_UINT32(0) && /* Reserved */ POKE_UINT32(0) && /* IP address */ /* VLAN name */ POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) && POKE_UINT8(0) && POKE_END_EDP_TLV)) goto toobig; } break; } if ((state == 1) && (v == 0)) { /* No VLAN, no need to send another TLV */ free(packet); break; } #endif /* Null TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_NULL) && POKE_END_EDP_TLV && POKE_SAVE(end))) goto toobig; /* Compute len and checksum */ i = end - pos_llc; /* Ethernet length */ v = end - pos_edp; /* EDP length */ POKE_RESTORE(pos_len_eh); if (!(POKE_UINT16(i))) goto toobig; POKE_RESTORE(pos_len_edp); if (!(POKE_UINT16(v))) goto toobig; checksum = frame_checksum(pos_edp, v, 0); if (!(POKE_UINT16(checksum))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("edp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } free(packet); #ifdef ENABLE_DOT1 state++; } #endif hardware->h_tx_cnt++; return 0; toobig: free(packet); return E2BIG; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_len < (x)) { \ log_warnx("edp", name " EDP TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) int edp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; #ifdef ENABLE_DOT1 struct lldpd_mgmt *mgmt, *mgmt_next, *m; struct lldpd_vlan *lvlan = NULL, *lvlan_next; #endif const unsigned char edpaddr[] = EDP_MULTICAST_ADDR; int length, gotend = 0, gotvlans = 0, edp_len, tlv_len, tlv_type; int edp_port, edp_slot; u_int8_t *pos, *pos_edp, *tlv; u_int8_t version[4]; #ifdef ENABLE_DOT1 struct in_addr address; struct lldpd_port *oport; #endif log_debug("edp", "decode EDP frame on port %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("edp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("edp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) + 8 /* LLC */ + 10 + ETHER_ADDR_LEN /* EDP header */) { log_warnx("edp", "too short EDP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(edpaddr, sizeof(edpaddr)) != 0) { log_info("edp", "frame not targeted at EDP multicast address received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; PEEK_DISCARD(6); /* LLC: DSAP + 
SSAP + control + org */ if (PEEK_UINT16 != LLC_PID_EDP) { log_debug("edp", "incorrect LLC protocol ID received on %s", hardware->h_ifname); goto malformed; } (void)PEEK_SAVE(pos_edp); /* Save the start of EDP packet */ if (PEEK_UINT8 != 1) { log_warnx("edp", "incorrect EDP version for frame received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD_UINT8; /* Reserved */ edp_len = PEEK_UINT16; PEEK_DISCARD_UINT16; /* Checksum */ PEEK_DISCARD_UINT16; /* Sequence */ if (PEEK_UINT16 != 0) { /* ID Type = 0 = MAC */ log_warnx("edp", "incorrect device id type for frame received on %s", hardware->h_ifname); goto malformed; } if (edp_len > length + 10) { log_warnx("edp", "incorrect size for EDP frame received on %s", hardware->h_ifname); goto malformed; } chassis->c_ttl = cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold; chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LLADDR; chassis->c_id_len = ETHER_ADDR_LEN; if ((chassis->c_id = (char *)malloc(ETHER_ADDR_LEN)) == NULL) { log_warn("edp", "unable to allocate memory for chassis ID"); goto malformed; } PEEK_BYTES(chassis->c_id, ETHER_ADDR_LEN); /* Let's check checksum */ if (frame_checksum(pos_edp, edp_len, 0) != 0) { log_warnx("edp", "incorrect EDP checksum for frame received on %s", hardware->h_ifname); goto malformed; } while (length && !gotend) { if (length < 4) { log_warnx("edp", "EDP TLV header is too large for " "frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_UINT8 != EDP_TLV_MARKER) { log_warnx("edp", "incorrect marker starting EDP TLV header for frame " "received on %s", hardware->h_ifname); goto malformed; } tlv_type = PEEK_UINT8; tlv_len = PEEK_UINT16 - 4; (void)PEEK_SAVE(tlv); if ((tlv_len < 0) || (tlv_len > length)) { log_debug("edp", "incorrect size in EDP TLV header for frame " "received on %s", hardware->h_ifname); /* Some poor old Extreme Summit are quite bogus */ gotend = 1; break; } switch (tlv_type) { case EDP_TLV_INFO: CHECK_TLV_SIZE(32, "Info"); port->p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME; edp_slot = PEEK_UINT16; edp_port = PEEK_UINT16; if (asprintf(&port->p_id, "%d/%d", edp_slot + 1, edp_port + 1) == -1) { log_warn("edp", "unable to allocate memory for " "port ID"); goto malformed; } port->p_id_len = strlen(port->p_id); if (asprintf(&port->p_descr, "Slot %d / Port %d", edp_slot + 1, edp_port + 1) == -1) { log_warn("edp", "unable to allocate memory for " "port description"); goto malformed; } PEEK_DISCARD_UINT16; /* vchassis */ PEEK_DISCARD(6); /* Reserved */ PEEK_BYTES(version, 4); if (asprintf(&chassis->c_descr, "EDP enabled device, version %d.%d.%d.%d", version[0], version[1], version[2], version[3]) == -1) { log_warn("edp", "unable to allocate memory for " "chassis description"); goto malformed; } break; case EDP_TLV_DISPLAY: if ((chassis->c_name = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("edp", "unable to allocate memory for chassis " "name"); goto malformed; } /* TLV display contains a lot of garbage */ PEEK_BYTES(chassis->c_name, tlv_len); break; case EDP_TLV_NULL: if (tlv_len != 0) { log_warnx("edp", "null tlv with incorrect size in frame " "received on %s", hardware->h_ifname); goto malformed; } if (length) log_debug("edp", "extra data after edp frame on %s", hardware->h_ifname); gotend = 1; break; case EDP_TLV_VLAN: #ifdef ENABLE_DOT1 CHECK_TLV_SIZE(12, "VLAN"); if ((lvlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("edp", "unable to allocate vlan"); goto malformed; } PEEK_DISCARD_UINT16; /* Flags + reserved */ lvlan->v_vid = 
PEEK_UINT16; /* VID */ PEEK_DISCARD(4); /* Reserved */ PEEK_BYTES(&address, sizeof(address)); if (address.s_addr != INADDR_ANY) { mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &address, sizeof(struct in_addr), 0); if (mgmt == NULL) { log_warn("edp", "Out of memory"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); } if ((lvlan->v_name = (char *)calloc(1, tlv_len + 1 - 12)) == NULL) { log_warn("edp", "unable to allocate vlan name"); goto malformed; } PEEK_BYTES(lvlan->v_name, tlv_len - 12); TAILQ_INSERT_TAIL(&port->p_vlans, lvlan, v_entries); lvlan = NULL; #endif gotvlans = 1; break; default: log_debug("edp", "unknown EDP TLV type (%d) received on %s", tlv_type, hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } PEEK_DISCARD(tlv + tlv_len - pos); } if ((chassis->c_id == NULL) || (port->p_id == NULL) || (chassis->c_name == NULL) || (chassis->c_descr == NULL) || (port->p_descr == NULL) || (gotend == 0)) { #ifdef ENABLE_DOT1 if (gotvlans && gotend) { /* VLAN can be sent in a separate frames. We need to add * those vlans to an existing port */ TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if (!((oport->p_protocol == LLDPD_MODE_EDP) && (oport->p_chassis->c_id_subtype == chassis->c_id_subtype) && (oport->p_chassis->c_id_len == chassis->c_id_len) && (memcmp(oport->p_chassis->c_id, chassis->c_id, chassis->c_id_len) == 0))) continue; /* We attach the VLANs to the found port */ lldpd_vlan_cleanup(oport); for (lvlan = TAILQ_FIRST(&port->p_vlans); lvlan != NULL; lvlan = lvlan_next) { lvlan_next = TAILQ_NEXT(lvlan, v_entries); TAILQ_REMOVE(&port->p_vlans, lvlan, v_entries); TAILQ_INSERT_TAIL(&oport->p_vlans, lvlan, v_entries); } /* And the IP addresses */ for (mgmt = TAILQ_FIRST(&chassis->c_mgmt); mgmt != NULL; mgmt = mgmt_next) { mgmt_next = TAILQ_NEXT(mgmt, m_entries); TAILQ_REMOVE(&chassis->c_mgmt, mgmt, m_entries); /* Don't add an address that already exists! */ TAILQ_FOREACH(m, &chassis->c_mgmt, m_entries) if (m->m_family == mgmt->m_family && !memcmp(&m->m_addr, &mgmt->m_addr, sizeof(m->m_addr))) break; if (m == NULL) TAILQ_INSERT_TAIL(&oport->p_chassis->c_mgmt, mgmt, m_entries); } } /* We discard the remaining frame */ goto malformed; } #else if (gotvlans) goto malformed; #endif log_warnx("edp", "some mandatory tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: #ifdef ENABLE_DOT1 free(lvlan); #endif lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #endif /* ENABLE_EDP */
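/*
 * Illustrative sketch (editor's example, not part of the lldpd file above):
 * an EDP TLV starts with a one-byte marker, a one-byte type and a 16-bit
 * big-endian length that covers the 4-byte header itself, which is why
 * edp_decode() computes "tlv_len = PEEK_UINT16 - 4".  The standalone helper
 * below emits such a TLV with plain pointers instead of the POKE_* macros.
 * EXAMPLE_TLV_MARKER and the type value are placeholders; the real
 * constants (EDP_TLV_MARKER, EDP_TLV_DISPLAY, ...) live in the lldpd
 * headers.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define EXAMPLE_TLV_MARKER 0x99	/* placeholder for EDP_TLV_MARKER */

/* Write one TLV into buf; returns bytes written, or 0 if it does not fit. */
static size_t put_edp_tlv(uint8_t *buf, size_t avail, uint8_t type,
    const void *payload, uint16_t payload_len)
{
	uint16_t total = 4 + payload_len;	/* length field includes the header */

	if (avail < total)
		return 0;
	buf[0] = EXAMPLE_TLV_MARKER;
	buf[1] = type;
	buf[2] = total >> 8;	/* big-endian length */
	buf[3] = total & 0xff;
	memcpy(buf + 4, payload, payload_len);
	return total;
}

int main(void)
{
	uint8_t frame[64];
	size_t n = put_edp_tlv(frame, sizeof(frame), 1 /* placeholder type */,
	    "host", 5);	/* 5 bytes: "host" plus the trailing NUL */

	printf("wrote %zu bytes, length field=%d\n", n,
	    (frame[2] << 8) | frame[3]);
	return 0;
}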
./CrossVul/dataset_final_sorted/CWE-617/c/good_1770_2
crossvul-cpp_data_bad_1771_4
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #ifdef ENABLE_SONMP #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> #include <assert.h> static struct sonmp_chassis sonmp_chassis_types[] = { {1, "unknown (via SONMP)"}, {2, "Nortel 3000"}, {3, "Nortel 3030"}, {4, "Nortel 2310"}, {5, "Nortel 2810"}, {6, "Nortel 2912"}, {7, "Nortel 2914"}, {8, "Nortel 271x"}, {9, "Nortel 2813"}, {10, "Nortel 2814"}, {11, "Nortel 2915"}, {12, "Nortel 5000"}, {13, "Nortel 2813SA"}, {14, "Nortel 2814SA"}, {15, "Nortel 810M"}, {16, "Nortel EtherCell"}, {17, "Nortel 5005"}, {18, "Alcatel Ethernet workgroup conc."}, {20, "Nortel 2715SA"}, {21, "Nortel 2486"}, {22, "Nortel 28000 series"}, {23, "Nortel 23000 series"}, {24, "Nortel 5DN00x series"}, {25, "BayStack Ethernet"}, {26, "Nortel 23100 series"}, {27, "Nortel 100Base-T Hub"}, {28, "Nortel 3000 Fast Ethernet"}, {29, "Nortel Orion switch"}, {30, "unknown"}, {31, "Nortel DDS "}, {32, "Nortel Centillion"}, {33, "Nortel Centillion"}, {34, "Nortel Centillion"}, {35, "BayStack 301"}, {36, "BayStack TokenRing Hub"}, {37, "Nortel FVC Multimedia Switch"}, {38, "Nortel Switch Node"}, {39, "BayStack 302 Switch"}, {40, "BayStack 350 Switch"}, {41, "BayStack 150 Ethernet Hub"}, {42, "Nortel Centillion 50N switch"}, {43, "Nortel Centillion 50T switch"}, {44, "BayStack 303 and 304 Switches"}, {45, "BayStack 200 Ethernet Hub"}, {46, "BayStack 250 10/100 Ethernet Hub"}, {48, "BayStack 450 10/100/1000 Switches"}, {49, "BayStack 410 10/100 Switches"}, {50, "Nortel Ethernet Routing 1200 L3 Switch"}, {51, "Nortel Ethernet Routing 1250 L3 Switch"}, {52, "Nortel Ethernet Routing 1100 L3 Switch"}, {53, "Nortel Ethernet Routing 1150 L3 Switch"}, {54, "Nortel Ethernet Routing 1050 L3 Switch"}, {55, "Nortel Ethernet Routing 1051 L3 Switch"}, {56, "Nortel Ethernet Routing 8610 L3 Switch"}, {57, "Nortel Ethernet Routing 8606 L3 Switch"}, {58, "Nortel Ethernet Routing Switch 8010"}, {59, "Nortel Ethernet Routing Switch 8006"}, {60, "BayStack 670 wireless access point"}, {61, "Nortel Ethernet Routing Switch 740 "}, {62, "Nortel Ethernet Routing Switch 750 "}, {63, "Nortel Ethernet Routing Switch 790"}, {64, "Nortel Business Policy Switch 2000 10/100 Switches"}, {65, "Nortel Ethernet Routing 8110 L2 Switch"}, {66, "Nortel Ethernet Routing 8106 L2 Switch"}, {67, "BayStack 3580 Gig Switch"}, {68, "BayStack 10 Power Supply Unit"}, {69, "BayStack 420 10/100 Switch"}, {70, "OPTera Metro 1200 Ethernet Service Module"}, {71, "Nortel Ethernet Routing Switch 8010co"}, {72, "Nortel Ethernet Routing 8610co L3 switch"}, {73, "Nortel Ethernet Routing 8110co L2 switch"}, {74, "Nortel Ethernet Routing 8003"}, {75, "Nortel Ethernet Routing 8603 L3 switch"}, {76, "Nortel 
Ethernet Routing 8103 L2 switch"}, {77, "BayStack 380 10/100/1000 Switch"}, {78, "Nortel Ethernet Switch 470-48T"}, {79, "OPTera Metro 1450 Ethernet Service Module"}, {80, "OPTera Metro 1400 Ethernet Service Module"}, {81, "Alteon Switch Family"}, {82, "Ethernet Switch 460-24T-PWR"}, {83, "OPTera Metro 8010 OPM L2 Switch"}, {84, "OPTera Metro 8010co OPM L2 Switch"}, {85, "OPTera Metro 8006 OPM L2 Switch"}, {86, "OPTera Metro 8003 OPM L2 Switch"}, {87, "Alteon 180e"}, {88, "Alteon AD3"}, {89, "Alteon 184"}, {90, "Alteon AD4"}, {91, "Nortel Ethernet Routing 1424 L3 switch"}, {92, "Nortel Ethernet Routing 1648 L3 switch"}, {93, "Nortel Ethernet Routing 1612 L3 switch"}, {94, "Nortel Ethernet Routing 1624 L3 switch "}, {95, "BayStack 380-24F Fiber 1000 Switch"}, {96, "Nortel Ethernet Routing Switch 5510-24T"}, {97, "Nortel Ethernet Routing Switch 5510-48T"}, {98, "Nortel Ethernet Switch 470-24T"}, {99, "Nortel Networks Wireless LAN Access Point 2220"}, {100, "Ethernet Routing RBS 2402 L3 switch"}, {101, "Alteon Application Switch 2424 "}, {102, "Alteon Application Switch 2224 "}, {103, "Alteon Application Switch 2208 "}, {104, "Alteon Application Switch 2216"}, {105, "Alteon Application Switch 3408"}, {106, "Alteon Application Switch 3416"}, {107, "Nortel Networks Wireless LAN SecuritySwitch 2250"}, {108, "Ethernet Switch 425-48T"}, {109, "Ethernet Switch 425-24T"}, {110, "Nortel Networks Wireless LAN Access Point 2221"}, {111, "Nortel Metro Ethernet Service Unit 24-T SPF switch"}, {112, "Nortel Metro Ethernet Service Unit 24-T LX DC switch"}, {113, "Nortel Ethernet Routing Switch 8300 10-slot chassis"}, {114, "Nortel Ethernet Routing Switch 8300 6-slot chassis"}, {115, "Nortel Ethernet Routing Switch 5520-24T-PWR"}, {116, "Nortel Ethernet Routing Switch 5520-48T-PWR"}, {117, "Nortel Networks VPN Gateway 3050"}, {118, "Alteon SSL 310 10/100"}, {119, "Alteon SSL 310 10/100 Fiber"}, {120, "Alteon SSL 310 10/100 FIPS"}, {121, "Alteon SSL 410 10/100/1000"}, {122, "Alteon SSL 410 10/100/1000 Fiber"}, {123, "Alteon Application Switch 2424-SSL"}, {124, "Nortel Ethernet Switch 325-24T"}, {125, "Nortel Ethernet Switch 325-24G"}, {126, "Nortel Networks Wireless LAN Access Point 2225"}, {127, "Nortel Networks Wireless LAN SecuritySwitch 2270"}, {128, "Nortel 24-port Ethernet Switch 470-24T-PWR"}, {129, "Nortel 48-port Ethernet Switch 470-48T-PWR"}, {130, "Nortel Ethernet Routing Switch 5530-24TFD"}, {131, "Nortel Ethernet Switch 3510-24T"}, {132, "Nortel Metro Ethernet Service Unit 12G AC L3 switch"}, {133, "Nortel Metro Ethernet Service Unit 12G DC L3 switch"}, {134, "Nortel Secure Access Switch"}, {135, "Networks VPN Gateway 3070"}, {136, "OPTera Metro 3500"}, {137, "SMB BES 1010 24T"}, {138, "SMB BES 1010 48T"}, {139, "SMB BES 1020 24T PWR"}, {140, "SMB BES 1020 48T PWR"}, {141, "SMB BES 2010 24T"}, {142, "SMB BES 2010 48T"}, {143, "SMB BES 2020 24T PWR"}, {144, "SMB BES 2020 48T PWR"}, {145, "SMB BES 110 24T"}, {146, "SMB BES 110 48T"}, {147, "SMB BES 120 24T PWR"}, {148, "SMB BES 120 48T PWR"}, {149, "SMB BES 210 24T"}, {150, "SMB BES 210 48T"}, {151, "SMB BES 220 24T PWR"}, {152, "SMB BES 220 48T PWR"}, {153, "OME 6500"}, {0, "unknown (via SONMP)"}, }; int sonmp_send(struct lldpd *global, struct lldpd_hardware *hardware) { const u_int8_t mcastaddr[] = SONMP_MULTICAST_ADDR; const u_int8_t llcorg[] = LLC_ORG_NORTEL; struct lldpd_chassis *chassis; struct lldpd_mgmt *mgmt; u_int8_t *packet, *pos, *pos_pid, *end; int length; struct in_addr address; log_debug("sonmp", "send SONMP PDU to %s", 
hardware->h_ifname); chassis = hardware->h_lport.p_chassis; length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( /* SONMP multicast address as target */ POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && /* Source MAC address */ POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && /* SONMP frame is of fixed size */ POKE_UINT16(SONMP_SIZE))) goto toobig; /* LLC header */ if (!( /* DSAP and SSAP */ POKE_UINT8(0xaa) && POKE_UINT8(0xaa) && /* Control field */ POKE_UINT8(0x03) && /* ORG */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_SAVE(pos_pid) && /* We will modify PID later to create a new frame */ POKE_UINT16(LLC_PID_SONMP_HELLO))) goto toobig; address.s_addr = htonl(INADDR_ANY); TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { if (mgmt->m_family == LLDPD_AF_IPV4) { address.s_addr = mgmt->m_addr.inet.s_addr; } break; } /* SONMP */ if (!( /* Our IP address */ POKE_BYTES(&address, sizeof(struct in_addr)) && /* Segment on three bytes, we don't have slots, so we skip the first two bytes */ POKE_UINT16(0) && POKE_UINT8(hardware->h_ifindex) && POKE_UINT8(1) && /* Chassis: Other */ POKE_UINT8(12) && /* Back: Ethernet, Fast Ethernet and Gigabit */ POKE_UINT8(SONMP_TOPOLOGY_NEW) && /* Should work. We have no state */ POKE_UINT8(1) && /* Links: Dunno what it is */ POKE_SAVE(end))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("sonmp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } POKE_RESTORE(pos_pid); /* Modify LLC PID */ (void)POKE_UINT16(LLC_PID_SONMP_FLATNET); POKE_RESTORE(packet); /* Go to the beginning */ PEEK_DISCARD(ETHER_ADDR_LEN - 1); /* Modify the last byte of the MAC address */ (void)POKE_UINT8(1); if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("sonmp", "unable to send second SONMP packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } free(packet); hardware->h_tx_cnt++; return 0; toobig: free(packet); return -1; } int sonmp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { const u_int8_t mcastaddr[] = SONMP_MULTICAST_ADDR; struct lldpd_chassis *chassis; struct lldpd_port *port; struct lldpd_mgmt *mgmt; int length, i; u_int8_t *pos; u_int8_t seg[3], rchassis; struct in_addr address; log_debug("sonmp", "decode SONMP PDU from %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("sonmp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("sonmp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < SONMP_SIZE) { log_warnx("sonmp", "too short SONMP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(mcastaddr, sizeof(mcastaddr)) != 0) /* There are two multicast addresses. We handle only one of * them.
*/ goto malformed; /* We skip to LLC PID */ PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; PEEK_DISCARD(6); if (PEEK_UINT16 != LLC_PID_SONMP_HELLO) { log_debug("sonmp", "incorrect LLC protocol ID received for SONMP on %s", hardware->h_ifname); goto malformed; } chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_ADDR; if ((chassis->c_id = calloc(1, sizeof(struct in_addr) + 1)) == NULL) { log_warn("sonmp", "unable to allocate memory for chassis id on %s", hardware->h_ifname); goto malformed; } chassis->c_id_len = sizeof(struct in_addr) + 1; chassis->c_id[0] = 1; PEEK_BYTES(&address, sizeof(struct in_addr)); memcpy(chassis->c_id + 1, &address, sizeof(struct in_addr)); if (asprintf(&chassis->c_name, "%s", inet_ntoa(address)) == -1) { log_warnx("sonmp", "unable to write chassis name for %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(seg, sizeof(seg)); rchassis = PEEK_UINT8; for (i=0; sonmp_chassis_types[i].type != 0; i++) { if (sonmp_chassis_types[i].type == rchassis) break; } if (asprintf(&chassis->c_descr, "%s", sonmp_chassis_types[i].description) == -1) { log_warnx("sonmp", "unable to write chassis description for %s", hardware->h_ifname); goto malformed; } mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &address, sizeof(struct in_addr), 0); if (mgmt == NULL) { assert(errno == ENOMEM); log_warn("sonmp", "unable to allocate memory for management address"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); chassis->c_ttl = cfg?(cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold): LLDPD_TTL; port->p_id_subtype = LLDP_PORTID_SUBTYPE_LOCAL; if (asprintf(&port->p_id, "%02x-%02x-%02x", seg[0], seg[1], seg[2]) == -1) { log_warn("sonmp", "unable to allocate memory for port id on %s", hardware->h_ifname); goto malformed; } port->p_id_len = strlen(port->p_id); /* Port description depends on the number of segments */ if ((seg[0] == 0) && (seg[1] == 0)) { if (asprintf(&port->p_descr, "port %d", seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } else if (seg[0] == 0) { if (asprintf(&port->p_descr, "port %d/%d", seg[1], seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } else { if (asprintf(&port->p_descr, "port %x:%x:%x", seg[0], seg[1], seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } *newchassis = chassis; *newport = port; return 1; malformed: lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #endif /* ENABLE_SONMP */
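/*
 * Illustrative sketch (editor's example, not part of the lldpd file above):
 * sonmp_decode() derives the port description from the 3-byte segment
 * identifier, choosing a format by how many leading bytes are zero.  The
 * standalone helper below reproduces just that branch logic, with
 * snprintf() into a caller-supplied buffer instead of asprintf().
 */
#include <stdio.h>
#include <stdint.h>

static void example_sonmp_port_descr(char *buf, size_t len,
    const uint8_t seg[3])
{
	if (seg[0] == 0 && seg[1] == 0)
		snprintf(buf, len, "port %d", seg[2]);
	else if (seg[0] == 0)
		snprintf(buf, len, "port %d/%d", seg[1], seg[2]);
	else
		snprintf(buf, len, "port %x:%x:%x", seg[0], seg[1], seg[2]);
}

int main(void)
{
	const uint8_t segs[3][3] = { {0, 0, 7}, {0, 2, 7}, {1, 2, 7} };
	char descr[32];
	int i;

	/* Prints "port 7", "port 2/7" and "port 1:2:7". */
	for (i = 0; i < 3; i++) {
		example_sonmp_port_descr(descr, sizeof(descr), segs[i]);
		printf("%s\n", descr);
	}
	return 0;
}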
./CrossVul/dataset_final_sorted/CWE-617/c/bad_1771_4
crossvul-cpp_data_bad_313_0
/* * FLV muxer * Copyright (c) 2003 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intreadwrite.h" #include "libavutil/dict.h" #include "libavutil/intfloat.h" #include "libavutil/avassert.h" #include "libavutil/mathematics.h" #include "avio_internal.h" #include "avio.h" #include "avc.h" #include "avformat.h" #include "flv.h" #include "internal.h" #include "metadata.h" #include "libavutil/opt.h" #include "libavcodec/put_bits.h" #include "libavcodec/aacenctab.h" static const AVCodecTag flv_video_codec_ids[] = { { AV_CODEC_ID_FLV1, FLV_CODECID_H263 }, { AV_CODEC_ID_H263, FLV_CODECID_REALH263 }, { AV_CODEC_ID_MPEG4, FLV_CODECID_MPEG4 }, { AV_CODEC_ID_FLASHSV, FLV_CODECID_SCREEN }, { AV_CODEC_ID_FLASHSV2, FLV_CODECID_SCREEN2 }, { AV_CODEC_ID_VP6F, FLV_CODECID_VP6 }, { AV_CODEC_ID_VP6, FLV_CODECID_VP6 }, { AV_CODEC_ID_VP6A, FLV_CODECID_VP6A }, { AV_CODEC_ID_H264, FLV_CODECID_H264 }, { AV_CODEC_ID_NONE, 0 } }; static const AVCodecTag flv_audio_codec_ids[] = { { AV_CODEC_ID_MP3, FLV_CODECID_MP3 >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_U8, FLV_CODECID_PCM >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_S16BE, FLV_CODECID_PCM >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_S16LE, FLV_CODECID_PCM_LE >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_ADPCM_SWF, FLV_CODECID_ADPCM >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_AAC, FLV_CODECID_AAC >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_NELLYMOSER, FLV_CODECID_NELLYMOSER >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_MULAW, FLV_CODECID_PCM_MULAW >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_ALAW, FLV_CODECID_PCM_ALAW >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_SPEEX, FLV_CODECID_SPEEX >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_NONE, 0 } }; typedef enum { FLV_AAC_SEQ_HEADER_DETECT = (1 << 0), FLV_NO_SEQUENCE_END = (1 << 1), FLV_ADD_KEYFRAME_INDEX = (1 << 2), FLV_NO_METADATA = (1 << 3), FLV_NO_DURATION_FILESIZE = (1 << 4), } FLVFlags; typedef struct FLVFileposition { int64_t keyframe_position; double keyframe_timestamp; struct FLVFileposition *next; } FLVFileposition; typedef struct FLVContext { AVClass *av_class; int reserved; int64_t duration_offset; int64_t filesize_offset; int64_t duration; int64_t delay; ///< first dts delay (needed for AVC & Speex) int64_t datastart_offset; int64_t datasize_offset; int64_t datasize; int64_t videosize_offset; int64_t videosize; int64_t audiosize_offset; int64_t audiosize; int64_t metadata_size_pos; int64_t metadata_totalsize_pos; int64_t metadata_totalsize; int64_t keyframe_index_size; int64_t lasttimestamp_offset; double lasttimestamp; int64_t lastkeyframetimestamp_offset; double lastkeyframetimestamp; int64_t lastkeyframelocation_offset; int64_t lastkeyframelocation; int acurframeindex; int64_t keyframes_info_offset; int64_t filepositions_count; FLVFileposition *filepositions; 
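/* Keyframe index list: flv_append_keyframe_info() keeps head_filepositions
 * pointing at the first entry and filepositions at the current tail so that
 * appends stay O(1); flv_write_trailer() replays the list into the
 * "filepositions" and "times" AMF arrays. */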
FLVFileposition *head_filepositions; AVCodecParameters *audio_par; AVCodecParameters *video_par; double framerate; AVCodecParameters *data_par; int flags; } FLVContext; typedef struct FLVStreamContext { int64_t last_ts; ///< last timestamp for each stream } FLVStreamContext; static int get_audio_flags(AVFormatContext *s, AVCodecParameters *par) { int flags = (par->bits_per_coded_sample == 16) ? FLV_SAMPLESSIZE_16BIT : FLV_SAMPLESSIZE_8BIT; if (par->codec_id == AV_CODEC_ID_AAC) // specs force these parameters return FLV_CODECID_AAC | FLV_SAMPLERATE_44100HZ | FLV_SAMPLESSIZE_16BIT | FLV_STEREO; else if (par->codec_id == AV_CODEC_ID_SPEEX) { if (par->sample_rate != 16000) { av_log(s, AV_LOG_ERROR, "FLV only supports wideband (16kHz) Speex audio\n"); return AVERROR(EINVAL); } if (par->channels != 1) { av_log(s, AV_LOG_ERROR, "FLV only supports mono Speex audio\n"); return AVERROR(EINVAL); } return FLV_CODECID_SPEEX | FLV_SAMPLERATE_11025HZ | FLV_SAMPLESSIZE_16BIT; } else { switch (par->sample_rate) { case 48000: // 48khz mp3 is stored with 44k1 samplerate identifier if (par->codec_id == AV_CODEC_ID_MP3) { flags |= FLV_SAMPLERATE_44100HZ; break; } else { goto error; } case 44100: flags |= FLV_SAMPLERATE_44100HZ; break; case 22050: flags |= FLV_SAMPLERATE_22050HZ; break; case 11025: flags |= FLV_SAMPLERATE_11025HZ; break; case 16000: // nellymoser only case 8000: // nellymoser only case 5512: // not MP3 if (par->codec_id != AV_CODEC_ID_MP3) { flags |= FLV_SAMPLERATE_SPECIAL; break; } default: error: av_log(s, AV_LOG_ERROR, "FLV does not support sample rate %d, " "choose from (44100, 22050, 11025)\n", par->sample_rate); return AVERROR(EINVAL); } } if (par->channels > 1) flags |= FLV_STEREO; switch (par->codec_id) { case AV_CODEC_ID_MP3: flags |= FLV_CODECID_MP3 | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_PCM_U8: flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_8BIT; break; case AV_CODEC_ID_PCM_S16BE: flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_PCM_S16LE: flags |= FLV_CODECID_PCM_LE | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_ADPCM_SWF: flags |= FLV_CODECID_ADPCM | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_NELLYMOSER: if (par->sample_rate == 8000) flags |= FLV_CODECID_NELLYMOSER_8KHZ_MONO | FLV_SAMPLESSIZE_16BIT; else if (par->sample_rate == 16000) flags |= FLV_CODECID_NELLYMOSER_16KHZ_MONO | FLV_SAMPLESSIZE_16BIT; else flags |= FLV_CODECID_NELLYMOSER | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_PCM_MULAW: flags = FLV_CODECID_PCM_MULAW | FLV_SAMPLERATE_SPECIAL | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_PCM_ALAW: flags = FLV_CODECID_PCM_ALAW | FLV_SAMPLERATE_SPECIAL | FLV_SAMPLESSIZE_16BIT; break; case 0: flags |= par->codec_tag << 4; break; default: av_log(s, AV_LOG_ERROR, "Audio codec '%s' not compatible with FLV\n", avcodec_get_name(par->codec_id)); return AVERROR(EINVAL); } return flags; } static void put_amf_string(AVIOContext *pb, const char *str) { size_t len = strlen(str); avio_wb16(pb, len); avio_write(pb, str, len); } // FLV timestamps are 32 bits signed, RTMP timestamps should be 32-bit unsigned static void put_timestamp(AVIOContext *pb, int64_t ts) { avio_wb24(pb, ts & 0xFFFFFF); avio_w8(pb, (ts >> 24) & 0x7F); } static void put_avc_eos_tag(AVIOContext *pb, unsigned ts) { avio_w8(pb, FLV_TAG_TYPE_VIDEO); avio_wb24(pb, 5); /* Tag Data Size */ put_timestamp(pb, ts); avio_wb24(pb, 0); /* StreamId = 0 */ avio_w8(pb, 23); /* ub[4] FrameType = 1, ub[4] CodecId = 7 */ avio_w8(pb, 2); /* AVC end of sequence */ avio_wb24(pb, 0); /* Always 0 for AVC
EOS. */ avio_wb32(pb, 16); /* Size of FLV tag */ } static void put_amf_double(AVIOContext *pb, double d) { avio_w8(pb, AMF_DATA_TYPE_NUMBER); avio_wb64(pb, av_double2int(d)); } static void put_amf_byte(AVIOContext *pb, unsigned char abyte) { avio_w8(pb, abyte); } static void put_amf_dword_array(AVIOContext *pb, uint32_t dw) { avio_w8(pb, AMF_DATA_TYPE_ARRAY); avio_wb32(pb, dw); } static void put_amf_bool(AVIOContext *pb, int b) { avio_w8(pb, AMF_DATA_TYPE_BOOL); avio_w8(pb, !!b); } static void write_metadata(AVFormatContext *s, unsigned int ts) { AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; int write_duration_filesize = !(flv->flags & FLV_NO_DURATION_FILESIZE); int metadata_count = 0; int64_t metadata_count_pos; AVDictionaryEntry *tag = NULL; /* write meta_tag */ avio_w8(pb, FLV_TAG_TYPE_META); // tag type META flv->metadata_size_pos = avio_tell(pb); avio_wb24(pb, 0); // size of data part (sum of all parts below) avio_wb24(pb, ts); // timestamp avio_wb32(pb, 0); // reserved /* now data of data_size size */ /* first event name as a string */ avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, "onMetaData"); // 12 bytes /* mixed array (hash) with size and string/type/data tuples */ avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY); metadata_count_pos = avio_tell(pb); metadata_count = 4 * !!flv->video_par + 5 * !!flv->audio_par + 1 * !!flv->data_par; if (write_duration_filesize) { metadata_count += 2; // +2 for duration and file size } avio_wb32(pb, metadata_count); if (write_duration_filesize) { put_amf_string(pb, "duration"); flv->duration_offset = avio_tell(pb); // fill in the guessed duration, it'll be corrected later if incorrect put_amf_double(pb, s->duration / AV_TIME_BASE); } if (flv->video_par) { put_amf_string(pb, "width"); put_amf_double(pb, flv->video_par->width); put_amf_string(pb, "height"); put_amf_double(pb, flv->video_par->height); put_amf_string(pb, "videodatarate"); put_amf_double(pb, flv->video_par->bit_rate / 1024.0); if (flv->framerate != 0.0) { put_amf_string(pb, "framerate"); put_amf_double(pb, flv->framerate); metadata_count++; } put_amf_string(pb, "videocodecid"); put_amf_double(pb, flv->video_par->codec_tag); } if (flv->audio_par) { put_amf_string(pb, "audiodatarate"); put_amf_double(pb, flv->audio_par->bit_rate / 1024.0); put_amf_string(pb, "audiosamplerate"); put_amf_double(pb, flv->audio_par->sample_rate); put_amf_string(pb, "audiosamplesize"); put_amf_double(pb, flv->audio_par->codec_id == AV_CODEC_ID_PCM_U8 ? 
8 : 16); put_amf_string(pb, "stereo"); put_amf_bool(pb, flv->audio_par->channels == 2); put_amf_string(pb, "audiocodecid"); put_amf_double(pb, flv->audio_par->codec_tag); } if (flv->data_par) { put_amf_string(pb, "datastream"); put_amf_double(pb, 0.0); } ff_standardize_creation_time(s); while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) { if( !strcmp(tag->key, "width") ||!strcmp(tag->key, "height") ||!strcmp(tag->key, "videodatarate") ||!strcmp(tag->key, "framerate") ||!strcmp(tag->key, "videocodecid") ||!strcmp(tag->key, "audiodatarate") ||!strcmp(tag->key, "audiosamplerate") ||!strcmp(tag->key, "audiosamplesize") ||!strcmp(tag->key, "stereo") ||!strcmp(tag->key, "audiocodecid") ||!strcmp(tag->key, "duration") ||!strcmp(tag->key, "onMetaData") ||!strcmp(tag->key, "datasize") ||!strcmp(tag->key, "lasttimestamp") ||!strcmp(tag->key, "totalframes") ||!strcmp(tag->key, "hasAudio") ||!strcmp(tag->key, "hasVideo") ||!strcmp(tag->key, "hasCuePoints") ||!strcmp(tag->key, "hasMetadata") ||!strcmp(tag->key, "hasKeyframes") ){ av_log(s, AV_LOG_DEBUG, "Ignoring metadata for %s\n", tag->key); continue; } put_amf_string(pb, tag->key); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, tag->value); metadata_count++; } if (write_duration_filesize) { put_amf_string(pb, "filesize"); flv->filesize_offset = avio_tell(pb); put_amf_double(pb, 0); // delayed write } if (flv->flags & FLV_ADD_KEYFRAME_INDEX) { flv->acurframeindex = 0; flv->keyframe_index_size = 0; put_amf_string(pb, "hasVideo"); put_amf_bool(pb, !!flv->video_par); metadata_count++; put_amf_string(pb, "hasKeyframes"); put_amf_bool(pb, 1); metadata_count++; put_amf_string(pb, "hasAudio"); put_amf_bool(pb, !!flv->audio_par); metadata_count++; put_amf_string(pb, "hasMetadata"); put_amf_bool(pb, 1); metadata_count++; put_amf_string(pb, "canSeekToEnd"); put_amf_bool(pb, 1); metadata_count++; put_amf_string(pb, "datasize"); flv->datasize_offset = avio_tell(pb); flv->datasize = 0; put_amf_double(pb, flv->datasize); metadata_count++; put_amf_string(pb, "videosize"); flv->videosize_offset = avio_tell(pb); flv->videosize = 0; put_amf_double(pb, flv->videosize); metadata_count++; put_amf_string(pb, "audiosize"); flv->audiosize_offset = avio_tell(pb); flv->audiosize = 0; put_amf_double(pb, flv->audiosize); metadata_count++; put_amf_string(pb, "lasttimestamp"); flv->lasttimestamp_offset = avio_tell(pb); flv->lasttimestamp = 0; put_amf_double(pb, 0); metadata_count++; put_amf_string(pb, "lastkeyframetimestamp"); flv->lastkeyframetimestamp_offset = avio_tell(pb); flv->lastkeyframetimestamp = 0; put_amf_double(pb, 0); metadata_count++; put_amf_string(pb, "lastkeyframelocation"); flv->lastkeyframelocation_offset = avio_tell(pb); flv->lastkeyframelocation = 0; put_amf_double(pb, 0); metadata_count++; put_amf_string(pb, "keyframes"); put_amf_byte(pb, AMF_DATA_TYPE_OBJECT); metadata_count++; flv->keyframes_info_offset = avio_tell(pb); } put_amf_string(pb, ""); avio_w8(pb, AMF_END_OF_OBJECT); /* write total size of tag */ flv->metadata_totalsize = avio_tell(pb) - flv->metadata_size_pos - 10; avio_seek(pb, metadata_count_pos, SEEK_SET); avio_wb32(pb, metadata_count); avio_seek(pb, flv->metadata_size_pos, SEEK_SET); avio_wb24(pb, flv->metadata_totalsize); avio_skip(pb, flv->metadata_totalsize + 10 - 3); flv->metadata_totalsize_pos = avio_tell(pb); avio_wb32(pb, flv->metadata_totalsize + 11); } static int unsupported_codec(AVFormatContext *s, const char* type, int codec_id) { const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id); 
av_log(s, AV_LOG_ERROR, "%s codec %s not compatible with flv\n", type, desc ? desc->name : "unknown"); return AVERROR(ENOSYS); } static void flv_write_codec_header(AVFormatContext* s, AVCodecParameters* par, int64_t ts) { int64_t data_size; AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; if (par->codec_id == AV_CODEC_ID_AAC || par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) { int64_t pos; avio_w8(pb, par->codec_type == AVMEDIA_TYPE_VIDEO ? FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO); avio_wb24(pb, 0); // size patched later put_timestamp(pb, ts); avio_wb24(pb, 0); // streamid pos = avio_tell(pb); if (par->codec_id == AV_CODEC_ID_AAC) { avio_w8(pb, get_audio_flags(s, par)); avio_w8(pb, 0); // AAC sequence header if (!par->extradata_size && (flv->flags & FLV_AAC_SEQ_HEADER_DETECT)) { PutBitContext pbc; int samplerate_index; int channels = flv->audio_par->channels - (flv->audio_par->channels == 8 ? 1 : 0); uint8_t data[2]; for (samplerate_index = 0; samplerate_index < 16; samplerate_index++) if (flv->audio_par->sample_rate == mpeg4audio_sample_rates[samplerate_index]) break; init_put_bits(&pbc, data, sizeof(data)); put_bits(&pbc, 5, flv->audio_par->profile + 1); //profile put_bits(&pbc, 4, samplerate_index); //sample rate index put_bits(&pbc, 4, channels); put_bits(&pbc, 1, 0); //frame length - 1024 samples put_bits(&pbc, 1, 0); //does not depend on core coder put_bits(&pbc, 1, 0); //is not extension flush_put_bits(&pbc); avio_w8(pb, data[0]); avio_w8(pb, data[1]); av_log(s, AV_LOG_WARNING, "AAC sequence header: %02x %02x.\n", data[0], data[1]); } avio_write(pb, par->extradata, par->extradata_size); } else { avio_w8(pb, par->codec_tag | FLV_FRAME_KEY); // flags avio_w8(pb, 0); // AVC sequence header avio_wb24(pb, 0); // composition time ff_isom_write_avcc(pb, par->extradata, par->extradata_size); } data_size = avio_tell(pb) - pos; avio_seek(pb, -data_size - 10, SEEK_CUR); avio_wb24(pb, data_size); avio_skip(pb, data_size + 10 - 3); avio_wb32(pb, data_size + 11); // previous tag size } } static int flv_append_keyframe_info(AVFormatContext *s, FLVContext *flv, double ts, int64_t pos) { FLVFileposition *position = av_malloc(sizeof(FLVFileposition)); if (!position) { av_log(s, AV_LOG_WARNING, "no mem for add keyframe index!\n"); return AVERROR(ENOMEM); } position->keyframe_timestamp = ts; position->keyframe_position = pos; if (!flv->filepositions_count) { flv->filepositions = position; flv->head_filepositions = flv->filepositions; position->next = NULL; } else { flv->filepositions->next = position; position->next = NULL; flv->filepositions = flv->filepositions->next; } flv->filepositions_count++; return 0; } static int shift_data(AVFormatContext *s) { int ret = 0; int n = 0; int64_t metadata_size = 0; FLVContext *flv = s->priv_data; int64_t pos, pos_end = avio_tell(s->pb); uint8_t *buf, *read_buf[2]; int read_buf_id = 0; int read_size[2]; AVIOContext *read_pb; metadata_size = flv->filepositions_count * 9 * 2 + 10; /* filepositions and times value */ metadata_size += 2 + 13; /* filepositions String */ metadata_size += 2 + 5; /* times String */ metadata_size += 3; /* Object end */ flv->keyframe_index_size = metadata_size; if (metadata_size < 0) return metadata_size; buf = av_malloc_array(metadata_size, 2); if (!buf) { return AVERROR(ENOMEM); } read_buf[0] = buf; read_buf[1] = buf + metadata_size; avio_seek(s->pb, flv->metadata_size_pos, SEEK_SET); avio_wb24(s->pb, flv->metadata_totalsize + metadata_size); avio_seek(s->pb, flv->metadata_totalsize_pos, SEEK_SET); 
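/* The onMetaData tag grows by metadata_size bytes once the keyframe index
 * is spliced in, so both length fields need patching: the 24-bit DataSize
 * written just above, and the 32-bit PreviousTagSize written below (tag
 * data plus the 11-byte tag header). */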
avio_wb32(s->pb, flv->metadata_totalsize + 11 + metadata_size); avio_seek(s->pb, pos_end, SEEK_SET); /* Shift the data: the AVIO context of the output can only be used for * writing, so we re-open the same output, but for reading. It also avoids * a read/seek/write/seek back and forth. */ avio_flush(s->pb); ret = s->io_open(s, &read_pb, s->url, AVIO_FLAG_READ, NULL); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Unable to re-open %s output file for " "the second pass (add_keyframe_index)\n", s->url); goto end; } /* mark the end of the shift, up to the last data we wrote, and get ready * for writing */ pos_end = avio_tell(s->pb); avio_seek(s->pb, flv->keyframes_info_offset + metadata_size, SEEK_SET); /* start reading from where the keyframe index information will be placed */ avio_seek(read_pb, flv->keyframes_info_offset, SEEK_SET); pos = avio_tell(read_pb); #define READ_BLOCK do { \ read_size[read_buf_id] = avio_read(read_pb, read_buf[read_buf_id], metadata_size); \ read_buf_id ^= 1; \ } while (0) /* shift the data in chunks of at most the keyframe *filepositions* and *times* size */ READ_BLOCK; do { READ_BLOCK; n = read_size[read_buf_id]; if (n < 0) break; avio_write(s->pb, read_buf[read_buf_id], n); pos += n; } while (pos <= pos_end); ff_format_io_close(s, &read_pb); end: av_free(buf); return ret; } static int flv_write_header(AVFormatContext *s) { int i; AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; for (i = 0; i < s->nb_streams; i++) { AVCodecParameters *par = s->streams[i]->codecpar; FLVStreamContext *sc; switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: if (s->streams[i]->avg_frame_rate.den && s->streams[i]->avg_frame_rate.num) { flv->framerate = av_q2d(s->streams[i]->avg_frame_rate); } if (flv->video_par) { av_log(s, AV_LOG_ERROR, "at most one video stream is supported in flv\n"); return AVERROR(EINVAL); } flv->video_par = par; if (!ff_codec_get_tag(flv_video_codec_ids, par->codec_id)) return unsupported_codec(s, "Video", par->codec_id); if (par->codec_id == AV_CODEC_ID_MPEG4 || par->codec_id == AV_CODEC_ID_H263) { int error = s->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL; av_log(s, error ?
AV_LOG_ERROR : AV_LOG_WARNING, "Codec %s is not supported in the official FLV specification,\n", avcodec_get_name(par->codec_id)); if (error) { av_log(s, AV_LOG_ERROR, "use vstrict=-1 / -strict -1 to use it anyway.\n"); return AVERROR(EINVAL); } } else if (par->codec_id == AV_CODEC_ID_VP6) { av_log(s, AV_LOG_WARNING, "Muxing VP6 in flv will produce flipped video on playback.\n"); } break; case AVMEDIA_TYPE_AUDIO: if (flv->audio_par) { av_log(s, AV_LOG_ERROR, "at most one audio stream is supported in flv\n"); return AVERROR(EINVAL); } flv->audio_par = par; if (get_audio_flags(s, par) < 0) return unsupported_codec(s, "Audio", par->codec_id); if (par->codec_id == AV_CODEC_ID_PCM_S16BE) av_log(s, AV_LOG_WARNING, "16-bit big-endian audio in flv is valid but most likely unplayable (hardware dependent); use s16le\n"); break; case AVMEDIA_TYPE_DATA: if (par->codec_id != AV_CODEC_ID_TEXT && par->codec_id != AV_CODEC_ID_NONE) return unsupported_codec(s, "Data", par->codec_id); flv->data_par = par; break; case AVMEDIA_TYPE_SUBTITLE: if (par->codec_id != AV_CODEC_ID_TEXT) { av_log(s, AV_LOG_ERROR, "Subtitle codec '%s' for stream %d is not compatible with FLV\n", avcodec_get_name(par->codec_id), i); return AVERROR_INVALIDDATA; } flv->data_par = par; break; default: av_log(s, AV_LOG_ERROR, "Codec type '%s' for stream %d is not compatible with FLV\n", av_get_media_type_string(par->codec_type), i); return AVERROR(EINVAL); } avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */ sc = av_mallocz(sizeof(FLVStreamContext)); if (!sc) return AVERROR(ENOMEM); s->streams[i]->priv_data = sc; sc->last_ts = -1; } flv->delay = AV_NOPTS_VALUE; avio_write(pb, "FLV", 3); avio_w8(pb, 1); avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!flv->audio_par + FLV_HEADER_FLAG_HASVIDEO * !!flv->video_par); avio_wb32(pb, 9); avio_wb32(pb, 0); for (i = 0; i < s->nb_streams; i++) if (s->streams[i]->codecpar->codec_tag == 5) { avio_w8(pb, 8); // message type avio_wb24(pb, 0); // include flags avio_wb24(pb, 0); // time stamp avio_wb32(pb, 0); // reserved avio_wb32(pb, 11); // size flv->reserved = 5; } if (flv->flags & FLV_NO_METADATA) { pb->seekable = 0; } else { write_metadata(s, 0); } for (i = 0; i < s->nb_streams; i++) { flv_write_codec_header(s, s->streams[i]->codecpar, 0); } flv->datastart_offset = avio_tell(pb); return 0; } static int flv_write_trailer(AVFormatContext *s) { int64_t file_size; AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; int build_keyframes_idx = flv->flags & FLV_ADD_KEYFRAME_INDEX; int i, res; int64_t cur_pos = avio_tell(s->pb); if (build_keyframes_idx) { FLVFileposition *newflv_posinfo, *p; avio_seek(pb, flv->videosize_offset, SEEK_SET); put_amf_double(pb, flv->videosize); avio_seek(pb, flv->audiosize_offset, SEEK_SET); put_amf_double(pb, flv->audiosize); avio_seek(pb, flv->lasttimestamp_offset, SEEK_SET); put_amf_double(pb, flv->lasttimestamp); avio_seek(pb, flv->lastkeyframetimestamp_offset, SEEK_SET); put_amf_double(pb, flv->lastkeyframetimestamp); avio_seek(pb, flv->lastkeyframelocation_offset, SEEK_SET); put_amf_double(pb, flv->lastkeyframelocation + flv->keyframe_index_size); avio_seek(pb, cur_pos, SEEK_SET); res = shift_data(s); if (res < 0) { goto end; } avio_seek(pb, flv->keyframes_info_offset, SEEK_SET); put_amf_string(pb, "filepositions"); put_amf_dword_array(pb, flv->filepositions_count); for (newflv_posinfo = flv->head_filepositions; newflv_posinfo; newflv_posinfo = newflv_posinfo->next) { put_amf_double(pb, newflv_posinfo->keyframe_position + flv->keyframe_index_size); } 
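/* The "times" array written next must stay parallel to "filepositions":
 * one double per keyframe, in the same order, so that a player can pair
 * each timestamp with its byte offset. */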
put_amf_string(pb, "times"); put_amf_dword_array(pb, flv->filepositions_count); for (newflv_posinfo = flv->head_filepositions; newflv_posinfo; newflv_posinfo = newflv_posinfo->next) { put_amf_double(pb, newflv_posinfo->keyframe_timestamp); } newflv_posinfo = flv->head_filepositions; while (newflv_posinfo) { p = newflv_posinfo->next; if (p) { newflv_posinfo->next = p->next; av_free(p); p = NULL; } else { av_free(newflv_posinfo); newflv_posinfo = NULL; } } put_amf_string(pb, ""); avio_w8(pb, AMF_END_OF_OBJECT); avio_seek(pb, cur_pos + flv->keyframe_index_size, SEEK_SET); } end: if (flv->flags & FLV_NO_SEQUENCE_END) { av_log(s, AV_LOG_DEBUG, "FLV no sequence end mode open\n"); } else { /* Add EOS tag */ for (i = 0; i < s->nb_streams; i++) { AVCodecParameters *par = s->streams[i]->codecpar; FLVStreamContext *sc = s->streams[i]->priv_data; if (par->codec_type == AVMEDIA_TYPE_VIDEO && (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4)) put_avc_eos_tag(pb, sc->last_ts); } } file_size = avio_tell(pb); if (build_keyframes_idx) { flv->datasize = file_size - flv->datastart_offset; avio_seek(pb, flv->datasize_offset, SEEK_SET); put_amf_double(pb, flv->datasize); } if (!(flv->flags & FLV_NO_METADATA)) { if (!(flv->flags & FLV_NO_DURATION_FILESIZE)) { /* update information */ if (avio_seek(pb, flv->duration_offset, SEEK_SET) < 0) { av_log(s, AV_LOG_WARNING, "Failed to update header with correct duration.\n"); } else { put_amf_double(pb, flv->duration / (double)1000); } if (avio_seek(pb, flv->filesize_offset, SEEK_SET) < 0) { av_log(s, AV_LOG_WARNING, "Failed to update header with correct filesize.\n"); } else { put_amf_double(pb, file_size); } } } return 0; } static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) { AVIOContext *pb = s->pb; AVCodecParameters *par = s->streams[pkt->stream_index]->codecpar; FLVContext *flv = s->priv_data; FLVStreamContext *sc = s->streams[pkt->stream_index]->priv_data; unsigned ts; int size = pkt->size; uint8_t *data = NULL; int flags = -1, flags_size, ret; int64_t cur_offset = avio_tell(pb); if (par->codec_id == AV_CODEC_ID_VP6F || par->codec_id == AV_CODEC_ID_VP6A || par->codec_id == AV_CODEC_ID_VP6 || par->codec_id == AV_CODEC_ID_AAC) flags_size = 2; else if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) flags_size = 5; else flags_size = 1; if (par->codec_id == AV_CODEC_ID_AAC || par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) { int side_size = 0; uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size); if (side && side_size > 0 && (side_size != par->extradata_size || memcmp(side, par->extradata, side_size))) { av_free(par->extradata); par->extradata = av_mallocz(side_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!par->extradata) { par->extradata_size = 0; return AVERROR(ENOMEM); } memcpy(par->extradata, side, side_size); par->extradata_size = side_size; flv_write_codec_header(s, par, pkt->dts); } } if (flv->delay == AV_NOPTS_VALUE) flv->delay = -pkt->dts; if (pkt->dts < -flv->delay) { av_log(s, AV_LOG_WARNING, "Packets are not in the proper order with respect to DTS\n"); return AVERROR(EINVAL); } ts = pkt->dts; if (s->event_flags & AVSTREAM_EVENT_FLAG_METADATA_UPDATED) { write_metadata(s, ts); s->event_flags &= ~AVSTREAM_EVENT_FLAG_METADATA_UPDATED; } avio_write_marker(pb, av_rescale(ts, AV_TIME_BASE, 1000), pkt->flags & AV_PKT_FLAG_KEY && (flv->video_par ? par->codec_type == AVMEDIA_TYPE_VIDEO : 1) ? 
AVIO_DATA_MARKER_SYNC_POINT : AVIO_DATA_MARKER_BOUNDARY_POINT); switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: avio_w8(pb, FLV_TAG_TYPE_VIDEO); flags = ff_codec_get_tag(flv_video_codec_ids, par->codec_id); flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER; break; case AVMEDIA_TYPE_AUDIO: flags = get_audio_flags(s, par); av_assert0(size); avio_w8(pb, FLV_TAG_TYPE_AUDIO); break; case AVMEDIA_TYPE_SUBTITLE: case AVMEDIA_TYPE_DATA: avio_w8(pb, FLV_TAG_TYPE_META); break; default: return AVERROR(EINVAL); } if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) { /* check if extradata looks like mp4 formatted */ if (par->extradata_size > 0 && *(uint8_t*)par->extradata != 1) if ((ret = ff_avc_parse_nal_units_buf(pkt->data, &data, &size)) < 0) return ret; } else if (par->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 && (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) { if (!s->streams[pkt->stream_index]->nb_frames) { av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: " "use the audio bitstream filter 'aac_adtstoasc' to fix it " "('-bsf:a aac_adtstoasc' option with ffmpeg)\n"); return AVERROR_INVALIDDATA; } av_log(s, AV_LOG_WARNING, "aac bitstream error\n"); } /* check Speex packet duration */ if (par->codec_id == AV_CODEC_ID_SPEEX && ts - sc->last_ts > 160) av_log(s, AV_LOG_WARNING, "Warning: Speex stream has more than " "8 frames per packet. Adobe Flash " "Player cannot handle this!\n"); if (sc->last_ts < ts) sc->last_ts = ts; if (size + flags_size >= 1<<24) { av_log(s, AV_LOG_ERROR, "Too large packet with size %u >= %u\n", size + flags_size, 1<<24); return AVERROR(EINVAL); } avio_wb24(pb, size + flags_size); put_timestamp(pb, ts); avio_wb24(pb, flv->reserved); if (par->codec_type == AVMEDIA_TYPE_DATA || par->codec_type == AVMEDIA_TYPE_SUBTITLE ) { int data_size; int64_t metadata_size_pos = avio_tell(pb); if (par->codec_id == AV_CODEC_ID_TEXT) { // legacy FFmpeg magic? avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, "onTextData"); avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY); avio_wb32(pb, 2); put_amf_string(pb, "type"); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, "Text"); put_amf_string(pb, "text"); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, pkt->data); put_amf_string(pb, ""); avio_w8(pb, AMF_END_OF_OBJECT); } else { // just pass the metadata through avio_write(pb, data ? data : pkt->data, size); } /* write total size of tag */ data_size = avio_tell(pb) - metadata_size_pos; avio_seek(pb, metadata_size_pos - 10, SEEK_SET); avio_wb24(pb, data_size); avio_seek(pb, data_size + 10 - 3, SEEK_CUR); avio_wb32(pb, data_size + 11); } else { av_assert1(flags>=0); avio_w8(pb,flags); if (par->codec_id == AV_CODEC_ID_VP6) avio_w8(pb,0); if (par->codec_id == AV_CODEC_ID_VP6F || par->codec_id == AV_CODEC_ID_VP6A) { if (par->extradata_size) avio_w8(pb, par->extradata[0]); else avio_w8(pb, ((FFALIGN(par->width, 16) - par->width) << 4) | (FFALIGN(par->height, 16) - par->height)); } else if (par->codec_id == AV_CODEC_ID_AAC) avio_w8(pb, 1); // AAC raw else if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) { avio_w8(pb, 1); // AVC NALU avio_wb24(pb, pkt->pts - pkt->dts); } avio_write(pb, data ? 
data : pkt->data, size); avio_wb32(pb, size + flags_size + 11); // previous tag size flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration); } if (flv->flags & FLV_ADD_KEYFRAME_INDEX) { switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: flv->videosize += (avio_tell(pb) - cur_offset); flv->lasttimestamp = flv->acurframeindex / flv->framerate; if (pkt->flags & AV_PKT_FLAG_KEY) { double ts = flv->acurframeindex / flv->framerate; int64_t pos = cur_offset; flv->lastkeyframetimestamp = flv->acurframeindex / flv->framerate; flv->lastkeyframelocation = pos; flv_append_keyframe_info(s, flv, ts, pos); } flv->acurframeindex++; break; case AVMEDIA_TYPE_AUDIO: flv->audiosize += (avio_tell(pb) - cur_offset); break; default: av_log(s, AV_LOG_WARNING, "par->codec_type is type = [%d]\n", par->codec_type); break; } } av_free(data); return pb->error; } static const AVOption options[] = { { "flvflags", "FLV muxer flags", offsetof(FLVContext, flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "aac_seq_header_detect", "Put AAC sequence header based on stream data", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_AAC_SEQ_HEADER_DETECT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "no_sequence_end", "disable sequence end for FLV", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_NO_SEQUENCE_END}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "no_metadata", "disable metadata for FLV", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_NO_METADATA}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "no_duration_filesize", "disable duration and filesize zero value metadata for FLV", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_NO_DURATION_FILESIZE}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "add_keyframe_index", "Add keyframe index metadata", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_ADD_KEYFRAME_INDEX}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { NULL }, }; static const AVClass flv_muxer_class = { .class_name = "flv muxer", .item_name = av_default_item_name, .option = options, .version = LIBAVUTIL_VERSION_INT, }; AVOutputFormat ff_flv_muxer = { .name = "flv", .long_name = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"), .mime_type = "video/x-flv", .extensions = "flv", .priv_data_size = sizeof(FLVContext), .audio_codec = CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_ADPCM_SWF, .video_codec = AV_CODEC_ID_FLV1, .write_header = flv_write_header, .write_packet = flv_write_packet, .write_trailer = flv_write_trailer, .codec_tag = (const AVCodecTag* const []) { flv_video_codec_ids, flv_audio_codec_ids, 0 }, .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS | AVFMT_TS_NONSTRICT, .priv_class = &flv_muxer_class, };
./CrossVul/dataset_final_sorted/CWE-617/c/bad_313_0
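For reference, a minimal standalone sketch (plain C, independent of libavformat) of the 11-byte FLV tag header that flv_write_packet() above emits through avio_w8()/avio_wb24()/put_timestamp(). The helper names write_u24 and flv_tag_header are invented for illustration and are not part of the muxer:

#include <stdint.h>

/* Big-endian 24-bit write, mirroring avio_wb24(). */
static void write_u24(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)(v >> 16);
    p[1] = (uint8_t)(v >> 8);
    p[2] = (uint8_t)v;
}

/* Lay out the 11-byte FLV tag header: tag type, 24-bit payload size,
 * 24-bit timestamp plus an extended high byte, and a 24-bit stream id
 * (the flv->reserved value in the muxer above). */
static void flv_tag_header(uint8_t buf[11], uint8_t type,
                           uint32_t payload_size, uint32_t ts_ms,
                           uint32_t stream_id)
{
    buf[0] = type;                        /* 8 audio, 9 video, 18 script data */
    write_u24(buf + 1, payload_size);     /* avio_wb24(pb, size + flags_size) */
    write_u24(buf + 4, ts_ms & 0xFFFFFF); /* low 24 bits of the timestamp */
    buf[7] = (uint8_t)(ts_ms >> 24);      /* extended timestamp byte */
    write_u24(buf + 8, stream_id);        /* avio_wb24(pb, flv->reserved) */
}

After the payload, the muxer closes each tag with a big-endian 32-bit "previous tag size" equal to payload size + 11, which is the avio_wb32(pb, size + flags_size + 11) call visible in flv_write_packet().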
crossvul-cpp_data_bad_1770_1
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* We also support FDP, which is very similar to CDPv1 */ #include "lldpd.h" #include "frame.h" #if defined (ENABLE_CDP) || defined (ENABLE_FDP) #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> #include <assert.h> static int cdp_send(struct lldpd *global, struct lldpd_hardware *hardware, int version) { const char *platform = "Unknown"; struct lldpd_chassis *chassis; struct lldpd_mgmt *mgmt; struct lldpd_port *port; u_int8_t mcastaddr[] = CDP_MULTICAST_ADDR; u_int8_t llcorg[] = LLC_ORG_CISCO; #ifdef ENABLE_FDP char *capstr; #endif u_int16_t checksum; int length, i; u_int32_t cap; u_int8_t *packet; u_int8_t *pos, *pos_len_eh, *pos_llc, *pos_cdp, *pos_checksum, *tlv, *end; log_debug("cdp", "send CDP frame on %s", hardware->h_ifname); port = &(hardware->h_lport); chassis = port->p_chassis; #ifdef ENABLE_FDP if (version == 0) { /* With FDP, change multicast address and LLC PID */ const u_int8_t fdpmcastaddr[] = FDP_MULTICAST_ADDR; const u_int8_t fdpllcorg[] = LLC_ORG_FOUNDRY; memcpy(mcastaddr, fdpmcastaddr, sizeof(mcastaddr)); memcpy(llcorg, fdpllcorg, sizeof(llcorg)); } #endif length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && POKE_SAVE(pos_len_eh) && /* We compute the len later */ POKE_UINT16(0))) goto toobig; /* LLC */ if (!( POKE_SAVE(pos_llc) && POKE_UINT8(0xaa) && /* SSAP */ POKE_UINT8(0xaa) && /* DSAP */ POKE_UINT8(0x03) && /* Control field */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_UINT16(LLC_PID_CDP))) goto toobig; /* CDP header */ if (!( POKE_SAVE(pos_cdp) && POKE_UINT8((version == 0)?1:version) && POKE_UINT8(chassis->c_ttl) && POKE_SAVE(pos_checksum) && /* Save checksum position */ POKE_UINT16(0))) goto toobig; /* Chassis ID */ if (!( POKE_START_CDP_TLV(CDP_TLV_CHASSIS) && (chassis->c_name? POKE_BYTES(chassis->c_name, strlen(chassis->c_name)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Addresses */ /* See: * http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#xtocid12 * * It seems that Cisco implies that CDP supports IPv6 using * 802.2 address format with 0xAAAA03 0x000000 0x0800, but * 0x0800 is the Ethernet protocol type for IPv4. Therefore, * we support only IPv4. 
*/ i = 0; TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) if (mgmt->m_family == LLDPD_AF_IPV4) i++; if (i > 0) { if (!( POKE_START_CDP_TLV(CDP_TLV_ADDRESSES) && POKE_UINT32(i))) goto toobig; TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { switch (mgmt->m_family) { case LLDPD_AF_IPV4: if (!( POKE_UINT8(1) && /* Type: NLPID */ POKE_UINT8(1) && /* Length: 1 */ POKE_UINT8(CDP_ADDRESS_PROTO_IP) && /* IP */ POKE_UINT16(sizeof(struct in_addr)) && /* Address length */ POKE_BYTES(&mgmt->m_addr, sizeof(struct in_addr)))) goto toobig; break; } } if (!(POKE_END_CDP_TLV)) goto toobig; } /* Port ID */ if (!( POKE_START_CDP_TLV(CDP_TLV_PORT) && (hardware->h_lport.p_descr? POKE_BYTES(hardware->h_lport.p_descr, strlen(hardware->h_lport.p_descr)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Capabilities */ if (version != 0) { cap = 0; if (chassis->c_cap_enabled & LLDP_CAP_ROUTER) cap |= CDP_CAP_ROUTER; if (chassis->c_cap_enabled & LLDP_CAP_BRIDGE) cap |= CDP_CAP_SWITCH; cap |= CDP_CAP_HOST; if (!( POKE_START_CDP_TLV(CDP_TLV_CAPABILITIES) && POKE_UINT32(cap) && POKE_END_CDP_TLV)) goto toobig; #ifdef ENABLE_FDP } else { /* With FDP, it seems that a string is used in place of an int */ if (chassis->c_cap_enabled & LLDP_CAP_ROUTER) capstr = "Router"; else if (chassis->c_cap_enabled & LLDP_CAP_BRIDGE) capstr = "Switch"; else if (chassis->c_cap_enabled & LLDP_CAP_REPEATER) capstr = "Bridge"; else capstr = "Host"; if (!( POKE_START_CDP_TLV(CDP_TLV_CAPABILITIES) && POKE_BYTES(capstr, strlen(capstr)) && POKE_END_CDP_TLV)) goto toobig; #endif } /* Native VLAN */ #ifdef ENABLE_DOT1 if (version >=2 && hardware->h_lport.p_pvid != 0) { if (!( POKE_START_CDP_TLV(CDP_TLV_NATIVEVLAN) && POKE_UINT16(hardware->h_lport.p_pvid) && POKE_END_CDP_TLV)) goto toobig; } #endif /* Software version */ if (!( POKE_START_CDP_TLV(CDP_TLV_SOFTWARE) && (chassis->c_descr? POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)): POKE_BYTES("", 0)) && POKE_END_CDP_TLV)) goto toobig; /* Platform */ if (global && global->g_config.c_platform) platform = global->g_config.c_platform; if (!( POKE_START_CDP_TLV(CDP_TLV_PLATFORM) && POKE_BYTES(platform, strlen(platform)) && POKE_END_CDP_TLV)) goto toobig; #ifdef ENABLE_LLDPMED /* Power use */ if ((version >= 2) && port->p_med_cap_enabled && (port->p_med_power.source != LLDP_MED_POW_SOURCE_LOCAL) && (port->p_med_power.val > 0) && (port->p_med_power.val <= 655)) { if (!( POKE_START_CDP_TLV(CDP_TLV_POWER_CONSUMPTION) && POKE_UINT16(port->p_med_power.val * 100) && POKE_END_CDP_TLV)) goto toobig; } #endif (void)POKE_SAVE(end); /* Compute len and checksum */ POKE_RESTORE(pos_len_eh); if (!(POKE_UINT16(end - pos_llc))) goto toobig; checksum = frame_checksum(pos_cdp, end - pos_cdp, (version != 0) ? 
1 : 0); POKE_RESTORE(pos_checksum); if (!(POKE_UINT16(checksum))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("cdp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } hardware->h_tx_cnt++; free(packet); return 0; toobig: free(packet); return -1; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_len < (x)) { \ log_warnx("cdp", name " CDP/FDP TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) /* cdp_decode also decodes FDP */ int cdp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; struct lldpd_mgmt *mgmt; struct in_addr addr; #if 0 u_int16_t cksum; #endif u_int8_t *software = NULL, *platform = NULL; int software_len = 0, platform_len = 0, proto, version, nb, caps; const unsigned char cdpaddr[] = CDP_MULTICAST_ADDR; #ifdef ENABLE_FDP const unsigned char fdpaddr[] = CDP_MULTICAST_ADDR; int fdp = 0; #endif u_int8_t *pos, *tlv, *pos_address, *pos_next_address; int length, len_eth, tlv_type, tlv_len, addresses_len, address_len; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan; #endif log_debug("cdp", "decode CDP frame received on %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("cdp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("cdp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) /* Ethernet */ + 8 /* LLC */ + 4 /* CDP header */) { log_warn("cdp", "too short CDP/FDP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(cdpaddr, sizeof(cdpaddr)) != 0) { #ifdef ENABLE_FDP PEEK_RESTORE((u_int8_t*)frame); if (PEEK_CMP(fdpaddr, sizeof(fdpaddr)) != 0) fdp = 1; else { #endif log_info("cdp", "frame not targeted at CDP/FDP multicast address received on %s", hardware->h_ifname); goto malformed; #ifdef ENABLE_FDP } #endif } PEEK_DISCARD(ETHER_ADDR_LEN); /* Don't care of source address */ len_eth = PEEK_UINT16; if (len_eth > length) { log_warnx("cdp", "incorrect 802.3 frame size reported on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(6); /* Skip beginning of LLC */ proto = PEEK_UINT16; if (proto != LLC_PID_CDP) { if ((proto != LLC_PID_DRIP) && (proto != LLC_PID_PAGP) && (proto != LLC_PID_PVSTP) && (proto != LLC_PID_UDLD) && (proto != LLC_PID_VTP) && (proto != LLC_PID_DTP) && (proto != LLC_PID_STP)) log_debug("cdp", "incorrect LLC protocol ID received on %s", hardware->h_ifname); goto malformed; } #if 0 /* Check checksum */ cksum = frame_checksum(pos, len_eth - 8, #ifdef ENABLE_FDP !fdp /* fdp = 0 -> cisco checksum */ #else 1 /* cisco checksum */ #endif ); if (cksum != 0) { log_info("cdp", "incorrect CDP/FDP checksum for frame received on %s (%d)", hardware->h_ifname, cksum); goto malformed; } #endif /* Check version */ version = PEEK_UINT8; if ((version != 1) && (version != 2)) { log_warnx("cdp", "incorrect CDP/FDP version (%d) for frame received on %s", version, hardware->h_ifname); goto malformed; } chassis->c_ttl = PEEK_UINT8; /* TTL */ PEEK_DISCARD_UINT16; /* Checksum, already checked */ while (length) { if (length < 4) { log_warnx("cdp", "CDP/FDP TLV header is too 
large for " "frame received on %s", hardware->h_ifname); goto malformed; } tlv_type = PEEK_UINT16; tlv_len = PEEK_UINT16 - 4; (void)PEEK_SAVE(tlv); if ((tlv_len < 0) || (length < tlv_len)) { log_warnx("cdp", "incorrect size in CDP/FDP TLV header for frame " "received on %s", hardware->h_ifname); goto malformed; } switch (tlv_type) { case CDP_TLV_CHASSIS: if ((chassis->c_name = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis name"); goto malformed; } PEEK_BYTES(chassis->c_name, tlv_len); chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LOCAL; if ((chassis->c_id = (char *)malloc(tlv_len)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis ID"); goto malformed; } memcpy(chassis->c_id, chassis->c_name, tlv_len); chassis->c_id_len = tlv_len; break; case CDP_TLV_ADDRESSES: CHECK_TLV_SIZE(4, "Address"); addresses_len = tlv_len - 4; for (nb = PEEK_UINT32; nb > 0; nb--) { (void)PEEK_SAVE(pos_address); /* We first try to get the real length of the packet */ if (addresses_len < 2) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD_UINT8; addresses_len--; address_len = PEEK_UINT8; addresses_len--; if (addresses_len < address_len + 2) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(address_len); addresses_len -= address_len; address_len = PEEK_UINT16; addresses_len -= 2; if (addresses_len < address_len) { log_warn("cdp", "too short address subframe " "received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(address_len); (void)PEEK_SAVE(pos_next_address); /* Next, we go back and try to extract IPv4 address */ PEEK_RESTORE(pos_address); if ((PEEK_UINT8 == 1) && (PEEK_UINT8 == 1) && (PEEK_UINT8 == CDP_ADDRESS_PROTO_IP) && (PEEK_UINT16 == sizeof(struct in_addr))) { PEEK_BYTES(&addr, sizeof(struct in_addr)); mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &addr, sizeof(struct in_addr), 0); if (mgmt == NULL) { assert(errno == ENOMEM); log_warn("cdp", "unable to allocate memory for management address"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); } /* Go to the end of the address */ PEEK_RESTORE(pos_next_address); } break; case CDP_TLV_PORT: if (tlv_len == 0) { log_warn("cdp", "too short port description received"); goto malformed; } if ((port->p_descr = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for port description"); goto malformed; } PEEK_BYTES(port->p_descr, tlv_len); port->p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME; if ((port->p_id = (char *)calloc(1, tlv_len)) == NULL) { log_warn("cdp", "unable to allocate memory for port ID"); goto malformed; } memcpy(port->p_id, port->p_descr, tlv_len); port->p_id_len = tlv_len; break; case CDP_TLV_CAPABILITIES: #ifdef ENABLE_FDP if (fdp) { /* Capabilities are string with FDP */ if (!strncmp("Router", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_ROUTER; else if (!strncmp("Switch", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_BRIDGE; else if (!strncmp("Bridge", (char*)pos, tlv_len)) chassis->c_cap_enabled = LLDP_CAP_REPEATER; else chassis->c_cap_enabled = LLDP_CAP_STATION; chassis->c_cap_available = chassis->c_cap_enabled; break; } #endif CHECK_TLV_SIZE(4, "Capabilities"); caps = PEEK_UINT32; if (caps & CDP_CAP_ROUTER) chassis->c_cap_enabled |= LLDP_CAP_ROUTER; if (caps & 0x0e) chassis->c_cap_enabled |= LLDP_CAP_BRIDGE; if (chassis->c_cap_enabled == 0) chassis->c_cap_enabled = 
LLDP_CAP_STATION; chassis->c_cap_available = chassis->c_cap_enabled; break; case CDP_TLV_SOFTWARE: software_len = tlv_len; (void)PEEK_SAVE(software); break; case CDP_TLV_PLATFORM: platform_len = tlv_len; (void)PEEK_SAVE(platform); break; #ifdef ENABLE_DOT1 case CDP_TLV_NATIVEVLAN: CHECK_TLV_SIZE(2, "Native VLAN"); if ((vlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("cdp", "unable to alloc vlan " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } vlan->v_vid = port->p_pvid = PEEK_UINT16; if (asprintf(&vlan->v_name, "VLAN #%d", vlan->v_vid) == -1) { log_warn("cdp", "unable to alloc VLAN name for " "TLV received on %s", hardware->h_ifname); free(vlan); goto malformed; } TAILQ_INSERT_TAIL(&port->p_vlans, vlan, v_entries); break; #endif default: log_debug("cdp", "unknown CDP/FDP TLV type (%d) received on %s", ntohs(tlv_type), hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } PEEK_DISCARD(tlv + tlv_len - pos); } if (!software && platform) { if ((chassis->c_descr = (char *)calloc(1, platform_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, platform, platform_len); } else if (software && !platform) { if ((chassis->c_descr = (char *)calloc(1, software_len + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, software, software_len); } else if (software && platform) { #define CONCAT_PLATFORM " running on\n" if ((chassis->c_descr = (char *)calloc(1, software_len + platform_len + strlen(CONCAT_PLATFORM) + 1)) == NULL) { log_warn("cdp", "unable to allocate memory for chassis description"); goto malformed; } memcpy(chassis->c_descr, platform, platform_len); memcpy(chassis->c_descr + platform_len, CONCAT_PLATFORM, strlen(CONCAT_PLATFORM)); memcpy(chassis->c_descr + platform_len + strlen(CONCAT_PLATFORM), software, software_len); } if ((chassis->c_id == NULL) || (port->p_id == NULL) || (chassis->c_name == NULL) || (chassis->c_descr == NULL) || (port->p_descr == NULL) || (chassis->c_ttl == 0) || (chassis->c_cap_enabled == 0)) { log_warnx("cdp", "some mandatory CDP/FDP tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #ifdef ENABLE_CDP int cdpv1_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 1); } int cdpv2_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 2); } #endif #ifdef ENABLE_FDP int fdp_send(struct lldpd *global, struct lldpd_hardware *hardware) { return cdp_send(global, hardware, 0); } #endif #ifdef ENABLE_CDP static int cdp_guess(char *pos, int length, int version) { const u_int8_t mcastaddr[] = CDP_MULTICAST_ADDR; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) /* Ethernet */ + 8 /* LLC */ + 4 /* CDP header */) return 0; if (PEEK_CMP(mcastaddr, ETHER_ADDR_LEN) != 0) return 0; PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; /* Ethernet */ PEEK_DISCARD(8); /* LLC */ return (PEEK_UINT8 == version); } int cdpv1_guess(char *frame, int len) { return cdp_guess(frame, len, 1); } int cdpv2_guess(char *frame, int len) { return cdp_guess(frame, len, 2); } #endif #endif /* defined (ENABLE_CDP) || defined (ENABLE_FDP) */
./CrossVul/dataset_final_sorted/CWE-617/c/bad_1770_1
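The frame_checksum() helper called by both cdp_send() and the disabled verification block above lives in lldpd's frame.c (not shown here); judging from the call sites and comments, its third argument selects a Cisco-specific variant of the trailing-odd-byte handling. As a rough illustration only, the textbook RFC 1071 one's-complement checksum that the CDP checksum is based on looks like this (the Cisco odd-length quirk is deliberately not reproduced):

#include <stdint.h>
#include <stddef.h>

/* Standard one's-complement (RFC 1071) checksum over len bytes. */
static uint16_t rfc1071_checksum(const uint8_t *data, size_t len)
{
    uint32_t sum = 0;

    while (len > 1) {              /* sum the data as 16-bit big-endian words */
        sum += (uint32_t)((data[0] << 8) | data[1]);
        data += 2;
        len -= 2;
    }
    if (len > 0)                   /* pad a trailing odd byte with zero */
        sum += (uint32_t)data[0] << 8;
    while (sum >> 16)              /* fold carries back into the low 16 bits */
        sum = (sum & 0xFFFF) + (sum >> 16);
    return (uint16_t)~sum;
}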
crossvul-cpp_data_good_487_1
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* lib/krb5/krb/s4u_creds.c */ /* * Copyright (C) 2009 by the Massachusetts Institute of Technology. * All rights reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ #include "k5-int.h" #include "int-proto.h" /* Convert ticket flags to necessary KDC options */ #define FLAGS2OPTS(flags) (flags & KDC_TKT_COMMON_MASK) /* * Implements S4U2Self, by which a service can request a ticket to * itself on behalf of an arbitrary principal. */ static krb5_error_code krb5_get_as_key_noop( krb5_context context, krb5_principal client, krb5_enctype etype, krb5_prompter_fct prompter, void *prompter_data, krb5_data *salt, krb5_data *params, krb5_keyblock *as_key, void *gak_data, k5_response_items *ritems) { /* force a hard error, we don't actually have the key */ return KRB5_PREAUTH_FAILED; } static krb5_error_code s4u_identify_user(krb5_context context, krb5_creds *in_creds, krb5_data *subject_cert, krb5_principal *canon_user) { krb5_error_code code; krb5_preauthtype ptypes[1] = { KRB5_PADATA_S4U_X509_USER }; krb5_creds creds; int use_master = 0; krb5_get_init_creds_opt *opts = NULL; krb5_principal_data client; krb5_s4u_userid userid; *canon_user = NULL; if (in_creds->client == NULL && subject_cert == NULL) { return EINVAL; } if (in_creds->client != NULL && in_creds->client->type != KRB5_NT_ENTERPRISE_PRINCIPAL) { int anonymous; anonymous = krb5_principal_compare(context, in_creds->client, krb5_anonymous_principal()); return krb5_copy_principal(context, anonymous ? in_creds->server : in_creds->client, canon_user); } memset(&creds, 0, sizeof(creds)); memset(&userid, 0, sizeof(userid)); if (subject_cert != NULL) userid.subject_cert = *subject_cert; code = krb5_get_init_creds_opt_alloc(context, &opts); if (code != 0) goto cleanup; krb5_get_init_creds_opt_set_tkt_life(opts, 15); krb5_get_init_creds_opt_set_renew_life(opts, 0); krb5_get_init_creds_opt_set_forwardable(opts, 0); krb5_get_init_creds_opt_set_proxiable(opts, 0); krb5_get_init_creds_opt_set_canonicalize(opts, 1); krb5_get_init_creds_opt_set_preauth_list(opts, ptypes, 1); if (in_creds->client != NULL) { client = *in_creds->client; client.realm = in_creds->server->realm; } else { client.magic = KV5M_PRINCIPAL; client.realm = in_creds->server->realm; /* should this be NULL, empty or a fixed string? 
XXX */ client.data = NULL; client.length = 0; client.type = KRB5_NT_ENTERPRISE_PRINCIPAL; } code = k5_get_init_creds(context, &creds, &client, NULL, NULL, 0, NULL, opts, krb5_get_as_key_noop, &userid, &use_master, NULL); if (!code || code == KRB5_PREAUTH_FAILED || code == KRB5KDC_ERR_KEY_EXP) { *canon_user = userid.user; userid.user = NULL; code = 0; } cleanup: krb5_free_cred_contents(context, &creds); if (opts != NULL) krb5_get_init_creds_opt_free(context, opts); if (userid.user != NULL) krb5_free_principal(context, userid.user); return code; } static krb5_error_code make_pa_for_user_checksum(krb5_context context, krb5_keyblock *key, krb5_pa_for_user *req, krb5_checksum *cksum) { krb5_error_code code; int i; char *p; krb5_data data; data.length = 4; for (i = 0; i < req->user->length; i++) data.length += req->user->data[i].length; data.length += req->user->realm.length; data.length += req->auth_package.length; p = data.data = malloc(data.length); if (data.data == NULL) return ENOMEM; p[0] = (req->user->type >> 0) & 0xFF; p[1] = (req->user->type >> 8) & 0xFF; p[2] = (req->user->type >> 16) & 0xFF; p[3] = (req->user->type >> 24) & 0xFF; p += 4; for (i = 0; i < req->user->length; i++) { if (req->user->data[i].length > 0) memcpy(p, req->user->data[i].data, req->user->data[i].length); p += req->user->data[i].length; } if (req->user->realm.length > 0) memcpy(p, req->user->realm.data, req->user->realm.length); p += req->user->realm.length; if (req->auth_package.length > 0) memcpy(p, req->auth_package.data, req->auth_package.length); /* Per spec, use hmac-md5 checksum regardless of key type. */ code = krb5_c_make_checksum(context, CKSUMTYPE_HMAC_MD5_ARCFOUR, key, KRB5_KEYUSAGE_APP_DATA_CKSUM, &data, cksum); free(data.data); return code; } static krb5_error_code build_pa_for_user(krb5_context context, krb5_creds *tgt, krb5_s4u_userid *userid, krb5_pa_data **out_padata) { krb5_error_code code; krb5_pa_data *padata; krb5_pa_for_user for_user; krb5_data *for_user_data = NULL; char package[] = "Kerberos"; if (userid->user == NULL) return EINVAL; memset(&for_user, 0, sizeof(for_user)); for_user.user = userid->user; for_user.auth_package.data = package; for_user.auth_package.length = sizeof(package) - 1; code = make_pa_for_user_checksum(context, &tgt->keyblock, &for_user, &for_user.cksum); if (code != 0) goto cleanup; code = encode_krb5_pa_for_user(&for_user, &for_user_data); if (code != 0) goto cleanup; padata = malloc(sizeof(*padata)); if (padata == NULL) { code = ENOMEM; goto cleanup; } padata->magic = KV5M_PA_DATA; padata->pa_type = KRB5_PADATA_FOR_USER; padata->length = for_user_data->length; padata->contents = (krb5_octet *)for_user_data->data; free(for_user_data); for_user_data = NULL; *out_padata = padata; cleanup: if (for_user.cksum.contents != NULL) krb5_free_checksum_contents(context, &for_user.cksum); krb5_free_data(context, for_user_data); return code; } /* * This function is invoked by krb5int_make_tgs_request_ext() just before the * request is encoded; it gives us access to the nonce and subkey without * requiring them to be generated by the caller. 
*/ static krb5_error_code build_pa_s4u_x509_user(krb5_context context, krb5_keyblock *subkey, krb5_kdc_req *tgsreq, void *gcvt_data) { krb5_error_code code; krb5_pa_s4u_x509_user *s4u_user = (krb5_pa_s4u_x509_user *)gcvt_data; krb5_data *data = NULL; krb5_pa_data **padata; krb5_cksumtype cksumtype; int i; assert(s4u_user->cksum.contents == NULL); s4u_user->user_id.nonce = tgsreq->nonce; code = encode_krb5_s4u_userid(&s4u_user->user_id, &data); if (code != 0) goto cleanup; /* [MS-SFU] 2.2.2: unusual to say the least, but enc_padata secures it */ if (subkey->enctype == ENCTYPE_ARCFOUR_HMAC || subkey->enctype == ENCTYPE_ARCFOUR_HMAC_EXP) { cksumtype = CKSUMTYPE_RSA_MD4; } else { code = krb5int_c_mandatory_cksumtype(context, subkey->enctype, &cksumtype); } if (code != 0) goto cleanup; code = krb5_c_make_checksum(context, cksumtype, subkey, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, data, &s4u_user->cksum); if (code != 0) goto cleanup; krb5_free_data(context, data); data = NULL; code = encode_krb5_pa_s4u_x509_user(s4u_user, &data); if (code != 0) goto cleanup; assert(tgsreq->padata != NULL); for (i = 0; tgsreq->padata[i] != NULL; i++) ; padata = realloc(tgsreq->padata, (i + 2) * sizeof(krb5_pa_data *)); if (padata == NULL) { code = ENOMEM; goto cleanup; } tgsreq->padata = padata; padata[i] = malloc(sizeof(krb5_pa_data)); if (padata[i] == NULL) { code = ENOMEM; goto cleanup; } padata[i]->magic = KV5M_PA_DATA; padata[i]->pa_type = KRB5_PADATA_S4U_X509_USER; padata[i]->length = data->length; padata[i]->contents = (krb5_octet *)data->data; padata[i + 1] = NULL; free(data); data = NULL; cleanup: if (code != 0 && s4u_user->cksum.contents != NULL) { krb5_free_checksum_contents(context, &s4u_user->cksum); s4u_user->cksum.contents = NULL; } krb5_free_data(context, data); return code; } static krb5_error_code verify_s4u2self_reply(krb5_context context, krb5_keyblock *subkey, krb5_pa_s4u_x509_user *req_s4u_user, krb5_pa_data **rep_padata, krb5_pa_data **enc_padata) { krb5_error_code code; krb5_pa_data *rep_s4u_padata, *enc_s4u_padata; krb5_pa_s4u_x509_user *rep_s4u_user = NULL; krb5_data data, *datap = NULL; krb5_keyusage usage; krb5_boolean valid; krb5_boolean not_newer; assert(req_s4u_user != NULL); switch (subkey->enctype) { case ENCTYPE_DES_CBC_CRC: case ENCTYPE_DES_CBC_MD4: case ENCTYPE_DES_CBC_MD5: case ENCTYPE_DES3_CBC_SHA1: case ENCTYPE_DES3_CBC_RAW: case ENCTYPE_ARCFOUR_HMAC: case ENCTYPE_ARCFOUR_HMAC_EXP : not_newer = TRUE; break; default: not_newer = FALSE; break; } enc_s4u_padata = krb5int_find_pa_data(context, enc_padata, KRB5_PADATA_S4U_X509_USER); /* XXX this will break newer enctypes with a MIT 1.7 KDC */ rep_s4u_padata = krb5int_find_pa_data(context, rep_padata, KRB5_PADATA_S4U_X509_USER); if (rep_s4u_padata == NULL) { if (not_newer == FALSE || enc_s4u_padata != NULL) return KRB5_KDCREP_MODIFIED; else return 0; } data.length = rep_s4u_padata->length; data.data = (char *)rep_s4u_padata->contents; code = decode_krb5_pa_s4u_x509_user(&data, &rep_s4u_user); if (code != 0) goto cleanup; if (rep_s4u_user->user_id.nonce != req_s4u_user->user_id.nonce) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } code = encode_krb5_s4u_userid(&rep_s4u_user->user_id, &datap); if (code != 0) goto cleanup; if (rep_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY; else usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST; code = krb5_c_verify_checksum(context, subkey, usage, datap, &rep_s4u_user->cksum, &valid); if (code != 0) goto cleanup; if (valid == FALSE) { 
code = KRB5_KDCREP_MODIFIED; goto cleanup; } /* * KDCs that support KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE also return * S4U enc_padata for older (pre-AES) encryption types only. */ if (not_newer) { if (enc_s4u_padata == NULL) { if (rep_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } } else { if (enc_s4u_padata->length != req_s4u_user->cksum.length + rep_s4u_user->cksum.length) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } if (memcmp(enc_s4u_padata->contents, req_s4u_user->cksum.contents, req_s4u_user->cksum.length) || memcmp(&enc_s4u_padata->contents[req_s4u_user->cksum.length], rep_s4u_user->cksum.contents, rep_s4u_user->cksum.length)) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } } } else if (!krb5_c_is_keyed_cksum(rep_s4u_user->cksum.checksum_type)) { code = KRB5KRB_AP_ERR_INAPP_CKSUM; goto cleanup; } cleanup: krb5_free_pa_s4u_x509_user(context, rep_s4u_user); krb5_free_data(context, datap); return code; } /* Unparse princ and re-parse it as an enterprise principal. */ static krb5_error_code convert_to_enterprise(krb5_context context, krb5_principal princ, krb5_principal *eprinc_out) { krb5_error_code code; char *str; *eprinc_out = NULL; code = krb5_unparse_name(context, princ, &str); if (code != 0) return code; code = krb5_parse_name_flags(context, str, KRB5_PRINCIPAL_PARSE_ENTERPRISE | KRB5_PRINCIPAL_PARSE_IGNORE_REALM, eprinc_out); krb5_free_unparsed_name(context, str); return code; } static krb5_error_code krb5_get_self_cred_from_kdc(krb5_context context, krb5_flags options, krb5_ccache ccache, krb5_creds *in_creds, krb5_data *subject_cert, krb5_data *user_realm, krb5_creds **out_creds) { krb5_error_code code; krb5_principal tgs = NULL, eprinc = NULL; krb5_principal_data sprinc; krb5_creds tgtq, s4u_creds, *tgt = NULL, *tgtptr; krb5_creds *referral_tgts[KRB5_REFERRAL_MAXHOPS]; krb5_pa_s4u_x509_user s4u_user; int referral_count = 0, i; krb5_flags kdcopt; memset(&tgtq, 0, sizeof(tgtq)); memset(referral_tgts, 0, sizeof(referral_tgts)); *out_creds = NULL; memset(&s4u_user, 0, sizeof(s4u_user)); if (in_creds->client != NULL && in_creds->client->length > 0) { if (in_creds->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) { code = krb5_build_principal_ext(context, &s4u_user.user_id.user, user_realm->length, user_realm->data, in_creds->client->data[0].length, in_creds->client->data[0].data, 0); if (code != 0) goto cleanup; s4u_user.user_id.user->type = KRB5_NT_ENTERPRISE_PRINCIPAL; } else { code = krb5_copy_principal(context, in_creds->client, &s4u_user.user_id.user); if (code != 0) goto cleanup; } } else { code = krb5_build_principal_ext(context, &s4u_user.user_id.user, user_realm->length, user_realm->data); if (code != 0) goto cleanup; s4u_user.user_id.user->type = KRB5_NT_ENTERPRISE_PRINCIPAL; } if (subject_cert != NULL) s4u_user.user_id.subject_cert = *subject_cert; s4u_user.user_id.options = KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE; /* First, acquire a TGT to the user's realm. */ code = krb5int_tgtname(context, user_realm, &in_creds->server->realm, &tgs); if (code != 0) goto cleanup; tgtq.client = in_creds->server; tgtq.server = tgs; code = krb5_get_credentials(context, options, ccache, &tgtq, &tgt); if (code != 0) goto cleanup; tgtptr = tgt; /* Convert the server principal to an enterprise principal, for use with * foreign realms. */ code = convert_to_enterprise(context, in_creds->server, &eprinc); if (code != 0) goto cleanup; /* Make a shallow copy of in_creds with client pointing to the server * principal. 
We will set s4u_creds.server for each request. */ s4u_creds = *in_creds; s4u_creds.client = in_creds->server; /* Then, walk back the referral path to S4U2Self for user */ kdcopt = 0; if (options & KRB5_GC_CANONICALIZE) kdcopt |= KDC_OPT_CANONICALIZE; if (options & KRB5_GC_FORWARDABLE) kdcopt |= KDC_OPT_FORWARDABLE; if (options & KRB5_GC_NO_TRANSIT_CHECK) kdcopt |= KDC_OPT_DISABLE_TRANSITED_CHECK; for (referral_count = 0; referral_count < KRB5_REFERRAL_MAXHOPS; referral_count++) { krb5_pa_data **in_padata = NULL; krb5_pa_data **out_padata = NULL; krb5_pa_data **enc_padata = NULL; krb5_keyblock *subkey = NULL; if (s4u_user.user_id.user != NULL && s4u_user.user_id.user->length) { in_padata = calloc(2, sizeof(krb5_pa_data *)); if (in_padata == NULL) { code = ENOMEM; goto cleanup; } code = build_pa_for_user(context, tgtptr, &s4u_user.user_id, &in_padata[0]); if (code != 0) { krb5_free_pa_data(context, in_padata); goto cleanup; } } if (data_eq(tgtptr->server->data[1], in_creds->server->realm)) { /* When asking the server realm, use the real principal. */ s4u_creds.server = in_creds->server; } else { /* When asking a foreign realm, use the enterprise principal, with * the realm set to the TGS realm. */ sprinc = *eprinc; sprinc.realm = tgtptr->server->data[1]; s4u_creds.server = &sprinc; } code = krb5_get_cred_via_tkt_ext(context, tgtptr, KDC_OPT_CANONICALIZE | FLAGS2OPTS(tgtptr->ticket_flags) | kdcopt, tgtptr->addresses, in_padata, &s4u_creds, build_pa_s4u_x509_user, &s4u_user, &out_padata, &enc_padata, out_creds, &subkey); if (code != 0) { krb5_free_checksum_contents(context, &s4u_user.cksum); krb5_free_pa_data(context, in_padata); goto cleanup; } code = verify_s4u2self_reply(context, subkey, &s4u_user, out_padata, enc_padata); krb5_free_checksum_contents(context, &s4u_user.cksum); krb5_free_pa_data(context, in_padata); krb5_free_pa_data(context, out_padata); krb5_free_pa_data(context, enc_padata); krb5_free_keyblock(context, subkey); if (code != 0) goto cleanup; if (krb5_principal_compare(context, in_creds->server, (*out_creds)->server)) { code = 0; goto cleanup; } else if (IS_TGS_PRINC((*out_creds)->server)) { krb5_data *r1 = &tgtptr->server->data[1]; krb5_data *r2 = &(*out_creds)->server->data[1]; if (data_eq(*r1, *r2)) { krb5_free_creds(context, *out_creds); *out_creds = NULL; code = KRB5_ERR_HOST_REALM_UNKNOWN; break; } for (i = 0; i < referral_count; i++) { if (krb5_principal_compare(context, (*out_creds)->server, referral_tgts[i]->server)) { code = KRB5_KDC_UNREACH; goto cleanup; } } tgtptr = *out_creds; referral_tgts[referral_count] = *out_creds; *out_creds = NULL; } else { krb5_free_creds(context, *out_creds); *out_creds = NULL; code = KRB5KRB_AP_WRONG_PRINC; /* XXX */ break; } } cleanup: for (i = 0; i < KRB5_REFERRAL_MAXHOPS; i++) { if (referral_tgts[i] != NULL) krb5_free_creds(context, referral_tgts[i]); } krb5_free_principal(context, tgs); krb5_free_principal(context, eprinc); krb5_free_creds(context, tgt); krb5_free_principal(context, s4u_user.user_id.user); krb5_free_checksum_contents(context, &s4u_user.cksum); return code; } krb5_error_code KRB5_CALLCONV krb5_get_credentials_for_user(krb5_context context, krb5_flags options, krb5_ccache ccache, krb5_creds *in_creds, krb5_data *subject_cert, krb5_creds **out_creds) { krb5_error_code code; krb5_principal realm = NULL; *out_creds = NULL; if (options & KRB5_GC_CONSTRAINED_DELEGATION) { code = EINVAL; goto cleanup; } if (in_creds->client != NULL) { /* Uncanonicalised check */ code = krb5_get_credentials(context, options | 
KRB5_GC_CACHED, ccache, in_creds, out_creds); if (code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE) goto cleanup; if ((options & KRB5_GC_CACHED) && !(options & KRB5_GC_CANONICALIZE)) goto cleanup; } code = s4u_identify_user(context, in_creds, subject_cert, &realm); if (code != 0) goto cleanup; if (in_creds->client != NULL && in_creds->client->type == KRB5_NT_ENTERPRISE_PRINCIPAL) { /* Post-canonicalisation check for enterprise principals */ krb5_creds mcreds = *in_creds; mcreds.client = realm; code = krb5_get_credentials(context, options | KRB5_GC_CACHED, ccache, &mcreds, out_creds); if ((code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE) || (options & KRB5_GC_CACHED)) goto cleanup; } code = krb5_get_self_cred_from_kdc(context, options, ccache, in_creds, subject_cert, &realm->realm, out_creds); if (code != 0) goto cleanup; assert(*out_creds != NULL); if ((options & KRB5_GC_NO_STORE) == 0) { code = krb5_cc_store_cred(context, ccache, *out_creds); if (code != 0) goto cleanup; } cleanup: if (code != 0 && *out_creds != NULL) { krb5_free_creds(context, *out_creds); *out_creds = NULL; } krb5_free_principal(context, realm); return code; } /* * Exported API for constrained delegation (S4U2Proxy). * * This is preferable to using krb5_get_credentials directly because * it can perform some additional checks. */ krb5_error_code KRB5_CALLCONV krb5_get_credentials_for_proxy(krb5_context context, krb5_flags options, krb5_ccache ccache, krb5_creds *in_creds, krb5_ticket *evidence_tkt, krb5_creds **out_creds) { krb5_error_code code; krb5_creds mcreds; krb5_creds *ncreds = NULL; krb5_flags fields; krb5_data *evidence_tkt_data = NULL; krb5_creds s4u_creds; *out_creds = NULL; if (in_creds == NULL || in_creds->client == NULL || evidence_tkt == NULL || evidence_tkt->enc_part2 == NULL) { code = EINVAL; goto cleanup; } /* * Caller should have set in_creds->client to match evidence * ticket client */ if (!krb5_principal_compare(context, evidence_tkt->enc_part2->client, in_creds->client)) { code = EINVAL; goto cleanup; } if ((evidence_tkt->enc_part2->flags & TKT_FLG_FORWARDABLE) == 0) { code = KRB5_TKT_NOT_FORWARDABLE; goto cleanup; } code = krb5int_construct_matching_creds(context, options, in_creds, &mcreds, &fields); if (code != 0) goto cleanup; ncreds = calloc(1, sizeof(*ncreds)); if (ncreds == NULL) { code = ENOMEM; goto cleanup; } ncreds->magic = KV5M_CRED; code = krb5_cc_retrieve_cred(context, ccache, fields, &mcreds, ncreds); if (code != 0) { free(ncreds); ncreds = in_creds; } else { *out_creds = ncreds; } if ((code != KRB5_CC_NOTFOUND && code != KRB5_CC_NOT_KTYPE) || options & KRB5_GC_CACHED) goto cleanup; code = encode_krb5_ticket(evidence_tkt, &evidence_tkt_data); if (code != 0) goto cleanup; s4u_creds = *in_creds; s4u_creds.client = evidence_tkt->server; s4u_creds.second_ticket = *evidence_tkt_data; code = krb5_get_credentials(context, options | KRB5_GC_CONSTRAINED_DELEGATION, ccache, &s4u_creds, out_creds); if (code != 0) goto cleanup; /* * Check client name because we couldn't compare that inside * krb5_get_credentials() (enc_part2 is unavailable in clear) */ if (!krb5_principal_compare(context, evidence_tkt->enc_part2->client, (*out_creds)->client)) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } cleanup: if (*out_creds != NULL && code != 0) { krb5_free_creds(context, *out_creds); *out_creds = NULL; } if (evidence_tkt_data != NULL) krb5_free_data(context, evidence_tkt_data); return code; }
./CrossVul/dataset_final_sorted/CWE-617/c/good_487_1
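A minimal, hypothetical caller for the S4U2Self entry point defined above. The function name s4u2self_demo and the bare-bones error handling are invented for illustration; a real caller would also acquire the ccache and principals through the usual krb5 APIs:

#include <string.h>
#include <krb5.h>

/* Ask the KDC for a ticket to ourselves (self) on behalf of user. */
static krb5_error_code s4u2self_demo(krb5_context ctx, krb5_ccache cc,
                                     krb5_principal self,
                                     krb5_principal user)
{
    krb5_creds in_creds, *out_creds = NULL;
    krb5_error_code code;

    memset(&in_creds, 0, sizeof(in_creds));
    in_creds.client = user;   /* the principal being impersonated */
    in_creds.server = self;   /* our own service principal */

    /* NULL subject_cert: identify the user by name, not by certificate. */
    code = krb5_get_credentials_for_user(ctx, 0, cc, &in_creds, NULL,
                                         &out_creds);
    if (code == 0)
        krb5_free_creds(ctx, out_creds);
    return code;
}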
crossvul-cpp_data_good_219_1
/* * H.263 decoder * Copyright (c) 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * H.263 decoder. */ #define UNCHECKED_BITSTREAM_READER 1 #include "libavutil/cpu.h" #include "avcodec.h" #include "error_resilience.h" #include "flv.h" #include "h263.h" #include "h263_parser.h" #include "hwaccel.h" #include "internal.h" #include "mpeg_er.h" #include "mpeg4video.h" #include "mpeg4video_parser.h" #include "mpegutils.h" #include "mpegvideo.h" #include "msmpeg4.h" #include "qpeldsp.h" #include "thread.h" #include "wmv2.h" static enum AVPixelFormat h263_get_format(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; /* MPEG-4 Studio Profile only, not supported by hardware */ if (avctx->bits_per_raw_sample > 8) { av_assert1(s->studio_profile); return avctx->pix_fmt; } if (avctx->codec->id == AV_CODEC_ID_MSS2) return AV_PIX_FMT_YUV420P; if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY)) { if (avctx->color_range == AVCOL_RANGE_UNSPECIFIED) avctx->color_range = AVCOL_RANGE_MPEG; return AV_PIX_FMT_GRAY8; } return avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts); } av_cold int ff_h263_decode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int ret; s->out_format = FMT_H263; // set defaults ff_mpv_decode_defaults(s); ff_mpv_decode_init(s, avctx); s->quant_precision = 5; s->decode_mb = ff_h263_decode_mb; s->low_delay = 1; s->unrestricted_mv = 1; /* select sub codec */ switch (avctx->codec->id) { case AV_CODEC_ID_H263: case AV_CODEC_ID_H263P: s->unrestricted_mv = 0; avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; break; case AV_CODEC_ID_MPEG4: break; case AV_CODEC_ID_MSMPEG4V1: s->h263_pred = 1; s->msmpeg4_version = 1; break; case AV_CODEC_ID_MSMPEG4V2: s->h263_pred = 1; s->msmpeg4_version = 2; break; case AV_CODEC_ID_MSMPEG4V3: s->h263_pred = 1; s->msmpeg4_version = 3; break; case AV_CODEC_ID_WMV1: s->h263_pred = 1; s->msmpeg4_version = 4; break; case AV_CODEC_ID_WMV2: s->h263_pred = 1; s->msmpeg4_version = 5; break; case AV_CODEC_ID_VC1: case AV_CODEC_ID_WMV3: case AV_CODEC_ID_VC1IMAGE: case AV_CODEC_ID_WMV3IMAGE: case AV_CODEC_ID_MSS2: s->h263_pred = 1; s->msmpeg4_version = 6; avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break; case AV_CODEC_ID_H263I: break; case AV_CODEC_ID_FLV1: s->h263_flv = 1; break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported codec %d\n", avctx->codec->id); return AVERROR(ENOSYS); } s->codec_id = avctx->codec->id; if (avctx->codec_tag == AV_RL32("L263") || avctx->codec_tag == AV_RL32("S263")) if (avctx->extradata_size == 56 && avctx->extradata[0] == 1) s->ehc_mode = 1; /* for H.263, we allocate the images after having read the header */ if (avctx->codec->id != AV_CODEC_ID_H263 && avctx->codec->id != AV_CODEC_ID_H263P && avctx->codec->id 
!= AV_CODEC_ID_MPEG4) { avctx->pix_fmt = h263_get_format(avctx); ff_mpv_idct_init(s); if ((ret = ff_mpv_common_init(s)) < 0) return ret; } ff_h263dsp_init(&s->h263dsp); ff_qpeldsp_init(&s->qdsp); ff_h263_decode_init_vlc(); return 0; } av_cold int ff_h263_decode_end(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; ff_mpv_common_end(s); return 0; } /** * Return the number of bytes consumed for building the current frame. */ static int get_consumed_bytes(MpegEncContext *s, int buf_size) { int pos = (get_bits_count(&s->gb) + 7) >> 3; if (s->divx_packed || s->avctx->hwaccel) { /* We would have to scan through the whole buf to handle the weird * reordering ... */ return buf_size; } else if (s->avctx->flags & AV_CODEC_FLAG_TRUNCATED) { pos -= s->parse_context.last_index; // padding is not really read so this might be -1 if (pos < 0) pos = 0; return pos; } else { // avoid infinite loops (maybe not needed...) if (pos == 0) pos = 1; // oops ;) if (pos + 10 > buf_size) pos = buf_size; return pos; } } static int decode_slice(MpegEncContext *s) { const int part_mask = s->partitioned_frame ? (ER_AC_END | ER_AC_ERROR) : 0x7F; const int mb_size = 16 >> s->avctx->lowres; int ret; s->last_resync_gb = s->gb; s->first_slice_line = 1; s->resync_mb_x = s->mb_x; s->resync_mb_y = s->mb_y; ff_set_qscale(s, s->qscale); if (s->studio_profile) { if ((ret = ff_mpeg4_decode_studio_slice_header(s->avctx->priv_data)) < 0) return ret; } if (s->avctx->hwaccel) { const uint8_t *start = s->gb.buffer + get_bits_count(&s->gb) / 8; ret = s->avctx->hwaccel->decode_slice(s->avctx, start, s->gb.buffer_end - start); // ensure we exit decode loop s->mb_y = s->mb_height; return ret; } if (s->partitioned_frame) { const int qscale = s->qscale; if (CONFIG_MPEG4_DECODER && s->codec_id == AV_CODEC_ID_MPEG4) if ((ret = ff_mpeg4_decode_partitions(s->avctx->priv_data)) < 0) return ret; /* restore variables which were modified */ s->first_slice_line = 1; s->mb_x = s->resync_mb_x; s->mb_y = s->resync_mb_y; ff_set_qscale(s, qscale); } for (; s->mb_y < s->mb_height; s->mb_y++) { /* per-row end of slice checks */ if (s->msmpeg4_version) { if (s->resync_mb_y + s->slice_height == s->mb_y) { ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1, s->mb_y, ER_MB_END); return 0; } } if (s->msmpeg4_version == 1) { s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128; } ff_init_block_index(s); for (; s->mb_x < s->mb_width; s->mb_x++) { int ret; ff_update_block_index(s); if (s->resync_mb_x == s->mb_x && s->resync_mb_y + 1 == s->mb_y) s->first_slice_line = 0; /* DCT & quantize */ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; ff_dlog(s, "%d %06X\n", get_bits_count(&s->gb), show_bits(&s->gb, 24)); ff_tlog(NULL, "Decoding MB at %dx%d\n", s->mb_x, s->mb_y); ret = s->decode_mb(s, s->block); if (s->pict_type != AV_PICTURE_TYPE_B) ff_h263_update_motion_val(s); if (ret < 0) { const int xy = s->mb_x + s->mb_y * s->mb_stride; if (ret == SLICE_END) { ff_mpv_reconstruct_mb(s, s->block); if (s->loop_filter) ff_h263_loop_filter(s); ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END & part_mask); s->padding_bug_score--; if (++s->mb_x >= s->mb_width) { s->mb_x = 0; ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size); ff_mpv_report_decode_progress(s); s->mb_y++; } return 0; } else if (ret == SLICE_NOEND) { av_log(s->avctx, AV_LOG_ERROR, "Slice mismatch at MB: %d\n", xy); ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x + 1, s->mb_y, ER_MB_END & part_mask); return AVERROR_INVALIDDATA; } 
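/* Neither SLICE_END nor SLICE_NOEND: any other negative return is a hard macroblock decode error. */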
av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy); ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR & part_mask); if (s->avctx->err_recognition & AV_EF_IGNORE_ERR) continue; return AVERROR_INVALIDDATA; } ff_mpv_reconstruct_mb(s, s->block); if (s->loop_filter) ff_h263_loop_filter(s); } ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size); ff_mpv_report_decode_progress(s); s->mb_x = 0; } av_assert1(s->mb_x == 0 && s->mb_y == s->mb_height); // Detect incorrect padding with wrong stuffing codes used by NEC N-02B if (s->codec_id == AV_CODEC_ID_MPEG4 && (s->workaround_bugs & FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >= 48 && show_bits(&s->gb, 24) == 0x4010 && !s->data_partitioning) s->padding_bug_score += 32; /* try to detect the padding bug */ if (s->codec_id == AV_CODEC_ID_MPEG4 && (s->workaround_bugs & FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >= 0 && get_bits_left(&s->gb) < 137 && !s->data_partitioning) { const int bits_count = get_bits_count(&s->gb); const int bits_left = s->gb.size_in_bits - bits_count; if (bits_left == 0) { s->padding_bug_score += 16; } else if (bits_left != 1) { int v = show_bits(&s->gb, 8); v |= 0x7F >> (7 - (bits_count & 7)); if (v == 0x7F && bits_left <= 8) s->padding_bug_score--; else if (v == 0x7F && ((get_bits_count(&s->gb) + 8) & 8) && bits_left <= 16) s->padding_bug_score += 4; else s->padding_bug_score++; } } if (s->codec_id == AV_CODEC_ID_H263 && (s->workaround_bugs & FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >= 8 && get_bits_left(&s->gb) < 300 && s->pict_type == AV_PICTURE_TYPE_I && show_bits(&s->gb, 8) == 0 && !s->data_partitioning) { s->padding_bug_score += 32; } if (s->codec_id == AV_CODEC_ID_H263 && (s->workaround_bugs & FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >= 64 && AV_RB64(s->gb.buffer_end - 8) == 0xCDCDCDCDFC7F0000) { s->padding_bug_score += 32; } if (s->workaround_bugs & FF_BUG_AUTODETECT) { if ( (s->padding_bug_score > -2 && !s->data_partitioning)) s->workaround_bugs |= FF_BUG_NO_PADDING; else s->workaround_bugs &= ~FF_BUG_NO_PADDING; } // handle formats which don't have unique end markers if (s->msmpeg4_version || (s->workaround_bugs & FF_BUG_NO_PADDING)) { // FIXME perhaps solve this more cleanly int left = get_bits_left(&s->gb); int max_extra = 7; /* no markers in M$ crap */ if (s->msmpeg4_version && s->pict_type == AV_PICTURE_TYPE_I) max_extra += 17; /* buggy padding but the frame should still end approximately at * the bitstream end */ if ((s->workaround_bugs & FF_BUG_NO_PADDING) && (s->avctx->err_recognition & (AV_EF_BUFFER|AV_EF_AGGRESSIVE))) max_extra += 48; else if ((s->workaround_bugs & FF_BUG_NO_PADDING)) max_extra += 256 * 256 * 256 * 64; if (left > max_extra) av_log(s->avctx, AV_LOG_ERROR, "discarding %d junk bits at end, next would be %X\n", left, show_bits(&s->gb, 24)); else if (left < 0) av_log(s->avctx, AV_LOG_ERROR, "overreading %d bits\n", -left); else ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1, s->mb_y, ER_MB_END); return 0; } av_log(s->avctx, AV_LOG_ERROR, "slice end not reached but screenspace end (%d left %06X, score= %d)\n", get_bits_left(&s->gb), show_bits(&s->gb, 24), s->padding_bug_score); ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END & part_mask); return AVERROR_INVALIDDATA; } int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MpegEncContext *s = avctx->priv_data; int ret; int slice_ret = 0; AVFrame 
*pict = data; /* no supplementary picture */ if (buf_size == 0) { /* special case for last picture */ if (s->low_delay == 0 && s->next_picture_ptr) { if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0) return ret; s->next_picture_ptr = NULL; *got_frame = 1; } return 0; } if (s->avctx->flags & AV_CODEC_FLAG_TRUNCATED) { int next; if (CONFIG_MPEG4_DECODER && s->codec_id == AV_CODEC_ID_MPEG4) { next = ff_mpeg4_find_frame_end(&s->parse_context, buf, buf_size); } else if (CONFIG_H263_DECODER && s->codec_id == AV_CODEC_ID_H263) { next = ff_h263_find_frame_end(&s->parse_context, buf, buf_size); } else if (CONFIG_H263P_DECODER && s->codec_id == AV_CODEC_ID_H263P) { next = ff_h263_find_frame_end(&s->parse_context, buf, buf_size); } else { av_log(s->avctx, AV_LOG_ERROR, "this codec does not support truncated bitstreams\n"); return AVERROR(ENOSYS); } if (ff_combine_frame(&s->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0) return buf_size; } retry: if (s->divx_packed && s->bitstream_buffer_size) { int i; for(i=0; i < buf_size-3; i++) { if (buf[i]==0 && buf[i+1]==0 && buf[i+2]==1) { if (buf[i+3]==0xB0) { av_log(s->avctx, AV_LOG_WARNING, "Discarding excessive bitstream in packed xvid\n"); s->bitstream_buffer_size = 0; } break; } } } if (s->bitstream_buffer_size && (s->divx_packed || buf_size <= MAX_NVOP_SIZE)) // divx 5.01+/xvid frame reorder ret = init_get_bits8(&s->gb, s->bitstream_buffer, s->bitstream_buffer_size); else ret = init_get_bits8(&s->gb, buf, buf_size); s->bitstream_buffer_size = 0; if (ret < 0) return ret; if (!s->context_initialized) // we need the idct permutation for reading a custom matrix ff_mpv_idct_init(s); /* let's go :-) */ if (CONFIG_WMV2_DECODER && s->msmpeg4_version == 5) { ret = ff_wmv2_decode_picture_header(s); } else if (CONFIG_MSMPEG4_DECODER && s->msmpeg4_version) { ret = ff_msmpeg4_decode_picture_header(s); } else if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4) { if (s->avctx->extradata_size && s->picture_number == 0) { GetBitContext gb; if (init_get_bits8(&gb, s->avctx->extradata, s->avctx->extradata_size) >= 0 ) ff_mpeg4_decode_picture_header(avctx->priv_data, &gb); } ret = ff_mpeg4_decode_picture_header(avctx->priv_data, &s->gb); } else if (CONFIG_H263I_DECODER && s->codec_id == AV_CODEC_ID_H263I) { ret = ff_intel_h263_decode_picture_header(s); } else if (CONFIG_FLV_DECODER && s->h263_flv) { ret = ff_flv_decode_picture_header(s); } else { ret = ff_h263_decode_picture_header(s); } if (ret < 0 || ret == FRAME_SKIPPED) { if ( s->width != avctx->coded_width || s->height != avctx->coded_height) { av_log(s->avctx, AV_LOG_WARNING, "Reverting picture dimensions change due to header decoding failure\n"); s->width = avctx->coded_width; s->height= avctx->coded_height; } } if (ret == FRAME_SKIPPED) return get_consumed_bytes(s, buf_size); /* skip if the header was thrashed */ if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "header damaged\n"); return ret; } if (!s->context_initialized) { avctx->pix_fmt = h263_get_format(avctx); if ((ret = ff_mpv_common_init(s)) < 0) return ret; } if (!s->current_picture_ptr || s->current_picture_ptr->f->data[0]) { int i = ff_find_unused_picture(s->avctx, s->picture, 0); if (i < 0) return i; s->current_picture_ptr = &s->picture[i]; } avctx->has_b_frames = !s->low_delay; if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4) { if (ff_mpeg4_workaround_bugs(avctx) == 1) goto retry; if (s->studio_profile != (s->idsp.idct == NULL)) ff_mpv_idct_init(s); } /* After H.263 & MPEG-4 header decode we have the 
height, width, * and other parameters. So then we could init the picture. * FIXME: By the way H.263 decoder is evolving it should have * an H263EncContext */ if (s->width != avctx->coded_width || s->height != avctx->coded_height || s->context_reinit) { /* H.263 could change picture size any time */ s->context_reinit = 0; ret = ff_set_dimensions(avctx, s->width, s->height); if (ret < 0) return ret; ff_set_sar(avctx, avctx->sample_aspect_ratio); if ((ret = ff_mpv_common_frame_size_change(s))) return ret; if (avctx->pix_fmt != h263_get_format(avctx)) { av_log(avctx, AV_LOG_ERROR, "format change not supported\n"); avctx->pix_fmt = AV_PIX_FMT_NONE; return AVERROR_UNKNOWN; } } if (s->codec_id == AV_CODEC_ID_H263 || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_H263I) s->gob_index = H263_GOB_HEIGHT(s->height); // for skipping the frame s->current_picture.f->pict_type = s->pict_type; s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ if (!s->last_picture_ptr && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) return get_consumed_bytes(s, buf_size); if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) return get_consumed_bytes(s, buf_size); if (s->next_p_frame_damaged) { if (s->pict_type == AV_PICTURE_TYPE_B) return get_consumed_bytes(s, buf_size); else s->next_p_frame_damaged = 0; } if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) { s->me.qpel_put = s->qdsp.put_qpel_pixels_tab; s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab; } else { s->me.qpel_put = s->qdsp.put_no_rnd_qpel_pixels_tab; s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab; } if ((ret = ff_mpv_frame_start(s, avctx)) < 0) return ret; if (!s->divx_packed) ff_thread_finish_setup(avctx); if (avctx->hwaccel) { ret = avctx->hwaccel->start_frame(avctx, s->gb.buffer, s->gb.buffer_end - s->gb.buffer); if (ret < 0 ) return ret; } ff_mpeg_er_frame_start(s); /* the second part of the wmv2 header contains the MB skip bits which * are stored in current_picture->mb_type which is not available before * ff_mpv_frame_start() */ if (CONFIG_WMV2_DECODER && s->msmpeg4_version == 5) { ret = ff_wmv2_decode_secondary_picture_header(s); if (ret < 0) return ret; if (ret == 1) goto frame_end; } /* decode each macroblock */ s->mb_x = 0; s->mb_y = 0; slice_ret = decode_slice(s); while (s->mb_y < s->mb_height) { if (s->msmpeg4_version) { if (s->slice_height == 0 || s->mb_x != 0 || slice_ret < 0 || (s->mb_y % s->slice_height) != 0 || get_bits_left(&s->gb) < 0) break; } else { int prev_x = s->mb_x, prev_y = s->mb_y; if (ff_h263_resync(s) < 0) break; if (prev_y * s->mb_width + prev_x < s->mb_y * s->mb_width + s->mb_x) s->er.error_occurred = 1; } if (s->msmpeg4_version < 4 && s->h263_pred) ff_mpeg4_clean_buffers(s); if (decode_slice(s) < 0) slice_ret = AVERROR_INVALIDDATA; } if (s->msmpeg4_version && s->msmpeg4_version < 4 && s->pict_type == AV_PICTURE_TYPE_I) if (!CONFIG_MSMPEG4_DECODER || ff_msmpeg4_decode_ext_header(s, buf_size) < 0) s->er.error_status_table[s->mb_num - 1] = ER_MB_ERROR; av_assert1(s->bitstream_buffer_size == 0); frame_end: if (!s->studio_profile) ff_er_frame_end(&s->er); if (avctx->hwaccel) { ret = avctx->hwaccel->end_frame(avctx); if (ret < 0) return ret; } ff_mpv_frame_end(s); if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4) ff_mpeg4_frame_end(avctx, buf, buf_size); if (!s->divx_packed && 
avctx->hwaccel) ff_thread_finish_setup(avctx); av_assert1(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type); av_assert1(s->current_picture.f->pict_type == s->pict_type); if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0) return ret; ff_print_debug_info(s, s->current_picture_ptr, pict); ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1); } else if (s->last_picture_ptr) { if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0) return ret; ff_print_debug_info(s, s->last_picture_ptr, pict); ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1); } if (s->last_picture_ptr || s->low_delay) { if ( pict->format == AV_PIX_FMT_YUV420P && (s->codec_tag == AV_RL32("GEOV") || s->codec_tag == AV_RL32("GEOX"))) { int x, y, p; av_frame_make_writable(pict); for (p=0; p<3; p++) { int w = AV_CEIL_RSHIFT(pict-> width, !!p); int h = AV_CEIL_RSHIFT(pict->height, !!p); int linesize = pict->linesize[p]; for (y=0; y<(h>>1); y++) for (x=0; x<w; x++) FFSWAP(int, pict->data[p][x + y*linesize], pict->data[p][x + (h-1-y)*linesize]); } } *got_frame = 1; } if (slice_ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) return slice_ret; else return get_consumed_bytes(s, buf_size); } const enum AVPixelFormat ff_h263_hwaccel_pixfmt_list_420[] = { #if CONFIG_H263_VAAPI_HWACCEL || CONFIG_MPEG4_VAAPI_HWACCEL AV_PIX_FMT_VAAPI, #endif #if CONFIG_MPEG4_NVDEC_HWACCEL AV_PIX_FMT_CUDA, #endif #if CONFIG_MPEG4_VDPAU_HWACCEL AV_PIX_FMT_VDPAU, #endif #if CONFIG_H263_VIDEOTOOLBOX_HWACCEL || CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL AV_PIX_FMT_VIDEOTOOLBOX, #endif AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; AVCodec ff_h263_decoder = { .name = "h263", .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_H263, .priv_data_size = sizeof(MpegEncContext), .init = ff_h263_decode_init, .close = ff_h263_decode_end, .decode = ff_h263_decode_frame, .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY, .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, .flush = ff_mpeg_flush, .max_lowres = 3, .pix_fmts = ff_h263_hwaccel_pixfmt_list_420, }; AVCodec ff_h263p_decoder = { .name = "h263p", .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_H263P, .priv_data_size = sizeof(MpegEncContext), .init = ff_h263_decode_init, .close = ff_h263_decode_end, .decode = ff_h263_decode_frame, .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY, .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, .flush = ff_mpeg_flush, .max_lowres = 3, .pix_fmts = ff_h263_hwaccel_pixfmt_list_420, .hw_configs = (const AVCodecHWConfigInternal*[]) { #if CONFIG_H263_VAAPI_HWACCEL HWACCEL_VAAPI(h263), #endif #if CONFIG_MPEG4_VDPAU_HWACCEL HWACCEL_VDPAU(mpeg4), #endif #if CONFIG_H263_VIDEOTOOLBOX_HWACCEL HWACCEL_VIDEOTOOLBOX(h263), #endif NULL }, };
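/*
 * Illustrative sketch, not part of the original h263dec.c: the GEOV/GEOX
 * handling in ff_h263_decode_frame() above flips each plane vertically in
 * place by swapping mirrored rows with FFSWAP. The same technique as a
 * self-contained helper for one 8-bit plane, assuming the usual
 * data/linesize/width/height description of a plane:
 */
#include <stddef.h>
#include <stdint.h>

static void flip_plane_vertical(uint8_t *data, ptrdiff_t linesize,
                                int width, int height)
{
    /* Swap row y with row (height - 1 - y); the middle row stays put. */
    for (int y = 0; y < height / 2; y++) {
        uint8_t *top    = data + (ptrdiff_t)y * linesize;
        uint8_t *bottom = data + (ptrdiff_t)(height - 1 - y) * linesize;
        for (int x = 0; x < width; x++) {
            uint8_t tmp = top[x];
            top[x] = bottom[x];
            bottom[x] = tmp;
        }
    }
}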
./CrossVul/dataset_final_sorted/CWE-617/c/good_219_1
crossvul-cpp_data_bad_2571_2
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* kdc/kdc_util.c - Utility functions for the KDC implementation */ /* * Copyright 1990,1991,2007,2008,2009 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ /* * Copyright (c) 2006-2008, Novell, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The copyright holder's name is not used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "k5-int.h" #include "kdc_util.h" #include "extern.h" #include <stdio.h> #include <ctype.h> #include <syslog.h> #include <kadm5/admin.h> #include "adm_proto.h" #include "net-server.h" #include <limits.h> #ifdef KRBCONF_VAGUE_ERRORS const int vague_errors = 1; #else const int vague_errors = 0; #endif static krb5_error_code kdc_rd_ap_req(kdc_realm_t *kdc_active_realm, krb5_ap_req *apreq, krb5_auth_context auth_context, krb5_db_entry **server, krb5_keyblock **tgskey); static krb5_error_code find_server_key(krb5_context, krb5_db_entry *, krb5_enctype, krb5_kvno, krb5_keyblock **, krb5_kvno *); /* * concatenate first two authdata arrays, returning an allocated replacement. 
* The replacement should be freed with krb5_free_authdata(). */ krb5_error_code concat_authorization_data(krb5_context context, krb5_authdata **first, krb5_authdata **second, krb5_authdata ***output) { register int i, j; register krb5_authdata **ptr, **retdata; /* count up the entries */ i = 0; if (first) for (ptr = first; *ptr; ptr++) i++; if (second) for (ptr = second; *ptr; ptr++) i++; retdata = (krb5_authdata **)malloc((i+1)*sizeof(*retdata)); if (!retdata) return ENOMEM; retdata[i] = 0; /* null-terminated array */ for (i = 0, j = 0, ptr = first; j < 2 ; ptr = second, j++) while (ptr && *ptr) { /* now walk & copy */ retdata[i] = (krb5_authdata *)malloc(sizeof(*retdata[i])); if (!retdata[i]) { krb5_free_authdata(context, retdata); return ENOMEM; } *retdata[i] = **ptr; if (!(retdata[i]->contents = (krb5_octet *)malloc(retdata[i]->length))) { free(retdata[i]); retdata[i] = 0; krb5_free_authdata(context, retdata); return ENOMEM; } memcpy(retdata[i]->contents, (*ptr)->contents, retdata[i]->length); ptr++; i++; } *output = retdata; return 0; } krb5_boolean is_local_principal(kdc_realm_t *kdc_active_realm, krb5_const_principal princ1) { return krb5_realm_compare(kdc_context, princ1, tgs_server); } /* * Returns TRUE if the kerberos principal is the name of a Kerberos ticket * service. */ krb5_boolean krb5_is_tgs_principal(krb5_const_principal principal) { if (krb5_princ_size(kdc_context, principal) != 2) return FALSE; if (data_eq_string(*krb5_princ_component(kdc_context, principal, 0), KRB5_TGS_NAME)) return TRUE; else return FALSE; } /* Returns TRUE if principal is the name of a cross-realm TGS. */ krb5_boolean is_cross_tgs_principal(krb5_const_principal principal) { if (!krb5_is_tgs_principal(principal)) return FALSE; if (!data_eq(*krb5_princ_component(kdc_context, principal, 1), *krb5_princ_realm(kdc_context, principal))) return TRUE; else return FALSE; } /* * given authentication data (provides seed for checksum), verify checksum * for source data. */ static krb5_error_code comp_cksum(krb5_context kcontext, krb5_data *source, krb5_ticket *ticket, krb5_checksum *his_cksum) { krb5_error_code retval; krb5_boolean valid; if (!krb5_c_valid_cksumtype(his_cksum->checksum_type)) return KRB5KDC_ERR_SUMTYPE_NOSUPP; /* must be collision proof */ if (!krb5_c_is_coll_proof_cksum(his_cksum->checksum_type)) return KRB5KRB_AP_ERR_INAPP_CKSUM; /* verify checksum */ if ((retval = krb5_c_verify_checksum(kcontext, ticket->enc_part2->session, KRB5_KEYUSAGE_TGS_REQ_AUTH_CKSUM, source, his_cksum, &valid))) return(retval); if (!valid) return(KRB5KRB_AP_ERR_BAD_INTEGRITY); return(0); } /* If a header ticket is decrypted, *ticket_out is filled in even on error. 
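The caller takes ownership of it in both cases and should free it with krb5_free_ticket() when done.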
*/ krb5_error_code kdc_process_tgs_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, const krb5_fulladdr *from, krb5_data *pkt, krb5_ticket **ticket_out, krb5_db_entry **krbtgt_ptr, krb5_keyblock **tgskey, krb5_keyblock **subkey, krb5_pa_data **pa_tgs_req) { krb5_pa_data * tmppa; krb5_ap_req * apreq; krb5_error_code retval; krb5_authdata **authdata = NULL; krb5_data scratch1; krb5_data * scratch = NULL; krb5_boolean foreign_server = FALSE; krb5_auth_context auth_context = NULL; krb5_authenticator * authenticator = NULL; krb5_checksum * his_cksum = NULL; krb5_db_entry * krbtgt = NULL; krb5_ticket * ticket; *ticket_out = NULL; *krbtgt_ptr = NULL; *tgskey = NULL; tmppa = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_AP_REQ); if (!tmppa) return KRB5KDC_ERR_PADATA_TYPE_NOSUPP; scratch1.length = tmppa->length; scratch1.data = (char *)tmppa->contents; if ((retval = decode_krb5_ap_req(&scratch1, &apreq))) return retval; ticket = apreq->ticket; if (isflagset(apreq->ap_options, AP_OPTS_USE_SESSION_KEY) || isflagset(apreq->ap_options, AP_OPTS_MUTUAL_REQUIRED)) { krb5_klog_syslog(LOG_INFO, _("TGS_REQ: SESSION KEY or MUTUAL")); retval = KRB5KDC_ERR_POLICY; goto cleanup; } /* If the "server" principal in the ticket is not something in the local realm, then we must refuse to service the request if the client claims to be from the local realm. If we don't do this, then some other realm's nasty KDC can claim to be authenticating a client from our realm, and we'll give out tickets concurring with it! We set a flag here for checking below. */ foreign_server = !is_local_principal(kdc_active_realm, apreq->ticket->server); if ((retval = krb5_auth_con_init(kdc_context, &auth_context))) goto cleanup; /* Don't use a replay cache. */ if ((retval = krb5_auth_con_setflags(kdc_context, auth_context, 0))) goto cleanup; if ((retval = krb5_auth_con_setaddrs(kdc_context, auth_context, NULL, from->address))) goto cleanup_auth_context; retval = kdc_rd_ap_req(kdc_active_realm, apreq, auth_context, &krbtgt, tgskey); if (retval) goto cleanup_auth_context; /* Tickets with the "invalid" flag set may only be used in validation requests. */ if (isflagset(ticket->enc_part2->flags, TKT_FLG_INVALID) && !isflagset(request->kdc_options, KDC_OPT_VALIDATE)) { retval = KRB5KRB_AP_ERR_TKT_INVALID; goto cleanup_auth_context; } if ((retval = krb5_auth_con_getrecvsubkey(kdc_context, auth_context, subkey))) goto cleanup_auth_context; if ((retval = krb5_auth_con_getauthenticator(kdc_context, auth_context, &authenticator))) goto cleanup_auth_context; retval = krb5_find_authdata(kdc_context, ticket->enc_part2->authorization_data, authenticator->authorization_data, KRB5_AUTHDATA_FX_ARMOR, &authdata); if (retval != 0) goto cleanup_authenticator; if (authdata && authdata[0]) { k5_setmsg(kdc_context, KRB5KDC_ERR_POLICY, "ticket valid only as FAST armor"); retval = KRB5KDC_ERR_POLICY; krb5_free_authdata(kdc_context, authdata); goto cleanup_authenticator; } krb5_free_authdata(kdc_context, authdata); /* Check for a checksum */ if (!(his_cksum = authenticator->checksum)) { retval = KRB5KRB_AP_ERR_INAPP_CKSUM; goto cleanup_authenticator; } /* make sure the client is of proper lineage (see above) */ if (foreign_server && !krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_FOR_USER)) { if (is_local_principal(kdc_active_realm, ticket->enc_part2->client)) { /* someone in a foreign realm claiming to be local */ krb5_klog_syslog(LOG_INFO, _("PROCESS_TGS: failed lineage check")); retval = KRB5KDC_ERR_POLICY; goto cleanup_authenticator; } } /* * Check
application checksum vs. tgs request * * We try checksumming the req-body two different ways: first we * try reaching into the raw asn.1 stream (if available), and * checksum that directly; if that fails, then we try encoding * using our local asn.1 library. */ if (pkt && (fetch_asn1_field((unsigned char *) pkt->data, 1, 4, &scratch1) >= 0)) { if (comp_cksum(kdc_context, &scratch1, ticket, his_cksum)) { if (!(retval = encode_krb5_kdc_req_body(request, &scratch))) retval = comp_cksum(kdc_context, scratch, ticket, his_cksum); krb5_free_data(kdc_context, scratch); if (retval) goto cleanup_authenticator; } } *pa_tgs_req = tmppa; *krbtgt_ptr = krbtgt; krbtgt = NULL; cleanup_authenticator: krb5_free_authenticator(kdc_context, authenticator); cleanup_auth_context: krb5_auth_con_free(kdc_context, auth_context); cleanup: if (retval != 0) { krb5_free_keyblock(kdc_context, *tgskey); *tgskey = NULL; } if (apreq->ticket->enc_part2 != NULL) { /* Steal the decrypted ticket pointer, even on error. */ *ticket_out = apreq->ticket; apreq->ticket = NULL; } krb5_free_ap_req(kdc_context, apreq); krb5_db_free_principal(kdc_context, krbtgt); return retval; } /* * This is a KDC wrapper around krb5_rd_req_decoded_anyflag(). * * We can't depend on KDB-as-keytab for handling the AP-REQ here for * optimization reasons: we want to minimize the number of KDB lookups. We'll * need the KDB entry for the TGS principal, and the TGS key used to decrypt * the TGT, elsewhere in the TGS code. * * This function also implements key rollover support for kvno 0 cross-realm * TGTs issued by AD. */ static krb5_error_code kdc_rd_ap_req(kdc_realm_t *kdc_active_realm, krb5_ap_req *apreq, krb5_auth_context auth_context, krb5_db_entry **server, krb5_keyblock **tgskey) { krb5_error_code retval; krb5_enctype search_enctype = apreq->ticket->enc_part.enctype; krb5_boolean match_enctype = 1; krb5_kvno kvno; size_t tries = 3; /* * When we issue tickets we use the first key in the principals' highest * kvno keyset. For non-cross-realm krbtgt principals we want to only * allow the use of the first key of the principal's keyset that matches * the given kvno. */ if (krb5_is_tgs_principal(apreq->ticket->server) && !is_cross_tgs_principal(apreq->ticket->server)) { search_enctype = -1; match_enctype = 0; } retval = kdc_get_server_key(kdc_context, apreq->ticket, KRB5_KDB_FLAG_ALIAS_OK, match_enctype, server, NULL, NULL); if (retval) return retval; *tgskey = NULL; kvno = apreq->ticket->enc_part.kvno; do { krb5_free_keyblock(kdc_context, *tgskey); retval = find_server_key(kdc_context, *server, search_enctype, kvno, tgskey, &kvno); if (retval) continue; /* Make the TGS key available to krb5_rd_req_decoded_anyflag() */ retval = krb5_auth_con_setuseruserkey(kdc_context, auth_context, *tgskey); if (retval) return retval; retval = krb5_rd_req_decoded_anyflag(kdc_context, &auth_context, apreq, apreq->ticket->server, kdc_active_realm->realm_keytab, NULL, NULL); /* If the ticket was decrypted, don't try any more keys. */ if (apreq->ticket->enc_part2 != NULL) break; } while (retval && apreq->ticket->enc_part.kvno == 0 && kvno-- > 1 && --tries > 0); return retval; } /* * The KDC should take the keytab associated with the realm and pass * that to the krb5_rd_req_decoded_anyflag(), but we still need to use * the service (TGS, here) key elsewhere. This approach is faster than * the KDB keytab approach too. * * This is also used by do_tgs_req() for u2u auth. 
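(Here u2u means user-to-user authentication: the client supplies an additional TGT in the request, and the issued ticket is encrypted in that TGT's session key rather than in a long-term service key.)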
*/ krb5_error_code kdc_get_server_key(krb5_context context, krb5_ticket *ticket, unsigned int flags, krb5_boolean match_enctype, krb5_db_entry **server_ptr, krb5_keyblock **key, krb5_kvno *kvno) { krb5_error_code retval; krb5_db_entry * server = NULL; krb5_enctype search_enctype = -1; krb5_kvno search_kvno = -1; if (match_enctype) search_enctype = ticket->enc_part.enctype; if (ticket->enc_part.kvno) search_kvno = ticket->enc_part.kvno; *server_ptr = NULL; retval = krb5_db_get_principal(context, ticket->server, flags, &server); if (retval == KRB5_KDB_NOENTRY) { char *sname; if (!krb5_unparse_name(context, ticket->server, &sname)) { limit_string(sname); krb5_klog_syslog(LOG_ERR, _("TGS_REQ: UNKNOWN SERVER: server='%s'"), sname); free(sname); } return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; } else if (retval) return retval; if (server->attributes & KRB5_KDB_DISALLOW_SVR || server->attributes & KRB5_KDB_DISALLOW_ALL_TIX) { retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto errout; } if (key) { retval = find_server_key(context, server, search_enctype, search_kvno, key, kvno); if (retval) goto errout; } *server_ptr = server; server = NULL; return 0; errout: krb5_db_free_principal(context, server); return retval; } /* * A utility function to get the right key from a KDB entry. Used in handling * of kvno 0 TGTs, for example. */ static krb5_error_code find_server_key(krb5_context context, krb5_db_entry *server, krb5_enctype enctype, krb5_kvno kvno, krb5_keyblock **key_out, krb5_kvno *kvno_out) { krb5_error_code retval; krb5_key_data * server_key; krb5_keyblock * key; *key_out = NULL; retval = krb5_dbe_find_enctype(context, server, enctype, -1, kvno ? (krb5_int32)kvno : -1, &server_key); if (retval) return retval; if (!server_key) return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; if ((key = (krb5_keyblock *)malloc(sizeof *key)) == NULL) return ENOMEM; retval = krb5_dbe_decrypt_key_data(context, NULL, server_key, key, NULL); if (retval) goto errout; if (enctype != -1) { krb5_boolean similar; retval = krb5_c_enctype_compare(context, enctype, key->enctype, &similar); if (retval) goto errout; if (!similar) { retval = KRB5_KDB_NO_PERMITTED_KEY; goto errout; } key->enctype = enctype; } *key_out = key; key = NULL; if (kvno_out) *kvno_out = server_key->key_data_kvno; errout: krb5_free_keyblock(context, key); return retval; } /* * If candidate is the local TGT for realm, set *alias_out to candidate and * *storage_out to NULL. Otherwise, load the local TGT into *storage_out and * set *alias_out to *storage_out. * * In the future we might generalize this to a small per-request principal * cache. For now, it saves a load operation in the common case where the AS * server or TGS header ticket server is the local TGT. 
*/ krb5_error_code get_local_tgt(krb5_context context, const krb5_data *realm, krb5_db_entry *candidate, krb5_db_entry **alias_out, krb5_db_entry **storage_out) { krb5_error_code ret; krb5_principal princ; krb5_db_entry *tgt; *alias_out = NULL; *storage_out = NULL; ret = krb5_build_principal_ext(context, &princ, realm->length, realm->data, KRB5_TGS_NAME_SIZE, KRB5_TGS_NAME, realm->length, realm->data, 0); if (ret) return ret; if (!krb5_principal_compare(context, candidate->princ, princ)) { ret = krb5_db_get_principal(context, princ, 0, &tgt); if (!ret) *storage_out = *alias_out = tgt; } else { *alias_out = candidate; } krb5_free_principal(context, princ); return ret; } /* This probably wants to be updated if you support last_req stuff */ static krb5_last_req_entry nolrentry = { KV5M_LAST_REQ_ENTRY, KRB5_LRQ_NONE, 0 }; static krb5_last_req_entry *nolrarray[] = { &nolrentry, 0 }; krb5_error_code fetch_last_req_info(krb5_db_entry *dbentry, krb5_last_req_entry ***lrentry) { *lrentry = nolrarray; return 0; } /* XXX! This is a temporary place-holder */ krb5_error_code check_hot_list(krb5_ticket *ticket) { return 0; } /* Convert an API error code to a protocol error code. */ int errcode_to_protocol(krb5_error_code code) { int protcode; protcode = code - ERROR_TABLE_BASE_krb5; return (protcode >= 0 && protcode <= 128) ? protcode : KRB_ERR_GENERIC; } /* Return -1 if the AS or TGS request is disallowed due to KDC policy on * anonymous tickets. */ int check_anon(kdc_realm_t *kdc_active_realm, krb5_principal client, krb5_principal server) { /* If restrict_anon is set, reject requests from anonymous to principals * other than the local TGT. */ if (kdc_active_realm->realm_restrict_anon && krb5_principal_compare_any_realm(kdc_context, client, krb5_anonymous_principal()) && !krb5_principal_compare(kdc_context, server, tgs_server)) return -1; return 0; } /* * Routines that validate a AS request; checks a lot of things. :-) * * Returns a Kerberos protocol error number, which is _not_ the same * as a com_err error number! */ #define AS_INVALID_OPTIONS (KDC_OPT_FORWARDED | KDC_OPT_PROXY | \ KDC_OPT_VALIDATE | KDC_OPT_RENEW | \ KDC_OPT_ENC_TKT_IN_SKEY | KDC_OPT_CNAME_IN_ADDL_TKT) int validate_as_request(kdc_realm_t *kdc_active_realm, register krb5_kdc_req *request, krb5_db_entry client, krb5_db_entry server, krb5_timestamp kdc_time, const char **status, krb5_pa_data ***e_data) { int errcode; krb5_error_code ret; /* * If an option is set that is only allowed in TGS requests, complain. */ if (request->kdc_options & AS_INVALID_OPTIONS) { *status = "INVALID AS OPTIONS"; return KDC_ERR_BADOPTION; } /* The client must not be expired */ if (client.expiration && ts_after(kdc_time, client.expiration)) { *status = "CLIENT EXPIRED"; if (vague_errors) return(KRB_ERR_GENERIC); else return(KDC_ERR_NAME_EXP); } /* The client's password must not be expired, unless the server is a KRB5_KDC_PWCHANGE_SERVICE. */ if (client.pw_expiration && ts_after(kdc_time, client.pw_expiration) && !isflagset(server.attributes, KRB5_KDB_PWCHANGE_SERVICE)) { *status = "CLIENT KEY EXPIRED"; if (vague_errors) return(KRB_ERR_GENERIC); else return(KDC_ERR_KEY_EXP); } /* The server must not be expired */ if (server.expiration && ts_after(kdc_time, server.expiration)) { *status = "SERVICE EXPIRED"; return(KDC_ERR_SERVICE_EXP); } /* * If the client requires password changing, then only allow the * pwchange service. 
*/ if (isflagset(client.attributes, KRB5_KDB_REQUIRES_PWCHANGE) && !isflagset(server.attributes, KRB5_KDB_PWCHANGE_SERVICE)) { *status = "REQUIRED PWCHANGE"; return(KDC_ERR_KEY_EXP); } /* Client and server must allow postdating tickets */ if ((isflagset(request->kdc_options, KDC_OPT_ALLOW_POSTDATE) || isflagset(request->kdc_options, KDC_OPT_POSTDATED)) && (isflagset(client.attributes, KRB5_KDB_DISALLOW_POSTDATED) || isflagset(server.attributes, KRB5_KDB_DISALLOW_POSTDATED))) { *status = "POSTDATE NOT ALLOWED"; return(KDC_ERR_CANNOT_POSTDATE); } /* * A Windows KDC will return KDC_ERR_PREAUTH_REQUIRED instead of * KDC_ERR_POLICY in the following case: * * - KDC_OPT_FORWARDABLE is set in KDCOptions but local * policy has KRB5_KDB_DISALLOW_FORWARDABLE set for the * client, and * - KRB5_KDB_REQUIRES_PRE_AUTH is set for the client but * preauthentication data is absent in the request. * * Hence, this check must be done after the check for preauth * data, and is now performed by validate_forwardable() (the * contents of which were previously below). */ /* Client and server must allow proxiable tickets */ if (isflagset(request->kdc_options, KDC_OPT_PROXIABLE) && (isflagset(client.attributes, KRB5_KDB_DISALLOW_PROXIABLE) || isflagset(server.attributes, KRB5_KDB_DISALLOW_PROXIABLE))) { *status = "PROXIABLE NOT ALLOWED"; return(KDC_ERR_POLICY); } /* Check to see if client is locked out */ if (isflagset(client.attributes, KRB5_KDB_DISALLOW_ALL_TIX)) { *status = "CLIENT LOCKED OUT"; return(KDC_ERR_CLIENT_REVOKED); } /* Check to see if server is locked out */ if (isflagset(server.attributes, KRB5_KDB_DISALLOW_ALL_TIX)) { *status = "SERVICE LOCKED OUT"; return(KDC_ERR_S_PRINCIPAL_UNKNOWN); } /* Check to see if server is allowed to be a service */ if (isflagset(server.attributes, KRB5_KDB_DISALLOW_SVR)) { *status = "SERVICE NOT ALLOWED"; return(KDC_ERR_MUST_USE_USER2USER); } if (check_anon(kdc_active_realm, client.princ, request->server) != 0) { *status = "ANONYMOUS NOT ALLOWED"; return(KDC_ERR_POLICY); } /* Perform KDB module policy checks. */ ret = krb5_db_check_policy_as(kdc_context, request, &client, &server, kdc_time, status, e_data); if (ret && ret != KRB5_PLUGIN_OP_NOTSUPP) return errcode_to_protocol(ret); /* Check against local policy. */ errcode = against_local_policy_as(request, client, server, kdc_time, status, e_data); if (errcode) return errcode; return 0; } int validate_forwardable(krb5_kdc_req *request, krb5_db_entry client, krb5_db_entry server, krb5_timestamp kdc_time, const char **status) { *status = NULL; if (isflagset(request->kdc_options, KDC_OPT_FORWARDABLE) && (isflagset(client.attributes, KRB5_KDB_DISALLOW_FORWARDABLE) || isflagset(server.attributes, KRB5_KDB_DISALLOW_FORWARDABLE))) { *status = "FORWARDABLE NOT ALLOWED"; return(KDC_ERR_POLICY); } else return 0; } /* Return KRB5KDC_ERR_POLICY if indicators does not contain the required auth * indicators for server, ENOMEM on allocation error, 0 otherwise. */ krb5_error_code check_indicators(krb5_context context, krb5_db_entry *server, krb5_data *const *indicators) { krb5_error_code ret; char *str = NULL, *copy = NULL, *save, *ind; ret = krb5_dbe_get_string(context, server, KRB5_KDB_SK_REQUIRE_AUTH, &str); if (ret || str == NULL) goto cleanup; copy = strdup(str); if (copy == NULL) { ret = ENOMEM; goto cleanup; } /* Look for any of the space-separated strings in indicators.
*/ ind = strtok_r(copy, " ", &save); while (ind != NULL) { if (authind_contains(indicators, ind)) goto cleanup; ind = strtok_r(NULL, " ", &save); } ret = KRB5KDC_ERR_POLICY; k5_setmsg(context, ret, _("Required auth indicators not present in ticket: %s"), str); cleanup: krb5_dbe_free_string(context, str); free(copy); return ret; } #define ASN1_ID_CLASS (0xc0) #define ASN1_ID_TYPE (0x20) #define ASN1_ID_TAG (0x1f) #define ASN1_CLASS_UNIV (0) #define ASN1_CLASS_APP (1) #define ASN1_CLASS_CTX (2) #define ASN1_CLASS_PRIV (3) #define asn1_id_constructed(x) (x & ASN1_ID_TYPE) #define asn1_id_primitive(x) (!asn1_id_constructed(x)) #define asn1_id_class(x) ((x & ASN1_ID_CLASS) >> 6) #define asn1_id_tag(x) (x & ASN1_ID_TAG) /* * asn1length - return encoded length of value. * * passed a pointer into the asn.1 stream, which is updated * to point right after the length bits. * * returns -1 on failure. */ static int asn1length(unsigned char **astream) { int length; /* resulting length */ int sublen; /* sublengths */ int blen; /* bytes of length */ unsigned char *p; /* substring searching */ if (**astream & 0x80) { blen = **astream & 0x7f; if (blen > 3) { return(-1); } for (++*astream, length = 0; blen; ++*astream, blen--) { length = (length << 8) | **astream; } if (length == 0) { /* indefinite length, figure out by hand */ p = *astream; p++; while (1) { /* compute value length. */ if ((sublen = asn1length(&p)) < 0) { return(-1); } p += sublen; /* check for termination */ if ((!*p++) && (!*p)) { p++; break; } } length = p - *astream; } } else { length = **astream; ++*astream; } return(length); } /* * fetch_asn1_field - return raw asn.1 stream of subfield. * * this routine is passed a context-dependent tag number and "level" and returns * the start and length of the corresponding subfield. * * levels and fields are numbered starting from 1. * * returns 0 on success, -1 otherwise. */ int fetch_asn1_field(unsigned char *astream, unsigned int level, unsigned int field, krb5_data *data) { unsigned char *estream; /* end of stream */ int classes; /* # classes seen so far this level */ unsigned int levels = 0; /* levels seen so far */ int lastlevel = 1000; /* last level seen */ int length; /* various lengths */ int tag; /* tag number */ unsigned char savelen; /* saved length of our field */ classes = -1; /* we assume that the first identifier/length will tell us how long the entire stream is. */ astream++; estream = astream; if ((length = asn1length(&astream)) < 0) { return(-1); } estream += length; /* search down the stream, checking identifiers. we process identifiers until we hit the "level" we want, and then process that level for our subfield, always making sure we don't go off the end of the stream. */ while (astream < estream) { if (!asn1_id_constructed(*astream)) { return(-1); } if (asn1_id_class(*astream) == ASN1_CLASS_CTX) { if ((tag = (int)asn1_id_tag(*astream)) <= lastlevel) { levels++; classes = -1; } lastlevel = tag; if (levels == level) { /* in our context-dependent class, is this the one we're looking for? */ if (tag == (int)field) { /* return length and data */ astream++; savelen = *astream; if ((length = asn1length(&astream)) < 0) { return(-1); } data->length = length; /* if the field length is indefinite, we will have to subtract two (terminating octets) from the length returned since we don't want to pass any info from the "wrapper" back.
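(In BER, an indefinite-length value is terminated by an end-of-contents marker of two zero octets; those are the two bytes subtracted here.)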
asn1length will always return the *total* length of the field, not just what's contained in it */ if ((savelen & 0xff) == 0x80) { data->length -=2 ; } data->data = (char *)astream; return(0); } else if (tag <= classes) { /* we've seen this class before, something must be wrong */ return(-1); } else { classes = tag; } } } /* if we're not on our level yet, process this value. otherwise skip over it */ astream++; if ((length = asn1length(&astream)) < 0) { return(-1); } if (levels == level) { astream += length; } } return(-1); } /* Return true if we believe server can support enctype as a session key. */ static krb5_boolean dbentry_supports_enctype(kdc_realm_t *kdc_active_realm, krb5_db_entry *server, krb5_enctype enctype) { krb5_error_code retval; krb5_key_data *datap; char *etypes_str = NULL; krb5_enctype default_enctypes[1] = { 0 }; krb5_enctype *etypes = NULL; krb5_boolean in_list; /* Look up the supported session key enctypes list in the KDB. */ retval = krb5_dbe_get_string(kdc_context, server, KRB5_KDB_SK_SESSION_ENCTYPES, &etypes_str); if (retval == 0 && etypes_str != NULL && *etypes_str != '\0') { /* Pass a fake profile key for tracing of unrecognized tokens. */ retval = krb5int_parse_enctype_list(kdc_context, "KDB-session_etypes", etypes_str, default_enctypes, &etypes); if (retval == 0 && etypes != NULL && etypes[0]) { in_list = k5_etypes_contains(etypes, enctype); free(etypes_str); free(etypes); return in_list; } /* Fall through on error or empty list */ } free(etypes_str); free(etypes); /* If configured to, assume every server without a session_enctypes * attribute supports DES_CBC_CRC. */ if (kdc_active_realm->realm_assume_des_crc_sess && enctype == ENCTYPE_DES_CBC_CRC) return TRUE; /* Due to an ancient interop problem, assume nothing supports des-cbc-md5 * unless there's a session_enctypes explicitly saying that it does. */ if (enctype == ENCTYPE_DES_CBC_MD5) return FALSE; /* Assume the server supports any enctype it has a long-term key for. */ return !krb5_dbe_find_enctype(kdc_context, server, enctype, -1, 0, &datap); } /* * This function returns the keytype which should be selected for the * session key. It is based on the ordered list which the user * requested, and what the KDC and the application server can support. */ krb5_enctype select_session_keytype(kdc_realm_t *kdc_active_realm, krb5_db_entry *server, int nktypes, krb5_enctype *ktype) { int i; for (i = 0; i < nktypes; i++) { if (!krb5_c_valid_enctype(ktype[i])) continue; if (!krb5_is_permitted_enctype(kdc_context, ktype[i])) continue; if (dbentry_supports_enctype(kdc_active_realm, server, ktype[i])) return ktype[i]; } return 0; } /* * Limit strings to a "reasonable" length to prevent crowding out of * other useful information in the log entry */ #define NAME_LENGTH_LIMIT 128 void limit_string(char *name) { int i; if (!name) return; if (strlen(name) < NAME_LENGTH_LIMIT) return; i = NAME_LENGTH_LIMIT-4; name[i++] = '.'; name[i++] = '.'; name[i++] = '.'; name[i] = '\0'; return; } /* * L10_2 = log10(2**x), rounded up; log10(2) ~= 0.301. */ #define L10_2(x) ((int)(((x * 301) + 999) / 1000)) /* * Max length of sprintf("%ld") for an int of type T; includes leading * minus sign and terminating NUL. 
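For example, for a 32-bit type, L10_2(32) = (32 * 301 + 999) / 1000 = 10 decimal digits, so D_LEN evaluates to 12: enough for "-2147483648" (sign plus ten digits) and the trailing NUL.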
*/ #define D_LEN(t) (L10_2(sizeof(t) * CHAR_BIT) + 2) void ktypes2str(char *s, size_t len, int nktypes, krb5_enctype *ktype) { int i; char stmp[D_LEN(krb5_enctype) + 1]; char *p; if (nktypes < 0 || len < (sizeof(" etypes {...}") + D_LEN(int))) { *s = '\0'; return; } snprintf(s, len, "%d etypes {", nktypes); for (i = 0; i < nktypes; i++) { snprintf(stmp, sizeof(stmp), "%s%ld", i ? " " : "", (long)ktype[i]); if (strlen(s) + strlen(stmp) + sizeof("}") > len) break; strlcat(s, stmp, len); } if (i < nktypes) { /* * We broke out of the loop. Try to truncate the list. */ p = s + strlen(s); while (p - s + sizeof("...}") > len) { while (p > s && *p != ' ' && *p != '{') *p-- = '\0'; if (p > s && *p == ' ') { *p-- = '\0'; continue; } } strlcat(s, "...", len); } strlcat(s, "}", len); return; } void rep_etypes2str(char *s, size_t len, krb5_kdc_rep *rep) { char stmp[sizeof("ses=") + D_LEN(krb5_enctype)]; if (len < (3 * D_LEN(krb5_enctype) + sizeof("etypes {rep= tkt= ses=}"))) { *s = '\0'; return; } snprintf(s, len, "etypes {rep=%ld", (long)rep->enc_part.enctype); if (rep->ticket != NULL) { snprintf(stmp, sizeof(stmp), " tkt=%ld", (long)rep->ticket->enc_part.enctype); strlcat(s, stmp, len); } if (rep->ticket != NULL && rep->ticket->enc_part2 != NULL && rep->ticket->enc_part2->session != NULL) { snprintf(stmp, sizeof(stmp), " ses=%ld", (long)rep->ticket->enc_part2->session->enctype); strlcat(s, stmp, len); } strlcat(s, "}", len); return; } static krb5_error_code verify_for_user_checksum(krb5_context context, krb5_keyblock *key, krb5_pa_for_user *req) { krb5_error_code code; int i; krb5_int32 name_type; char *p; krb5_data data; krb5_boolean valid = FALSE; if (!krb5_c_is_keyed_cksum(req->cksum.checksum_type)) { return KRB5KRB_AP_ERR_INAPP_CKSUM; } /* * Checksum is over name type and string components of * client principal name and auth_package. 
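The 4-byte name type is serialized least-significant byte first, followed by the raw principal component strings, the realm, and the auth_package string, with no length prefixes or separators between them.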
*/ data.length = 4; for (i = 0; i < krb5_princ_size(context, req->user); i++) { data.length += krb5_princ_component(context, req->user, i)->length; } data.length += krb5_princ_realm(context, req->user)->length; data.length += req->auth_package.length; p = data.data = malloc(data.length); if (data.data == NULL) { return ENOMEM; } name_type = krb5_princ_type(context, req->user); p[0] = (name_type >> 0 ) & 0xFF; p[1] = (name_type >> 8 ) & 0xFF; p[2] = (name_type >> 16) & 0xFF; p[3] = (name_type >> 24) & 0xFF; p += 4; for (i = 0; i < krb5_princ_size(context, req->user); i++) { if (krb5_princ_component(context, req->user, i)->length > 0) { memcpy(p, krb5_princ_component(context, req->user, i)->data, krb5_princ_component(context, req->user, i)->length); } p += krb5_princ_component(context, req->user, i)->length; } if (krb5_princ_realm(context, req->user)->length > 0) { memcpy(p, krb5_princ_realm(context, req->user)->data, krb5_princ_realm(context, req->user)->length); } p += krb5_princ_realm(context, req->user)->length; if (req->auth_package.length > 0) memcpy(p, req->auth_package.data, req->auth_package.length); p += req->auth_package.length; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_APP_DATA_CKSUM, &data, &req->cksum, &valid); if (code == 0 && valid == FALSE) code = KRB5KRB_AP_ERR_MODIFIED; free(data.data); return code; } /* * Legacy protocol transition (Windows 2003 and above) */ static krb5_error_code kdc_process_for_user(kdc_realm_t *kdc_active_realm, krb5_pa_data *pa_data, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user **s4u_x509_user, const char **status) { krb5_error_code code; krb5_pa_for_user *for_user; krb5_data req_data; req_data.length = pa_data->length; req_data.data = (char *)pa_data->contents; code = decode_krb5_pa_for_user(&req_data, &for_user); if (code) return code; code = verify_for_user_checksum(kdc_context, tgs_session, for_user); if (code) { *status = "INVALID_S4U2SELF_CHECKSUM"; krb5_free_pa_for_user(kdc_context, for_user); return code; } *s4u_x509_user = calloc(1, sizeof(krb5_pa_s4u_x509_user)); if (*s4u_x509_user == NULL) { krb5_free_pa_for_user(kdc_context, for_user); return ENOMEM; } (*s4u_x509_user)->user_id.user = for_user->user; for_user->user = NULL; krb5_free_pa_for_user(kdc_context, for_user); return 0; } static krb5_error_code verify_s4u_x509_user_checksum(krb5_context context, krb5_keyblock *key, krb5_data *req_data, krb5_int32 kdc_req_nonce, krb5_pa_s4u_x509_user *req) { krb5_error_code code; krb5_data scratch; krb5_boolean valid = FALSE; if (enctype_requires_etype_info_2(key->enctype) && !krb5_c_is_keyed_cksum(req->cksum.checksum_type)) return KRB5KRB_AP_ERR_INAPP_CKSUM; if (req->user_id.nonce != kdc_req_nonce) return KRB5KRB_AP_ERR_MODIFIED; /* * Verify checksum over the encoded userid. If that fails, * re-encode, and verify that. This is similar to the * behaviour in kdc_process_tgs_req(). */ if (fetch_asn1_field((unsigned char *)req_data->data, 1, 0, &scratch) < 0) return ASN1_PARSE_ERROR; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, &scratch, &req->cksum, &valid); if (code != 0) return code; if (valid == FALSE) { krb5_data *data; code = encode_krb5_s4u_userid(&req->user_id, &data); if (code != 0) return code; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, data, &req->cksum, &valid); krb5_free_data(context, data); if (code != 0) return code; } return valid ? 
0 : KRB5KRB_AP_ERR_MODIFIED; } /* * New protocol transition request (Windows 2008 and above) */ static krb5_error_code kdc_process_s4u_x509_user(krb5_context context, krb5_kdc_req *request, krb5_pa_data *pa_data, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user **s4u_x509_user, const char **status) { krb5_error_code code; krb5_data req_data; req_data.length = pa_data->length; req_data.data = (char *)pa_data->contents; code = decode_krb5_pa_s4u_x509_user(&req_data, s4u_x509_user); if (code) return code; code = verify_s4u_x509_user_checksum(context, tgs_subkey ? tgs_subkey : tgs_session, &req_data, request->nonce, *s4u_x509_user); if (code) { *status = "INVALID_S4U2SELF_CHECKSUM"; krb5_free_pa_s4u_x509_user(context, *s4u_x509_user); *s4u_x509_user = NULL; return code; } if (krb5_princ_size(context, (*s4u_x509_user)->user_id.user) == 0 || (*s4u_x509_user)->user_id.subject_cert.length != 0) { *status = "INVALID_S4U2SELF_REQUEST"; krb5_free_pa_s4u_x509_user(context, *s4u_x509_user); *s4u_x509_user = NULL; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; } return 0; } krb5_error_code kdc_make_s4u2self_rep(krb5_context context, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user *req_s4u_user, krb5_kdc_rep *reply, krb5_enc_kdc_rep_part *reply_encpart) { krb5_error_code code; krb5_data *data = NULL; krb5_pa_s4u_x509_user rep_s4u_user; krb5_pa_data padata; krb5_enctype enctype; krb5_keyusage usage; memset(&rep_s4u_user, 0, sizeof(rep_s4u_user)); rep_s4u_user.user_id.nonce = req_s4u_user->user_id.nonce; rep_s4u_user.user_id.user = req_s4u_user->user_id.user; rep_s4u_user.user_id.options = req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE; code = encode_krb5_s4u_userid(&rep_s4u_user.user_id, &data); if (code != 0) goto cleanup; if (req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY; else usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST; code = krb5_c_make_checksum(context, req_s4u_user->cksum.checksum_type, tgs_subkey != NULL ? tgs_subkey : tgs_session, usage, data, &rep_s4u_user.cksum); if (code != 0) goto cleanup; krb5_free_data(context, data); data = NULL; code = encode_krb5_pa_s4u_x509_user(&rep_s4u_user, &data); if (code != 0) goto cleanup; padata.magic = KV5M_PA_DATA; padata.pa_type = KRB5_PADATA_S4U_X509_USER; padata.length = data->length; padata.contents = (krb5_octet *)data->data; code = add_pa_data_element(context, &padata, &reply->padata, FALSE); if (code != 0) goto cleanup; free(data); data = NULL; if (tgs_subkey != NULL) enctype = tgs_subkey->enctype; else enctype = tgs_session->enctype; /* * Owing to a bug in Windows, unkeyed checksums were used for older * enctypes, including rc4-hmac. A forthcoming workaround for this * includes the checksum bytes in the encrypted padata. 
*/ if ((req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) && enctype_requires_etype_info_2(enctype) == FALSE) { padata.length = req_s4u_user->cksum.length + rep_s4u_user.cksum.length; padata.contents = malloc(padata.length); if (padata.contents == NULL) { code = ENOMEM; goto cleanup; } memcpy(padata.contents, req_s4u_user->cksum.contents, req_s4u_user->cksum.length); memcpy(&padata.contents[req_s4u_user->cksum.length], rep_s4u_user.cksum.contents, rep_s4u_user.cksum.length); code = add_pa_data_element(context,&padata, &reply_encpart->enc_padata, FALSE); if (code != 0) { free(padata.contents); goto cleanup; } } cleanup: if (rep_s4u_user.cksum.contents != NULL) krb5_free_checksum_contents(context, &rep_s4u_user.cksum); krb5_free_data(context, data); return code; } /* * Protocol transition (S4U2Self) */ krb5_error_code kdc_process_s4u2self_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, krb5_const_principal client_princ, const krb5_db_entry *server, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_timestamp kdc_time, krb5_pa_s4u_x509_user **s4u_x509_user, krb5_db_entry **princ_ptr, const char **status) { krb5_error_code code; krb5_pa_data *pa_data; int flags; krb5_db_entry *princ; *princ_ptr = NULL; pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_S4U_X509_USER); if (pa_data != NULL) { code = kdc_process_s4u_x509_user(kdc_context, request, pa_data, tgs_subkey, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else { pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_FOR_USER); if (pa_data != NULL) { code = kdc_process_for_user(kdc_active_realm, pa_data, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else return 0; } /* * We need to compare the client name in the TGT with the requested * server name. Supporting server name aliases without assuming a * global name service makes this difficult to do. * * The comparison below handles the following cases (note that the * term "principal name" below excludes the realm). * * (1) The requested service is a host-based service with two name * components, in which case we assume the principal name to * contain sufficient qualifying information. The realm is * ignored for the purpose of comparison. * * (2) The requested service name is an enterprise principal name: * the service principal name is compared with the unparsed * form of the client name (including its realm). * * (3) The requested service is some other name type: an exact * match is required. * * An alternative would be to look up the server once again with * FLAG_CANONICALIZE | FLAG_CLIENT_REFERRALS_ONLY set, do an exact * match between the returned name and client_princ. However, this * assumes that the client set FLAG_CANONICALIZE when requesting * the TGT and that we have a global name service. */ flags = 0; switch (krb5_princ_type(kdc_context, request->server)) { case KRB5_NT_SRV_HST: /* (1) */ if (krb5_princ_size(kdc_context, request->server) == 2) flags |= KRB5_PRINCIPAL_COMPARE_IGNORE_REALM; break; case KRB5_NT_ENTERPRISE_PRINCIPAL: /* (2) */ flags |= KRB5_PRINCIPAL_COMPARE_ENTERPRISE; break; default: /* (3) */ break; } if (!krb5_principal_compare_flags(kdc_context, request->server, client_princ, flags)) { *status = "INVALID_S4U2SELF_REQUEST"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; /* match Windows error code */ } /* * Protocol transition is mutually exclusive with renew/forward/etc * as well as user-to-user and constrained delegation. 
This check * is also made in validate_as_request(). * * We can assert from this check that the header ticket was a TGT, as * that is validated previously in validate_tgs_request(). */ if (request->kdc_options & AS_INVALID_OPTIONS) { *status = "INVALID AS OPTIONS"; return KRB5KDC_ERR_BADOPTION; } /* * Do not attempt to lookup principals in foreign realms. */ if (is_local_principal(kdc_active_realm, (*s4u_x509_user)->user_id.user)) { krb5_db_entry no_server; krb5_pa_data **e_data = NULL; code = krb5_db_get_principal(kdc_context, (*s4u_x509_user)->user_id.user, KRB5_KDB_FLAG_INCLUDE_PAC, &princ); if (code == KRB5_KDB_NOENTRY) { *status = "UNKNOWN_S4U2SELF_PRINCIPAL"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; } else if (code) { *status = "LOOKING_UP_S4U2SELF_PRINCIPAL"; return code; /* caller can free for_user */ } memset(&no_server, 0, sizeof(no_server)); code = validate_as_request(kdc_active_realm, request, *princ, no_server, kdc_time, status, &e_data); if (code) { krb5_db_free_principal(kdc_context, princ); krb5_free_pa_data(kdc_context, e_data); return code; } *princ_ptr = princ; } return 0; } static krb5_error_code check_allowed_to_delegate_to(krb5_context context, krb5_const_principal client, const krb5_db_entry *server, krb5_const_principal proxy) { /* Can't get a TGT (otherwise it would be unconstrained delegation) */ if (krb5_is_tgs_principal(proxy)) return KRB5KDC_ERR_POLICY; /* Must be in same realm */ if (!krb5_realm_compare(context, server->princ, proxy)) return KRB5KDC_ERR_POLICY; return krb5_db_check_allowed_to_delegate(context, client, server, proxy); } krb5_error_code kdc_process_s4u2proxy_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, const krb5_enc_tkt_part *t2enc, const krb5_db_entry *server, krb5_const_principal server_princ, krb5_const_principal proxy_princ, const char **status) { krb5_error_code errcode; /* * Constrained delegation is mutually exclusive with renew/forward/etc. * We can assert from this check that the header ticket was a TGT, as * that is validated previously in validate_tgs_request(). */ if (request->kdc_options & (NON_TGT_OPTION | KDC_OPT_ENC_TKT_IN_SKEY)) { return KRB5KDC_ERR_BADOPTION; } /* Ensure that evidence ticket server matches TGT client */ if (!krb5_principal_compare(kdc_context, server->princ, /* after canon */ server_princ)) { return KRB5KDC_ERR_SERVER_NOMATCH; } if (!isflagset(t2enc->flags, TKT_FLG_FORWARDABLE)) { *status = "EVIDENCE_TKT_NOT_FORWARDABLE"; return KRB5_TKT_NOT_FORWARDABLE; } /* Backend policy check */ errcode = check_allowed_to_delegate_to(kdc_context, t2enc->client, server, proxy_princ); if (errcode) { *status = "NOT_ALLOWED_TO_DELEGATE"; return errcode; } return 0; } krb5_error_code kdc_check_transited_list(kdc_realm_t *kdc_active_realm, const krb5_data *trans, const krb5_data *realm1, const krb5_data *realm2) { krb5_error_code code; /* Check against the KDB module. Treat this answer as authoritative if the * method is supported and doesn't explicitly pass control. */ code = krb5_db_check_transited_realms(kdc_context, trans, realm1, realm2); if (code != KRB5_PLUGIN_OP_NOTSUPP && code != KRB5_PLUGIN_NO_HANDLE) return code; /* Check using krb5.conf [capaths] or hierarchical relationships. 
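(When no [capaths] entry applies, trust is inferred hierarchically from the domain-style realm names themselves.)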
*/ return krb5_check_transited_list(kdc_context, trans, realm1, realm2); } krb5_error_code validate_transit_path(krb5_context context, krb5_const_principal client, krb5_db_entry *server, krb5_db_entry *header_srv) { /* Incoming */ if (isflagset(server->attributes, KRB5_KDB_XREALM_NON_TRANSITIVE)) { return KRB5KDC_ERR_PATH_NOT_ACCEPTED; } /* Outgoing */ if (isflagset(header_srv->attributes, KRB5_KDB_XREALM_NON_TRANSITIVE) && (!krb5_principal_compare(context, server->princ, header_srv->princ) || !krb5_realm_compare(context, client, header_srv->princ))) { return KRB5KDC_ERR_PATH_NOT_ACCEPTED; } return 0; } krb5_boolean enctype_requires_etype_info_2(krb5_enctype enctype) { switch(enctype) { case ENCTYPE_DES_CBC_CRC: case ENCTYPE_DES_CBC_MD4: case ENCTYPE_DES_CBC_MD5: case ENCTYPE_DES3_CBC_SHA1: case ENCTYPE_DES3_CBC_RAW: case ENCTYPE_ARCFOUR_HMAC: case ENCTYPE_ARCFOUR_HMAC_EXP : return 0; default: return krb5_c_valid_enctype(enctype); } } /* XXX where are the generic helper routines for this? */ krb5_error_code add_pa_data_element(krb5_context context, krb5_pa_data *padata, krb5_pa_data ***inout_padata, krb5_boolean copy) { int i; krb5_pa_data **p; if (*inout_padata != NULL) { for (i = 0; (*inout_padata)[i] != NULL; i++) ; } else i = 0; p = realloc(*inout_padata, (i + 2) * sizeof(krb5_pa_data *)); if (p == NULL) return ENOMEM; *inout_padata = p; p[i] = (krb5_pa_data *)malloc(sizeof(krb5_pa_data)); if (p[i] == NULL) return ENOMEM; *(p[i]) = *padata; p[i + 1] = NULL; if (copy) { p[i]->contents = (krb5_octet *)malloc(padata->length); if (p[i]->contents == NULL) { free(p[i]); p[i] = NULL; return ENOMEM; } memcpy(p[i]->contents, padata->contents, padata->length); } return 0; } void kdc_get_ticket_endtime(kdc_realm_t *kdc_active_realm, krb5_timestamp starttime, krb5_timestamp endtime, krb5_timestamp till, krb5_db_entry *client, krb5_db_entry *server, krb5_timestamp *out_endtime) { krb5_timestamp until, life; if (till == 0) till = kdc_infinity; until = ts_min(till, endtime); life = ts_delta(until, starttime); if (client != NULL && client->max_life != 0) life = min(life, client->max_life); if (server->max_life != 0) life = min(life, server->max_life); if (kdc_active_realm->realm_maxlife != 0) life = min(life, kdc_active_realm->realm_maxlife); *out_endtime = ts_incr(starttime, life); } /* * Set tkt->renew_till to the requested renewable lifetime as modified by * policy. Set the TKT_FLG_RENEWABLE flag if we set a nonzero renew_till. * client and tgt may be NULL. */ void kdc_get_ticket_renewtime(kdc_realm_t *realm, krb5_kdc_req *request, krb5_enc_tkt_part *tgt, krb5_db_entry *client, krb5_db_entry *server, krb5_enc_tkt_part *tkt) { krb5_timestamp rtime, max_rlife; tkt->times.renew_till = 0; /* Don't issue renewable tickets if the client or server don't allow it, * or if this is a TGS request and the TGT isn't renewable. */ if (server->attributes & KRB5_KDB_DISALLOW_RENEWABLE) return; if (client != NULL && (client->attributes & KRB5_KDB_DISALLOW_RENEWABLE)) return; if (tgt != NULL && !(tgt->flags & TKT_FLG_RENEWABLE)) return; /* Determine the requested renewable time. */ if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE)) rtime = request->rtime ? request->rtime : kdc_infinity; else if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE_OK) && ts_after(request->till, tkt->times.endtime)) rtime = request->till; else return; /* Truncate it to the allowable renewable time. 
*/ if (tgt != NULL) rtime = ts_min(rtime, tgt->times.renew_till); max_rlife = min(server->max_renewable_life, realm->realm_maxrlife); if (client != NULL) max_rlife = min(max_rlife, client->max_renewable_life); rtime = ts_min(rtime, ts_incr(tkt->times.starttime, max_rlife)); /* Make the ticket renewable if the truncated requested time is larger than * the ticket end time. */ if (ts_after(rtime, tkt->times.endtime)) { setflag(tkt->flags, TKT_FLG_RENEWABLE); tkt->times.renew_till = rtime; } } /** * Handle protected negotiation of FAST using enc_padata * - If ENCPADATA_REQ_ENC_PA_REP is present, then: * - Return ENCPADATA_REQ_ENC_PA_REP with checksum of AS-REQ from client * - Include PADATA_FX_FAST in the enc_padata to indicate FAST * @pre @c out_enc_padata has space for at least two more padata * @param index in/out index into @c out_enc_padata for next item */ krb5_error_code kdc_handle_protected_negotiation(krb5_context context, krb5_data *req_pkt, krb5_kdc_req *request, const krb5_keyblock *reply_key, krb5_pa_data ***out_enc_padata) { krb5_error_code retval = 0; krb5_checksum checksum; krb5_data *out = NULL; krb5_pa_data pa, *pa_in; pa_in = krb5int_find_pa_data(context, request->padata, KRB5_ENCPADATA_REQ_ENC_PA_REP); if (pa_in == NULL) return 0; pa.magic = KV5M_PA_DATA; pa.pa_type = KRB5_ENCPADATA_REQ_ENC_PA_REP; memset(&checksum, 0, sizeof(checksum)); retval = krb5_c_make_checksum(context,0, reply_key, KRB5_KEYUSAGE_AS_REQ, req_pkt, &checksum); if (retval != 0) goto cleanup; retval = encode_krb5_checksum(&checksum, &out); if (retval != 0) goto cleanup; pa.contents = (krb5_octet *) out->data; pa.length = out->length; retval = add_pa_data_element(context, &pa, out_enc_padata, FALSE); if (retval) goto cleanup; out->data = NULL; pa.magic = KV5M_PA_DATA; pa.pa_type = KRB5_PADATA_FX_FAST; pa.length = 0; pa.contents = NULL; retval = add_pa_data_element(context, &pa, out_enc_padata, FALSE); cleanup: if (checksum.contents) krb5_free_checksum_contents(context, &checksum); if (out != NULL) krb5_free_data(context, out); return retval; } /* * Although the KDC doesn't call this function directly, * process_tcp_connection_read() in net-server.c does call it. */ krb5_error_code make_toolong_error (void *handle, krb5_data **out) { krb5_error errpkt; krb5_error_code retval; krb5_data *scratch; struct server_handle *h = handle; retval = krb5_us_timeofday(h->kdc_err_context, &errpkt.stime, &errpkt.susec); if (retval) return retval; errpkt.error = KRB_ERR_FIELD_TOOLONG; errpkt.server = h->kdc_realmlist[0]->realm_tgsprinc; errpkt.client = NULL; errpkt.cusec = 0; errpkt.ctime = 0; errpkt.text.length = 0; errpkt.text.data = 0; errpkt.e_data.length = 0; errpkt.e_data.data = 0; scratch = malloc(sizeof(*scratch)); if (scratch == NULL) return ENOMEM; retval = krb5_mk_error(h->kdc_err_context, &errpkt, scratch); if (retval) { free(scratch); return retval; } *out = scratch; return 0; } void reset_for_hangup(void *ctx) { int k; struct server_handle *h = ctx; for (k = 0; k < h->kdc_numrealms; k++) krb5_db_refresh_config(h->kdc_realmlist[k]->realm_context); }
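#if 0
/*
 * Illustrative sketch, not part of the original source: one way a caller
 * might use add_pa_data_element() (defined above) to append a single
 * pa-data item to a reply's enc_padata list. The pa-data type and the
 * contents buffer here are hypothetical placeholders.
 */
static krb5_error_code
example_append_padata(krb5_context context, krb5_pa_data ***enc_padata)
{
    krb5_pa_data pa;
    static krb5_octet contents[2] = { 0x01, 0x02 };

    pa.magic = KV5M_PA_DATA;
    pa.pa_type = KRB5_PADATA_FX_FAST;   /* any pa-data type would do */
    pa.length = sizeof(contents);
    pa.contents = contents;

    /* With copy == TRUE the helper duplicates the contents buffer, so the
     * buffer above need not outlive the resulting list. */
    return add_pa_data_element(context, &pa, enc_padata, TRUE);
}
#endif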
./CrossVul/dataset_final_sorted/CWE-617/c/bad_2571_2
crossvul-cpp_data_bad_3397_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M PPPP CCCC % % MM MM P P C % % M M M PPPP C % % M M P C % % M M P CCCC % % % % % % Read/Write Magick Persistent Cache Image Format % % % % Software Design % % Cristy % % March 2000 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/constitute.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/utility.h" #include "MagickCore/version-private.h" /* Forward declarations. */ static MagickBooleanType WriteMPCImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M P C % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMPC() returns MagickTrue if the image format type, identified by the % magick string, is a Magick Persistent Cache image. % % The format of the IsMPC method is: % % MagickBooleanType IsMPC(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsMPC(const unsigned char *magick,const size_t length) { if (length < 14) return(MagickFalse); if (LocaleNCompare((const char *) magick,"id=MagickCache",14) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d C A C H E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadMPCImage() reads a Magick Persistent Cache image file and returns % it. It allocates the memory necessary for the new Image structure and % returns a pointer to the new image.
% % The format of the ReadMPCImage method is: % % Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception) % % Decompression code contributed by Kyle Shorter. % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception) { char cache_filename[MagickPathExtent], id[MagickPathExtent], keyword[MagickPathExtent], *options; const unsigned char *p; GeometryInfo geometry_info; Image *image; int c; LinkedListInfo *profiles; MagickBooleanType status; MagickOffsetType offset; MagickStatusType flags; register ssize_t i; size_t depth, length; ssize_t count; StringInfo *profile; unsigned int signature; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) CopyMagickString(cache_filename,image->filename,MagickPathExtent); AppendImageFormat("cache",cache_filename); c=ReadBlobByte(image); if (c == EOF) { image=DestroyImage(image); return((Image *) NULL); } *id='\0'; (void) ResetMagickMemory(keyword,0,sizeof(keyword)); offset=0; do { /* Decode image header; header terminates one character beyond a ':'. */ profiles=(LinkedListInfo *) NULL; length=MagickPathExtent; options=AcquireString((char *) NULL); signature=GetMagickSignature((const StringInfo *) NULL); image->depth=8; image->compression=NoCompression; while ((isgraph(c) != MagickFalse) && (c != (int) ':')) { register char *p; if (c == (int) '{') { char *comment; /* Read comment-- any text between { }. */ length=MagickPathExtent; comment=AcquireString((char *) NULL); for (p=comment; comment != (char *) NULL; p++) { c=ReadBlobByte(image); if (c == (int) '\\') c=ReadBlobByte(image); else if ((c == EOF) || (c == (int) '}')) break; if ((size_t) (p-comment+1) >= length) { *p='\0'; length<<=1; comment=(char *) ResizeQuantumMemory(comment,length+ MagickPathExtent,sizeof(*comment)); if (comment == (char *) NULL) break; p=comment+strlen(comment); } *p=(char) c; } if (comment == (char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); *p='\0'; (void) SetImageProperty(image,"comment",comment,exception); comment=DestroyString(comment); c=ReadBlobByte(image); } else if (isalnum(c) != MagickFalse) { /* Get the keyword. */ length=MagickPathExtent; p=keyword; do { if (c == (int) '=') break; if ((size_t) (p-keyword) < (MagickPathExtent-1)) *p++=(char) c; c=ReadBlobByte(image); } while (c != EOF); *p='\0'; p=options; while (isspace((int) ((unsigned char) c)) != 0) c=ReadBlobByte(image); if (c == (int) '=') { /* Get the keyword value. 
*/ c=ReadBlobByte(image); while ((c != (int) '}') && (c != EOF)) { if ((size_t) (p-options+1) >= length) { *p='\0'; length<<=1; options=(char *) ResizeQuantumMemory(options,length+ MagickPathExtent,sizeof(*options)); if (options == (char *) NULL) break; p=options+strlen(options); } *p++=(char) c; c=ReadBlobByte(image); if (c == '\\') { c=ReadBlobByte(image); if (c == (int) '}') { *p++=(char) c; c=ReadBlobByte(image); } } if (*options != '{') if (isspace((int) ((unsigned char) c)) != 0) break; } if (options == (char *) NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } *p='\0'; if (*options == '{') (void) CopyMagickString(options,options+1,strlen(options)); /* Assign a value to the specified keyword. */ switch (*keyword) { case 'a': case 'A': { if (LocaleCompare(keyword,"alpha-trait") == 0) { ssize_t alpha_trait; alpha_trait=ParseCommandOption(MagickPixelTraitOptions, MagickFalse,options); if (alpha_trait < 0) break; image->alpha_trait=(PixelTrait) alpha_trait; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'b': case 'B': { if (LocaleCompare(keyword,"background-color") == 0) { (void) QueryColorCompliance(options,AllCompliance, &image->background_color,exception); break; } if (LocaleCompare(keyword,"blue-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y= image->chromaticity.blue_primary.x; break; } if (LocaleCompare(keyword,"border-color") == 0) { (void) QueryColorCompliance(options,AllCompliance, &image->border_color,exception); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'c': case 'C': { if (LocaleCompare(keyword,"class") == 0) { ssize_t storage_class; storage_class=ParseCommandOption(MagickClassOptions, MagickFalse,options); if (storage_class < 0) break; image->storage_class=(ClassType) storage_class; break; } if (LocaleCompare(keyword,"colors") == 0) { image->colors=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"colorspace") == 0) { ssize_t colorspace; colorspace=ParseCommandOption(MagickColorspaceOptions, MagickFalse,options); if (colorspace < 0) break; image->colorspace=(ColorspaceType) colorspace; break; } if (LocaleCompare(keyword,"compression") == 0) { ssize_t compression; compression=ParseCommandOption(MagickCompressOptions, MagickFalse,options); if (compression < 0) break; image->compression=(CompressionType) compression; break; } if (LocaleCompare(keyword,"columns") == 0) { image->columns=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'd': case 'D': { if (LocaleCompare(keyword,"delay") == 0) { image->delay=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"depth") == 0) { image->depth=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"dispose") == 0) { ssize_t dispose; dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse, options); if (dispose < 0) break; image->dispose=(DisposeType) dispose; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'e': case 'E': { if (LocaleCompare(keyword,"endian") == 0) { ssize_t endian; endian=ParseCommandOption(MagickEndianOptions,MagickFalse, options); if (endian < 0) break; image->endian=(EndianType) endian; break; } if (LocaleCompare(keyword,"error") == 0) { 
image->error.mean_error_per_pixel=StringToDouble(options, (char **) NULL); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'g': case 'G': { if (LocaleCompare(keyword,"gamma") == 0) { image->gamma=StringToDouble(options,(char **) NULL); break; } if (LocaleCompare(keyword,"green-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y= image->chromaticity.green_primary.x; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'i': case 'I': { if (LocaleCompare(keyword,"id") == 0) { (void) CopyMagickString(id,options,MagickPathExtent); break; } if (LocaleCompare(keyword,"iterations") == 0) { image->iterations=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'm': case 'M': { if (LocaleCompare(keyword,"magick-signature") == 0) { signature=(unsigned int) StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"mattecolor") == 0) { (void) QueryColorCompliance(options,AllCompliance, &image->matte_color,exception); break; } if (LocaleCompare(keyword,"maximum-error") == 0) { image->error.normalized_maximum_error=StringToDouble( options,(char **) NULL); break; } if (LocaleCompare(keyword,"mean-error") == 0) { image->error.normalized_mean_error=StringToDouble(options, (char **) NULL); break; } if (LocaleCompare(keyword,"montage") == 0) { (void) CloneString(&image->montage,options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'n': case 'N': { if (LocaleCompare(keyword,"number-channels") == 0) { image->number_channels=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"number-meta-channels") == 0) { image->number_meta_channels=StringToUnsignedLong(options); break; } break; } case 'o': case 'O': { if (LocaleCompare(keyword,"orientation") == 0) { ssize_t orientation; orientation=ParseCommandOption(MagickOrientationOptions, MagickFalse,options); if (orientation < 0) break; image->orientation=(OrientationType) orientation; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'p': case 'P': { if (LocaleCompare(keyword,"page") == 0) { char *geometry; geometry=GetPageGeometry(options); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); break; } if (LocaleCompare(keyword,"pixel-intensity") == 0) { ssize_t intensity; intensity=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,options); if (intensity < 0) break; image->intensity=(PixelIntensityMethod) intensity; break; } if ((LocaleNCompare(keyword,"profile:",8) == 0) || (LocaleNCompare(keyword,"profile-",8) == 0)) { if (profiles == (LinkedListInfo *) NULL) profiles=NewLinkedList(0); (void) AppendValueToLinkedList(profiles, AcquireString(keyword+8)); profile=BlobToStringInfo((const void *) NULL,(size_t) StringToLong(options)); if (profile == (StringInfo *) NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); (void) SetImageProfile(image,keyword+8,profile,exception); profile=DestroyStringInfo(profile); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'q': case 'Q': { if (LocaleCompare(keyword,"quality") == 0) { image->quality=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'r': case 'R': { if 
(LocaleCompare(keyword,"red-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; if ((flags & SigmaValue) != 0) image->chromaticity.red_primary.y=geometry_info.sigma; break; } if (LocaleCompare(keyword,"rendering-intent") == 0) { ssize_t rendering_intent; rendering_intent=ParseCommandOption(MagickIntentOptions, MagickFalse,options); if (rendering_intent < 0) break; image->rendering_intent=(RenderingIntent) rendering_intent; break; } if (LocaleCompare(keyword,"resolution") == 0) { flags=ParseGeometry(options,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; break; } if (LocaleCompare(keyword,"rows") == 0) { image->rows=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 's': case 'S': { if (LocaleCompare(keyword,"scene") == 0) { image->scene=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 't': case 'T': { if (LocaleCompare(keyword,"ticks-per-second") == 0) { image->ticks_per_second=(ssize_t) StringToLong(options); break; } if (LocaleCompare(keyword,"tile-offset") == 0) { char *geometry; geometry=GetPageGeometry(options); (void) ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } if (LocaleCompare(keyword,"type") == 0) { ssize_t type; type=ParseCommandOption(MagickTypeOptions,MagickFalse, options); if (type < 0) break; image->type=(ImageType) type; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'u': case 'U': { if (LocaleCompare(keyword,"units") == 0) { ssize_t units; units=ParseCommandOption(MagickResolutionOptions, MagickFalse,options); if (units < 0) break; image->units=(ResolutionType) units; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'w': case 'W': { if (LocaleCompare(keyword,"white-point") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y= image->chromaticity.white_point.x; break; } (void) SetImageProperty(image,keyword,options,exception); break; } default: { (void) SetImageProperty(image,keyword,options,exception); break; } } } else c=ReadBlobByte(image); while (isspace((int) ((unsigned char) c)) != 0) c=ReadBlobByte(image); } options=DestroyString(options); (void) ReadBlobByte(image); /* Verify that required image information is defined. */ if ((LocaleCompare(id,"MagickCache") != 0) || (image->storage_class == UndefinedClass) || (image->compression == UndefinedCompression) || (image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (signature != GetMagickSignature((const StringInfo *) NULL)) ThrowReaderException(CacheError,"IncompatibleAPI"); if (image->montage != (char *) NULL) { register char *p; /* Image directory. */ length=MagickPathExtent; image->directory=AcquireString((char *) NULL); p=image->directory; do { *p='\0'; if ((strlen(image->directory)+MagickPathExtent) >= length) { /* Allocate more memory for the image directory. 
*/ length<<=1; image->directory=(char *) ResizeQuantumMemory(image->directory, length+MagickPathExtent,sizeof(*image->directory)); if (image->directory == (char *) NULL) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); p=image->directory+strlen(image->directory); } c=ReadBlobByte(image); *p++=(char) c; } while (c != (int) '\0'); } if (profiles != (LinkedListInfo *) NULL) { const char *name; const StringInfo *profile; register unsigned char *p; /* Read image profiles. */ ResetLinkedListIterator(profiles); name=(const char *) GetNextValueInLinkedList(profiles); while (name != (const char *) NULL) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { p=GetStringInfoDatum(profile); count=ReadBlob(image,GetStringInfoLength(profile),p); } name=(const char *) GetNextValueInLinkedList(profiles); } profiles=DestroyLinkedList(profiles,RelinquishMagickMemory); } depth=GetImageQuantumDepth(image,MagickFalse); if (image->storage_class == PseudoClass) { /* Create image colormap. */ image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1, sizeof(*image->colormap)); if (image->colormap == (PixelInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->colors != 0) { size_t packet_size; unsigned char *colormap; /* Read image colormap from file. */ packet_size=(size_t) (3UL*depth/8UL); colormap=(unsigned char *) AcquireQuantumMemory(image->colors, packet_size*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,packet_size*image->colors,colormap); if (count != (ssize_t) (packet_size*image->colors)) { colormap=(unsigned char *) RelinquishMagickMemory(colormap); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } p=colormap; switch (depth) { default: colormap=(unsigned char *) RelinquishMagickMemory(colormap); ThrowReaderException(CorruptImageError, "ImageDepthNotSupported"); case 8: { unsigned char pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushCharPixel(p,&pixel); image->colormap[i].red=ScaleCharToQuantum(pixel); p=PushCharPixel(p,&pixel); image->colormap[i].green=ScaleCharToQuantum(pixel); p=PushCharPixel(p,&pixel); image->colormap[i].blue=ScaleCharToQuantum(pixel); } break; } case 16: { unsigned short pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushShortPixel(MSBEndian,p,&pixel); image->colormap[i].red=ScaleShortToQuantum(pixel); p=PushShortPixel(MSBEndian,p,&pixel); image->colormap[i].green=ScaleShortToQuantum(pixel); p=PushShortPixel(MSBEndian,p,&pixel); image->colormap[i].blue=ScaleShortToQuantum(pixel); } break; } case 32: { unsigned int pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushLongPixel(MSBEndian,p,&pixel); image->colormap[i].red=ScaleLongToQuantum(pixel); p=PushLongPixel(MSBEndian,p,&pixel); image->colormap[i].green=ScaleLongToQuantum(pixel); p=PushLongPixel(MSBEndian,p,&pixel); image->colormap[i].blue=ScaleLongToQuantum(pixel); } break; } } colormap=(unsigned char *) RelinquishMagickMemory(colormap); } } if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) || (AcquireMagickResource(HeightResource,image->rows) == MagickFalse)) 
ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit"); /* Attach persistent pixel cache. */ status=PersistPixelCache(image,cache_filename,MagickTrue,&offset,exception); if (status == MagickFalse) ThrowReaderException(CacheError,"UnableToPersistPixelCache"); /* Proceed to next image. */ do { c=ReadBlobByte(image); } while ((isgraph(c) == MagickFalse) && (c != EOF)); if (c != EOF) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (c != EOF); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r M P C I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterMPCImage() adds properties for the Cache image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterMPCImage method is: % % size_t RegisterMPCImage(void) % */ ModuleExport size_t RegisterMPCImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("MPC","CACHE", "Magick Persistent Cache image format"); entry->flags|=CoderStealthFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("MPC","MPC","Magick Persistent Cache image format"); entry->decoder=(DecodeImageHandler *) ReadMPCImage; entry->encoder=(EncodeImageHandler *) WriteMPCImage; entry->magick=(IsImageFormatHandler *) IsMPC; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r M P C I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterMPCImage() removes format registrations made by the % MPC module from the list of supported formats. % % The format of the UnregisterMPCImage method is: % % UnregisterMPCImage(void) % */ ModuleExport void UnregisterMPCImage(void) { (void) UnregisterMagickInfo("CACHE"); (void) UnregisterMagickInfo("MPC"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e M P C I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteMPCImage() writes a Magick Persistent Cache image to a file. % % The format of the WriteMPCImage method is: % % MagickBooleanType WriteMPCImage(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WriteMPCImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { char buffer[MagickPathExtent], cache_filename[MagickPathExtent]; const char *property, *value; MagickBooleanType status; MagickOffsetType offset, scene; register ssize_t i; size_t depth; /* Open persistent cache.
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) CopyMagickString(cache_filename,image->filename,MagickPathExtent); AppendImageFormat("cache",cache_filename); scene=0; offset=0; do { /* Write persistent cache meta-information. */ depth=GetImageQuantumDepth(image,MagickTrue); if ((image->storage_class == PseudoClass) && (image->colors > (size_t) (GetQuantumRange(image->depth)+1))) (void) SetImageStorageClass(image,DirectClass,exception); (void) WriteBlobString(image,"id=MagickCache\n"); (void) FormatLocaleString(buffer,MagickPathExtent,"magick-signature=%u\n", GetMagickSignature((const StringInfo *) NULL)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "class=%s colors=%.20g alpha-trait=%s\n",CommandOptionToMnemonic( MagickClassOptions,image->storage_class),(double) image->colors, CommandOptionToMnemonic(MagickPixelTraitOptions,(ssize_t) image->alpha_trait)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "number-channels=%.20g number-meta-channels=%.20g\n", (double) image->number_channels,(double) image->number_meta_channels); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "columns=%.20g rows=%.20g depth=%.20g\n",(double) image->columns, (double) image->rows,(double) image->depth); (void) WriteBlobString(image,buffer); if (image->type != UndefinedType) { (void) FormatLocaleString(buffer,MagickPathExtent,"type=%s\n", CommandOptionToMnemonic(MagickTypeOptions,image->type)); (void) WriteBlobString(image,buffer); } (void) FormatLocaleString(buffer,MagickPathExtent,"colorspace=%s\n", CommandOptionToMnemonic(MagickColorspaceOptions,image->colorspace)); (void) WriteBlobString(image,buffer); if (image->intensity != UndefinedPixelIntensityMethod) { (void) FormatLocaleString(buffer,MagickPathExtent, "pixel-intensity=%s\n",CommandOptionToMnemonic( MagickPixelIntensityOptions,image->intensity)); (void) WriteBlobString(image,buffer); } if (image->endian != UndefinedEndian) { (void) FormatLocaleString(buffer,MagickPathExtent,"endian=%s\n", CommandOptionToMnemonic(MagickEndianOptions,image->endian)); (void) WriteBlobString(image,buffer); } if (image->compression != UndefinedCompression) { (void) FormatLocaleString(buffer,MagickPathExtent, "compression=%s quality=%.20g\n",CommandOptionToMnemonic( MagickCompressOptions,image->compression),(double) image->quality); (void) WriteBlobString(image,buffer); } if (image->units != UndefinedResolution) { (void) FormatLocaleString(buffer,MagickPathExtent,"units=%s\n", CommandOptionToMnemonic(MagickResolutionOptions,image->units)); (void) WriteBlobString(image,buffer); } if ((image->resolution.x != 0) || (image->resolution.y != 0)) { (void) FormatLocaleString(buffer,MagickPathExtent, "resolution=%gx%g\n",image->resolution.x,image->resolution.y); (void) WriteBlobString(image,buffer); } if ((image->page.width != 0) || (image->page.height != 0)) { (void) FormatLocaleString(buffer,MagickPathExtent, "page=%.20gx%.20g%+.20g%+.20g\n",(double) image->page.width,(double) image->page.height,(double) 
image->page.x,(double) image->page.y); (void) WriteBlobString(image,buffer); } else if ((image->page.x != 0) || (image->page.y != 0)) { (void) FormatLocaleString(buffer,MagickPathExtent,"page=%+ld%+ld\n", (long) image->page.x,(long) image->page.y); (void) WriteBlobString(image,buffer); } if ((image->tile_offset.x != 0) || (image->tile_offset.y != 0)) { (void) FormatLocaleString(buffer,MagickPathExtent, "tile-offset=%+ld%+ld\n",(long) image->tile_offset.x,(long) image->tile_offset.y); (void) WriteBlobString(image,buffer); } if ((GetNextImageInList(image) != (Image *) NULL) || (GetPreviousImageInList(image) != (Image *) NULL)) { if (image->scene == 0) (void) FormatLocaleString(buffer,MagickPathExtent, "iterations=%.20g delay=%.20g ticks-per-second=%.20g\n",(double) image->iterations,(double) image->delay,(double) image->ticks_per_second); else (void) FormatLocaleString(buffer,MagickPathExtent,"scene=%.20g " "iterations=%.20g delay=%.20g ticks-per-second=%.20g\n", (double) image->scene,(double) image->iterations,(double) image->delay,(double) image->ticks_per_second); (void) WriteBlobString(image,buffer); } else { if (image->scene != 0) { (void) FormatLocaleString(buffer,MagickPathExtent,"scene=%.20g\n", (double) image->scene); (void) WriteBlobString(image,buffer); } if (image->iterations != 0) { (void) FormatLocaleString(buffer,MagickPathExtent, "iterations=%.20g\n",(double) image->iterations); (void) WriteBlobString(image,buffer); } if (image->delay != 0) { (void) FormatLocaleString(buffer,MagickPathExtent,"delay=%.20g\n", (double) image->delay); (void) WriteBlobString(image,buffer); } if (image->ticks_per_second != UndefinedTicksPerSecond) { (void) FormatLocaleString(buffer,MagickPathExtent, "ticks-per-second=%.20g\n",(double) image->ticks_per_second); (void) WriteBlobString(image,buffer); } } if (image->gravity != UndefinedGravity) { (void) FormatLocaleString(buffer,MagickPathExtent,"gravity=%s\n", CommandOptionToMnemonic(MagickGravityOptions,image->gravity)); (void) WriteBlobString(image,buffer); } if (image->dispose != UndefinedDispose) { (void) FormatLocaleString(buffer,MagickPathExtent,"dispose=%s\n", CommandOptionToMnemonic(MagickDisposeOptions,image->dispose)); (void) WriteBlobString(image,buffer); } if (image->rendering_intent != UndefinedIntent) { (void) FormatLocaleString(buffer,MagickPathExtent, "rendering-intent=%s\n",CommandOptionToMnemonic(MagickIntentOptions, image->rendering_intent)); (void) WriteBlobString(image,buffer); } if (image->gamma != 0.0) { (void) FormatLocaleString(buffer,MagickPathExtent,"gamma=%g\n", image->gamma); (void) WriteBlobString(image,buffer); } if (image->chromaticity.white_point.x != 0.0) { /* Note chromaticity points.
*/ (void) FormatLocaleString(buffer,MagickPathExtent,"red-primary=" "%g,%g green-primary=%g,%g blue-primary=%g,%g\n", image->chromaticity.red_primary.x,image->chromaticity.red_primary.y, image->chromaticity.green_primary.x, image->chromaticity.green_primary.y, image->chromaticity.blue_primary.x, image->chromaticity.blue_primary.y); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "white-point=%g,%g\n",image->chromaticity.white_point.x, image->chromaticity.white_point.y); (void) WriteBlobString(image,buffer); } if (image->orientation != UndefinedOrientation) { (void) FormatLocaleString(buffer,MagickPathExtent, "orientation=%s\n",CommandOptionToMnemonic(MagickOrientationOptions, image->orientation)); (void) WriteBlobString(image,buffer); } if (image->profiles != (void *) NULL) { const char *name; const StringInfo *profile; /* Generic profile. */ ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { (void) FormatLocaleString(buffer,MagickPathExtent, "profile:%s=%.20g\n",name,(double) GetStringInfoLength(profile)); (void) WriteBlobString(image,buffer); } name=GetNextImageProfile(image); } } if (image->montage != (char *) NULL) { (void) FormatLocaleString(buffer,MagickPathExtent,"montage=%s\n", image->montage); (void) WriteBlobString(image,buffer); } ResetImagePropertyIterator(image); property=GetNextImageProperty(image); while (property != (const char *) NULL) { (void) FormatLocaleString(buffer,MagickPathExtent,"%s=",property); (void) WriteBlobString(image,buffer); value=GetImageProperty(image,property,exception); if (value != (const char *) NULL) { size_t length; length=strlen(value); for (i=0; i < (ssize_t) length; i++) if (isspace((int) ((unsigned char) value[i])) != 0) break; if ((i == (ssize_t) length) && (i != 0)) (void) WriteBlob(image,length,(const unsigned char *) value); else { (void) WriteBlobByte(image,'{'); if (strchr(value,'}') == (char *) NULL) (void) WriteBlob(image,length,(const unsigned char *) value); else for (i=0; i < (ssize_t) length; i++) { if (value[i] == (int) '}') (void) WriteBlobByte(image,'\\'); (void) WriteBlobByte(image,value[i]); } (void) WriteBlobByte(image,'}'); } } (void) WriteBlobByte(image,'\n'); property=GetNextImageProperty(image); } (void) WriteBlobString(image,"\f\n:\032"); if (image->montage != (char *) NULL) { /* Write montage tile directory. */ if (image->directory != (char *) NULL) (void) WriteBlobString(image,image->directory); (void) WriteBlobByte(image,'\0'); } if (image->profiles != 0) { const char *name; const StringInfo *profile; /* Write image profiles. */ ResetImageProfileIterator(image); name=GetNextImageProfile(image); while (name != (const char *) NULL) { profile=GetImageProfile(image,name); (void) WriteBlob(image,GetStringInfoLength(profile), GetStringInfoDatum(profile)); name=GetNextImageProfile(image); } } if (image->storage_class == PseudoClass) { size_t packet_size; unsigned char *colormap, *q; /* Allocate colormap. */ packet_size=(size_t) (3UL*depth/8UL); colormap=(unsigned char *) AcquireQuantumMemory(image->colors, packet_size*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) return(MagickFalse); /* Write colormap to file. 
*/ q=colormap; for (i=0; i < (ssize_t) image->colors; i++) { switch (depth) { default: ThrowWriterException(CorruptImageError,"ImageDepthNotSupported"); case 32: { unsigned int pixel; pixel=ScaleQuantumToLong(image->colormap[i].red); q=PopLongPixel(MSBEndian,pixel,q); pixel=ScaleQuantumToLong(image->colormap[i].green); q=PopLongPixel(MSBEndian,pixel,q); pixel=ScaleQuantumToLong(image->colormap[i].blue); q=PopLongPixel(MSBEndian,pixel,q); break; } case 16: { unsigned short pixel; pixel=ScaleQuantumToShort(image->colormap[i].red); q=PopShortPixel(MSBEndian,pixel,q); pixel=ScaleQuantumToShort(image->colormap[i].green); q=PopShortPixel(MSBEndian,pixel,q); pixel=ScaleQuantumToShort(image->colormap[i].blue); q=PopShortPixel(MSBEndian,pixel,q); break; } case 8: { unsigned char pixel; pixel=(unsigned char) ScaleQuantumToChar(image->colormap[i].red); q=PopCharPixel(pixel,q); pixel=(unsigned char) ScaleQuantumToChar( image->colormap[i].green); q=PopCharPixel(pixel,q); pixel=(unsigned char) ScaleQuantumToChar(image->colormap[i].blue); q=PopCharPixel(pixel,q); break; } } } (void) WriteBlob(image,packet_size*image->colors,colormap); colormap=(unsigned char *) RelinquishMagickMemory(colormap); } /* Initialize persistent pixel cache. */ status=PersistPixelCache(image,cache_filename,MagickFalse,&offset, exception); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { status=image->progress_monitor(SaveImagesTag,scene, GetImageListLength(image),image->client_data); if (status == MagickFalse) break; } scene++; } while (image_info->adjoin != MagickFalse); (void) CloseBlob(image); return(status); }
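#if 0
/*
 * Illustrative sketch, not part of the original source: reading an MPC
 * image pair (an image.mpc header plus its image.cache sidecar) through
 * the public MagickCore API; ReadImage() dispatches to ReadMPCImage()
 * above via the registered decoder. The filename is a hypothetical
 * placeholder.
 */
static Image *example_read_mpc(ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *image_info;

  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"image.mpc",MagickPathExtent);
  image=ReadImage(image_info,exception);  /* invokes ReadMPCImage() */
  image_info=DestroyImageInfo(image_info);
  return(image);
}
#endif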
./CrossVul/dataset_final_sorted/CWE-617/c/bad_3397_0
crossvul-cpp_data_bad_1770_4
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #ifdef ENABLE_SONMP #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> #include <assert.h> static struct sonmp_chassis sonmp_chassis_types[] = { {1, "unknown (via SONMP)"}, {2, "Nortel 3000"}, {3, "Nortel 3030"}, {4, "Nortel 2310"}, {5, "Nortel 2810"}, {6, "Nortel 2912"}, {7, "Nortel 2914"}, {8, "Nortel 271x"}, {9, "Nortel 2813"}, {10, "Nortel 2814"}, {11, "Nortel 2915"}, {12, "Nortel 5000"}, {13, "Nortel 2813SA"}, {14, "Nortel 2814SA"}, {15, "Nortel 810M"}, {16, "Nortel EtherCell"}, {17, "Nortel 5005"}, {18, "Alcatel Ethernet workgroup conc."}, {20, "Nortel 2715SA"}, {21, "Nortel 2486"}, {22, "Nortel 28000 series"}, {23, "Nortel 23000 series"}, {24, "Nortel 5DN00x series"}, {25, "BayStack Ethernet"}, {26, "Nortel 23100 series"}, {27, "Nortel 100Base-T Hub"}, {28, "Nortel 3000 Fast Ethernet"}, {29, "Nortel Orion switch"}, {30, "unknown"}, {31, "Nortel DDS "}, {32, "Nortel Centillion"}, {33, "Nortel Centillion"}, {34, "Nortel Centillion"}, {35, "BayStack 301"}, {36, "BayStack TokenRing Hub"}, {37, "Nortel FVC Multimedia Switch"}, {38, "Nortel Switch Node"}, {39, "BayStack 302 Switch"}, {40, "BayStack 350 Switch"}, {41, "BayStack 150 Ethernet Hub"}, {42, "Nortel Centillion 50N switch"}, {43, "Nortel Centillion 50T switch"}, {44, "BayStack 303 and 304 Switches"}, {45, "BayStack 200 Ethernet Hub"}, {46, "BayStack 250 10/100 Ethernet Hub"}, {48, "BayStack 450 10/100/1000 Switches"}, {49, "BayStack 410 10/100 Switches"}, {50, "Nortel Ethernet Routing 1200 L3 Switch"}, {51, "Nortel Ethernet Routing 1250 L3 Switch"}, {52, "Nortel Ethernet Routing 1100 L3 Switch"}, {53, "Nortel Ethernet Routing 1150 L3 Switch"}, {54, "Nortel Ethernet Routing 1050 L3 Switch"}, {55, "Nortel Ethernet Routing 1051 L3 Switch"}, {56, "Nortel Ethernet Routing 8610 L3 Switch"}, {57, "Nortel Ethernet Routing 8606 L3 Switch"}, {58, "Nortel Ethernet Routing Switch 8010"}, {59, "Nortel Ethernet Routing Switch 8006"}, {60, "BayStack 670 wireless access point"}, {61, "Nortel Ethernet Routing Switch 740 "}, {62, "Nortel Ethernet Routing Switch 750 "}, {63, "Nortel Ethernet Routing Switch 790"}, {64, "Nortel Business Policy Switch 2000 10/100 Switches"}, {65, "Nortel Ethernet Routing 8110 L2 Switch"}, {66, "Nortel Ethernet Routing 8106 L2 Switch"}, {67, "BayStack 3580 Gig Switch"}, {68, "BayStack 10 Power Supply Unit"}, {69, "BayStack 420 10/100 Switch"}, {70, "OPTera Metro 1200 Ethernet Service Module"}, {71, "Nortel Ethernet Routing Switch 8010co"}, {72, "Nortel Ethernet Routing 8610co L3 switch"}, {73, "Nortel Ethernet Routing 8110co L2 switch"}, {74, "Nortel Ethernet Routing 8003"}, {75, "Nortel Ethernet Routing 8603 L3 switch"}, {76, "Nortel 
Ethernet Routing 8103 L2 switch"}, {77, "BayStack 380 10/100/1000 Switch"}, {78, "Nortel Ethernet Switch 470-48T"}, {79, "OPTera Metro 1450 Ethernet Service Module"}, {80, "OPTera Metro 1400 Ethernet Service Module"}, {81, "Alteon Switch Family"}, {82, "Ethernet Switch 460-24T-PWR"}, {83, "OPTera Metro 8010 OPM L2 Switch"}, {84, "OPTera Metro 8010co OPM L2 Switch"}, {85, "OPTera Metro 8006 OPM L2 Switch"}, {86, "OPTera Metro 8003 OPM L2 Switch"}, {87, "Alteon 180e"}, {88, "Alteon AD3"}, {89, "Alteon 184"}, {90, "Alteon AD4"}, {91, "Nortel Ethernet Routing 1424 L3 switch"}, {92, "Nortel Ethernet Routing 1648 L3 switch"}, {93, "Nortel Ethernet Routing 1612 L3 switch"}, {94, "Nortel Ethernet Routing 1624 L3 switch "}, {95, "BayStack 380-24F Fiber 1000 Switch"}, {96, "Nortel Ethernet Routing Switch 5510-24T"}, {97, "Nortel Ethernet Routing Switch 5510-48T"}, {98, "Nortel Ethernet Switch 470-24T"}, {99, "Nortel Networks Wireless LAN Access Point 2220"}, {100, "Ethernet Routing RBS 2402 L3 switch"}, {101, "Alteon Application Switch 2424 "}, {102, "Alteon Application Switch 2224 "}, {103, "Alteon Application Switch 2208 "}, {104, "Alteon Application Switch 2216"}, {105, "Alteon Application Switch 3408"}, {106, "Alteon Application Switch 3416"}, {107, "Nortel Networks Wireless LAN SecuritySwitch 2250"}, {108, "Ethernet Switch 425-48T"}, {109, "Ethernet Switch 425-24T"}, {110, "Nortel Networks Wireless LAN Access Point 2221"}, {111, "Nortel Metro Ethernet Service Unit 24-T SPF switch"}, {112, "Nortel Metro Ethernet Service Unit 24-T LX DC switch"}, {113, "Nortel Ethernet Routing Switch 8300 10-slot chassis"}, {114, "Nortel Ethernet Routing Switch 8300 6-slot chassis"}, {115, "Nortel Ethernet Routing Switch 5520-24T-PWR"}, {116, "Nortel Ethernet Routing Switch 5520-48T-PWR"}, {117, "Nortel Networks VPN Gateway 3050"}, {118, "Alteon SSL 310 10/100"}, {119, "Alteon SSL 310 10/100 Fiber"}, {120, "Alteon SSL 310 10/100 FIPS"}, {121, "Alteon SSL 410 10/100/1000"}, {122, "Alteon SSL 410 10/100/1000 Fiber"}, {123, "Alteon Application Switch 2424-SSL"}, {124, "Nortel Ethernet Switch 325-24T"}, {125, "Nortel Ethernet Switch 325-24G"}, {126, "Nortel Networks Wireless LAN Access Point 2225"}, {127, "Nortel Networks Wireless LAN SecuritySwitch 2270"}, {128, "Nortel 24-port Ethernet Switch 470-24T-PWR"}, {129, "Nortel 48-port Ethernet Switch 470-48T-PWR"}, {130, "Nortel Ethernet Routing Switch 5530-24TFD"}, {131, "Nortel Ethernet Switch 3510-24T"}, {132, "Nortel Metro Ethernet Service Unit 12G AC L3 switch"}, {133, "Nortel Metro Ethernet Service Unit 12G DC L3 switch"}, {134, "Nortel Secure Access Switch"}, {135, "Networks VPN Gateway 3070"}, {136, "OPTera Metro 3500"}, {137, "SMB BES 1010 24T"}, {138, "SMB BES 1010 48T"}, {139, "SMB BES 1020 24T PWR"}, {140, "SMB BES 1020 48T PWR"}, {141, "SMB BES 2010 24T"}, {142, "SMB BES 2010 48T"}, {143, "SMB BES 2020 24T PWR"}, {144, "SMB BES 2020 48T PWR"}, {145, "SMB BES 110 24T"}, {146, "SMB BES 110 48T"}, {147, "SMB BES 120 24T PWR"}, {148, "SMB BES 120 48T PWR"}, {149, "SMB BES 210 24T"}, {150, "SMB BES 210 48T"}, {151, "SMB BES 220 24T PWR"}, {152, "SMB BES 220 48T PWR"}, {153, "OME 6500"}, {0, "unknown (via SONMP)"}, }; int sonmp_send(struct lldpd *global, struct lldpd_hardware *hardware) { const u_int8_t mcastaddr[] = SONMP_MULTICAST_ADDR; const u_int8_t llcorg[] = LLC_ORG_NORTEL; struct lldpd_chassis *chassis; struct lldpd_mgmt *mgmt; u_int8_t *packet, *pos, *pos_pid, *end; int length; struct in_addr address; log_debug("sonmp", "send SONMP PDU to %s", 
hardware->h_ifname); chassis = hardware->h_lport.p_chassis; length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( /* SONMP multicast address as target */ POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && /* Source MAC address */ POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && /* SONMP frame is of fixed size */ POKE_UINT16(SONMP_SIZE))) goto toobig; /* LLC header */ if (!( /* DSAP and SSAP */ POKE_UINT8(0xaa) && POKE_UINT8(0xaa) && /* Control field */ POKE_UINT8(0x03) && /* ORG */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_SAVE(pos_pid) && /* We will modify PID later to create a new frame */ POKE_UINT16(LLC_PID_SONMP_HELLO))) goto toobig; address.s_addr = htonl(INADDR_ANY); TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { if (mgmt->m_family == LLDPD_AF_IPV4) { address.s_addr = mgmt->m_addr.inet.s_addr; } break; } /* SONMP */ if (!( /* Our IP address */ POKE_BYTES(&address, sizeof(struct in_addr)) && /* Segment on three bytes, we don't have slots, so we skip the first two bytes */ POKE_UINT16(0) && POKE_UINT8(hardware->h_ifindex) && POKE_UINT8(1) && /* Chassis: Other */ POKE_UINT8(12) && /* Back: Ethernet, Fast Ethernet and Gigabit */ POKE_UINT8(SONMP_TOPOLOGY_NEW) && /* Should work. We have no state */ POKE_UINT8(1) && /* Links: Dunno what it is */ POKE_SAVE(end))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("sonmp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } POKE_RESTORE(pos_pid); /* Modify LLC PID */ (void)POKE_UINT16(LLC_PID_SONMP_FLATNET); POKE_RESTORE(packet); /* Go to the beginning */ PEEK_DISCARD(ETHER_ADDR_LEN - 1); /* Modify the last byte of the MAC address */ (void)POKE_UINT8(1); if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("sonmp", "unable to send second SONMP packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } free(packet); hardware->h_tx_cnt++; return 0; toobig: free(packet); return -1; } int sonmp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { const u_int8_t mcastaddr[] = SONMP_MULTICAST_ADDR; struct lldpd_chassis *chassis; struct lldpd_port *port; struct lldpd_mgmt *mgmt; int length, i; u_int8_t *pos; u_int8_t seg[3], rchassis; struct in_addr address; log_debug("sonmp", "decode SONMP PDU from %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("sonmp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("sonmp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < SONMP_SIZE) { log_warnx("sonmp", "too short SONMP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(mcastaddr, sizeof(mcastaddr)) != 0) /* There are two multicast addresses. We handle only one of * them.
*/ goto malformed; /* We skip to LLC PID */ PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; PEEK_DISCARD(6); if (PEEK_UINT16 != LLC_PID_SONMP_HELLO) { log_debug("sonmp", "incorrect LLC protocol ID received for SONMP on %s", hardware->h_ifname); goto malformed; } chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_ADDR; if ((chassis->c_id = calloc(1, sizeof(struct in_addr) + 1)) == NULL) { log_warn("sonmp", "unable to allocate memory for chassis id on %s", hardware->h_ifname); goto malformed; } chassis->c_id_len = sizeof(struct in_addr) + 1; chassis->c_id[0] = 1; PEEK_BYTES(&address, sizeof(struct in_addr)); memcpy(chassis->c_id + 1, &address, sizeof(struct in_addr)); if (asprintf(&chassis->c_name, "%s", inet_ntoa(address)) == -1) { log_warnx("sonmp", "unable to write chassis name for %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(seg, sizeof(seg)); rchassis = PEEK_UINT8; for (i=0; sonmp_chassis_types[i].type != 0; i++) { if (sonmp_chassis_types[i].type == rchassis) break; } if (asprintf(&chassis->c_descr, "%s", sonmp_chassis_types[i].description) == -1) { log_warnx("sonmp", "unable to write chassis description for %s", hardware->h_ifname); goto malformed; } mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &address, sizeof(struct in_addr), 0); if (mgmt == NULL) { assert(errno == ENOMEM); log_warn("sonmp", "unable to allocate memory for management address"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); chassis->c_ttl = cfg?(cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold): LLDPD_TTL; port->p_id_subtype = LLDP_PORTID_SUBTYPE_LOCAL; if (asprintf(&port->p_id, "%02x-%02x-%02x", seg[0], seg[1], seg[2]) == -1) { log_warn("sonmp", "unable to allocate memory for port id on %s", hardware->h_ifname); goto malformed; } port->p_id_len = strlen(port->p_id); /* Port description depend on the number of segments */ if ((seg[0] == 0) && (seg[1] == 0)) { if (asprintf(&port->p_descr, "port %d", seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } else if (seg[0] == 0) { if (asprintf(&port->p_descr, "port %d/%d", seg[1], seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } else { if (asprintf(&port->p_descr, "port %x:%x:%x", seg[0], seg[1], seg[2]) == -1) { log_warnx("sonmp", "unable to write port description for %s", hardware->h_ifname); goto malformed; } } *newchassis = chassis; *newport = port; return 1; malformed: lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #endif /* ENABLE_SONMP */
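#if 0
/*
 * Illustrative sketch, not part of the original source: the POKE_*
 * helpers from frame.h used by sonmp_send() above appear to keep an
 * implicit (pos, length) cursor in scope and evaluate to 0 when the
 * buffer would overflow, which is why the sender chains them with &&
 * and jumps to `toobig` on failure. The buffer and values below are
 * hypothetical.
 */
static int
example_poke(void)
{
	u_int8_t buffer[16], *pos = buffer, *save;
	int length = sizeof(buffer);

	if (!(POKE_UINT8(0xaa) &&	/* one byte */
	      POKE_UINT16(0x0102) &&	/* 16-bit value, network order */
	      POKE_SAVE(save)))		/* remember the current cursor */
		return -1;		/* would not fit in `buffer` */
	return save - buffer;		/* bytes written so far: 3 */
}
#endif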
./CrossVul/dataset_final_sorted/CWE-617/c/bad_1770_4
crossvul-cpp_data_bad_1219_0
/*- * Copyright (c) 2011 NetApp, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include <errno.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <strings.h> #include <assert.h> #include <stdbool.h> #include "dm.h" #include "vmmapi.h" #include "acpi.h" #include "inout.h" #include "ioapic.h" #include "mem.h" #include "pci_core.h" #include "irq.h" #include "lpc.h" #include "sw_load.h" #define CONF1_ADDR_PORT 0x0cf8 #define CONF1_DATA_PORT 0x0cfc #define CONF1_ENABLE 0x80000000ul #define MAXBUSES (PCI_BUSMAX + 1) #define MAXSLOTS (PCI_SLOTMAX + 1) #define MAXFUNCS (PCI_FUNCMAX + 1) struct funcinfo { char *fi_name; char *fi_param; char *fi_param_saved; /* save for reboot */ struct pci_vdev *fi_devi; }; struct intxinfo { int ii_count; int ii_pirq_pin; int ii_ioapic_irq; }; struct slotinfo { struct intxinfo si_intpins[4]; struct funcinfo si_funcs[MAXFUNCS]; }; struct businfo { uint16_t iobase, iolimit; /* I/O window */ uint32_t membase32, memlimit32; /* mmio window below 4GB */ uint64_t membase64, memlimit64; /* mmio window above 4GB */ struct slotinfo slotinfo[MAXSLOTS]; }; static struct businfo *pci_businfo[MAXBUSES]; SET_DECLARE(pci_vdev_ops_set, struct pci_vdev_ops); static uint64_t pci_emul_iobase; static uint64_t pci_emul_membase32; static uint64_t pci_emul_membase64; extern bool skip_pci_mem64bar_workaround; #define PCI_EMUL_IOBASE 0x2000 #define PCI_EMUL_IOLIMIT 0x10000 #define PCI_EMUL_ECFG_SIZE (MAXBUSES * 1024 * 1024) /* 1MB per bus */ SYSRES_MEM(PCI_EMUL_ECFG_BASE, PCI_EMUL_ECFG_SIZE); #define PCI_EMUL_MEMLIMIT32 PCI_EMUL_ECFG_BASE static struct pci_vdev_ops *pci_emul_finddev(char *name); static void pci_lintr_route(struct pci_vdev *dev); static void pci_lintr_update(struct pci_vdev *dev); static void pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func, int coff, int bytes, uint32_t *val); static void pci_emul_free_msixcap(struct pci_vdev *pdi); static inline void CFGWRITE(struct pci_vdev *dev, int coff, uint32_t val, int bytes) { if (bytes == 1) pci_set_cfgdata8(dev, coff, val); else if (bytes == 2) pci_set_cfgdata16(dev, coff, val); else pci_set_cfgdata32(dev, coff, val); } static inline uint32_t CFGREAD(struct pci_vdev *dev, int coff, int bytes) { if (bytes == 1) return pci_get_cfgdata8(dev, coff); 
else if (bytes == 2) return pci_get_cfgdata16(dev, coff); else return pci_get_cfgdata32(dev, coff); } /* * I/O access */ /* * Slot options are in the form: * * <bus>:<slot>:<func>,<emul>[,<config>] * <slot>[:<func>],<emul>[,<config>] * * slot is 0..31 * func is 0..7 * emul is a string describing the type of PCI device e.g. virtio-net * config is an optional string, depending on the device, that can be * used for configuration. * Examples are: * 1,virtio-net,tap0 * 3:0,dummy */ static void pci_parse_slot_usage(char *aopt) { fprintf(stderr, "Invalid PCI slot info field \"%s\"\n", aopt); } int parse_bdf(char *s, int *bus, int *dev, int *func, int base) { char *s_bus, *s_dev, *s_func; char *str, *cp; int ret = 0; str = cp = strdup(s); bus ? *bus = 0 : 0; dev ? *dev = 0 : 0; func ? *func = 0 : 0; s_bus = s_dev = s_func = NULL; s_dev = strsep(&cp, ":/."); if (cp) { s_func = strsep(&cp, ":/."); if (cp) { s_bus = s_dev; s_dev = s_func; s_func = strsep(&cp, ":/."); } } if (s_dev && dev) ret |= dm_strtoi(s_dev, &s_dev, base, dev); if (s_func && func) ret |= dm_strtoi(s_func, &s_func, base, func); if (s_bus && bus) ret |= dm_strtoi(s_bus, &s_bus, base, bus); free(str); return ret; } int pci_parse_slot(char *opt) { struct businfo *bi; struct slotinfo *si; char *emul, *config, *str, *cp, *b = NULL; int error, bnum, snum, fnum; error = -1; str = strdup(opt); if (!str) { fprintf(stderr, "%s: strdup returns NULL\n", __func__); return -1; } emul = config = NULL; cp = str; str = strsep(&cp, ","); if (cp) { emul = strsep(&cp, ","); /* for boot device */ if (cp && *cp == 'b' && *(cp+1) == ',') b = strsep(&cp, ","); config = cp; } else { pci_parse_slot_usage(opt); goto done; } /* <bus>:<slot>:<func> */ if (parse_bdf(str, &bnum, &snum, &fnum, 10) != 0) snum = -1; if (bnum < 0 || bnum >= MAXBUSES || snum < 0 || snum >= MAXSLOTS || fnum < 0 || fnum >= MAXFUNCS) { pci_parse_slot_usage(opt); goto done; } if (pci_businfo[bnum] == NULL) pci_businfo[bnum] = calloc(1, sizeof(struct businfo)); bi = pci_businfo[bnum]; si = &bi->slotinfo[snum]; if (si->si_funcs[fnum].fi_name != NULL) { fprintf(stderr, "pci slot %d:%d already occupied!\n", snum, fnum); goto done; } if (pci_emul_finddev(emul) == NULL) { fprintf(stderr, "pci slot %d:%d: unknown device \"%s\"\n", snum, fnum, emul); goto done; } error = 0; si->si_funcs[fnum].fi_name = emul; /* saved fi param in case reboot */ si->si_funcs[fnum].fi_param_saved = config; if (b != NULL) { if ((strcmp("virtio-blk", emul) == 0) && (b != NULL) && (strchr(b, 'b') != NULL)) { vsbl_set_bdf(bnum, snum, fnum); } } done: if (error) free(str); return error; } static int pci_valid_pba_offset(struct pci_vdev *dev, uint64_t offset) { if (offset < dev->msix.pba_offset) return 0; if (offset >= dev->msix.pba_offset + dev->msix.pba_size) return 0; return 1; } int pci_emul_msix_twrite(struct pci_vdev *dev, uint64_t offset, int size, uint64_t value) { int msix_entry_offset; int tab_index; char *dest; /* support only 4 or 8 byte writes */ if (size != 4 && size != 8) return -1; /* * Return if table index is beyond what device supports */ tab_index = offset / MSIX_TABLE_ENTRY_SIZE; if (tab_index >= dev->msix.table_count) return -1; msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; /* support only aligned writes */ if ((msix_entry_offset % size) != 0) return -1; dest = (char *)(dev->msix.table + tab_index); dest += msix_entry_offset; if (size == 4) *((uint32_t *)dest) = value; else *((uint64_t *)dest) = value; return 0; } uint64_t pci_emul_msix_tread(struct pci_vdev *dev, uint64_t offset, int size) { 
char *dest; int msix_entry_offset; int tab_index; uint64_t retval = ~0; /* * The PCI standard only allows 4 and 8 byte accesses to the MSI-X * table but we also allow 1 byte access to accommodate reads from * ddb. */ if (size != 1 && size != 4 && size != 8) return retval; msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; /* support only aligned reads */ if ((msix_entry_offset % size) != 0) return retval; tab_index = offset / MSIX_TABLE_ENTRY_SIZE; if (tab_index < dev->msix.table_count) { /* valid MSI-X Table access */ dest = (char *)(dev->msix.table + tab_index); dest += msix_entry_offset; if (size == 1) retval = *((uint8_t *)dest); else if (size == 4) retval = *((uint32_t *)dest); else retval = *((uint64_t *)dest); } else if (pci_valid_pba_offset(dev, offset)) { /* return 0 for PBA access */ retval = 0; } return retval; } int pci_msix_table_bar(struct pci_vdev *dev) { if (dev->msix.table != NULL) return dev->msix.table_bar; else return -1; } int pci_msix_pba_bar(struct pci_vdev *dev) { if (dev->msix.table != NULL) return dev->msix.pba_bar; else return -1; } static inline uint64_t bar_value(int size, uint64_t val) { uint64_t mask; assert(size == 1 || size == 2 || size == 4 || size == 8); mask = (size < 8 ? 1UL << (size * 8) : 0UL) - 1; return val & mask; } static int pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes, uint32_t *eax, void *arg) { struct pci_vdev *pdi = arg; struct pci_vdev_ops *ops = pdi->dev_ops; uint64_t offset; int i; for (i = 0; i <= PCI_BARMAX; i++) { if (pdi->bar[i].type == PCIBAR_IO && port >= pdi->bar[i].addr && port + bytes <= pdi->bar[i].addr + pdi->bar[i].size) { offset = port - pdi->bar[i].addr; if (in) { *eax = (*ops->vdev_barread)(ctx, vcpu, pdi, i, offset, bytes); *eax = bar_value(bytes, *eax); } else (*ops->vdev_barwrite)(ctx, vcpu, pdi, i, offset, bytes, bar_value(bytes, *eax)); return 0; } } return -1; } static int pci_emul_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, int size, uint64_t *val, void *arg1, long arg2) { struct pci_vdev *pdi = arg1; struct pci_vdev_ops *ops = pdi->dev_ops; uint64_t offset; int bidx = (int) arg2; assert(bidx <= PCI_BARMAX); assert(pdi->bar[bidx].type == PCIBAR_MEM32 || pdi->bar[bidx].type == PCIBAR_MEM64); assert(addr >= pdi->bar[bidx].addr && addr + size <= pdi->bar[bidx].addr + pdi->bar[bidx].size); offset = addr - pdi->bar[bidx].addr; if (dir == MEM_F_WRITE) { if (size == 8) { (*ops->vdev_barwrite)(ctx, vcpu, pdi, bidx, offset, 4, *val & 0xffffffff); (*ops->vdev_barwrite)(ctx, vcpu, pdi, bidx, offset + 4, 4, *val >> 32); } else { (*ops->vdev_barwrite)(ctx, vcpu, pdi, bidx, offset, size, bar_value(size, *val)); } } else { if (size == 8) { uint64_t val_lo, val_hi; val_lo = (*ops->vdev_barread)(ctx, vcpu, pdi, bidx, offset, 4); val_lo = bar_value(4, val_lo); val_hi = (*ops->vdev_barread)(ctx, vcpu, pdi, bidx, offset + 4, 4); *val = val_lo | (val_hi << 32); } else { *val = (*ops->vdev_barread)(ctx, vcpu, pdi, bidx, offset, size); *val = bar_value(size, *val); } } return 0; } static int pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size, uint64_t *addr) { uint64_t base; assert((size & (size - 1)) == 0); /* must be a power of 2 */ base = roundup2(*baseptr, size); if (base + size <= limit) { *addr = base; *baseptr = base + size; return 0; } else return -1; } int pci_emul_alloc_bar(struct pci_vdev *pdi, int idx, enum pcibar_type type, uint64_t size) { return pci_emul_alloc_pbar(pdi, idx, 0, type, size); } /* * Register (or unregister) the MMIO or I/O region 
associated with the BAR * register 'idx' of an emulated pci device. */ static void modify_bar_registration(struct pci_vdev *dev, int idx, int registration) { int error; struct inout_port iop; struct mem_range mr; switch (dev->bar[idx].type) { case PCIBAR_IO: bzero(&iop, sizeof(struct inout_port)); iop.name = dev->name; iop.port = dev->bar[idx].addr; iop.size = dev->bar[idx].size; if (registration) { iop.flags = IOPORT_F_INOUT; iop.handler = pci_emul_io_handler; iop.arg = dev; error = register_inout(&iop); } else error = unregister_inout(&iop); break; case PCIBAR_MEM32: case PCIBAR_MEM64: bzero(&mr, sizeof(struct mem_range)); mr.name = dev->name; mr.base = dev->bar[idx].addr; mr.size = dev->bar[idx].size; if (registration) { mr.flags = MEM_F_RW; mr.handler = pci_emul_mem_handler; mr.arg1 = dev; mr.arg2 = idx; error = register_mem(&mr); } else error = unregister_mem(&mr); break; default: error = EINVAL; break; } assert(error == 0); } static void unregister_bar(struct pci_vdev *dev, int idx) { modify_bar_registration(dev, idx, 0); } static void register_bar(struct pci_vdev *dev, int idx) { modify_bar_registration(dev, idx, 1); } /* Are we decoding i/o port accesses for the emulated pci device? */ static int porten(struct pci_vdev *dev) { uint16_t cmd; cmd = pci_get_cfgdata16(dev, PCIR_COMMAND); return (cmd & PCIM_CMD_PORTEN); } /* Are we decoding memory accesses for the emulated pci device? */ static int memen(struct pci_vdev *dev) { uint16_t cmd; cmd = pci_get_cfgdata16(dev, PCIR_COMMAND); return (cmd & PCIM_CMD_MEMEN); } /* * Update the MMIO or I/O address that is decoded by the BAR register. * * If the pci device has enabled the address space decoding then intercept * the address range decoded by the BAR register. */ static void update_bar_address(struct pci_vdev *dev, uint64_t addr, int idx, int type) { int decode; if (dev->bar[idx].type == PCIBAR_IO) decode = porten(dev); else decode = memen(dev); if (decode) unregister_bar(dev, idx); switch (type) { case PCIBAR_IO: case PCIBAR_MEM32: dev->bar[idx].addr = addr; break; case PCIBAR_MEM64: dev->bar[idx].addr &= ~0xffffffffUL; dev->bar[idx].addr |= addr; break; case PCIBAR_MEMHI64: dev->bar[idx].addr &= 0xffffffff; dev->bar[idx].addr |= addr; break; default: assert(0); } if (decode) register_bar(dev, idx); } int pci_emul_alloc_pbar(struct pci_vdev *pdi, int idx, uint64_t hostbase, enum pcibar_type type, uint64_t size) { int error; uint64_t *baseptr, limit, addr, mask, lobits, bar; assert(idx >= 0 && idx <= PCI_BARMAX); if ((size & (size - 1)) != 0) size = 1UL << flsl(size); /* round up to a power of 2 */ /* Enforce minimum BAR sizes required by the PCI standard */ if (type == PCIBAR_IO) { if (size < 4) size = 4; } else { if (size < 16) size = 16; } switch (type) { case PCIBAR_NONE: baseptr = NULL; addr = mask = lobits = 0; break; case PCIBAR_IO: baseptr = &pci_emul_iobase; limit = PCI_EMUL_IOLIMIT; mask = PCIM_BAR_IO_BASE; lobits = PCIM_BAR_IO_SPACE; break; case PCIBAR_MEM64: /* * FIXME * Some drivers do not work well if the 64-bit BAR is allocated * above 4GB. Allow for this by allocating small requests under * 4GB unless then allocation size is larger than some arbitrary * number (32MB currently). If guest booted by ovmf, then skip the * workaround. 
*/ if (!skip_pci_mem64bar_workaround && (size <= 32 * 1024 * 1024)) { baseptr = &pci_emul_membase32; limit = PCI_EMUL_MEMLIMIT32; mask = PCIM_BAR_MEM_BASE; lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64; break; } /* * XXX special case for device requiring peer-peer DMA */ if (size == 0x100000000UL) baseptr = &hostbase; else baseptr = &pci_emul_membase64; limit = PCI_EMUL_MEMLIMIT64; mask = PCIM_BAR_MEM_BASE; lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | PCIM_BAR_MEM_PREFETCH; break; case PCIBAR_MEM32: baseptr = &pci_emul_membase32; limit = PCI_EMUL_MEMLIMIT32; mask = PCIM_BAR_MEM_BASE; lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; break; default: printf("pci_emul_alloc_base: invalid bar type %d\n", type); assert(0); } if (baseptr != NULL) { error = pci_emul_alloc_resource(baseptr, limit, size, &addr); if (error != 0) return error; } pdi->bar[idx].type = type; pdi->bar[idx].addr = addr; pdi->bar[idx].size = size; /* Initialize the BAR register in config space */ bar = (addr & mask) | lobits; pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar); if (type == PCIBAR_MEM64) { assert(idx + 1 <= PCI_BARMAX); pdi->bar[idx + 1].type = PCIBAR_MEMHI64; pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32); } register_bar(pdi, idx); return 0; } void pci_emul_free_bars(struct pci_vdev *pdi) { int i, enabled; for (i = 0; i < PCI_BARMAX; i++) { if ((pdi->bar[i].type != PCIBAR_NONE) && (pdi->bar[i].type != PCIBAR_MEMHI64)){ /* * Check whether the bar is enabled or not, * if it is disabled then it should have been * unregistered in pci_emul_cmdsts_write. */ if (pdi->bar[i].type == PCIBAR_IO) enabled = porten(pdi); else enabled = memen(pdi); if (enabled) unregister_bar(pdi, i); pdi->bar[i].type = PCIBAR_NONE; } } } #define CAP_START_OFFSET 0x40 int pci_emul_add_capability(struct pci_vdev *dev, u_char *capdata, int caplen) { int i, capoff, reallen; uint16_t sts; assert(caplen > 0); reallen = roundup2(caplen, 4); /* dword aligned */ sts = pci_get_cfgdata16(dev, PCIR_STATUS); if ((sts & PCIM_STATUS_CAPPRESENT) == 0) capoff = CAP_START_OFFSET; else capoff = dev->capend + 1; /* Check if we have enough space */ if (capoff + reallen > PCI_REGMAX + 1) return -1; /* Set the previous capability pointer */ if ((sts & PCIM_STATUS_CAPPRESENT) == 0) { pci_set_cfgdata8(dev, PCIR_CAP_PTR, capoff); pci_set_cfgdata16(dev, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT); } else pci_set_cfgdata8(dev, dev->prevcap + 1, capoff); /* Copy the capability */ for (i = 0; i < caplen; i++) pci_set_cfgdata8(dev, capoff + i, capdata[i]); /* Set the next capability pointer */ pci_set_cfgdata8(dev, capoff + 1, 0); dev->prevcap = capoff; dev->capend = capoff + reallen - 1; return 0; } /* * p_capoff is used as both input and output. Set *p_capoff to 0 when this * function is called for the first time, it will return offset of the first * matched one in p_capoff. To find the next matched one, please use the * returned *p_capoff from last call as the input, in this case the offset of * the next matched one will be returned in *p_capoff. * Please check the returned value first before touch p_capoff. 
*/ int pci_emul_find_capability(struct pci_vdev *dev, uint8_t capid, int *p_capoff) { int coff; uint16_t sts; sts = pci_get_cfgdata16(dev, PCIR_STATUS); if ((sts & PCIM_STATUS_CAPPRESENT) == 0) return -1; if (!p_capoff) return -1; if (*p_capoff == 0) coff = pci_get_cfgdata8(dev, PCIR_CAP_PTR); else if (*p_capoff >= CAP_START_OFFSET && *p_capoff <= dev->prevcap) coff = pci_get_cfgdata8(dev, *p_capoff + 1); else return -1; while (coff >= CAP_START_OFFSET && coff <= dev->prevcap) { if (pci_get_cfgdata8(dev, coff) == capid) { *p_capoff = coff; return 0; } coff = pci_get_cfgdata8(dev, coff + 1); } return -1; } static struct pci_vdev_ops * pci_emul_finddev(char *name) { struct pci_vdev_ops **pdpp, *pdp; SET_FOREACH(pdpp, pci_vdev_ops_set) { pdp = *pdpp; if (!strcmp(pdp->class_name, name)) return pdp; } return NULL; } static int pci_emul_init(struct vmctx *ctx, struct pci_vdev_ops *ops, int bus, int slot, int func, struct funcinfo *fi) { struct pci_vdev *pdi; int err; pdi = calloc(1, sizeof(struct pci_vdev)); if (!pdi) { fprintf(stderr, "%s: calloc returns NULL\n", __func__); return -1; } pdi->vmctx = ctx; pdi->bus = bus; pdi->slot = slot; pdi->func = func; pthread_mutex_init(&pdi->lintr.lock, NULL); pdi->lintr.pin = 0; pdi->lintr.state = IDLE; pdi->lintr.pirq_pin = 0; pdi->lintr.ioapic_irq = 0; pdi->dev_ops = ops; snprintf(pdi->name, PI_NAMESZ, "%s-pci-%d", ops->class_name, slot); /* Disable legacy interrupts */ pci_set_cfgdata8(pdi, PCIR_INTLINE, 255); pci_set_cfgdata8(pdi, PCIR_INTPIN, 0); pci_set_cfgdata8(pdi, PCIR_COMMAND, PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); if (fi->fi_param_saved) fi->fi_param = strdup(fi->fi_param_saved); else fi->fi_param = NULL; err = (*ops->vdev_init)(ctx, pdi, fi->fi_param); if (err == 0) fi->fi_devi = pdi; else free(pdi); return err; } static void pci_emul_deinit(struct vmctx *ctx, struct pci_vdev_ops *ops, int bus, int slot, int func, struct funcinfo *fi) { if (ops->vdev_deinit && fi->fi_devi) (*ops->vdev_deinit)(ctx, fi->fi_devi, fi->fi_param); if (fi->fi_param) free(fi->fi_param); if (fi->fi_devi) { pci_lintr_release(fi->fi_devi); pci_emul_free_bars(fi->fi_devi); pci_emul_free_msixcap(fi->fi_devi); free(fi->fi_devi); } } void pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr) { int mmc; /* Number of msi messages must be a power of 2 between 1 and 32 */ assert((msgnum & (msgnum - 1)) == 0 && msgnum >= 1 && msgnum <= 32); mmc = ffs(msgnum) - 1; bzero(msicap, sizeof(struct msicap)); msicap->capid = PCIY_MSI; msicap->nextptr = nextptr; msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1); } int pci_emul_add_msicap(struct pci_vdev *dev, int msgnum) { struct msicap msicap; pci_populate_msicap(&msicap, msgnum, 0); return pci_emul_add_capability(dev, (u_char *)&msicap, sizeof(msicap)); } static void pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum, uint32_t msix_tab_size) { assert(msix_tab_size % 4096 == 0); bzero(msixcap, sizeof(struct msixcap)); msixcap->capid = PCIY_MSIX; /* * Message Control Register, all fields set to * zero except for the Table Size. 
* Note: Table size N is encoded as N-1 */ msixcap->msgctrl = msgnum - 1; /* * MSI-X BAR setup: * - MSI-X table start at offset 0 * - PBA table starts at a 4K aligned offset after the MSI-X table */ msixcap->table_info = barnum & PCIM_MSIX_BIR_MASK; msixcap->pba_info = msix_tab_size | (barnum & PCIM_MSIX_BIR_MASK); } static void pci_msix_table_init(struct pci_vdev *dev, int table_entries) { int i, table_size; assert(table_entries > 0); assert(table_entries <= MAX_MSIX_TABLE_ENTRIES); table_size = table_entries * MSIX_TABLE_ENTRY_SIZE; dev->msix.table = calloc(1, table_size); assert(dev->msix.table != NULL); /* set mask bit of vector control register */ for (i = 0; i < table_entries; i++) dev->msix.table[i].vector_control |= PCIM_MSIX_VCTRL_MASK; } int pci_emul_add_msixcap(struct pci_vdev *dev, int msgnum, int barnum) { uint32_t tab_size; struct msixcap msixcap; assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES); assert(barnum >= 0 && barnum <= PCIR_MAX_BAR_0); tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE; /* Align table size to nearest 4K */ tab_size = roundup2(tab_size, 4096); dev->msix.table_bar = barnum; dev->msix.pba_bar = barnum; dev->msix.table_offset = 0; dev->msix.table_count = msgnum; dev->msix.pba_offset = tab_size; dev->msix.pba_size = PBA_SIZE(msgnum); pci_msix_table_init(dev, msgnum); pci_populate_msixcap(&msixcap, msgnum, barnum, tab_size); /* allocate memory for MSI-X Table and PBA */ pci_emul_alloc_bar(dev, barnum, PCIBAR_MEM32, tab_size + dev->msix.pba_size); return (pci_emul_add_capability(dev, (u_char *)&msixcap, sizeof(msixcap))); } static void pci_emul_free_msixcap(struct pci_vdev *pdi) { if (pdi->msix.table) { free(pdi->msix.table); pdi->msix.table = NULL; } } void msixcap_cfgwrite(struct pci_vdev *dev, int capoff, int offset, int bytes, uint32_t val) { uint16_t msgctrl, rwmask; int off; off = offset - capoff; /* Message Control Register */ if (off == 2 && bytes == 2) { rwmask = PCIM_MSIXCTRL_MSIX_ENABLE | PCIM_MSIXCTRL_FUNCTION_MASK; msgctrl = pci_get_cfgdata16(dev, offset); msgctrl &= ~rwmask; msgctrl |= val & rwmask; val = msgctrl; dev->msix.enabled = val & PCIM_MSIXCTRL_MSIX_ENABLE; dev->msix.function_mask = val & PCIM_MSIXCTRL_FUNCTION_MASK; pci_lintr_update(dev); } CFGWRITE(dev, offset, val, bytes); } void msicap_cfgwrite(struct pci_vdev *dev, int capoff, int offset, int bytes, uint32_t val) { uint16_t msgctrl, rwmask, msgdata, mme; uint32_t addrlo; /* * If guest is writing to the message control register make sure * we do not overwrite read-only fields. */ if ((offset - capoff) == 2 && bytes == 2) { rwmask = PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE; msgctrl = pci_get_cfgdata16(dev, offset); msgctrl &= ~rwmask; msgctrl |= val & rwmask; val = msgctrl; addrlo = pci_get_cfgdata32(dev, capoff + 4); if (msgctrl & PCIM_MSICTRL_64BIT) msgdata = pci_get_cfgdata16(dev, capoff + 12); else msgdata = pci_get_cfgdata16(dev, capoff + 8); mme = msgctrl & PCIM_MSICTRL_MME_MASK; dev->msi.enabled = msgctrl & PCIM_MSICTRL_MSI_ENABLE ? 
1 : 0; if (dev->msi.enabled) { dev->msi.addr = addrlo; dev->msi.msg_data = msgdata; dev->msi.maxmsgnum = 1 << (mme >> 4); } else { dev->msi.maxmsgnum = 0; } pci_lintr_update(dev); } CFGWRITE(dev, offset, val, bytes); } void pciecap_cfgwrite(struct pci_vdev *dev, int capoff, int offset, int bytes, uint32_t val) { /* XXX don't write to the readonly parts */ CFGWRITE(dev, offset, val, bytes); } #define PCIECAP_VERSION 0x2 int pci_emul_add_pciecap(struct pci_vdev *dev, int type) { int err; struct pciecap pciecap; if (type != PCIEM_TYPE_ROOT_PORT) return -1; bzero(&pciecap, sizeof(pciecap)); pciecap.capid = PCIY_EXPRESS; pciecap.pcie_capabilities = PCIECAP_VERSION | PCIEM_TYPE_ROOT_PORT; pciecap.link_capabilities = 0x411; /* gen1, x1 */ pciecap.link_status = 0x11; /* gen1, x1 */ err = pci_emul_add_capability(dev, (u_char *)&pciecap, sizeof(pciecap)); return err; } /* * This function assumes that 'coff' is in the capabilities region of the * config space. */ static void pci_emul_capwrite(struct pci_vdev *dev, int offset, int bytes, uint32_t val) { int capid; uint8_t capoff, nextoff; /* Do not allow un-aligned writes */ if ((offset & (bytes - 1)) != 0) return; /* Find the capability that we want to update */ capoff = CAP_START_OFFSET; while (1) { nextoff = pci_get_cfgdata8(dev, capoff + 1); if (nextoff == 0) break; if (offset >= capoff && offset < nextoff) break; capoff = nextoff; } assert(offset >= capoff); /* * Capability ID and Next Capability Pointer are readonly. * However, some o/s's do 4-byte writes that include these. * For this case, trim the write back to 2 bytes and adjust * the data. */ if (offset == capoff || offset == capoff + 1) { if (offset == capoff && bytes == 4) { bytes = 2; offset += 2; val >>= 16; } else return; } capid = pci_get_cfgdata8(dev, capoff); switch (capid) { case PCIY_MSI: msicap_cfgwrite(dev, capoff, offset, bytes, val); break; case PCIY_MSIX: msixcap_cfgwrite(dev, capoff, offset, bytes, val); break; case PCIY_EXPRESS: pciecap_cfgwrite(dev, capoff, offset, bytes, val); break; default: CFGWRITE(dev, offset, val, bytes); break; } } static int pci_emul_iscap(struct pci_vdev *dev, int offset) { uint16_t sts; sts = pci_get_cfgdata16(dev, PCIR_STATUS); if ((sts & PCIM_STATUS_CAPPRESENT) != 0) { if (offset >= CAP_START_OFFSET && offset <= dev->capend) return 1; } return 0; } static int pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, int size, uint64_t *val, void *arg1, long arg2) { /* * Ignore writes; return 0xff's for reads. The mem read code * will take care of truncating to the correct size. 
*/ if (dir == MEM_F_READ) *val = 0xffffffffffffffff; return 0; } static int pci_emul_ecfg_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, int bytes, uint64_t *val, void *arg1, long arg2) { int bus, slot, func, coff, in; coff = addr & 0xfff; func = (addr >> 12) & 0x7; slot = (addr >> 15) & 0x1f; bus = (addr >> 20) & 0xff; in = (dir == MEM_F_READ); if (in) *val = ~0UL; pci_cfgrw(ctx, vcpu, in, bus, slot, func, coff, bytes, (uint32_t *)val); return 0; } #define BUSIO_ROUNDUP 32 #define BUSMEM_ROUNDUP (1024 * 1024) int init_pci(struct vmctx *ctx) { struct mem_range mr; struct pci_vdev_ops *ops; struct businfo *bi; struct slotinfo *si; struct funcinfo *fi; size_t lowmem; int bus, slot, func; int success_cnt = 0; int error; pci_emul_iobase = PCI_EMUL_IOBASE; pci_emul_membase32 = vm_get_lowmem_limit(ctx); pci_emul_membase64 = PCI_EMUL_MEMBASE64; create_gsi_sharing_groups(); for (bus = 0; bus < MAXBUSES; bus++) { bi = pci_businfo[bus]; if (bi == NULL) continue; /* * Keep track of the i/o and memory resources allocated to * this bus. */ bi->iobase = pci_emul_iobase; bi->membase32 = pci_emul_membase32; bi->membase64 = pci_emul_membase64; for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { fi = &si->si_funcs[func]; if (fi->fi_name == NULL) continue; ops = pci_emul_finddev(fi->fi_name); assert(ops != NULL); error = pci_emul_init(ctx, ops, bus, slot, func, fi); if (error) goto pci_emul_init_fail; success_cnt++; } } /* * Add some slop to the I/O and memory resources decoded by * this bus to give a guest some flexibility if it wants to * reprogram the BARs. */ pci_emul_iobase += BUSIO_ROUNDUP; pci_emul_iobase = roundup2(pci_emul_iobase, BUSIO_ROUNDUP); bi->iolimit = pci_emul_iobase; pci_emul_membase32 += BUSMEM_ROUNDUP; pci_emul_membase32 = roundup2(pci_emul_membase32, BUSMEM_ROUNDUP); bi->memlimit32 = pci_emul_membase32; pci_emul_membase64 += BUSMEM_ROUNDUP; pci_emul_membase64 = roundup2(pci_emul_membase64, BUSMEM_ROUNDUP); bi->memlimit64 = pci_emul_membase64; } error = check_gsi_sharing_violation(); if (error < 0) goto pci_emul_init_fail; /* * PCI backends are initialized before routing INTx interrupts * so that LPC devices are able to reserve ISA IRQs before * routing PIRQ pins. */ for (bus = 0; bus < MAXBUSES; bus++) { bi = pci_businfo[bus]; if (bi == NULL) continue; for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { fi = &si->si_funcs[func]; if (fi->fi_devi == NULL) continue; pci_lintr_route(fi->fi_devi); ops = fi->fi_devi->dev_ops; if (ops && ops->vdev_phys_access) ops->vdev_phys_access(ctx, fi->fi_devi); } } } lpc_pirq_routed(); /* * The guest physical memory map looks like the following: * [0, lowmem) guest system memory * [lowmem, lowmem_limit) memory hole (may be absent) * [lowmem_limit, 0xE0000000) PCI hole (32-bit BAR allocation) * [0xE0000000, 0xF0000000) PCI extended config window * [0xF0000000, 4GB) LAPIC, IOAPIC, HPET, firmware * [4GB, 5GB) PCI hole (64-bit BAR allocation) * [5GB, 5GB + highmem) guest system memory */ /* * Accesses to memory addresses that are not allocated to system * memory or PCI devices return 0xff's. 
*/ lowmem = vm_get_lowmem_size(ctx); bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI hole (32-bit)"; mr.flags = MEM_F_RW; mr.base = lowmem; mr.size = (4ULL * 1024 * 1024 * 1024) - lowmem; mr.handler = pci_emul_fallback_handler; error = register_mem_fallback(&mr); assert(error == 0); /* ditto for the 64-bit PCI host aperture */ bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI hole (64-bit)"; mr.flags = MEM_F_RW; mr.base = PCI_EMUL_MEMBASE64; mr.size = PCI_EMUL_MEMLIMIT64 - PCI_EMUL_MEMBASE64; mr.handler = pci_emul_fallback_handler; error = register_mem_fallback(&mr); assert(error == 0); /* PCI extended config space */ bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI ECFG"; mr.flags = MEM_F_RW; mr.base = PCI_EMUL_ECFG_BASE; mr.size = PCI_EMUL_ECFG_SIZE; mr.handler = pci_emul_ecfg_handler; error = register_mem(&mr); assert(error == 0); return 0; pci_emul_init_fail: for (bus = 0; bus < MAXBUSES && success_cnt > 0; bus++) { bi = pci_businfo[bus]; if (bi == NULL) continue; for (slot = 0; slot < MAXSLOTS && success_cnt > 0; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { fi = &si->si_funcs[func]; if (fi->fi_name == NULL) continue; if (success_cnt-- <= 0) break; ops = pci_emul_finddev(fi->fi_name); assert(ops != NULL); pci_emul_deinit(ctx, ops, bus, slot, func, fi); } } } return error; } void deinit_pci(struct vmctx *ctx) { struct pci_vdev_ops *ops; struct businfo *bi; struct slotinfo *si; struct funcinfo *fi; int bus, slot, func; size_t lowmem; struct mem_range mr; /* Release PCI extended config space */ bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI ECFG"; mr.base = PCI_EMUL_ECFG_BASE; mr.size = PCI_EMUL_ECFG_SIZE; unregister_mem(&mr); /* Release PCI hole space */ lowmem = vm_get_lowmem_size(ctx); bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI hole (32-bit)"; mr.base = lowmem; mr.size = (4ULL * 1024 * 1024 * 1024) - lowmem; unregister_mem_fallback(&mr); /* ditto for the 64-bit PCI host aperture */ bzero(&mr, sizeof(struct mem_range)); mr.name = "PCI hole (64-bit)"; mr.base = PCI_EMUL_MEMBASE64; mr.size = PCI_EMUL_MEMLIMIT64 - PCI_EMUL_MEMBASE64; unregister_mem_fallback(&mr); for (bus = 0; bus < MAXBUSES; bus++) { bi = pci_businfo[bus]; if (bi == NULL) continue; for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { fi = &si->si_funcs[func]; if (fi->fi_name == NULL) continue; ops = pci_emul_finddev(fi->fi_name); assert(ops != NULL); pci_emul_deinit(ctx, ops, bus, slot, func, fi); } } } } static void pci_apic_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq, void *arg) { dsdt_line(" Package ()"); dsdt_line(" {"); dsdt_line(" 0x%X,", slot << 16 | 0xffff); dsdt_line(" 0x%02X,", pin - 1); dsdt_line(" Zero,"); dsdt_line(" 0x%X", ioapic_irq); dsdt_line(" },"); } static void pci_pirq_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq, void *arg) { char *name; name = lpc_pirq_name(pirq_pin); if (name == NULL) return; dsdt_line(" Package ()"); dsdt_line(" {"); dsdt_line(" 0x%X,", slot << 16 | 0xffff); dsdt_line(" 0x%02X,", pin - 1); dsdt_line(" %s,", name); dsdt_line(" 0x00"); dsdt_line(" },"); free(name); } /* * A acrn-dm virtual machine has a flat PCI hierarchy with a root port * corresponding to each PCI bus. */ static void pci_bus_write_dsdt(int bus) { struct businfo *bi; struct slotinfo *si; struct pci_vdev *dev; int count, func, slot; /* * If there are no devices on this 'bus' then just return. 
*/ bi = pci_businfo[bus]; if (bi == NULL) { /* * Bus 0 is special because it decodes the I/O ports used * for PCI config space access even if there are no devices * on it. */ if (bus != 0) return; } dsdt_line(" Device (PCI%01X)", bus); dsdt_line(" {"); dsdt_line(" Name (_HID, EisaId (\"PNP0A03\"))"); dsdt_line(" Name (_ADR, Zero)"); dsdt_line(" Method (_BBN, 0, NotSerialized)"); dsdt_line(" {"); dsdt_line(" Return (0x%08X)", bus); dsdt_line(" }"); dsdt_line(" Name (_CRS, ResourceTemplate ()"); dsdt_line(" {"); dsdt_line(" WordBusNumber (ResourceProducer, MinFixed, " "MaxFixed, PosDecode,"); dsdt_line(" 0x0000, // Granularity"); dsdt_line(" 0x%04X, // Range Minimum", bus); dsdt_line(" 0x%04X, // Range Maximum", bus); dsdt_line(" 0x0000, // Translation Offset"); dsdt_line(" 0x0001, // Length"); dsdt_line(" ,, )"); if (bus == 0) { dsdt_indent(3); dsdt_fixed_ioport(0xCF8, 8); dsdt_unindent(3); dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " "PosDecode, EntireRange,"); dsdt_line(" 0x0000, // Granularity"); dsdt_line(" 0x0000, // Range Minimum"); dsdt_line(" 0x0CF7, // Range Maximum"); dsdt_line(" 0x0000, // Translation Offset"); dsdt_line(" 0x0CF8, // Length"); dsdt_line(" ,, , TypeStatic)"); dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " "PosDecode, EntireRange,"); dsdt_line(" 0x0000, // Granularity"); dsdt_line(" 0x0D00, // Range Minimum"); dsdt_line(" 0x%04X, // Range Maximum", PCI_EMUL_IOBASE - 1); dsdt_line(" 0x0000, // Translation Offset"); dsdt_line(" 0x%04X, // Length", PCI_EMUL_IOBASE - 0x0D00); dsdt_line(" ,, , TypeStatic)"); if (bi == NULL) { dsdt_line(" })"); goto done; } } assert(bi != NULL); /* i/o window */ dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " "PosDecode, EntireRange,"); dsdt_line(" 0x0000, // Granularity"); dsdt_line(" 0x%04X, // Range Minimum", bi->iobase); dsdt_line(" 0x%04X, // Range Maximum", bi->iolimit - 1); dsdt_line(" 0x0000, // Translation Offset"); dsdt_line(" 0x%04X, // Length", bi->iolimit - bi->iobase); dsdt_line(" ,, , TypeStatic)"); /* mmio window (32-bit) */ dsdt_line(" DWordMemory (ResourceProducer, PosDecode, " "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); dsdt_line(" 0x00000000, // Granularity"); dsdt_line(" 0x%08X, // Range Minimum\n", bi->membase32); dsdt_line(" 0x%08X, // Range Maximum\n", bi->memlimit32 - 1); dsdt_line(" 0x00000000, // Translation Offset"); dsdt_line(" 0x%08X, // Length\n", bi->memlimit32 - bi->membase32); dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); /* mmio window (64-bit) */ dsdt_line(" QWordMemory (ResourceProducer, PosDecode, " "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); dsdt_line(" 0x0000000000000000, // Granularity"); dsdt_line(" 0x%016lX, // Range Minimum\n", bi->membase64); dsdt_line(" 0x%016lX, // Range Maximum\n", bi->memlimit64 - 1); dsdt_line(" 0x0000000000000000, // Translation Offset"); dsdt_line(" 0x%016lX, // Length\n", bi->memlimit64 - bi->membase64); dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); dsdt_line(" })"); count = pci_count_lintr(bus); if (count != 0) { dsdt_indent(2); dsdt_line("Name (PPRT, Package ()"); dsdt_line("{"); pci_walk_lintr(bus, pci_pirq_prt_entry, NULL); dsdt_line("})"); dsdt_line("Name (APRT, Package ()"); dsdt_line("{"); pci_walk_lintr(bus, pci_apic_prt_entry, NULL); dsdt_line("})"); dsdt_line("Method (_PRT, 0, NotSerialized)"); dsdt_line("{"); dsdt_line(" If (PICM)"); dsdt_line(" {"); dsdt_line(" Return (APRT)"); dsdt_line(" }"); dsdt_line(" Else"); dsdt_line(" {"); dsdt_line(" Return (PPRT)"); dsdt_line(" }"); dsdt_line("}"); 
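		/*
		 * Descriptive note (added): the guest picks one of the two
		 * routing tables above at runtime.  The _PIC method
		 * (generated in pci_write_dsdt() below) stores the guest's
		 * interrupt model into PICM, and _PRT then returns APRT in
		 * APIC mode or PPRT in PIC mode.
		 */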
dsdt_unindent(2); } dsdt_indent(2); for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (func = 0; func < MAXFUNCS; func++) { dev = si->si_funcs[func].fi_devi; if (dev != NULL && dev->dev_ops->vdev_write_dsdt != NULL) dev->dev_ops->vdev_write_dsdt(dev); } } dsdt_unindent(2); done: dsdt_line(" }"); } void pci_write_dsdt(void) { int bus; dsdt_indent(1); dsdt_line("Name (PICM, 0x00)"); dsdt_line("Method (_PIC, 1, NotSerialized)"); dsdt_line("{"); dsdt_line(" Store (Arg0, PICM)"); dsdt_line("}"); dsdt_line(""); dsdt_line("Scope (_SB)"); dsdt_line("{"); for (bus = 0; bus < MAXBUSES; bus++) pci_bus_write_dsdt(bus); dsdt_line("}"); dsdt_unindent(1); } int pci_bus_configured(int bus) { assert(bus >= 0 && bus < MAXBUSES); return (pci_businfo[bus] != NULL); } int pci_msi_enabled(struct pci_vdev *dev) { return dev->msi.enabled; } int pci_msi_maxmsgnum(struct pci_vdev *dev) { if (dev->msi.enabled) return dev->msi.maxmsgnum; else return 0; } int pci_msix_enabled(struct pci_vdev *dev) { return (dev->msix.enabled && !dev->msi.enabled); } /** * @brief Generate a MSI-X interrupt to guest * * @param dev Pointer to struct pci_vdev representing virtual PCI device. * @param index MSIx table entry index. * * @return None */ void pci_generate_msix(struct pci_vdev *dev, int index) { struct msix_table_entry *mte; if (!pci_msix_enabled(dev)) return; if (dev->msix.function_mask) return; if (index >= dev->msix.table_count) return; mte = &dev->msix.table[index]; if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) { /* XXX Set PBA bit if interrupt is disabled */ vm_lapic_msi(dev->vmctx, mte->addr, mte->msg_data); } } /** * @brief Generate a MSI interrupt to guest * * @param dev Pointer to struct pci_vdev representing virtual PCI device. * @param index Message data index. * * @return None */ void pci_generate_msi(struct pci_vdev *dev, int index) { if (pci_msi_enabled(dev) && index < pci_msi_maxmsgnum(dev)) { vm_lapic_msi(dev->vmctx, dev->msi.addr, dev->msi.msg_data + index); } } static bool pci_lintr_permitted(struct pci_vdev *dev) { uint16_t cmd; cmd = pci_get_cfgdata16(dev, PCIR_COMMAND); return (!(dev->msi.enabled || dev->msix.enabled || (cmd & PCIM_CMD_INTxDIS))); } void pci_lintr_request(struct pci_vdev *dev) { struct businfo *bi; struct slotinfo *si; int bestpin, bestcount, pin; bi = pci_businfo[dev->bus]; assert(bi != NULL); /* * Just allocate a pin from our slot. The pin will be * assigned IRQs later when interrupts are routed. */ si = &bi->slotinfo[dev->slot]; bestpin = 0; bestcount = si->si_intpins[0].ii_count; for (pin = 1; pin < 4; pin++) { if (si->si_intpins[pin].ii_count < bestcount) { bestpin = pin; bestcount = si->si_intpins[pin].ii_count; } } si->si_intpins[bestpin].ii_count++; dev->lintr.pin = bestpin + 1; pci_set_cfgdata8(dev, PCIR_INTPIN, bestpin + 1); } void pci_lintr_release(struct pci_vdev *dev) { struct businfo *bi; struct slotinfo *si; int pin; bi = pci_businfo[dev->bus]; assert(bi != NULL); si = &bi->slotinfo[dev->slot]; for (pin = 1; pin < 4; pin++) { si->si_intpins[pin].ii_count = 0; si->si_intpins[pin].ii_pirq_pin = 0; si->si_intpins[pin].ii_ioapic_irq = 0; } } static void pci_lintr_route(struct pci_vdev *dev) { struct businfo *bi; struct intxinfo *ii; if (dev->lintr.pin == 0) return; bi = pci_businfo[dev->bus]; assert(bi != NULL); ii = &bi->slotinfo[dev->slot].si_intpins[dev->lintr.pin - 1]; /* * Attempt to allocate an I/O APIC pin for this intpin if one * is not yet assigned. 
*/ if (ii->ii_ioapic_irq == 0) ii->ii_ioapic_irq = ioapic_pci_alloc_irq(dev); assert(ii->ii_ioapic_irq > 0); /* * Attempt to allocate a PIRQ pin for this intpin if one is * not yet assigned. */ if (ii->ii_pirq_pin == 0) ii->ii_pirq_pin = pirq_alloc_pin(dev); assert(ii->ii_pirq_pin > 0); dev->lintr.ioapic_irq = ii->ii_ioapic_irq; dev->lintr.pirq_pin = ii->ii_pirq_pin; pci_set_cfgdata8(dev, PCIR_INTLINE, pirq_irq(ii->ii_pirq_pin)); } /** * @brief Assert INTx pin of virtual PCI device * * @param dev Pointer to struct pci_vdev representing virtual PCI device. * * @return None */ void pci_lintr_assert(struct pci_vdev *dev) { assert(dev->lintr.pin > 0); pthread_mutex_lock(&dev->lintr.lock); if (dev->lintr.state == IDLE) { if (pci_lintr_permitted(dev)) { dev->lintr.state = ASSERTED; pci_irq_assert(dev); } else dev->lintr.state = PENDING; } pthread_mutex_unlock(&dev->lintr.lock); } /** * @brief Deassert INTx pin of virtual PCI device * * @param dev Pointer to struct pci_vdev representing virtual PCI device. * * @return None */ void pci_lintr_deassert(struct pci_vdev *dev) { assert(dev->lintr.pin > 0); pthread_mutex_lock(&dev->lintr.lock); if (dev->lintr.state == ASSERTED) { dev->lintr.state = IDLE; pci_irq_deassert(dev); } else if (dev->lintr.state == PENDING) dev->lintr.state = IDLE; pthread_mutex_unlock(&dev->lintr.lock); } static void pci_lintr_update(struct pci_vdev *dev) { pthread_mutex_lock(&dev->lintr.lock); if (dev->lintr.state == ASSERTED && !pci_lintr_permitted(dev)) { pci_irq_deassert(dev); dev->lintr.state = PENDING; } else if (dev->lintr.state == PENDING && pci_lintr_permitted(dev)) { dev->lintr.state = ASSERTED; pci_irq_assert(dev); } pthread_mutex_unlock(&dev->lintr.lock); } int pci_count_lintr(int bus) { int count, slot, pin; struct slotinfo *slotinfo; count = 0; if (pci_businfo[bus] != NULL) { for (slot = 0; slot < MAXSLOTS; slot++) { slotinfo = &pci_businfo[bus]->slotinfo[slot]; for (pin = 0; pin < 4; pin++) { if (slotinfo->si_intpins[pin].ii_count != 0) count++; } } } return count; } void pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg) { struct businfo *bi; struct slotinfo *si; struct intxinfo *ii; int slot, pin; bi = pci_businfo[bus]; if (bi == NULL) return; for (slot = 0; slot < MAXSLOTS; slot++) { si = &bi->slotinfo[slot]; for (pin = 0; pin < 4; pin++) { ii = &si->si_intpins[pin]; if (ii->ii_count != 0) cb(bus, slot, pin + 1, ii->ii_pirq_pin, ii->ii_ioapic_irq, arg); } } } /* * Return 1 if the emulated device in 'slot' is a multi-function device. * Return 0 otherwise. */ static int pci_emul_is_mfdev(int bus, int slot) { struct businfo *bi; struct slotinfo *si; int f, numfuncs; numfuncs = 0; bi = pci_businfo[bus]; if (bi != NULL) { si = &bi->slotinfo[slot]; for (f = 0; f < MAXFUNCS; f++) { if (si->si_funcs[f].fi_devi != NULL) numfuncs++; } } return (numfuncs > 1); } /* * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on * whether or not is a multi-function being emulated in the pci 'slot'. 
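 * The bit is recomputed on every config-space read (see pci_cfgrw()
 * below) instead of being stored, so it always reflects the current
 * set of populated functions.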
*/ static void pci_emul_hdrtype_fixup(int bus, int slot, int off, int bytes, uint32_t *rv) { int mfdev; if (off <= PCIR_HDRTYPE && off + bytes > PCIR_HDRTYPE) { mfdev = pci_emul_is_mfdev(bus, slot); switch (bytes) { case 1: case 2: *rv &= ~PCIM_MFDEV; if (mfdev) *rv |= PCIM_MFDEV; break; case 4: *rv &= ~(PCIM_MFDEV << 16); if (mfdev) *rv |= (PCIM_MFDEV << 16); break; } } } static void pci_emul_cmdsts_write(struct pci_vdev *dev, int coff, uint32_t new, int bytes) { int i, rshift; uint32_t cmd, cmd2, changed, old, readonly; cmd = pci_get_cfgdata16(dev, PCIR_COMMAND); /* stash old value */ /* * From PCI Local Bus Specification 3.0 sections 6.2.2 and 6.2.3. * * XXX Bits 8, 11, 12, 13, 14 and 15 in the status register are * 'write 1 to clear'. However these bits are not set to '1' by * any device emulation so it is simpler to treat them as readonly. */ rshift = (coff & 0x3) * 8; readonly = 0xFFFFF880 >> rshift; old = CFGREAD(dev, coff, bytes); new &= ~readonly; new |= (old & readonly); CFGWRITE(dev, coff, new, bytes); /* update config */ cmd2 = pci_get_cfgdata16(dev, PCIR_COMMAND); /* get updated value */ changed = cmd ^ cmd2; /* * If the MMIO or I/O address space decoding has changed then * register/unregister all BARs that decode that address space. */ for (i = 0; i <= PCI_BARMAX; i++) { switch (dev->bar[i].type) { case PCIBAR_NONE: case PCIBAR_MEMHI64: break; case PCIBAR_IO: /* I/O address space decoding changed? */ if (changed & PCIM_CMD_PORTEN) { if (porten(dev)) register_bar(dev, i); else unregister_bar(dev, i); } break; case PCIBAR_MEM32: case PCIBAR_MEM64: /* MMIO address space decoding changed? */ if (changed & PCIM_CMD_MEMEN) { if (memen(dev)) register_bar(dev, i); else unregister_bar(dev, i); } break; default: assert(0); } } /* * If INTx has been unmasked and is pending, assert the * interrupt. */ pci_lintr_update(dev); } static void pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func, int coff, int bytes, uint32_t *eax) { struct businfo *bi; struct slotinfo *si; struct pci_vdev *dev; struct pci_vdev_ops *ops; int idx, needcfg; uint64_t addr, bar, mask; bi = pci_businfo[bus]; if (bi != NULL) { si = &bi->slotinfo[slot]; dev = si->si_funcs[func].fi_devi; } else dev = NULL; /* * Just return if there is no device at this slot:func or if the * the guest is doing an un-aligned access. */ if (dev == NULL || (bytes != 1 && bytes != 2 && bytes != 4) || (coff & (bytes - 1)) != 0) { if (in) *eax = 0xffffffff; return; } ops = dev->dev_ops; /* * For non-passthru device, extended config space is NOT supported. * Ignore all writes beyond the standard config space and return all * ones on reads. * * For passthru device, extended config space is supported. * Access to extended config space is implemented via libpciaccess. */ if (strcmp("passthru", ops->class_name)) { if (coff >= PCI_REGMAX + 1) { if (in) { *eax = 0xffffffff; /* * Extended capabilities begin at offset 256 in * config space. * Absence of extended capabilities is signaled * with all 0s in the extended capability header * at offset 256. 
*/ if (coff <= PCI_REGMAX + 4) *eax = 0x00000000; } return; } } /* * Config read */ if (in) { /* Let the device emulation override the default handler */ if (ops->vdev_cfgread != NULL) { needcfg = ops->vdev_cfgread(ctx, vcpu, dev, coff, bytes, eax); } else { needcfg = 1; } if (needcfg) *eax = CFGREAD(dev, coff, bytes); pci_emul_hdrtype_fixup(bus, slot, coff, bytes, eax); } else { /* Let the device emulation override the default handler */ if (ops->vdev_cfgwrite != NULL && (*ops->vdev_cfgwrite)(ctx, vcpu, dev, coff, bytes, *eax) == 0) return; /* * Special handling for write to BAR registers */ if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)) { /* * Ignore writes to BAR registers that are not * 4-byte aligned. */ if (bytes != 4 || (coff & 0x3) != 0) return; idx = (coff - PCIR_BAR(0)) / 4; mask = ~(dev->bar[idx].size - 1); switch (dev->bar[idx].type) { case PCIBAR_NONE: dev->bar[idx].addr = bar = 0; break; case PCIBAR_IO: addr = *eax & mask; addr &= 0xffff; bar = addr | PCIM_BAR_IO_SPACE; /* * Register the new BAR value for interception */ if (addr != dev->bar[idx].addr) { update_bar_address(dev, addr, idx, PCIBAR_IO); } break; case PCIBAR_MEM32: addr = bar = *eax & mask; bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; if (addr != dev->bar[idx].addr) { update_bar_address(dev, addr, idx, PCIBAR_MEM32); } break; case PCIBAR_MEM64: addr = bar = *eax & mask; bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | PCIM_BAR_MEM_PREFETCH; if (addr != (uint32_t)dev->bar[idx].addr) { update_bar_address(dev, addr, idx, PCIBAR_MEM64); } break; case PCIBAR_MEMHI64: assert(idx >= 1); mask = ~(dev->bar[idx - 1].size - 1); addr = ((uint64_t)*eax << 32) & mask; bar = addr >> 32; if (bar != dev->bar[idx - 1].addr >> 32) { update_bar_address(dev, addr, idx - 1, PCIBAR_MEMHI64); } break; default: assert(0); } pci_set_cfgdata32(dev, coff, bar); } else if (coff == PCIR_BIOS) { /* ignore ROM BAR length request */ } else if (pci_emul_iscap(dev, coff)) { pci_emul_capwrite(dev, coff, bytes, *eax); } else if (coff >= PCIR_COMMAND && coff < PCIR_REVID) { pci_emul_cmdsts_write(dev, coff, *eax, bytes); } else { CFGWRITE(dev, coff, *eax, bytes); } } } static int cfgenable, cfgbus, cfgslot, cfgfunc, cfgoff; static int pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes, uint32_t *eax, void *arg) { uint32_t x; if (bytes != 4) { if (in) *eax = (bytes == 2) ? 
			0xffff : 0xff;
		return 0;
	}

	if (in) {
		x = (cfgbus << 16) | (cfgslot << 11) | (cfgfunc << 8) |
		    cfgoff;
		if (cfgenable)
			x |= CONF1_ENABLE;
		*eax = x;
	} else {
		x = *eax;
		cfgenable = (x & CONF1_ENABLE) == CONF1_ENABLE;
		cfgoff = x & PCI_REGMAX;
		cfgfunc = (x >> 8) & PCI_FUNCMAX;
		cfgslot = (x >> 11) & PCI_SLOTMAX;
		cfgbus = (x >> 16) & PCI_BUSMAX;
	}

	return 0;
}
INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_INOUT, pci_emul_cfgaddr);

static int
pci_emul_cfgdata(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
		 uint32_t *eax, void *arg)
{
	int coff;

	assert(bytes == 1 || bytes == 2 || bytes == 4);

	coff = cfgoff + (port - CONF1_DATA_PORT);
	if (cfgenable) {
		pci_cfgrw(ctx, vcpu, in, cfgbus, cfgslot, cfgfunc, coff,
		    bytes, eax);
	} else {
		/* Ignore accesses to cfgdata if not enabled by cfgaddr */
		if (in)
			*eax = 0xffffffff;
	}
	return 0;
}

INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata);

int
emulate_pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot,
	int func, int reg, int bytes, int *value)
{
	pci_cfgrw(ctx, vcpu, in, bus, slot, func, reg, bytes,
	    (uint32_t *)value);
	return 0;
}

#define PCI_EMUL_TEST
#ifdef PCI_EMUL_TEST
/*
 * Define a dummy test device
 */
#define DIOSZ	8
#define DMEMSZ	4096
struct pci_emul_dummy {
	uint8_t	ioregs[DIOSZ];
	uint8_t	memregs[2][DMEMSZ];
};

#define	PCI_EMUL_MSI_MSGS	 4
#define	PCI_EMUL_MSIX_MSGS	16

static int
pci_emul_dinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts)
{
	int error;
	struct pci_emul_dummy *dummy;

	dummy = calloc(1, sizeof(struct pci_emul_dummy));
	if (dummy == NULL)
		return -1;

	dev->arg = dummy;

	pci_set_cfgdata16(dev, PCIR_DEVICE, 0x0001);
	pci_set_cfgdata16(dev, PCIR_VENDOR, 0x10DD);
	pci_set_cfgdata8(dev, PCIR_CLASS, 0x02);

	error = pci_emul_add_msicap(dev, PCI_EMUL_MSI_MSGS);
	assert(error == 0);

	error = pci_emul_alloc_bar(dev, 0, PCIBAR_IO, DIOSZ);
	assert(error == 0);

	error = pci_emul_alloc_bar(dev, 1, PCIBAR_MEM32, DMEMSZ);
	assert(error == 0);

	error = pci_emul_alloc_bar(dev, 2, PCIBAR_MEM32, DMEMSZ);
	assert(error == 0);

	return 0;
}

static void
pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_vdev *dev, int baridx,
	      uint64_t offset, int size, uint64_t value)
{
	int i;
	void *offset_ptr;
	struct pci_emul_dummy *dummy = dev->arg;

	if (baridx == 0) {
		if (offset + size > DIOSZ) {
			printf("diow: iow too large, offset %lu size %d\n",
			       offset, size);
			return;
		}

		offset_ptr = (void *) &dummy->ioregs[offset];
		if (size == 1)
			*(uint8_t *)offset_ptr = value & 0xff;
		else if (size == 2)
			*(uint16_t *)offset_ptr = value & 0xffff;
		else if (size == 4)
			*(uint32_t *)offset_ptr = value;
		else
			printf("diow: iow unknown size %d\n", size);

		/*
		 * Special magic value to generate an interrupt
		 */
		if (offset == 4 && size == 4 && pci_msi_enabled(dev))
			pci_generate_msi(dev, value % pci_msi_maxmsgnum(dev));

		if (value == 0xabcdef) {
			for (i = 0; i < pci_msi_maxmsgnum(dev); i++)
				pci_generate_msi(dev, i);
		}
	}

	if (baridx == 1 || baridx == 2) {
		if (offset + size > DMEMSZ) {
			printf("diow: memw too large, offset %lu size %d\n",
			       offset, size);
			return;
		}

		i = baridx - 1;		/* 'memregs' index */

		offset_ptr = (void *) &dummy->memregs[i][offset];
		if (size == 1)
			*(uint8_t *)offset_ptr = value;
		else if (size == 2)
			*(uint16_t *)offset_ptr = value;
		else if (size == 4)
			*(uint32_t *)offset_ptr = value;
		else if (size == 8)
			*(uint64_t *)offset_ptr = value;
		else
			printf("diow: memw unknown size %d\n", size);

		/*
		 * magic interrupt ??
		 */
	}

	if (baridx > 2 || baridx < 0)
		printf("diow: unknown bar idx %d\n", baridx);
}

static uint64_t
pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_vdev *dev, int baridx,
	      uint64_t offset, int size)
{
	struct pci_emul_dummy *dummy = dev->arg;
	uint64_t value = 0;
	int i;
	void *offset_ptr;

	if (baridx == 0) {
		if (offset + size > DIOSZ) {
			printf("dior: ior too large, offset %lu size %d\n",
			       offset, size);
			return 0;
		}

		value = 0;
		offset_ptr = (void *) &dummy->ioregs[offset];
		if (size == 1)
			value = *(uint8_t *)offset_ptr;
		else if (size == 2)
			value = *(uint16_t *)offset_ptr;
		else if (size == 4)
			value = *(uint32_t *)offset_ptr;
		else
			printf("dior: ior unknown size %d\n", size);
	}

	if (baridx == 1 || baridx == 2) {
		if (offset + size > DMEMSZ) {
			printf("dior: memr too large, offset %lu size %d\n",
			       offset, size);
			return 0;
		}

		i = baridx - 1;		/* 'memregs' index */

		offset_ptr = (void *) &dummy->memregs[i][offset];
		if (size == 1)
			value = *(uint8_t *)offset_ptr;
		else if (size == 2)
			value = *(uint16_t *)offset_ptr;
		else if (size == 4)
			value = *(uint32_t *)offset_ptr;
		else if (size == 8)
			value = *(uint64_t *)offset_ptr;
		else
			printf("dior: memr unknown size %d\n", size);
	}

	if (baridx > 2 || baridx < 0) {
		printf("dior: unknown bar idx %d\n", baridx);
		return 0;
	}

	return value;
}

struct pci_vdev_ops pci_dummy = {
	.class_name	= "dummy",
	.vdev_init	= pci_emul_dinit,
	.vdev_barwrite	= pci_emul_diow,
	.vdev_barread	= pci_emul_dior
};
DEFINE_PCI_DEVTYPE(pci_dummy);

#endif /* PCI_EMUL_TEST */
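/*
 * Illustrative sketch (added, not part of the original sources): the CONF1
 * bus/slot/func/offset packing that pci_emul_cfgaddr() above implements,
 * assuming the conventional config-mechanism-1 layout (enable bit 31,
 * bus 23:16, slot 15:11, func 10:8, register 7:0), which matches the
 * shifts used above.  The EX_* constants restate the usual values so the
 * snippet is self-contained; the real code uses CONF1_ENABLE, PCI_BUSMAX,
 * PCI_SLOTMAX, PCI_FUNCMAX and PCI_REGMAX from its headers.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_CONF1_ENABLE	0x80000000u	/* bit 31: config cycles enabled */
#define EX_PCI_BUSMAX	255
#define EX_PCI_SLOTMAX	31
#define EX_PCI_FUNCMAX	7
#define EX_PCI_REGMAX	255

int
main(void)
{
	/* enable | bus 0 | slot 3 | func 0 | register 0x10 (BAR0) */
	uint32_t x = EX_CONF1_ENABLE | (0u << 16) | (3u << 11) |
	    (0u << 8) | 0x10;

	printf("enable=%d bus=%u slot=%u func=%u off=0x%02x\n",
	    (x & EX_CONF1_ENABLE) == EX_CONF1_ENABLE,
	    (unsigned)((x >> 16) & EX_PCI_BUSMAX),
	    (unsigned)((x >> 11) & EX_PCI_SLOTMAX),
	    (unsigned)((x >> 8) & EX_PCI_FUNCMAX),
	    (unsigned)(x & EX_PCI_REGMAX));
	/* prints: enable=1 bus=0 slot=3 func=0 off=0x10 */
	return 0;
}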
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "trace.h" #include <stdio.h> #include <unistd.h> #include <errno.h> #include <signal.h> #include <sys/stat.h> #include <fcntl.h> #include <time.h> #include <libgen.h> #include <assert.h> #include <sys/utsname.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/socket.h> #include <sys/select.h> #include <sys/time.h> #include <sys/ioctl.h> #include <arpa/inet.h> #include <netinet/if_ether.h> #include <pwd.h> #include <grp.h> static void usage(void); static struct protocol protos[] = { { LLDPD_MODE_LLDP, 1, "LLDP", 'l', lldp_send, lldp_decode, NULL, LLDP_MULTICAST_ADDR }, #ifdef ENABLE_CDP { LLDPD_MODE_CDPV1, 0, "CDPv1", 'c', cdpv1_send, cdp_decode, cdpv1_guess, CDP_MULTICAST_ADDR }, { LLDPD_MODE_CDPV2, 0, "CDPv2", 'c', cdpv2_send, cdp_decode, cdpv2_guess, CDP_MULTICAST_ADDR }, #endif #ifdef ENABLE_SONMP { LLDPD_MODE_SONMP, 0, "SONMP", 's', sonmp_send, sonmp_decode, NULL, SONMP_MULTICAST_ADDR }, #endif #ifdef ENABLE_EDP { LLDPD_MODE_EDP, 0, "EDP", 'e', edp_send, edp_decode, NULL, EDP_MULTICAST_ADDR }, #endif #ifdef ENABLE_FDP { LLDPD_MODE_FDP, 0, "FDP", 'f', fdp_send, cdp_decode, NULL, FDP_MULTICAST_ADDR }, #endif { 0, 0, "any", ' ', NULL, NULL, NULL, {0,0,0,0,0,0} } }; static char **saved_argv; #ifdef HAVE___PROGNAME extern const char *__progname; #else # define __progname "lldpd" #endif static void usage(void) { fprintf(stderr, "Usage: %s [OPTIONS ...]\n", __progname); fprintf(stderr, "Version: %s\n", PACKAGE_STRING); fprintf(stderr, "\n"); fprintf(stderr, "-d Do not daemonize.\n"); fprintf(stderr, "-r Receive-only mode\n"); fprintf(stderr, "-i Disable LLDP-MED inventory TLV transmission.\n"); fprintf(stderr, "-k Disable advertising of kernel release, version, machine.\n"); fprintf(stderr, "-S descr Override the default system description.\n"); fprintf(stderr, "-P name Override the default hardware platform.\n"); fprintf(stderr, "-m IP Specify the IPv4 management addresses of this system.\n"); fprintf(stderr, "-u file Specify the Unix-domain socket used for communication with lldpctl(8).\n"); fprintf(stderr, "-H mode Specify the behaviour when detecting multiple neighbors.\n"); fprintf(stderr, "-I iface Limit interfaces to use.\n"); #ifdef ENABLE_LLDPMED fprintf(stderr, "-M class Enable emission of LLDP-MED frame. 
'class' should be one of:\n"); fprintf(stderr, " 1 Generic Endpoint (Class I)\n"); fprintf(stderr, " 2 Media Endpoint (Class II)\n"); fprintf(stderr, " 3 Communication Device Endpoints (Class III)\n"); fprintf(stderr, " 4 Network Connectivity Device\n"); #endif #ifdef USE_SNMP fprintf(stderr, "-x Enable SNMP subagent.\n"); #endif fprintf(stderr, "\n"); #if defined ENABLE_CDP || defined ENABLE_EDP || defined ENABLE_FDP || defined ENABLE_SONMP fprintf(stderr, "Additional protocol support.\n"); #ifdef ENABLE_CDP fprintf(stderr, "-c Enable the support of CDP protocol. (Cisco)\n"); #endif #ifdef ENABLE_EDP fprintf(stderr, "-e Enable the support of EDP protocol. (Extreme)\n"); #endif #ifdef ENABLE_FDP fprintf(stderr, "-f Enable the support of FDP protocol. (Foundry)\n"); #endif #ifdef ENABLE_SONMP fprintf(stderr, "-s Enable the support of SONMP protocol. (Nortel)\n"); #endif fprintf(stderr, "\n"); #endif fprintf(stderr, "see manual page lldpd(8) for more information\n"); exit(1); } struct lldpd_hardware * lldpd_get_hardware(struct lldpd *cfg, char *name, int index, struct lldpd_ops *ops) { struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { if ((strcmp(hardware->h_ifname, name) == 0) && (hardware->h_ifindex == index) && ((!ops) || (ops == hardware->h_ops))) break; } return hardware; } struct lldpd_hardware * lldpd_alloc_hardware(struct lldpd *cfg, char *name, int index) { struct lldpd_hardware *hardware; log_debug("alloc", "allocate a new local port (%s)", name); if ((hardware = (struct lldpd_hardware *) calloc(1, sizeof(struct lldpd_hardware))) == NULL) return NULL; hardware->h_cfg = cfg; strlcpy(hardware->h_ifname, name, sizeof(hardware->h_ifname)); hardware->h_ifindex = index; hardware->h_lport.p_chassis = LOCAL_CHASSIS(cfg); hardware->h_lport.p_chassis->c_refcount++; TAILQ_INIT(&hardware->h_rports); #ifdef ENABLE_LLDPMED if (LOCAL_CHASSIS(cfg)->c_med_cap_available) { hardware->h_lport.p_med_cap_enabled = LLDP_MED_CAP_CAP; if (!cfg->g_config.c_noinventory) hardware->h_lport.p_med_cap_enabled |= LLDP_MED_CAP_IV; } #endif #ifdef ENABLE_DOT1 TAILQ_INIT(&hardware->h_lport.p_vlans); TAILQ_INIT(&hardware->h_lport.p_ppvids); TAILQ_INIT(&hardware->h_lport.p_pids); #endif #ifdef ENABLE_CUSTOM TAILQ_INIT(&hardware->h_lport.p_custom_list); #endif levent_hardware_init(hardware); return hardware; } struct lldpd_mgmt * lldpd_alloc_mgmt(int family, void *addrptr, size_t addrsize, u_int32_t iface) { struct lldpd_mgmt *mgmt; log_debug("alloc", "allocate a new management address (family: %d)", family); if (family <= LLDPD_AF_UNSPEC || family >= LLDPD_AF_LAST) { errno = EAFNOSUPPORT; return NULL; } if (addrsize > LLDPD_MGMT_MAXADDRSIZE) { errno = EOVERFLOW; return NULL; } mgmt = calloc(1, sizeof(struct lldpd_mgmt)); if (mgmt == NULL) { errno = ENOMEM; return NULL; } mgmt->m_family = family; assert(addrsize <= LLDPD_MGMT_MAXADDRSIZE); memcpy(&mgmt->m_addr, addrptr, addrsize); mgmt->m_addrsize = addrsize; mgmt->m_iface = iface; return mgmt; } void lldpd_hardware_cleanup(struct lldpd *cfg, struct lldpd_hardware *hardware) { log_debug("alloc", "cleanup hardware port %s", hardware->h_ifname); free(hardware->h_lport_previous); free(hardware->h_lchassis_previous_id); free(hardware->h_lport_previous_id); lldpd_port_cleanup(&hardware->h_lport, 1); if (hardware->h_ops && hardware->h_ops->cleanup) hardware->h_ops->cleanup(cfg, hardware); levent_hardware_release(hardware); free(hardware); } static void lldpd_display_neighbors(struct lldpd *cfg) { if (!cfg->g_config.c_set_ifdescr) return; 
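	/*
	 * Descriptive note (added): rewrite each interface description:
	 * empty when no neighbor is visible, the neighbor's chassis name
	 * when there is exactly one (and it is named), and a bare
	 * "N neighbors" count otherwise.
	 */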
struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { struct lldpd_port *port; char *description; const char *neighbor = NULL; unsigned neighbors = 0; TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { if (SMART_HIDDEN(port)) continue; neighbors++; neighbor = port->p_chassis->c_name; } if (neighbors == 0) priv_iface_description(hardware->h_ifname, ""); else if (neighbors == 1 && neighbor && *neighbor != '\0') { if (asprintf(&description, "%s", neighbor) != -1) { priv_iface_description(hardware->h_ifname, description); free(description); } } else { if (asprintf(&description, "%d neighbor%s", neighbors, (neighbors > 1)?"s":"") != -1) { priv_iface_description(hardware->h_ifname, description); free(description); } } } } static void lldpd_count_neighbors(struct lldpd *cfg) { #if HAVE_SETPROCTITLE struct lldpd_chassis *chassis; const char *neighbor; unsigned neighbors = 0; TAILQ_FOREACH(chassis, &cfg->g_chassis, c_entries) { neighbors++; neighbor = chassis->c_name; } neighbors--; if (neighbors == 0) setproctitle("no neighbor"); else if (neighbors == 1 && neighbor && *neighbor != '\0') setproctitle("connected to %s", neighbor); else setproctitle("%d neighbor%s", neighbors, (neighbors > 1)?"s":""); #endif lldpd_display_neighbors(cfg); } static void notify_clients_deletion(struct lldpd_hardware *hardware, struct lldpd_port *rport) { TRACE(LLDPD_NEIGHBOR_DELETE(hardware->h_ifname, rport->p_chassis->c_name, rport->p_descr)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_DELETED, rport); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_DELETED, rport); #endif } static void lldpd_reset_timer(struct lldpd *cfg) { /* Reset timer for ports that have been changed. */ struct lldpd_hardware *hardware; TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { /* We keep a flat copy of the local port to see if there is any * change. To do this, we zero out fields that are not * significant, marshal the port, then restore. 
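		 * If the serialized copy differs from the previous
		 * snapshot, the port's transmit timer is rescheduled so
		 * the change is advertised without waiting for the
		 * regular interval.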
*/ struct lldpd_port *port = &hardware->h_lport; u_int8_t *output = NULL; ssize_t output_len; char save[LLDPD_PORT_START_MARKER]; memcpy(save, port, sizeof(save)); /* coverity[suspicious_sizeof] We intentionally partially memset port */ memset(port, 0, sizeof(save)); output_len = lldpd_port_serialize(port, (void**)&output); memcpy(port, save, sizeof(save)); if (output_len == -1) { log_warnx("localchassis", "unable to serialize local port %s to check for differences", hardware->h_ifname); continue; } /* Compare with the previous value */ if (hardware->h_lport_previous && output_len == hardware->h_lport_previous_len && !memcmp(output, hardware->h_lport_previous, output_len)) { log_debug("localchassis", "no change detected for port %s", hardware->h_ifname); } else { log_debug("localchassis", "change detected for port %s, resetting its timer", hardware->h_ifname); levent_schedule_pdu(hardware); } /* Update the value */ free(hardware->h_lport_previous); hardware->h_lport_previous = output; hardware->h_lport_previous_len = output_len; } } void lldpd_cleanup(struct lldpd *cfg) { struct lldpd_hardware *hardware, *hardware_next; struct lldpd_chassis *chassis, *chassis_next; log_debug("localchassis", "cleanup all ports"); for (hardware = TAILQ_FIRST(&cfg->g_hardware); hardware != NULL; hardware = hardware_next) { hardware_next = TAILQ_NEXT(hardware, h_entries); if (!hardware->h_flags) { TRACE(LLDPD_INTERFACES_DELETE(hardware->h_ifname)); TAILQ_REMOVE(&cfg->g_hardware, hardware, h_entries); lldpd_remote_cleanup(hardware, notify_clients_deletion, 1); lldpd_hardware_cleanup(cfg, hardware); } else lldpd_remote_cleanup(hardware, notify_clients_deletion, 0); } log_debug("localchassis", "cleanup all chassis"); for (chassis = TAILQ_FIRST(&cfg->g_chassis); chassis; chassis = chassis_next) { chassis_next = TAILQ_NEXT(chassis, c_entries); if (chassis->c_refcount == 0) { TAILQ_REMOVE(&cfg->g_chassis, chassis, c_entries); lldpd_chassis_cleanup(chassis, 1); } } lldpd_count_neighbors(cfg); levent_schedule_cleanup(cfg); } /* Update chassis `ochassis' with values from `chassis'. The later one is not expected to be part of a list! It will also be wiped from memory. */ static void lldpd_move_chassis(struct lldpd_chassis *ochassis, struct lldpd_chassis *chassis) { struct lldpd_mgmt *mgmt, *mgmt_next; /* We want to keep refcount, index and list stuff from the current * chassis */ TAILQ_ENTRY(lldpd_chassis) entries; int refcount = ochassis->c_refcount; int index = ochassis->c_index; memcpy(&entries, &ochassis->c_entries, sizeof(entries)); lldpd_chassis_cleanup(ochassis, 0); /* Make the copy. */ /* WARNING: this is a kludgy hack, we need in-place copy and cannot use * marshaling. 
 */
	memcpy(ochassis, chassis, sizeof(struct lldpd_chassis));
	TAILQ_INIT(&ochassis->c_mgmt);

	/* Copy of management addresses */
	for (mgmt = TAILQ_FIRST(&chassis->c_mgmt);
	     mgmt != NULL;
	     mgmt = mgmt_next) {
		mgmt_next = TAILQ_NEXT(mgmt, m_entries);
		TAILQ_REMOVE(&chassis->c_mgmt, mgmt, m_entries);
		TAILQ_INSERT_TAIL(&ochassis->c_mgmt, mgmt, m_entries);
	}

	/* Restore saved values */
	ochassis->c_refcount = refcount;
	ochassis->c_index = index;
	memcpy(&ochassis->c_entries, &entries, sizeof(entries));

	/* Get rid of the new chassis */
	free(chassis);
}

static int
lldpd_guess_type(struct lldpd *cfg, char *frame, int s)
{
	int i;
	if (s < ETHER_ADDR_LEN)
		return -1;
	for (i = 0; cfg->g_protocols[i].mode != 0; i++) {
		if (!cfg->g_protocols[i].enabled)
			continue;
		if (cfg->g_protocols[i].guess == NULL) {
			if (memcmp(frame, cfg->g_protocols[i].mac, ETHER_ADDR_LEN) == 0) {
				log_debug("decode", "guessed protocol is %s (from MAC address)",
				    cfg->g_protocols[i].name);
				return cfg->g_protocols[i].mode;
			}
		} else {
			if (cfg->g_protocols[i].guess(frame, s)) {
				log_debug("decode", "guessed protocol is %s (from detector function)",
				    cfg->g_protocols[i].name);
				return cfg->g_protocols[i].mode;
			}
		}
	}
	return -1;
}

static void
lldpd_decode(struct lldpd *cfg, char *frame, int s,
    struct lldpd_hardware *hardware)
{
	int i;
	struct lldpd_chassis *chassis, *ochassis = NULL;
	struct lldpd_port *port, *oport = NULL, *aport;
	int guess = LLDPD_MODE_LLDP;

	log_debug("decode", "decode a received frame on %s", hardware->h_ifname);

	if (s < sizeof(struct ether_header) + 4)
		/* Too short, just discard it */
		return;

	/* Decapsulate VLAN frames */
	struct ether_header eheader;
	memcpy(&eheader, frame, sizeof(struct ether_header));
	if (eheader.ether_type == htons(ETHERTYPE_VLAN)) {
		/* VLAN decapsulation means to shift 4 bytes left the frame
		 * from offset 2*ETHER_ADDR_LEN. Only s - 2*ETHER_ADDR_LEN - 4
		 * bytes remain after the tag: copying s - 2*ETHER_ADDR_LEN
		 * bytes would read 4 bytes past the end of the buffer. */
		memmove(frame + 2*ETHER_ADDR_LEN, frame + 2*ETHER_ADDR_LEN + 4,
		    s - 2*ETHER_ADDR_LEN - 4);
		s -= 4;
	}

	TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) {
		if ((oport->p_lastframe != NULL) &&
		    (oport->p_lastframe->size == s) &&
		    (memcmp(oport->p_lastframe->frame, frame, s) == 0)) {
			/* Already received the same frame */
			log_debug("decode", "duplicate frame, no need to decode");
			oport->p_lastupdate = time(NULL);
			return;
		}
	}

	guess = lldpd_guess_type(cfg, frame, s);
	for (i = 0; cfg->g_protocols[i].mode != 0; i++) {
		if (!cfg->g_protocols[i].enabled)
			continue;
		if (cfg->g_protocols[i].mode == guess) {
			log_debug("decode", "using decode function for %s protocol",
			    cfg->g_protocols[i].name);
			if (cfg->g_protocols[i].decode(cfg, frame,
				s, hardware, &chassis, &port) == -1) {
				log_debug("decode", "function for %s protocol did not decode this frame",
				    cfg->g_protocols[i].name);
				return;
			}
			chassis->c_protocol = port->p_protocol =
			    cfg->g_protocols[i].mode;
			break;
		}
	}
	if (cfg->g_protocols[i].mode == 0) {
		log_debug("decode", "unable to guess frame type on %s",
		    hardware->h_ifname);
		return;
	}
	TRACE(LLDPD_FRAME_DECODED(
		    hardware->h_ifname,
		    cfg->g_protocols[i].name,
		    chassis->c_name,
		    port->p_descr));

	/* Do we already have the same MSAP somewhere?
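	 * An MSAP (MAC Service Access Point) identifies a neighbor port by
	 * the pair chassis ID + port ID; the comparison below additionally
	 * requires the same discovery protocol.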
*/ int count = 0; log_debug("decode", "search for the same MSAP"); TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if (port->p_protocol == oport->p_protocol) { count++; if ((port->p_id_subtype == oport->p_id_subtype) && (port->p_id_len == oport->p_id_len) && (memcmp(port->p_id, oport->p_id, port->p_id_len) == 0) && (chassis->c_id_subtype == oport->p_chassis->c_id_subtype) && (chassis->c_id_len == oport->p_chassis->c_id_len) && (memcmp(chassis->c_id, oport->p_chassis->c_id, chassis->c_id_len) == 0)) { ochassis = oport->p_chassis; log_debug("decode", "MSAP is already known"); break; } } } /* Do we have room for a new MSAP? */ if (!oport && cfg->g_config.c_max_neighbors) { if (count == (cfg->g_config.c_max_neighbors - 1)) { log_debug("decode", "max neighbors %d reached for port %s, " "dropping any new ones silently", cfg->g_config.c_max_neighbors, hardware->h_ifname); } else if (count > cfg->g_config.c_max_neighbors - 1) { log_debug("decode", "too many neighbors for port %s, drop this new one", hardware->h_ifname); lldpd_port_cleanup(port, 1); lldpd_chassis_cleanup(chassis, 1); free(port); return; } } /* No, but do we already know the system? */ if (!oport) { log_debug("decode", "MSAP is unknown, search for the chassis"); TAILQ_FOREACH(ochassis, &cfg->g_chassis, c_entries) { if ((chassis->c_protocol == ochassis->c_protocol) && (chassis->c_id_subtype == ochassis->c_id_subtype) && (chassis->c_id_len == ochassis->c_id_len) && (memcmp(chassis->c_id, ochassis->c_id, chassis->c_id_len) == 0)) break; } } if (oport) { /* The port is known, remove it before adding it back */ TAILQ_REMOVE(&hardware->h_rports, oport, p_entries); lldpd_port_cleanup(oport, 1); free(oport); } if (ochassis) { lldpd_move_chassis(ochassis, chassis); chassis = ochassis; } else { /* Chassis not known, add it */ log_debug("decode", "unknown chassis, add it to the list"); chassis->c_index = ++cfg->g_lastrid; chassis->c_refcount = 0; TAILQ_INSERT_TAIL(&cfg->g_chassis, chassis, c_entries); i = 0; TAILQ_FOREACH(ochassis, &cfg->g_chassis, c_entries) i++; log_debug("decode", "%d different systems are known", i); } /* Add port */ port->p_lastchange = port->p_lastupdate = time(NULL); if ((port->p_lastframe = (struct lldpd_frame *)malloc(s + sizeof(struct lldpd_frame))) != NULL) { port->p_lastframe->size = s; memcpy(port->p_lastframe->frame, frame, s); } TAILQ_INSERT_TAIL(&hardware->h_rports, port, p_entries); port->p_chassis = chassis; port->p_chassis->c_refcount++; /* Several cases are possible : 1. chassis is new, its refcount was 0. It is now attached to this port, its refcount is 1. 2. chassis already exists and was attached to another port, we increase its refcount accordingly. 3. chassis already exists and was attached to the same port, its refcount was decreased with lldpd_port_cleanup() and is now increased again. In all cases, if the port already existed, it has been freed with lldpd_port_cleanup() and therefore, the refcount of the chassis that was attached to it is decreased. 
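	   For example, in case 3, the refcount of a chassis known only
	   through this port drops from 1 to 0 in lldpd_port_cleanup() and
	   comes back to 1 with the increment above; chassis with a
	   refcount of 0 are only reclaimed later, in lldpd_cleanup().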
*/ /* coverity[use_after_free] TAILQ_REMOVE does the right thing */ i = 0; TAILQ_FOREACH(aport, &hardware->h_rports, p_entries) i++; log_debug("decode", "%d neighbors for %s", i, hardware->h_ifname); if (!oport) hardware->h_insert_cnt++; /* Notify */ log_debug("decode", "send notifications for changes on %s", hardware->h_ifname); if (oport) { TRACE(LLDPD_NEIGHBOR_UPDATE(hardware->h_ifname, chassis->c_name, port->p_descr, i)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_UPDATED, port); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_UPDATED, port); #endif } else { TRACE(LLDPD_NEIGHBOR_NEW(hardware->h_ifname, chassis->c_name, port->p_descr, i)); levent_ctl_notify(hardware->h_ifname, NEIGHBOR_CHANGE_ADDED, port); #ifdef USE_SNMP agent_notify(hardware, NEIGHBOR_CHANGE_ADDED, port); #endif } #ifdef ENABLE_LLDPMED if (!oport && port->p_chassis->c_med_type) { /* New neighbor, fast start */ if (hardware->h_cfg->g_config.c_enable_fast_start && !hardware->h_tx_fast) { log_debug("decode", "%s: entering fast start due to " "new neighbor", hardware->h_ifname); hardware->h_tx_fast = hardware->h_cfg->g_config.c_tx_fast_init; } levent_schedule_pdu(hardware); } #endif return; } /* Get the output of lsb_release -s -d. This is a slow function. It should be called once. It return NULL if any problem happens. Otherwise, this is a statically allocated buffer. The result includes the trailing \n */ static char * lldpd_get_lsb_release() { static char release[1024]; char *const command[] = { "lsb_release", "-s", "-d", NULL }; int pid, status, devnull, count; int pipefd[2]; log_debug("localchassis", "grab LSB release"); if (pipe(pipefd)) { log_warn("localchassis", "unable to get a pair of pipes"); return NULL; } pid = vfork(); switch (pid) { case -1: log_warn("localchassis", "unable to fork"); return NULL; case 0: /* Child, exec lsb_release */ close(pipefd[0]); if ((devnull = open("/dev/null", O_RDWR, 0)) != -1) { dup2(devnull, STDIN_FILENO); dup2(devnull, STDERR_FILENO); dup2(pipefd[1], STDOUT_FILENO); if (devnull > 2) close(devnull); if (pipefd[1] > 2) close(pipefd[1]); execvp("lsb_release", command); } _exit(127); break; default: /* Father, read the output from the children */ close(pipefd[1]); count = 0; do { status = read(pipefd[0], release+count, sizeof(release)-count); if ((status == -1) && (errno == EINTR)) continue; if (status > 0) count += status; } while (count < sizeof(release) && (status > 0)); if (status < 0) { log_info("localchassis", "unable to read from lsb_release"); close(pipefd[0]); waitpid(pid, &status, 0); return NULL; } close(pipefd[0]); if (count >= sizeof(release)) { log_info("localchassis", "output of lsb_release is too large"); waitpid(pid, &status, 0); return NULL; } status = -1; if (waitpid(pid, &status, 0) != pid) return NULL; if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) { log_info("localchassis", "lsb_release information not available"); return NULL; } if (!count) { log_info("localchassis", "lsb_release returned an empty string"); return NULL; } release[count] = '\0'; return release; } /* Should not be here */ return NULL; } /* Same like lldpd_get_lsb_release but reads /etc/os-release for PRETTY_NAME=. 
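   A matching line typically looks like:
     PRETTY_NAME="Debian GNU/Linux 11 (bullseye)"
   and only the value, with surrounding quotes and the trailing newline
   stripped, is returned.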
 */
static char *
lldpd_get_os_release() {
	static char release[1024];
	char line[1024];
	char *key, *val;
	char *ptr1 = release;

	log_debug("localchassis", "grab OS release");
	FILE *fp = fopen("/etc/os-release", "r");
	if (!fp) {
		log_debug("localchassis", "could not open /etc/os-release");
		fp = fopen("/usr/lib/os-release", "r");
	}
	if (!fp) {
		log_info("localchassis",
		    "could not open either /etc/os-release or /usr/lib/os-release");
		return NULL;
	}

	while ((fgets(line, sizeof(line), fp) != NULL)) {
		key = strtok(line, "=");
		val = strtok(NULL, "=");
		/* strtok() returns NULL on blank or malformed lines; check
		 * both tokens before dereferencing them. */
		if (key == NULL || val == NULL)
			continue;
		if (strncmp(key, "PRETTY_NAME", sizeof(line)) == 0) {
			strlcpy(release, val, sizeof(release));
			break;
		}
	}
	fclose(fp);

	/* No PRETTY_NAME found: bail out instead of walking backwards from
	 * release[-1] below. */
	if (release[0] == '\0')
		return NULL;

	/* Remove trailing newline and all " in the string. */
	ptr1 = release + strlen(release) - 1;
	while (ptr1 != release &&
	    ((*ptr1 == '"') || (*ptr1 == '\n'))) {
		*ptr1 = '\0';
		ptr1--;
	}

	if (release[0] == '"')
		return release + 1;
	return release;
}

static void
lldpd_hide_ports(struct lldpd *cfg, struct lldpd_hardware *hardware, int mask) {
	struct lldpd_port *port;
	int protocols[LLDPD_MODE_MAX + 1];
	char buffer[256];
	int i, j, k, found;
	unsigned int min;

	log_debug("smartfilter", "apply smart filter for port %s",
	    hardware->h_ifname);

	/* Compute the number of occurrences of each protocol */
	for (i = 0; i <= LLDPD_MODE_MAX; i++) protocols[i] = 0;
	TAILQ_FOREACH(port, &hardware->h_rports, p_entries)
		protocols[port->p_protocol]++;

	/* Turn the protocols[] array into an array of enabled/disabled
	 * protocols. 1 means enabled, 0 means disabled. */
	min = (unsigned int)-1;
	for (i = 0; i <= LLDPD_MODE_MAX; i++)
		if (protocols[i] && (protocols[i] < min))
			min = protocols[i];
	found = 0;
	for (i = 0; i <= LLDPD_MODE_MAX; i++)
		if ((protocols[i] == min) && !found) {
			/* If we need a tie breaker, we take the first
			 * protocol only */
			if (cfg->g_config.c_smart & mask &
			    (SMART_OUTGOING_ONE_PROTO | SMART_INCOMING_ONE_PROTO))
				found = 1;
			protocols[i] = 1;
		} else protocols[i] = 0;

	/* We set the p_hidden flag to 1 if the protocol is disabled */
	TAILQ_FOREACH(port, &hardware->h_rports, p_entries) {
		if (mask == SMART_OUTGOING)
			port->p_hidden_out = protocols[port->p_protocol] ? 0 : 1;
		else
			port->p_hidden_in = protocols[port->p_protocol] ? 0 : 1;
	}

	/* If we want only one neighbor, we take the first one */
	if (cfg->g_config.c_smart & mask &
	    (SMART_OUTGOING_ONE_NEIGH | SMART_INCOMING_ONE_NEIGH)) {
		found = 0;
		TAILQ_FOREACH(port, &hardware->h_rports, p_entries) {
			if (mask == SMART_OUTGOING) {
				if (found) port->p_hidden_out = 1;
				if (!port->p_hidden_out) found = 1;
			}
			if (mask == SMART_INCOMING) {
				if (found) port->p_hidden_in = 1;
				if (!port->p_hidden_in) found = 1;
			}
		}
	}

	/* Print a debug message summarizing the operation */
	for (i = 0; i <= LLDPD_MODE_MAX; i++) protocols[i] = 0;
	k = j = 0;
	TAILQ_FOREACH(port, &hardware->h_rports, p_entries) {
		if (!(((mask == SMART_OUTGOING) && port->p_hidden_out) ||
			((mask == SMART_INCOMING) && port->p_hidden_in))) {
			k++;
			protocols[port->p_protocol] = 1;
		}
		j++;
	}
	buffer[0] = '\0';
	for (i = 0; cfg->g_protocols[i].mode != 0; i++) {
		if (cfg->g_protocols[i].enabled &&
		    protocols[cfg->g_protocols[i].mode]) {
			if (strlen(buffer) +
			    strlen(cfg->g_protocols[i].name) + 3 > sizeof(buffer)) {
				/* Unlikely, our buffer is too small */
				memcpy(buffer + sizeof(buffer) - 4, "...", 4);
				break;
			}
			if (buffer[0]) strncat(buffer, ", ", 2);
			strncat(buffer, cfg->g_protocols[i].name,
			    strlen(cfg->g_protocols[i].name));
		}
	}
	log_debug("smartfilter", "%s: %s: %d visible neighbors (out of %d)",
	    hardware->h_ifname,
	    (mask == SMART_OUTGOING) ? "out filter" : "in filter",
	    k, j);
	log_debug("smartfilter", "%s: protocols:
%s", hardware->h_ifname, buffer[0]?buffer:"(none)"); } /* Hide unwanted ports depending on smart mode set by the user */ static void lldpd_hide_all(struct lldpd *cfg) { struct lldpd_hardware *hardware; if (!cfg->g_config.c_smart) return; log_debug("smartfilter", "apply smart filter results on all ports"); TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) { if (cfg->g_config.c_smart & SMART_INCOMING_FILTER) lldpd_hide_ports(cfg, hardware, SMART_INCOMING); if (cfg->g_config.c_smart & SMART_OUTGOING_FILTER) lldpd_hide_ports(cfg, hardware, SMART_OUTGOING); } } void lldpd_recv(struct lldpd *cfg, struct lldpd_hardware *hardware, int fd) { char *buffer = NULL; int n; log_debug("receive", "receive a frame on %s", hardware->h_ifname); if ((buffer = (char *)malloc(hardware->h_mtu)) == NULL) { log_warn("receive", "failed to alloc reception buffer"); return; } if ((n = hardware->h_ops->recv(cfg, hardware, fd, buffer, hardware->h_mtu)) == -1) { log_debug("receive", "discard frame received on %s", hardware->h_ifname); free(buffer); return; } if (cfg->g_config.c_paused) { log_debug("receive", "paused, ignore the frame on %s", hardware->h_ifname); free(buffer); return; } hardware->h_rx_cnt++; log_debug("receive", "decode received frame on %s", hardware->h_ifname); TRACE(LLDPD_FRAME_RECEIVED(hardware->h_ifname, buffer, (size_t)n)); lldpd_decode(cfg, buffer, n, hardware); lldpd_hide_all(cfg); /* Immediatly hide */ lldpd_count_neighbors(cfg); free(buffer); } static void lldpd_send_shutdown(struct lldpd_hardware *hardware) { struct lldpd *cfg = hardware->h_cfg; if (cfg->g_config.c_receiveonly || cfg->g_config.c_paused) return; if ((hardware->h_flags & IFF_RUNNING) == 0) return; /* It's safe to call `lldp_send_shutdown()` because shutdown LLDPU will * only be emitted if LLDP was sent on that port. */ if (lldp_send_shutdown(hardware->h_cfg, hardware) != 0) log_warnx("send", "unable to send shutdown LLDPDU on %s", hardware->h_ifname); } void lldpd_send(struct lldpd_hardware *hardware) { struct lldpd *cfg = hardware->h_cfg; struct lldpd_port *port; int i, sent; if (cfg->g_config.c_receiveonly || cfg->g_config.c_paused) return; if ((hardware->h_flags & IFF_RUNNING) == 0) return; log_debug("send", "send PDU on %s", hardware->h_ifname); sent = 0; for (i=0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; /* We send only if we have at least one remote system * speaking this protocol or if the protocol is forced */ if (cfg->g_protocols[i].enabled > 1) { cfg->g_protocols[i].send(cfg, hardware); sent++; continue; } TAILQ_FOREACH(port, &hardware->h_rports, p_entries) { /* If this remote port is disabled, we don't * consider it */ if (port->p_hidden_out) continue; if (port->p_protocol == cfg->g_protocols[i].mode) { TRACE(LLDPD_FRAME_SEND(hardware->h_ifname, cfg->g_protocols[i].name)); log_debug("send", "send PDU on %s with protocol %s", hardware->h_ifname, cfg->g_protocols[i].name); cfg->g_protocols[i].send(cfg, hardware); sent++; break; } } } if (!sent) { /* Nothing was sent for this port, let's speak the first * available protocol. 
*/ for (i = 0; cfg->g_protocols[i].mode != 0; i++) { if (!cfg->g_protocols[i].enabled) continue; TRACE(LLDPD_FRAME_SEND(hardware->h_ifname, cfg->g_protocols[i].name)); log_debug("send", "fallback to protocol %s for %s", cfg->g_protocols[i].name, hardware->h_ifname); cfg->g_protocols[i].send(cfg, hardware); break; } if (cfg->g_protocols[i].mode == 0) log_warnx("send", "no protocol enabled, dunno what to send"); } } #ifdef ENABLE_LLDPMED static void lldpd_med(struct lldpd_chassis *chassis) { static short int once = 0; if (!once) { chassis->c_med_hw = dmi_hw(); chassis->c_med_fw = dmi_fw(); chassis->c_med_sn = dmi_sn(); chassis->c_med_manuf = dmi_manuf(); chassis->c_med_model = dmi_model(); chassis->c_med_asset = dmi_asset(); once = 1; } } #endif static int lldpd_routing_enabled(struct lldpd *cfg) { int routing; if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_ROUTER) == 0) return 0; if ((routing = interfaces_routing_enabled(cfg)) == -1) { log_debug("localchassis", "unable to check if routing is enabled"); return 0; } return routing; } static void lldpd_update_localchassis(struct lldpd *cfg) { struct utsname un; char *hp; log_debug("localchassis", "update information for local chassis"); assert(LOCAL_CHASSIS(cfg) != NULL); /* Set system name and description */ if (uname(&un) < 0) fatal("localchassis", "failed to get system information"); if (cfg->g_config.c_hostname) { log_debug("localchassis", "use overridden system name `%s`", cfg->g_config.c_hostname); hp = cfg->g_config.c_hostname; } else { if ((hp = priv_gethostname()) == NULL) fatal("localchassis", "failed to get system name"); } free(LOCAL_CHASSIS(cfg)->c_name); free(LOCAL_CHASSIS(cfg)->c_descr); if ((LOCAL_CHASSIS(cfg)->c_name = strdup(hp)) == NULL) fatal("localchassis", NULL); if (cfg->g_config.c_description) { log_debug("localchassis", "use overridden description `%s`", cfg->g_config.c_description); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s", cfg->g_config.c_description) == -1) fatal("localchassis", "failed to set full system description"); } else { if (cfg->g_config.c_advertise_version) { log_debug("localchassis", "advertise system version"); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s %s %s %s %s", cfg->g_lsb_release?cfg->g_lsb_release:"", un.sysname, un.release, un.version, un.machine) == -1) fatal("localchassis", "failed to set full system description"); } else { log_debug("localchassis", "do not advertise system version"); if (asprintf(&LOCAL_CHASSIS(cfg)->c_descr, "%s", cfg->g_lsb_release?cfg->g_lsb_release:un.sysname) == -1) fatal("localchassis", "failed to set minimal system description"); } } if (cfg->g_config.c_platform == NULL) cfg->g_config.c_platform = strdup(un.sysname); /* Check routing */ if (lldpd_routing_enabled(cfg)) { log_debug("localchassis", "routing is enabled, enable router capability"); LOCAL_CHASSIS(cfg)->c_cap_enabled |= LLDP_CAP_ROUTER; } else LOCAL_CHASSIS(cfg)->c_cap_enabled &= ~LLDP_CAP_ROUTER; #ifdef ENABLE_LLDPMED if (LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_TELEPHONE) LOCAL_CHASSIS(cfg)->c_cap_enabled |= LLDP_CAP_TELEPHONE; lldpd_med(LOCAL_CHASSIS(cfg)); free(LOCAL_CHASSIS(cfg)->c_med_sw); if (cfg->g_config.c_advertise_version) LOCAL_CHASSIS(cfg)->c_med_sw = strdup(un.release); else LOCAL_CHASSIS(cfg)->c_med_sw = strdup("Unknown"); #endif if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_STATION) && (LOCAL_CHASSIS(cfg)->c_cap_enabled == 0)) LOCAL_CHASSIS(cfg)->c_cap_enabled = LLDP_CAP_STATION; /* Set chassis ID if needed. 
This is only done if chassis ID has not been set previously (with the MAC address of an interface for example) */ if (LOCAL_CHASSIS(cfg)->c_id == NULL) { log_debug("localchassis", "no chassis ID is currently set, use chassis name"); if (!(LOCAL_CHASSIS(cfg)->c_id = strdup(LOCAL_CHASSIS(cfg)->c_name))) fatal("localchassis", NULL); LOCAL_CHASSIS(cfg)->c_id_len = strlen(LOCAL_CHASSIS(cfg)->c_name); LOCAL_CHASSIS(cfg)->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LOCAL; } } void lldpd_update_localports(struct lldpd *cfg) { struct lldpd_hardware *hardware; log_debug("localchassis", "update information for local ports"); /* h_flags is set to 0 for each port. If the port is updated, h_flags * will be set to a non-zero value. This will allow us to clean up any * non up-to-date port */ TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) hardware->h_flags = 0; TRACE(LLDPD_INTERFACES_UPDATE()); interfaces_update(cfg); lldpd_cleanup(cfg); lldpd_reset_timer(cfg); } void lldpd_loop(struct lldpd *cfg) { /* Main loop. 1. Update local ports information 2. Update local chassis information */ log_debug("loop", "start new loop"); LOCAL_CHASSIS(cfg)->c_cap_enabled = 0; /* Information for local ports is triggered even when it is possible to * update them on some other event because we want to refresh them if we * missed something. */ log_debug("loop", "update information for local ports"); lldpd_update_localports(cfg); log_debug("loop", "update information for local chassis"); lldpd_update_localchassis(cfg); lldpd_count_neighbors(cfg); } static void lldpd_exit(struct lldpd *cfg) { struct lldpd_hardware *hardware, *hardware_next; log_debug("main", "exit lldpd"); TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries) lldpd_send_shutdown(hardware); close(cfg->g_ctl); priv_ctl_cleanup(cfg->g_ctlname); log_debug("main", "cleanup hardware information"); for (hardware = TAILQ_FIRST(&cfg->g_hardware); hardware != NULL; hardware = hardware_next) { hardware_next = TAILQ_NEXT(hardware, h_entries); log_debug("main", "cleanup interface %s", hardware->h_ifname); lldpd_remote_cleanup(hardware, NULL, 1); lldpd_hardware_cleanup(cfg, hardware); } } /** * Run lldpcli to configure lldpd. * * @return PID of running lldpcli or -1 if error. 
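 *
 * The child ends up executing, roughly:
 *   lldpcli -s[d...] -u <ctlname> -c SYSCONFDIR/lldpd.conf \
 *           -c SYSCONFDIR/lldpd.d resume
 * with one `d` appended to the option string per debug level.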
*/ static pid_t lldpd_configure(int debug, const char *path, const char *ctlname) { pid_t lldpcli = vfork(); int devnull; char sdebug[debug + 3]; memset(sdebug, 'd', debug + 3); sdebug[debug + 2] = '\0'; sdebug[0] = '-'; sdebug[1] = 's'; log_debug("main", "invoke %s %s", path, sdebug); switch (lldpcli) { case -1: log_warn("main", "unable to fork"); return -1; case 0: /* Child, exec lldpcli */ if ((devnull = open("/dev/null", O_RDWR, 0)) != -1) { dup2(devnull, STDIN_FILENO); dup2(devnull, STDOUT_FILENO); if (devnull > 2) close(devnull); execl(path, "lldpcli", sdebug, "-u", ctlname, "-c", SYSCONFDIR "/lldpd.conf", "-c", SYSCONFDIR "/lldpd.d", "resume", (char *)NULL); log_warn("main", "unable to execute %s", path); log_warnx("main", "configuration is incomplete, lldpd needs to be unpaused"); } _exit(127); break; default: /* Father, don't do anything stupid */ return lldpcli; } /* Should not be here */ return -1; } struct intint { int a; int b; }; static const struct intint filters[] = { { 0, 0 }, { 1, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 2, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO }, { 3, SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 4, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER }, { 5, SMART_INCOMING_FILTER }, { 6, SMART_OUTGOING_FILTER }, { 7, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 8, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH }, { 9, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { 10, SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 11, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH }, { 12, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 13, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER }, { 14, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 15, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_OUTGOING_FILTER }, { 16, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 17, SMART_INCOMING_FILTER | SMART_INCOMING_ONE_PROTO | SMART_INCOMING_ONE_NEIGH | SMART_OUTGOING_FILTER }, { 18, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_NEIGH }, { 19, SMART_INCOMING_FILTER | SMART_OUTGOING_FILTER | SMART_OUTGOING_ONE_PROTO }, { -1, 0 } }; #ifndef HOST_OS_OSX /** * Tell if we have been started by upstart. */ static int lldpd_started_by_upstart() { #ifdef HOST_OS_LINUX const char *upstartjob = getenv("UPSTART_JOB"); if (!(upstartjob && !strcmp(upstartjob, "lldpd"))) return 0; log_debug("main", "running with upstart, don't fork but stop"); raise(SIGSTOP); unsetenv("UPSTART_JOB"); return 1; #else return 0; #endif } /** * Tell if we have been started by systemd. 
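 *
 * Detection relies on the NOTIFY_SOCKET environment variable that
 * systemd sets for Type=notify services: when present, a READY=1
 * datagram is sent on that socket instead of forking. A leading '@'
 * denotes an abstract-namespace socket and is translated to a NUL
 * byte below.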
*/ static int lldpd_started_by_systemd() { #ifdef HOST_OS_LINUX int fd = -1; const char *notifysocket = getenv("NOTIFY_SOCKET"); if (!notifysocket || !strchr("@/", notifysocket[0]) || strlen(notifysocket) < 2) return 0; log_debug("main", "running with systemd, don't fork but signal ready"); if ((fd = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) { log_warn("main", "unable to open systemd notification socket %s", notifysocket); return 0; } struct sockaddr_un su = { .sun_family = AF_UNIX }; strlcpy(su.sun_path, notifysocket, sizeof(su.sun_path)); if (notifysocket[0] == '@') su.sun_path[0] = 0; struct iovec iov = { .iov_base = "READY=1", .iov_len = strlen("READY=1") }; struct msghdr hdr = { .msg_name = &su, .msg_namelen = offsetof(struct sockaddr_un, sun_path) + strlen(notifysocket), .msg_iov = &iov, .msg_iovlen = 1 }; unsetenv("NOTIFY_SOCKET"); if (sendmsg(fd, &hdr, MSG_NOSIGNAL) < 0) { log_warn("main", "unable to send notification to systemd"); close(fd); return 0; } close(fd); return 1; #else return 0; #endif } #endif int lldpd_main(int argc, char *argv[], char *envp[]) { struct lldpd *cfg; struct lldpd_chassis *lchassis; int ch, debug = 0; #ifdef USE_SNMP int snmp = 0; const char *agentx = NULL; /* AgentX socket */ #endif const char *ctlname = NULL; char *mgmtp = NULL; char *cidp = NULL; char *interfaces = NULL; /* We do not want more options here. Please add them in lldpcli instead * unless there is a very good reason. Most command-line options will * get deprecated at some point. */ char *popt, opts[] = "H:vhkrdD:xX:m:u:4:6:I:C:p:M:P:S:iL:@ "; int i, found, advertise_version = 1; #ifdef ENABLE_LLDPMED int lldpmed = 0, noinventory = 0; int enable_fast_start = 1; #endif char *descr_override = NULL; char *platform_override = NULL; char *lsb_release = NULL; const char *lldpcli = LLDPCLI_PATH; int smart = 15; int receiveonly = 0; int ctl; #ifdef ENABLE_PRIVSEP /* Non privileged user */ struct passwd *user; struct group *group; uid_t uid; gid_t gid; #endif saved_argv = argv; #if HAVE_SETPROCTITLE_INIT setproctitle_init(argc, argv, envp); #endif /* * Get and parse command line options */ if ((popt = strchr(opts, '@')) != NULL) { for (i=0; protos[i].mode != 0 && *popt != '\0'; i++) *(popt++) = protos[i].arg; *popt = '\0'; } while ((ch = getopt(argc, argv, opts)) != -1) { switch (ch) { case 'h': usage(); break; case 'v': fprintf(stdout, "%s\n", PACKAGE_VERSION); exit(0); break; case 'd': debug++; break; case 'D': log_accept(optarg); break; case 'r': receiveonly = 1; break; case 'm': if (mgmtp) { fprintf(stderr, "-m can only be used once\n"); usage(); } mgmtp = strdup(optarg); break; case 'u': if (ctlname) { fprintf(stderr, "-u can only be used once\n"); usage(); } ctlname = optarg; break; case 'I': if (interfaces) { fprintf(stderr, "-I can only be used once\n"); usage(); } interfaces = strdup(optarg); break; case 'C': if (cidp) { fprintf(stderr, "-C can only be used once\n"); usage(); } cidp = strdup(optarg); break; case 'L': if (strlen(optarg)) lldpcli = optarg; else lldpcli = NULL; break; case 'k': advertise_version = 0; break; #ifdef ENABLE_LLDPMED case 'M': lldpmed = atoi(optarg); if ((lldpmed < 1) || (lldpmed > 4)) { fprintf(stderr, "-M requires an argument between 1 and 4\n"); usage(); } break; case 'i': noinventory = 1; break; #else case 'M': case 'i': fprintf(stderr, "LLDP-MED support is not built-in\n"); usage(); break; #endif #ifdef USE_SNMP case 'x': snmp = 1; break; case 'X': if (agentx) { fprintf(stderr, "-X can only be used once\n"); usage(); } snmp = 1; agentx = optarg; break; #else case 
'x': case 'X': fprintf(stderr, "SNMP support is not built-in\n"); usage(); #endif break; case 'S': if (descr_override) { fprintf(stderr, "-S can only be used once\n"); usage(); } descr_override = strdup(optarg); break; case 'P': if (platform_override) { fprintf(stderr, "-P can only be used once\n"); usage(); } platform_override = strdup(optarg); break; case 'H': smart = atoi(optarg); break; default: found = 0; for (i=0; protos[i].mode != 0; i++) { if (ch == protos[i].arg) { found = 1; protos[i].enabled++; } } if (!found) usage(); } } if (ctlname == NULL) ctlname = LLDPD_CTL_SOCKET; /* Set correct smart mode */ for (i=0; (filters[i].a != -1) && (filters[i].a != smart); i++); if (filters[i].a == -1) { fprintf(stderr, "Incorrect mode for -H\n"); usage(); } smart = filters[i].b; log_init(debug, __progname); tzset(); /* Get timezone info before chroot */ log_debug("main", "lldpd " PACKAGE_VERSION " starting..."); /* Grab uid and gid to use for priv sep */ #ifdef ENABLE_PRIVSEP if ((user = getpwnam(PRIVSEP_USER)) == NULL) fatal("main", "no " PRIVSEP_USER " user for privilege separation"); uid = user->pw_uid; if ((group = getgrnam(PRIVSEP_GROUP)) == NULL) fatal("main", "no " PRIVSEP_GROUP " group for privilege separation"); gid = group->gr_gid; #endif /* Create and setup socket */ int retry = 1; log_debug("main", "creating control socket"); while ((ctl = ctl_create(ctlname)) == -1) { if (retry-- && errno == EADDRINUSE) { /* Check if a daemon is really listening */ int tfd; log_info("main", "unable to create control socket because it already exists"); log_info("main", "check if another instance is running"); if ((tfd = ctl_connect(ctlname)) != -1) { /* Another instance is running */ close(tfd); log_warnx("main", "another instance is running, please stop it"); fatalx("main", "giving up"); } else if (errno == ECONNREFUSED) { /* Nobody is listening */ log_info("main", "old control socket is present, clean it"); ctl_cleanup(ctlname); continue; } log_warn("main", "cannot determine if another daemon is already running"); fatalx("main", "giving up"); } log_warn("main", "unable to create control socket"); fatalx("main", "giving up"); } #ifdef ENABLE_PRIVSEP if (chown(ctlname, uid, gid) == -1) log_warn("main", "unable to chown control socket"); if (chmod(ctlname, S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | S_IWGRP | S_IXGRP) == -1) log_warn("main", "unable to chmod control socket"); #endif /* Disable SIGPIPE */ signal(SIGPIPE, SIG_IGN); /* Disable SIGHUP, until handlers are installed */ signal(SIGHUP, SIG_IGN); /* Configuration with lldpcli */ if (lldpcli) { log_debug("main", "invoking lldpcli for configuration"); if (lldpd_configure(debug, lldpcli, ctlname) == -1) fatal("main", "unable to spawn lldpcli"); } /* Daemonization, unless started by upstart, systemd or launchd or debug */ #ifndef HOST_OS_OSX if (!lldpd_started_by_upstart() && !lldpd_started_by_systemd() && !debug) { int pid; char *spid; log_debug("main", "daemonize"); if (daemon(0, 0) != 0) fatal("main", "failed to detach daemon"); if ((pid = open(LLDPD_PID_FILE, O_TRUNC | O_CREAT | O_WRONLY, 0666)) == -1) fatal("main", "unable to open pid file " LLDPD_PID_FILE); if (asprintf(&spid, "%d\n", getpid()) == -1) fatal("main", "unable to create pid file " LLDPD_PID_FILE); if (write(pid, spid, strlen(spid)) == -1) fatal("main", "unable to write pid file " LLDPD_PID_FILE); free(spid); close(pid); } #endif /* Try to read system information from /etc/os-release if possible. Fall back to lsb_release for compatibility. 
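	   (The fallback spawns lsb_release(1) and is noticeably slower; it
	   only runs when no PRETTY_NAME could be obtained from
	   /etc/os-release or /usr/lib/os-release.)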
*/ log_debug("main", "get OS/LSB release information"); lsb_release = lldpd_get_os_release(); if (!lsb_release) { lsb_release = lldpd_get_lsb_release(); } log_debug("main", "initialize privilege separation"); #ifdef ENABLE_PRIVSEP priv_init(PRIVSEP_CHROOT, ctl, uid, gid); #else priv_init(PRIVSEP_CHROOT, ctl, 0, 0); #endif /* Initialization of global configuration */ if ((cfg = (struct lldpd *) calloc(1, sizeof(struct lldpd))) == NULL) fatal("main", NULL); cfg->g_ctlname = ctlname; cfg->g_ctl = ctl; cfg->g_config.c_mgmt_pattern = mgmtp; cfg->g_config.c_cid_pattern = cidp; cfg->g_config.c_iface_pattern = interfaces; cfg->g_config.c_smart = smart; if (lldpcli) cfg->g_config.c_paused = 1; cfg->g_config.c_receiveonly = receiveonly; cfg->g_config.c_tx_interval = LLDPD_TX_INTERVAL; cfg->g_config.c_tx_hold = LLDPD_TX_HOLD; cfg->g_config.c_max_neighbors = LLDPD_MAX_NEIGHBORS; #ifdef ENABLE_LLDPMED cfg->g_config.c_enable_fast_start = enable_fast_start; cfg->g_config.c_tx_fast_init = LLDPD_FAST_INIT; cfg->g_config.c_tx_fast_interval = LLDPD_FAST_TX_INTERVAL; #endif #ifdef USE_SNMP cfg->g_snmp = snmp; cfg->g_snmp_agentx = agentx; #endif /* USE_SNMP */ cfg->g_config.c_bond_slave_src_mac_type = \ LLDP_BOND_SLAVE_SRC_MAC_TYPE_LOCALLY_ADMINISTERED; /* Get ioctl socket */ log_debug("main", "get an ioctl socket"); if ((cfg->g_sock = socket(AF_INET, SOCK_DGRAM, 0)) == -1) fatal("main", "failed to get ioctl socket"); /* Description */ if (!(cfg->g_config.c_advertise_version = advertise_version) && lsb_release && lsb_release[strlen(lsb_release) - 1] == '\n') lsb_release[strlen(lsb_release) - 1] = '\0'; cfg->g_lsb_release = lsb_release; if (descr_override) cfg->g_config.c_description = descr_override; if (platform_override) cfg->g_config.c_platform = platform_override; /* Set system capabilities */ log_debug("main", "set system capabilities"); if ((lchassis = (struct lldpd_chassis*) calloc(1, sizeof(struct lldpd_chassis))) == NULL) fatal("localchassis", NULL); cfg->g_config.c_cap_advertise = 1; lchassis->c_cap_available = LLDP_CAP_BRIDGE | LLDP_CAP_WLAN | LLDP_CAP_ROUTER | LLDP_CAP_STATION; cfg->g_config.c_mgmt_advertise = 1; TAILQ_INIT(&lchassis->c_mgmt); #ifdef ENABLE_LLDPMED if (lldpmed > 0) { if (lldpmed == LLDP_MED_CLASS_III) lchassis->c_cap_available |= LLDP_CAP_TELEPHONE; lchassis->c_med_type = lldpmed; lchassis->c_med_cap_available = LLDP_MED_CAP_CAP | LLDP_MED_CAP_IV | LLDP_MED_CAP_LOCATION | LLDP_MED_CAP_POLICY | LLDP_MED_CAP_MDI_PSE | LLDP_MED_CAP_MDI_PD; cfg->g_config.c_noinventory = noinventory; } else cfg->g_config.c_noinventory = 1; #endif /* Set TTL */ lchassis->c_ttl = cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold; log_debug("main", "initialize protocols"); cfg->g_protocols = protos; for (i=0; protos[i].mode != 0; i++) { /* With -ll, disable LLDP */ if (protos[i].mode == LLDPD_MODE_LLDP) protos[i].enabled %= 3; /* With -ccc force CDPV2, enable CDPV1 */ if (protos[i].mode == LLDPD_MODE_CDPV1 && protos[i].enabled == 3) { protos[i].enabled = 1; } /* With -cc force CDPV1, enable CDPV2 */ if (protos[i].mode == LLDPD_MODE_CDPV2 && protos[i].enabled == 2) { protos[i].enabled = 1; } /* With -cccc disable CDPV1, enable CDPV2 */ if (protos[i].mode == LLDPD_MODE_CDPV1 && protos[i].enabled >= 4) { protos[i].enabled = 0; } /* With -cccc disable CDPV1, enable CDPV2; -ccccc will force CDPv2 */ if (protos[i].mode == LLDPD_MODE_CDPV2 && protos[i].enabled == 4) { protos[i].enabled = 1; } if (protos[i].enabled > 1) log_info("main", "protocol %s enabled and forced", protos[i].name); else if 
(protos[i].enabled) log_info("main", "protocol %s enabled", protos[i].name); else log_info("main", "protocol %s disabled", protos[i].name); } TAILQ_INIT(&cfg->g_hardware); TAILQ_INIT(&cfg->g_chassis); TAILQ_INSERT_TAIL(&cfg->g_chassis, lchassis, c_entries); lchassis->c_refcount++; /* We should always keep a reference to local chassis */ /* Main loop */ log_debug("main", "start main loop"); levent_loop(cfg); lldpd_exit(cfg); free(cfg); return (0); }
./CrossVul/dataset_final_sorted/CWE-617/c/bad_1770_0
crossvul-cpp_data_good_313_0
/* * FLV muxer * Copyright (c) 2003 The FFmpeg Project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/intreadwrite.h" #include "libavutil/dict.h" #include "libavutil/intfloat.h" #include "libavutil/avassert.h" #include "libavutil/mathematics.h" #include "avio_internal.h" #include "avio.h" #include "avc.h" #include "avformat.h" #include "flv.h" #include "internal.h" #include "metadata.h" #include "libavutil/opt.h" #include "libavcodec/put_bits.h" #include "libavcodec/aacenctab.h" static const AVCodecTag flv_video_codec_ids[] = { { AV_CODEC_ID_FLV1, FLV_CODECID_H263 }, { AV_CODEC_ID_H263, FLV_CODECID_REALH263 }, { AV_CODEC_ID_MPEG4, FLV_CODECID_MPEG4 }, { AV_CODEC_ID_FLASHSV, FLV_CODECID_SCREEN }, { AV_CODEC_ID_FLASHSV2, FLV_CODECID_SCREEN2 }, { AV_CODEC_ID_VP6F, FLV_CODECID_VP6 }, { AV_CODEC_ID_VP6, FLV_CODECID_VP6 }, { AV_CODEC_ID_VP6A, FLV_CODECID_VP6A }, { AV_CODEC_ID_H264, FLV_CODECID_H264 }, { AV_CODEC_ID_NONE, 0 } }; static const AVCodecTag flv_audio_codec_ids[] = { { AV_CODEC_ID_MP3, FLV_CODECID_MP3 >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_U8, FLV_CODECID_PCM >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_S16BE, FLV_CODECID_PCM >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_S16LE, FLV_CODECID_PCM_LE >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_ADPCM_SWF, FLV_CODECID_ADPCM >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_AAC, FLV_CODECID_AAC >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_NELLYMOSER, FLV_CODECID_NELLYMOSER >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_MULAW, FLV_CODECID_PCM_MULAW >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_PCM_ALAW, FLV_CODECID_PCM_ALAW >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_SPEEX, FLV_CODECID_SPEEX >> FLV_AUDIO_CODECID_OFFSET }, { AV_CODEC_ID_NONE, 0 } }; typedef enum { FLV_AAC_SEQ_HEADER_DETECT = (1 << 0), FLV_NO_SEQUENCE_END = (1 << 1), FLV_ADD_KEYFRAME_INDEX = (1 << 2), FLV_NO_METADATA = (1 << 3), FLV_NO_DURATION_FILESIZE = (1 << 4), } FLVFlags; typedef struct FLVFileposition { int64_t keyframe_position; double keyframe_timestamp; struct FLVFileposition *next; } FLVFileposition; typedef struct FLVContext { AVClass *av_class; int reserved; int64_t duration_offset; int64_t filesize_offset; int64_t duration; int64_t delay; ///< first dts delay (needed for AVC & Speex) int64_t datastart_offset; int64_t datasize_offset; int64_t datasize; int64_t videosize_offset; int64_t videosize; int64_t audiosize_offset; int64_t audiosize; int64_t metadata_size_pos; int64_t metadata_totalsize_pos; int64_t metadata_totalsize; int64_t keyframe_index_size; int64_t lasttimestamp_offset; double lasttimestamp; int64_t lastkeyframetimestamp_offset; double lastkeyframetimestamp; int64_t lastkeyframelocation_offset; int64_t lastkeyframelocation; int acurframeindex; int64_t keyframes_info_offset; int64_t filepositions_count; FLVFileposition *filepositions; 
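    /* Keyframe index gathered when FLV_ADD_KEYFRAME_INDEX is set:
     * head_filepositions points to the first entry while filepositions
     * tracks the tail, so flv_append_keyframe_info() appends in O(1). */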
FLVFileposition *head_filepositions; AVCodecParameters *audio_par; AVCodecParameters *video_par; double framerate; AVCodecParameters *data_par; int flags; } FLVContext; typedef struct FLVStreamContext { int64_t last_ts; ///< last timestamp for each stream } FLVStreamContext; static int get_audio_flags(AVFormatContext *s, AVCodecParameters *par) { int flags = (par->bits_per_coded_sample == 16) ? FLV_SAMPLESSIZE_16BIT : FLV_SAMPLESSIZE_8BIT; if (par->codec_id == AV_CODEC_ID_AAC) // specs force these parameters return FLV_CODECID_AAC | FLV_SAMPLERATE_44100HZ | FLV_SAMPLESSIZE_16BIT | FLV_STEREO; else if (par->codec_id == AV_CODEC_ID_SPEEX) { if (par->sample_rate != 16000) { av_log(s, AV_LOG_ERROR, "FLV only supports wideband (16kHz) Speex audio\n"); return AVERROR(EINVAL); } if (par->channels != 1) { av_log(s, AV_LOG_ERROR, "FLV only supports mono Speex audio\n"); return AVERROR(EINVAL); } return FLV_CODECID_SPEEX | FLV_SAMPLERATE_11025HZ | FLV_SAMPLESSIZE_16BIT; } else { switch (par->sample_rate) { case 48000: // 48khz mp3 is stored with 44k1 samplerate identifer if (par->codec_id == AV_CODEC_ID_MP3) { flags |= FLV_SAMPLERATE_44100HZ; break; } else { goto error; } case 44100: flags |= FLV_SAMPLERATE_44100HZ; break; case 22050: flags |= FLV_SAMPLERATE_22050HZ; break; case 11025: flags |= FLV_SAMPLERATE_11025HZ; break; case 16000: // nellymoser only case 8000: // nellymoser only case 5512: // not MP3 if (par->codec_id != AV_CODEC_ID_MP3) { flags |= FLV_SAMPLERATE_SPECIAL; break; } default: error: av_log(s, AV_LOG_ERROR, "FLV does not support sample rate %d, " "choose from (44100, 22050, 11025)\n", par->sample_rate); return AVERROR(EINVAL); } } if (par->channels > 1) flags |= FLV_STEREO; switch (par->codec_id) { case AV_CODEC_ID_MP3: flags |= FLV_CODECID_MP3 | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_PCM_U8: flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_8BIT; break; case AV_CODEC_ID_PCM_S16BE: flags |= FLV_CODECID_PCM | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_PCM_S16LE: flags |= FLV_CODECID_PCM_LE | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_ADPCM_SWF: flags |= FLV_CODECID_ADPCM | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_NELLYMOSER: if (par->sample_rate == 8000) flags |= FLV_CODECID_NELLYMOSER_8KHZ_MONO | FLV_SAMPLESSIZE_16BIT; else if (par->sample_rate == 16000) flags |= FLV_CODECID_NELLYMOSER_16KHZ_MONO | FLV_SAMPLESSIZE_16BIT; else flags |= FLV_CODECID_NELLYMOSER | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_PCM_MULAW: flags = FLV_CODECID_PCM_MULAW | FLV_SAMPLERATE_SPECIAL | FLV_SAMPLESSIZE_16BIT; break; case AV_CODEC_ID_PCM_ALAW: flags = FLV_CODECID_PCM_ALAW | FLV_SAMPLERATE_SPECIAL | FLV_SAMPLESSIZE_16BIT; break; case 0: flags |= par->codec_tag << 4; break; default: av_log(s, AV_LOG_ERROR, "Audio codec '%s' not compatible with FLV\n", avcodec_get_name(par->codec_id)); return AVERROR(EINVAL); } return flags; } static void put_amf_string(AVIOContext *pb, const char *str) { size_t len = strlen(str); avio_wb16(pb, len); avio_write(pb, str, len); } // FLV timestamps are 32 bits signed, RTMP timestamps should be 32-bit unsigned static void put_timestamp(AVIOContext *pb, int64_t ts) { avio_wb24(pb, ts & 0xFFFFFF); avio_w8(pb, (ts >> 24) & 0x7F); } static void put_avc_eos_tag(AVIOContext *pb, unsigned ts) { avio_w8(pb, FLV_TAG_TYPE_VIDEO); avio_wb24(pb, 5); /* Tag Data Size */ put_timestamp(pb, ts); avio_wb24(pb, 0); /* StreamId = 0 */ avio_w8(pb, 23); /* ub[4] FrameType = 1, ub[4] CodecId = 7 */ avio_w8(pb, 2); /* AVC end of sequence */ avio_wb24(pb, 0); /* Always 0 for AVC 
EOS. */ avio_wb32(pb, 16); /* Size of FLV tag */ } static void put_amf_double(AVIOContext *pb, double d) { avio_w8(pb, AMF_DATA_TYPE_NUMBER); avio_wb64(pb, av_double2int(d)); } static void put_amf_byte(AVIOContext *pb, unsigned char abyte) { avio_w8(pb, abyte); } static void put_amf_dword_array(AVIOContext *pb, uint32_t dw) { avio_w8(pb, AMF_DATA_TYPE_ARRAY); avio_wb32(pb, dw); } static void put_amf_bool(AVIOContext *pb, int b) { avio_w8(pb, AMF_DATA_TYPE_BOOL); avio_w8(pb, !!b); } static void write_metadata(AVFormatContext *s, unsigned int ts) { AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; int write_duration_filesize = !(flv->flags & FLV_NO_DURATION_FILESIZE); int metadata_count = 0; int64_t metadata_count_pos; AVDictionaryEntry *tag = NULL; /* write meta_tag */ avio_w8(pb, FLV_TAG_TYPE_META); // tag type META flv->metadata_size_pos = avio_tell(pb); avio_wb24(pb, 0); // size of data part (sum of all parts below) avio_wb24(pb, ts); // timestamp avio_wb32(pb, 0); // reserved /* now data of data_size size */ /* first event name as a string */ avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, "onMetaData"); // 12 bytes /* mixed array (hash) with size and string/type/data tuples */ avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY); metadata_count_pos = avio_tell(pb); metadata_count = 4 * !!flv->video_par + 5 * !!flv->audio_par + 1 * !!flv->data_par; if (write_duration_filesize) { metadata_count += 2; // +2 for duration and file size } avio_wb32(pb, metadata_count); if (write_duration_filesize) { put_amf_string(pb, "duration"); flv->duration_offset = avio_tell(pb); // fill in the guessed duration, it'll be corrected later if incorrect put_amf_double(pb, s->duration / AV_TIME_BASE); } if (flv->video_par) { put_amf_string(pb, "width"); put_amf_double(pb, flv->video_par->width); put_amf_string(pb, "height"); put_amf_double(pb, flv->video_par->height); put_amf_string(pb, "videodatarate"); put_amf_double(pb, flv->video_par->bit_rate / 1024.0); if (flv->framerate != 0.0) { put_amf_string(pb, "framerate"); put_amf_double(pb, flv->framerate); metadata_count++; } put_amf_string(pb, "videocodecid"); put_amf_double(pb, flv->video_par->codec_tag); } if (flv->audio_par) { put_amf_string(pb, "audiodatarate"); put_amf_double(pb, flv->audio_par->bit_rate / 1024.0); put_amf_string(pb, "audiosamplerate"); put_amf_double(pb, flv->audio_par->sample_rate); put_amf_string(pb, "audiosamplesize"); put_amf_double(pb, flv->audio_par->codec_id == AV_CODEC_ID_PCM_U8 ? 
8 : 16); put_amf_string(pb, "stereo"); put_amf_bool(pb, flv->audio_par->channels == 2); put_amf_string(pb, "audiocodecid"); put_amf_double(pb, flv->audio_par->codec_tag); } if (flv->data_par) { put_amf_string(pb, "datastream"); put_amf_double(pb, 0.0); } ff_standardize_creation_time(s); while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) { if( !strcmp(tag->key, "width") ||!strcmp(tag->key, "height") ||!strcmp(tag->key, "videodatarate") ||!strcmp(tag->key, "framerate") ||!strcmp(tag->key, "videocodecid") ||!strcmp(tag->key, "audiodatarate") ||!strcmp(tag->key, "audiosamplerate") ||!strcmp(tag->key, "audiosamplesize") ||!strcmp(tag->key, "stereo") ||!strcmp(tag->key, "audiocodecid") ||!strcmp(tag->key, "duration") ||!strcmp(tag->key, "onMetaData") ||!strcmp(tag->key, "datasize") ||!strcmp(tag->key, "lasttimestamp") ||!strcmp(tag->key, "totalframes") ||!strcmp(tag->key, "hasAudio") ||!strcmp(tag->key, "hasVideo") ||!strcmp(tag->key, "hasCuePoints") ||!strcmp(tag->key, "hasMetadata") ||!strcmp(tag->key, "hasKeyframes") ){ av_log(s, AV_LOG_DEBUG, "Ignoring metadata for %s\n", tag->key); continue; } put_amf_string(pb, tag->key); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, tag->value); metadata_count++; } if (write_duration_filesize) { put_amf_string(pb, "filesize"); flv->filesize_offset = avio_tell(pb); put_amf_double(pb, 0); // delayed write } if (flv->flags & FLV_ADD_KEYFRAME_INDEX) { flv->acurframeindex = 0; flv->keyframe_index_size = 0; put_amf_string(pb, "hasVideo"); put_amf_bool(pb, !!flv->video_par); metadata_count++; put_amf_string(pb, "hasKeyframes"); put_amf_bool(pb, 1); metadata_count++; put_amf_string(pb, "hasAudio"); put_amf_bool(pb, !!flv->audio_par); metadata_count++; put_amf_string(pb, "hasMetadata"); put_amf_bool(pb, 1); metadata_count++; put_amf_string(pb, "canSeekToEnd"); put_amf_bool(pb, 1); metadata_count++; put_amf_string(pb, "datasize"); flv->datasize_offset = avio_tell(pb); flv->datasize = 0; put_amf_double(pb, flv->datasize); metadata_count++; put_amf_string(pb, "videosize"); flv->videosize_offset = avio_tell(pb); flv->videosize = 0; put_amf_double(pb, flv->videosize); metadata_count++; put_amf_string(pb, "audiosize"); flv->audiosize_offset = avio_tell(pb); flv->audiosize = 0; put_amf_double(pb, flv->audiosize); metadata_count++; put_amf_string(pb, "lasttimestamp"); flv->lasttimestamp_offset = avio_tell(pb); flv->lasttimestamp = 0; put_amf_double(pb, 0); metadata_count++; put_amf_string(pb, "lastkeyframetimestamp"); flv->lastkeyframetimestamp_offset = avio_tell(pb); flv->lastkeyframetimestamp = 0; put_amf_double(pb, 0); metadata_count++; put_amf_string(pb, "lastkeyframelocation"); flv->lastkeyframelocation_offset = avio_tell(pb); flv->lastkeyframelocation = 0; put_amf_double(pb, 0); metadata_count++; put_amf_string(pb, "keyframes"); put_amf_byte(pb, AMF_DATA_TYPE_OBJECT); metadata_count++; flv->keyframes_info_offset = avio_tell(pb); } put_amf_string(pb, ""); avio_w8(pb, AMF_END_OF_OBJECT); /* write total size of tag */ flv->metadata_totalsize = avio_tell(pb) - flv->metadata_size_pos - 10; avio_seek(pb, metadata_count_pos, SEEK_SET); avio_wb32(pb, metadata_count); avio_seek(pb, flv->metadata_size_pos, SEEK_SET); avio_wb24(pb, flv->metadata_totalsize); avio_skip(pb, flv->metadata_totalsize + 10 - 3); flv->metadata_totalsize_pos = avio_tell(pb); avio_wb32(pb, flv->metadata_totalsize + 11); } static int unsupported_codec(AVFormatContext *s, const char* type, int codec_id) { const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id); 
av_log(s, AV_LOG_ERROR, "%s codec %s not compatible with flv\n", type, desc ? desc->name : "unknown"); return AVERROR(ENOSYS); } static void flv_write_codec_header(AVFormatContext* s, AVCodecParameters* par, int64_t ts) { int64_t data_size; AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; if (par->codec_id == AV_CODEC_ID_AAC || par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) { int64_t pos; avio_w8(pb, par->codec_type == AVMEDIA_TYPE_VIDEO ? FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO); avio_wb24(pb, 0); // size patched later put_timestamp(pb, ts); avio_wb24(pb, 0); // streamid pos = avio_tell(pb); if (par->codec_id == AV_CODEC_ID_AAC) { avio_w8(pb, get_audio_flags(s, par)); avio_w8(pb, 0); // AAC sequence header if (!par->extradata_size && (flv->flags & FLV_AAC_SEQ_HEADER_DETECT)) { PutBitContext pbc; int samplerate_index; int channels = flv->audio_par->channels - (flv->audio_par->channels == 8 ? 1 : 0); uint8_t data[2]; for (samplerate_index = 0; samplerate_index < 16; samplerate_index++) if (flv->audio_par->sample_rate == mpeg4audio_sample_rates[samplerate_index]) break; init_put_bits(&pbc, data, sizeof(data)); put_bits(&pbc, 5, flv->audio_par->profile + 1); //profile put_bits(&pbc, 4, samplerate_index); //sample rate index put_bits(&pbc, 4, channels); put_bits(&pbc, 1, 0); //frame length - 1024 samples put_bits(&pbc, 1, 0); //does not depend on core coder put_bits(&pbc, 1, 0); //is not extension flush_put_bits(&pbc); avio_w8(pb, data[0]); avio_w8(pb, data[1]); av_log(s, AV_LOG_WARNING, "AAC sequence header: %02x %02x.\n", data[0], data[1]); } avio_write(pb, par->extradata, par->extradata_size); } else { avio_w8(pb, par->codec_tag | FLV_FRAME_KEY); // flags avio_w8(pb, 0); // AVC sequence header avio_wb24(pb, 0); // composition time ff_isom_write_avcc(pb, par->extradata, par->extradata_size); } data_size = avio_tell(pb) - pos; avio_seek(pb, -data_size - 10, SEEK_CUR); avio_wb24(pb, data_size); avio_skip(pb, data_size + 10 - 3); avio_wb32(pb, data_size + 11); // previous tag size } } static int flv_append_keyframe_info(AVFormatContext *s, FLVContext *flv, double ts, int64_t pos) { FLVFileposition *position = av_malloc(sizeof(FLVFileposition)); if (!position) { av_log(s, AV_LOG_WARNING, "no mem for add keyframe index!\n"); return AVERROR(ENOMEM); } position->keyframe_timestamp = ts; position->keyframe_position = pos; if (!flv->filepositions_count) { flv->filepositions = position; flv->head_filepositions = flv->filepositions; position->next = NULL; } else { flv->filepositions->next = position; position->next = NULL; flv->filepositions = flv->filepositions->next; } flv->filepositions_count++; return 0; } static int shift_data(AVFormatContext *s) { int ret = 0; int n = 0; int64_t metadata_size = 0; FLVContext *flv = s->priv_data; int64_t pos, pos_end = avio_tell(s->pb); uint8_t *buf, *read_buf[2]; int read_buf_id = 0; int read_size[2]; AVIOContext *read_pb; metadata_size = flv->filepositions_count * 9 * 2 + 10; /* filepositions and times value */ metadata_size += 2 + 13; /* filepositions String */ metadata_size += 2 + 5; /* times String */ metadata_size += 3; /* Object end */ flv->keyframe_index_size = metadata_size; if (metadata_size < 0) return metadata_size; buf = av_malloc_array(metadata_size, 2); if (!buf) { return AVERROR(ENOMEM); } read_buf[0] = buf; read_buf[1] = buf + metadata_size; avio_seek(s->pb, flv->metadata_size_pos, SEEK_SET); avio_wb24(s->pb, flv->metadata_totalsize + metadata_size); avio_seek(s->pb, flv->metadata_totalsize_pos, SEEK_SET); 
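    /* Grow the sizes previously written by write_metadata(): the 24-bit
     * DataSize in the metadata tag header was patched above, and the
     * 32-bit PreviousTagSize trailer is patched below, both enlarged by
     * the room the keyframe index will occupy. */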
avio_wb32(s->pb, flv->metadata_totalsize + 11 + metadata_size); avio_seek(s->pb, pos_end, SEEK_SET); /* Shift the data: the AVIO context of the output can only be used for * writing, so we re-open the same output, but for reading. It also avoids * a read/seek/write/seek back and forth. */ avio_flush(s->pb); ret = s->io_open(s, &read_pb, s->url, AVIO_FLAG_READ, NULL); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Unable to re-open %s output file for " "the second pass (add_keyframe_index)\n", s->url); goto end; } /* mark the end of the shift to up to the last data we wrote, and get ready * for writing */ pos_end = avio_tell(s->pb); avio_seek(s->pb, flv->keyframes_info_offset + metadata_size, SEEK_SET); /* start reading at where the keyframe index information will be placed */ avio_seek(read_pb, flv->keyframes_info_offset, SEEK_SET); pos = avio_tell(read_pb); #define READ_BLOCK do { \ read_size[read_buf_id] = avio_read(read_pb, read_buf[read_buf_id], metadata_size); \ read_buf_id ^= 1; \ } while (0) /* shift data by chunk of at most keyframe *filepositions* and *times* size */ READ_BLOCK; do { READ_BLOCK; n = read_size[read_buf_id]; if (n < 0) break; avio_write(s->pb, read_buf[read_buf_id], n); pos += n; } while (pos <= pos_end); ff_format_io_close(s, &read_pb); end: av_free(buf); return ret; } static int flv_write_header(AVFormatContext *s) { int i; AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; for (i = 0; i < s->nb_streams; i++) { AVCodecParameters *par = s->streams[i]->codecpar; FLVStreamContext *sc; switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: if (s->streams[i]->avg_frame_rate.den && s->streams[i]->avg_frame_rate.num) { flv->framerate = av_q2d(s->streams[i]->avg_frame_rate); } if (flv->video_par) { av_log(s, AV_LOG_ERROR, "at most one video stream is supported in flv\n"); return AVERROR(EINVAL); } flv->video_par = par; if (!ff_codec_get_tag(flv_video_codec_ids, par->codec_id)) return unsupported_codec(s, "Video", par->codec_id); if (par->codec_id == AV_CODEC_ID_MPEG4 || par->codec_id == AV_CODEC_ID_H263) { int error = s->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL; av_log(s, error ? 
AV_LOG_ERROR : AV_LOG_WARNING, "Codec %s is not supported in the official FLV specification,\n", avcodec_get_name(par->codec_id)); if (error) { av_log(s, AV_LOG_ERROR, "use vstrict=-1 / -strict -1 to use it anyway.\n"); return AVERROR(EINVAL); } } else if (par->codec_id == AV_CODEC_ID_VP6) { av_log(s, AV_LOG_WARNING, "Muxing VP6 in flv will produce flipped video on playback.\n"); } break; case AVMEDIA_TYPE_AUDIO: if (flv->audio_par) { av_log(s, AV_LOG_ERROR, "at most one audio stream is supported in flv\n"); return AVERROR(EINVAL); } flv->audio_par = par; if (get_audio_flags(s, par) < 0) return unsupported_codec(s, "Audio", par->codec_id); if (par->codec_id == AV_CODEC_ID_PCM_S16BE) av_log(s, AV_LOG_WARNING, "16-bit big-endian audio in flv is valid but most likely unplayable (hardware dependent); use s16le\n"); break; case AVMEDIA_TYPE_DATA: if (par->codec_id != AV_CODEC_ID_TEXT && par->codec_id != AV_CODEC_ID_NONE) return unsupported_codec(s, "Data", par->codec_id); flv->data_par = par; break; case AVMEDIA_TYPE_SUBTITLE: if (par->codec_id != AV_CODEC_ID_TEXT) { av_log(s, AV_LOG_ERROR, "Subtitle codec '%s' for stream %d is not compatible with FLV\n", avcodec_get_name(par->codec_id), i); return AVERROR_INVALIDDATA; } flv->data_par = par; break; default: av_log(s, AV_LOG_ERROR, "Codec type '%s' for stream %d is not compatible with FLV\n", av_get_media_type_string(par->codec_type), i); return AVERROR(EINVAL); } avpriv_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */ sc = av_mallocz(sizeof(FLVStreamContext)); if (!sc) return AVERROR(ENOMEM); s->streams[i]->priv_data = sc; sc->last_ts = -1; } flv->delay = AV_NOPTS_VALUE; avio_write(pb, "FLV", 3); avio_w8(pb, 1); avio_w8(pb, FLV_HEADER_FLAG_HASAUDIO * !!flv->audio_par + FLV_HEADER_FLAG_HASVIDEO * !!flv->video_par); avio_wb32(pb, 9); avio_wb32(pb, 0); for (i = 0; i < s->nb_streams; i++) if (s->streams[i]->codecpar->codec_tag == 5) { avio_w8(pb, 8); // message type avio_wb24(pb, 0); // include flags avio_wb24(pb, 0); // time stamp avio_wb32(pb, 0); // reserved avio_wb32(pb, 11); // size flv->reserved = 5; } if (flv->flags & FLV_NO_METADATA) { pb->seekable = 0; } else { write_metadata(s, 0); } for (i = 0; i < s->nb_streams; i++) { flv_write_codec_header(s, s->streams[i]->codecpar, 0); } flv->datastart_offset = avio_tell(pb); return 0; } static int flv_write_trailer(AVFormatContext *s) { int64_t file_size; AVIOContext *pb = s->pb; FLVContext *flv = s->priv_data; int build_keyframes_idx = flv->flags & FLV_ADD_KEYFRAME_INDEX; int i, res; int64_t cur_pos = avio_tell(s->pb); if (build_keyframes_idx) { FLVFileposition *newflv_posinfo, *p; avio_seek(pb, flv->videosize_offset, SEEK_SET); put_amf_double(pb, flv->videosize); avio_seek(pb, flv->audiosize_offset, SEEK_SET); put_amf_double(pb, flv->audiosize); avio_seek(pb, flv->lasttimestamp_offset, SEEK_SET); put_amf_double(pb, flv->lasttimestamp); avio_seek(pb, flv->lastkeyframetimestamp_offset, SEEK_SET); put_amf_double(pb, flv->lastkeyframetimestamp); avio_seek(pb, flv->lastkeyframelocation_offset, SEEK_SET); put_amf_double(pb, flv->lastkeyframelocation + flv->keyframe_index_size); avio_seek(pb, cur_pos, SEEK_SET); res = shift_data(s); if (res < 0) { goto end; } avio_seek(pb, flv->keyframes_info_offset, SEEK_SET); put_amf_string(pb, "filepositions"); put_amf_dword_array(pb, flv->filepositions_count); for (newflv_posinfo = flv->head_filepositions; newflv_posinfo; newflv_posinfo = newflv_posinfo->next) { put_amf_double(pb, newflv_posinfo->keyframe_position + flv->keyframe_index_size); } 
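        /* The "times" array mirrors "filepositions": one timestamp (in
         * seconds) per file offset; players pair the two arrays by index
         * when seeking to keyframes. */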
put_amf_string(pb, "times"); put_amf_dword_array(pb, flv->filepositions_count); for (newflv_posinfo = flv->head_filepositions; newflv_posinfo; newflv_posinfo = newflv_posinfo->next) { put_amf_double(pb, newflv_posinfo->keyframe_timestamp); } newflv_posinfo = flv->head_filepositions; while (newflv_posinfo) { p = newflv_posinfo->next; if (p) { newflv_posinfo->next = p->next; av_free(p); p = NULL; } else { av_free(newflv_posinfo); newflv_posinfo = NULL; } } put_amf_string(pb, ""); avio_w8(pb, AMF_END_OF_OBJECT); avio_seek(pb, cur_pos + flv->keyframe_index_size, SEEK_SET); } end: if (flv->flags & FLV_NO_SEQUENCE_END) { av_log(s, AV_LOG_DEBUG, "FLV no sequence end mode open\n"); } else { /* Add EOS tag */ for (i = 0; i < s->nb_streams; i++) { AVCodecParameters *par = s->streams[i]->codecpar; FLVStreamContext *sc = s->streams[i]->priv_data; if (par->codec_type == AVMEDIA_TYPE_VIDEO && (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4)) put_avc_eos_tag(pb, sc->last_ts); } } file_size = avio_tell(pb); if (build_keyframes_idx) { flv->datasize = file_size - flv->datastart_offset; avio_seek(pb, flv->datasize_offset, SEEK_SET); put_amf_double(pb, flv->datasize); } if (!(flv->flags & FLV_NO_METADATA)) { if (!(flv->flags & FLV_NO_DURATION_FILESIZE)) { /* update information */ if (avio_seek(pb, flv->duration_offset, SEEK_SET) < 0) { av_log(s, AV_LOG_WARNING, "Failed to update header with correct duration.\n"); } else { put_amf_double(pb, flv->duration / (double)1000); } if (avio_seek(pb, flv->filesize_offset, SEEK_SET) < 0) { av_log(s, AV_LOG_WARNING, "Failed to update header with correct filesize.\n"); } else { put_amf_double(pb, file_size); } } } return 0; } static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) { AVIOContext *pb = s->pb; AVCodecParameters *par = s->streams[pkt->stream_index]->codecpar; FLVContext *flv = s->priv_data; FLVStreamContext *sc = s->streams[pkt->stream_index]->priv_data; unsigned ts; int size = pkt->size; uint8_t *data = NULL; int flags = -1, flags_size, ret; int64_t cur_offset = avio_tell(pb); if (par->codec_type == AVMEDIA_TYPE_AUDIO && !pkt->size) { av_log(s, AV_LOG_WARNING, "Empty audio Packet\n"); return AVERROR(EINVAL); } if (par->codec_id == AV_CODEC_ID_VP6F || par->codec_id == AV_CODEC_ID_VP6A || par->codec_id == AV_CODEC_ID_VP6 || par->codec_id == AV_CODEC_ID_AAC) flags_size = 2; else if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) flags_size = 5; else flags_size = 1; if (par->codec_id == AV_CODEC_ID_AAC || par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) { int side_size = 0; uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size); if (side && side_size > 0 && (side_size != par->extradata_size || memcmp(side, par->extradata, side_size))) { av_free(par->extradata); par->extradata = av_mallocz(side_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!par->extradata) { par->extradata_size = 0; return AVERROR(ENOMEM); } memcpy(par->extradata, side, side_size); par->extradata_size = side_size; flv_write_codec_header(s, par, pkt->dts); } } if (flv->delay == AV_NOPTS_VALUE) flv->delay = -pkt->dts; if (pkt->dts < -flv->delay) { av_log(s, AV_LOG_WARNING, "Packets are not in the proper order with respect to DTS\n"); return AVERROR(EINVAL); } ts = pkt->dts; if (s->event_flags & AVSTREAM_EVENT_FLAG_METADATA_UPDATED) { write_metadata(s, ts); s->event_flags &= ~AVSTREAM_EVENT_FLAG_METADATA_UPDATED; } avio_write_marker(pb, av_rescale(ts, AV_TIME_BASE, 1000), pkt->flags & 
AV_PKT_FLAG_KEY && (flv->video_par ? par->codec_type == AVMEDIA_TYPE_VIDEO : 1) ? AVIO_DATA_MARKER_SYNC_POINT : AVIO_DATA_MARKER_BOUNDARY_POINT); switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: avio_w8(pb, FLV_TAG_TYPE_VIDEO); flags = ff_codec_get_tag(flv_video_codec_ids, par->codec_id); flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER; break; case AVMEDIA_TYPE_AUDIO: flags = get_audio_flags(s, par); av_assert0(size); avio_w8(pb, FLV_TAG_TYPE_AUDIO); break; case AVMEDIA_TYPE_SUBTITLE: case AVMEDIA_TYPE_DATA: avio_w8(pb, FLV_TAG_TYPE_META); break; default: return AVERROR(EINVAL); } if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) { /* check if extradata looks like mp4 formatted */ if (par->extradata_size > 0 && *(uint8_t*)par->extradata != 1) if ((ret = ff_avc_parse_nal_units_buf(pkt->data, &data, &size)) < 0) return ret; } else if (par->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 && (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) { if (!s->streams[pkt->stream_index]->nb_frames) { av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: " "use the audio bitstream filter 'aac_adtstoasc' to fix it " "('-bsf:a aac_adtstoasc' option with ffmpeg)\n"); return AVERROR_INVALIDDATA; } av_log(s, AV_LOG_WARNING, "aac bitstream error\n"); } /* check Speex packet duration */ if (par->codec_id == AV_CODEC_ID_SPEEX && ts - sc->last_ts > 160) av_log(s, AV_LOG_WARNING, "Warning: Speex stream has more than " "8 frames per packet. Adobe Flash " "Player cannot handle this!\n"); if (sc->last_ts < ts) sc->last_ts = ts; if (size + flags_size >= 1<<24) { av_log(s, AV_LOG_ERROR, "Too large packet with size %u >= %u\n", size + flags_size, 1<<24); return AVERROR(EINVAL); } avio_wb24(pb, size + flags_size); put_timestamp(pb, ts); avio_wb24(pb, flv->reserved); if (par->codec_type == AVMEDIA_TYPE_DATA || par->codec_type == AVMEDIA_TYPE_SUBTITLE ) { int data_size; int64_t metadata_size_pos = avio_tell(pb); if (par->codec_id == AV_CODEC_ID_TEXT) { // legacy FFmpeg magic? avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, "onTextData"); avio_w8(pb, AMF_DATA_TYPE_MIXEDARRAY); avio_wb32(pb, 2); put_amf_string(pb, "type"); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, "Text"); put_amf_string(pb, "text"); avio_w8(pb, AMF_DATA_TYPE_STRING); put_amf_string(pb, pkt->data); put_amf_string(pb, ""); avio_w8(pb, AMF_END_OF_OBJECT); } else { // just pass the metadata through avio_write(pb, data ? data : pkt->data, size); } /* write total size of tag */ data_size = avio_tell(pb) - metadata_size_pos; avio_seek(pb, metadata_size_pos - 10, SEEK_SET); avio_wb24(pb, data_size); avio_seek(pb, data_size + 10 - 3, SEEK_CUR); avio_wb32(pb, data_size + 11); } else { av_assert1(flags>=0); avio_w8(pb,flags); if (par->codec_id == AV_CODEC_ID_VP6) avio_w8(pb,0); if (par->codec_id == AV_CODEC_ID_VP6F || par->codec_id == AV_CODEC_ID_VP6A) { if (par->extradata_size) avio_w8(pb, par->extradata[0]); else avio_w8(pb, ((FFALIGN(par->width, 16) - par->width) << 4) | (FFALIGN(par->height, 16) - par->height)); } else if (par->codec_id == AV_CODEC_ID_AAC) avio_w8(pb, 1); // AAC raw else if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_MPEG4) { avio_w8(pb, 1); // AVC NALU avio_wb24(pb, pkt->pts - pkt->dts); } avio_write(pb, data ? 
data : pkt->data, size); avio_wb32(pb, size + flags_size + 11); // previous tag size flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration); } if (flv->flags & FLV_ADD_KEYFRAME_INDEX) { switch (par->codec_type) { case AVMEDIA_TYPE_VIDEO: flv->videosize += (avio_tell(pb) - cur_offset); flv->lasttimestamp = flv->acurframeindex / flv->framerate; if (pkt->flags & AV_PKT_FLAG_KEY) { double ts = flv->acurframeindex / flv->framerate; int64_t pos = cur_offset; flv->lastkeyframetimestamp = flv->acurframeindex / flv->framerate; flv->lastkeyframelocation = pos; flv_append_keyframe_info(s, flv, ts, pos); } flv->acurframeindex++; break; case AVMEDIA_TYPE_AUDIO: flv->audiosize += (avio_tell(pb) - cur_offset); break; default: av_log(s, AV_LOG_WARNING, "par->codec_type is type = [%d]\n", par->codec_type); break; } } av_free(data); return pb->error; } static const AVOption options[] = { { "flvflags", "FLV muxer flags", offsetof(FLVContext, flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "aac_seq_header_detect", "Put AAC sequence header based on stream data", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_AAC_SEQ_HEADER_DETECT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "no_sequence_end", "disable sequence end for FLV", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_NO_SEQUENCE_END}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "no_metadata", "disable metadata for FLV", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_NO_METADATA}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "no_duration_filesize", "disable duration and filesize zero value metadata for FLV", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_NO_DURATION_FILESIZE}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { "add_keyframe_index", "Add keyframe index metadata", 0, AV_OPT_TYPE_CONST, {.i64 = FLV_ADD_KEYFRAME_INDEX}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "flvflags" }, { NULL }, }; static const AVClass flv_muxer_class = { .class_name = "flv muxer", .item_name = av_default_item_name, .option = options, .version = LIBAVUTIL_VERSION_INT, }; AVOutputFormat ff_flv_muxer = { .name = "flv", .long_name = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"), .mime_type = "video/x-flv", .extensions = "flv", .priv_data_size = sizeof(FLVContext), .audio_codec = CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_ADPCM_SWF, .video_codec = AV_CODEC_ID_FLV1, .write_header = flv_write_header, .write_packet = flv_write_packet, .write_trailer = flv_write_trailer, .codec_tag = (const AVCodecTag* const []) { flv_video_codec_ids, flv_audio_codec_ids, 0 }, .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS | AVFMT_TS_NONSTRICT, .priv_class = &flv_muxer_class, };
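/*
 * Illustrative sketch (not part of the muxer above): how the 11-byte FLV
 * tag header and the trailing PreviousTagSize field emitted by
 * flv_write_packet() are laid out. The helper names (wb24, flv_tag_header)
 * and the use of a plain byte buffer instead of an AVIOContext are
 * assumptions made for this standalone example.
 */
#include <stdint.h>
#include <stdio.h>

/* Big-endian 24-bit write, mirroring avio_wb24(). */
static void wb24(uint8_t *p, uint32_t v)
{
    p[0] = (v >> 16) & 0xff;
    p[1] = (v >>  8) & 0xff;
    p[2] =  v        & 0xff;
}

/* Fill an 11-byte FLV tag header: tag type, 24-bit body size, a 32-bit
 * timestamp split into low 24 bits plus an extension byte (as the muxer's
 * put_timestamp() does), and an always-zero stream ID. */
static void flv_tag_header(uint8_t hdr[11], uint8_t type,
                           uint32_t body_size, uint32_t ts_ms)
{
    hdr[0] = type;                 /* 8 = audio, 9 = video, 18 = script data */
    wb24(hdr + 1, body_size);      /* must stay below 1 << 24                */
    wb24(hdr + 4, ts_ms & 0xffffff);
    hdr[7] = (ts_ms >> 24) & 0xff; /* TimestampExtended: upper 8 bits        */
    wb24(hdr + 8, 0);              /* StreamID is always 0                   */
}

int main(void)
{
    uint8_t hdr[11];
    uint32_t body_size = 5 + 1024; /* e.g. AVC flags_size plus payload */

    flv_tag_header(hdr, 9, body_size, 40); /* video tag at t = 40 ms */
    for (int i = 0; i < 11; i++)
        printf("%02x ", hdr[i]);
    /* The 32-bit PreviousTagSize written after the body covers both the
     * header and the body, hence the "+ 11" seen in the muxer. */
    printf("| PreviousTagSize = %u\n", (unsigned)(11 + body_size));
    return 0;
}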
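/*
 * Illustrative sketch (standalone, assumed helper names): the AMF0 byte
 * encodings behind put_amf_string() and put_amf_double() as used for the
 * onMetaData keyframe index above. Property names are written as a bare
 * 16-bit length plus bytes (no type marker; the muxer writes
 * AMF_DATA_TYPE_STRING separately when a marker is needed), while numbers
 * are a 0x00 marker followed by the IEEE-754 double in big-endian order.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t amf_put_name(uint8_t *out, const char *s)
{
    size_t len = strlen(s);
    out[0] = (uint8_t)((len >> 8) & 0xff); /* 16-bit big-endian length */
    out[1] = (uint8_t)(len & 0xff);
    memcpy(out + 2, s, len);
    return 2 + len;
}

static size_t amf_put_number(uint8_t *out, double d)
{
    uint64_t bits;
    memcpy(&bits, &d, sizeof(bits)); /* assumes IEEE-754 64-bit doubles */
    out[0] = 0x00;                   /* AMF0 NUMBER type marker         */
    for (int i = 0; i < 8; i++)      /* most significant byte first     */
        out[1 + i] = (uint8_t)(bits >> (56 - 8 * i));
    return 9;
}

int main(void)
{
    uint8_t buf[64];
    size_t n = amf_put_name(buf, "duration");
    n += amf_put_number(buf + n, 12.5);
    for (size_t i = 0; i < n; i++)
        printf("%02x", buf[i]);
    /* Expect: 0008, the "duration" bytes, then 00 4029000000000000 (12.5). */
    printf("\n");
    return 0;
}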
/* Copyright (c) 2001 Matej Pfajfar.
 * Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2016, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file connection_edge.c
 * \brief Handle edge streams.
 *
 * An edge_connection_t is a subtype of a connection_t, and represents two
 * critical concepts in Tor: a stream, and an edge connection. From the Tor
 * protocol's point of view, a stream is a bi-directional channel that is
 * multiplexed on a single circuit. Each stream on a circuit is identified
 * with a separate 16-bit stream ID, local to the (circuit,exit) pair.
 * Streams are created in response to client requests.
 *
 * An edge connection is one thing that can implement a stream: it is either a
 * TCP application socket that has arrived via (e.g.) a SOCKS request, or an
 * exit connection.
 *
 * Not every instance of edge_connection_t truly represents an edge
 * connection, however. (Sorry!) We also create edge_connection_t objects for
 * streams that we will not be handling with TCP. The types of these streams
 * are:
 * <ul>
 * <li>DNS lookup streams, created on the client side in response to
 *   a UDP DNS request received on a DNSPort, or a RESOLVE command
 *   on a controller.
 * <li>DNS lookup streams, created on the exit side in response to
 *   a RELAY_RESOLVE cell from a client.
 * <li>Tunneled directory streams, created on the directory cache side
 *   in response to a RELAY_BEGINDIR cell. These streams attach directly
 *   to a dir_connection_t object without ever using TCP.
 * </ul>
 *
 * This module handles general-purpose functionality having to do with
 * edge_connection_t. On the client side, it accepts various types of
 * application requests on SocksPorts, TransPorts, and NATDPorts, and
 * creates streams appropriately.
 *
 * This module is also responsible for implementing stream isolation:
 * ensuring that streams that should not be linkable to one another are
 * kept to different circuits.
 *
 * On the exit side, this module handles the various stream-creating
 * types of RELAY cells by launching appropriate outgoing connections,
 * DNS requests, or directory connection objects.
 *
 * And for all edge connections, this module is responsible for handling
 * incoming and outgoing data as it arrives or leaves in the relay.c
 * module. (Outgoing data will be packaged in
 * connection_edge_process_inbuf() as it calls
 * connection_edge_package_raw_inbuf(); incoming data from RELAY_DATA
 * cells is applied in connection_edge_process_relay_cell().)
**/ #define CONNECTION_EDGE_PRIVATE #include "or.h" #include "backtrace.h" #include "addressmap.h" #include "buffers.h" #include "channel.h" #include "circpathbias.h" #include "circuitlist.h" #include "circuituse.h" #include "config.h" #include "connection.h" #include "connection_edge.h" #include "connection_or.h" #include "control.h" #include "dns.h" #include "dnsserv.h" #include "directory.h" #include "dirserv.h" #include "hibernate.h" #include "hs_common.h" #include "main.h" #include "nodelist.h" #include "policies.h" #include "reasons.h" #include "relay.h" #include "rendclient.h" #include "rendcommon.h" #include "rendservice.h" #include "rephist.h" #include "router.h" #include "routerlist.h" #include "routerset.h" #include "circuitbuild.h" #ifdef HAVE_LINUX_TYPES_H #include <linux/types.h> #endif #ifdef HAVE_LINUX_NETFILTER_IPV4_H #include <linux/netfilter_ipv4.h> #define TRANS_NETFILTER #define TRANS_NETFILTER_IPV4 #endif #ifdef HAVE_LINUX_IF_H #include <linux/if.h> #endif #ifdef HAVE_LINUX_NETFILTER_IPV6_IP6_TABLES_H #include <linux/netfilter_ipv6/ip6_tables.h> #if defined(IP6T_SO_ORIGINAL_DST) #define TRANS_NETFILTER #define TRANS_NETFILTER_IPV6 #endif #endif #if defined(HAVE_NET_IF_H) && defined(HAVE_NET_PFVAR_H) #include <net/if.h> #include <net/pfvar.h> #define TRANS_PF #endif #ifdef IP_TRANSPARENT #define TRANS_TPROXY #endif #define SOCKS4_GRANTED 90 #define SOCKS4_REJECT 91 static int connection_ap_handshake_process_socks(entry_connection_t *conn); static int connection_ap_process_natd(entry_connection_t *conn); static int connection_exit_connect_dir(edge_connection_t *exitconn); static int consider_plaintext_ports(entry_connection_t *conn, uint16_t port); static int connection_ap_supports_optimistic_data(const entry_connection_t *); /** An AP stream has failed/finished. If it hasn't already sent back * a socks reply, send one now (based on endreason). Also set * has_sent_end to 1, and mark the conn. */ MOCK_IMPL(void, connection_mark_unattached_ap_,(entry_connection_t *conn, int endreason, int line, const char *file)) { connection_t *base_conn = ENTRY_TO_CONN(conn); edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn); tor_assert(base_conn->type == CONN_TYPE_AP); ENTRY_TO_EDGE_CONN(conn)->edge_has_sent_end = 1; /* no circ yet */ /* If this is a rendezvous stream and it is failing without ever * being attached to a circuit, assume that an attempt to connect to * the destination hidden service has just ended. * * XXXX This condition doesn't limit to only streams failing * without ever being attached. That sloppiness should be harmless, * but we should fix it someday anyway. */ if ((edge_conn->on_circuit != NULL || edge_conn->edge_has_sent_end) && connection_edge_is_rendezvous_stream(edge_conn)) { rend_client_note_connection_attempt_ended(edge_conn->rend_data); } if (base_conn->marked_for_close) { /* This call will warn as appropriate. */ connection_mark_for_close_(base_conn, line, file); return; } if (!conn->socks_request->has_finished) { if (endreason & END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED) log_warn(LD_BUG, "stream (marked at %s:%d) sending two socks replies?", file, line); if (SOCKS_COMMAND_IS_CONNECT(conn->socks_request->command)) connection_ap_handshake_socks_reply(conn, NULL, 0, endreason); else if (SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command)) connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_ERROR_TRANSIENT, 0, NULL, -1, -1); else /* unknown or no handshake at all. send no response. 
*/ conn->socks_request->has_finished = 1; } connection_mark_and_flush_(base_conn, line, file); ENTRY_TO_EDGE_CONN(conn)->end_reason = endreason; } /** There was an EOF. Send an end and mark the connection for close. */ int connection_edge_reached_eof(edge_connection_t *conn) { if (connection_get_inbuf_len(TO_CONN(conn)) && connection_state_is_open(TO_CONN(conn))) { /* it still has stuff to process. don't let it die yet. */ return 0; } log_info(LD_EDGE,"conn (fd "TOR_SOCKET_T_FORMAT") reached eof. Closing.", conn->base_.s); if (!conn->base_.marked_for_close) { /* only mark it if not already marked. it's possible to * get the 'end' right around when the client hangs up on us. */ connection_edge_end(conn, END_STREAM_REASON_DONE); if (conn->base_.type == CONN_TYPE_AP) { /* eof, so don't send a socks reply back */ if (EDGE_TO_ENTRY_CONN(conn)->socks_request) EDGE_TO_ENTRY_CONN(conn)->socks_request->has_finished = 1; } connection_mark_for_close(TO_CONN(conn)); } return 0; } /** Handle new bytes on conn->inbuf based on state: * - If it's waiting for socks info, try to read another step of the * socks handshake out of conn->inbuf. * - If it's waiting for the original destination, fetch it. * - If it's open, then package more relay cells from the stream. * - Else, leave the bytes on inbuf alone for now. * * Mark and return -1 if there was an unexpected error with the conn, * else return 0. */ int connection_edge_process_inbuf(edge_connection_t *conn, int package_partial) { tor_assert(conn); switch (conn->base_.state) { case AP_CONN_STATE_SOCKS_WAIT: if (connection_ap_handshake_process_socks(EDGE_TO_ENTRY_CONN(conn)) <0) { /* already marked */ return -1; } return 0; case AP_CONN_STATE_NATD_WAIT: if (connection_ap_process_natd(EDGE_TO_ENTRY_CONN(conn)) < 0) { /* already marked */ return -1; } return 0; case AP_CONN_STATE_OPEN: case EXIT_CONN_STATE_OPEN: if (connection_edge_package_raw_inbuf(conn, package_partial, NULL) < 0) { /* (We already sent an end cell if possible) */ connection_mark_for_close(TO_CONN(conn)); return -1; } return 0; case AP_CONN_STATE_CONNECT_WAIT: if (connection_ap_supports_optimistic_data(EDGE_TO_ENTRY_CONN(conn))) { log_info(LD_EDGE, "data from edge while in '%s' state. Sending it anyway. " "package_partial=%d, buflen=%ld", conn_state_to_string(conn->base_.type, conn->base_.state), package_partial, (long)connection_get_inbuf_len(TO_CONN(conn))); if (connection_edge_package_raw_inbuf(conn, package_partial, NULL)<0) { /* (We already sent an end cell if possible) */ connection_mark_for_close(TO_CONN(conn)); return -1; } return 0; } /* Fall through if the connection is on a circuit without optimistic * data support. */ case EXIT_CONN_STATE_CONNECTING: case AP_CONN_STATE_RENDDESC_WAIT: case AP_CONN_STATE_CIRCUIT_WAIT: case AP_CONN_STATE_RESOLVE_WAIT: case AP_CONN_STATE_CONTROLLER_WAIT: log_info(LD_EDGE, "data from edge while in '%s' state. Leaving it on buffer.", conn_state_to_string(conn->base_.type, conn->base_.state)); return 0; } log_warn(LD_BUG,"Got unexpected state %d. Closing.",conn->base_.state); tor_fragile_assert(); connection_edge_end(conn, END_STREAM_REASON_INTERNAL); connection_mark_for_close(TO_CONN(conn)); return -1; } /** This edge needs to be closed, because its circuit has closed. * Mark it for close and return 0. */ int connection_edge_destroy(circid_t circ_id, edge_connection_t *conn) { if (!conn->base_.marked_for_close) { log_info(LD_EDGE, "CircID %u: At an edge. 
Marking connection for close.", (unsigned) circ_id);
    if (conn->base_.type == CONN_TYPE_AP) {
      entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
      connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_DESTROY);
      control_event_stream_bandwidth(conn);
      control_event_stream_status(entry_conn, STREAM_EVENT_CLOSED,
                                  END_STREAM_REASON_DESTROY);
      conn->end_reason |= END_STREAM_REASON_FLAG_ALREADY_SENT_CLOSED;
    } else {
      /* closing the circuit, nothing to send an END to */
      conn->edge_has_sent_end = 1;
      conn->end_reason = END_STREAM_REASON_DESTROY;
      conn->end_reason |= END_STREAM_REASON_FLAG_ALREADY_SENT_CLOSED;
      connection_mark_and_flush(TO_CONN(conn));
    }
  }
  conn->cpath_layer = NULL;
  conn->on_circuit = NULL;
  return 0;
}

/** Send a raw end cell to the stream with ID <b>stream_id</b> out over the
 * <b>circ</b> towards the hop identified with <b>cpath_layer</b>. If this
 * is not a client connection, set the relay end cell's reason for closing
 * as <b>reason</b> */
static int
relay_send_end_cell_from_edge(streamid_t stream_id, circuit_t *circ,
                              uint8_t reason, crypt_path_t *cpath_layer)
{
  char payload[1];

  if (CIRCUIT_PURPOSE_IS_CLIENT(circ->purpose)) {
    /* Never send the server an informative reason code; it doesn't need to
     * know why the client stream is failing. */
    reason = END_STREAM_REASON_MISC;
  }

  payload[0] = (char) reason;

  return relay_send_command_from_edge(stream_id, circ, RELAY_COMMAND_END,
                                      payload, 1, cpath_layer);
}

/* If the connection <b>conn</b> is attempting to connect to an external
 * destination that is a hidden service and the reason is a connection
 * refused or timeout, log it so the operator can take appropriate actions.
 * The log statement is a rate-limited warning. */
static void
warn_if_hs_unreachable(const edge_connection_t *conn, uint8_t reason)
{
  tor_assert(conn);

  if (conn->base_.type == CONN_TYPE_EXIT &&
      connection_edge_is_rendezvous_stream(conn) &&
      (reason == END_STREAM_REASON_CONNECTREFUSED ||
       reason == END_STREAM_REASON_TIMEOUT)) {
#define WARN_FAILED_HS_CONNECTION 300
    static ratelim_t warn_limit = RATELIM_INIT(WARN_FAILED_HS_CONNECTION);
    char *m;
    if ((m = rate_limit_log(&warn_limit, approx_time()))) {
      log_warn(LD_EDGE, "Onion service connection to %s failed (%s)",
               (conn->base_.socket_family == AF_UNIX) ?
               safe_str(conn->base_.address) :
               safe_str(fmt_addrport(&conn->base_.addr, conn->base_.port)),
               stream_end_reason_to_string(reason));
      tor_free(m);
    }
  }
}

/** Send a relay end cell from stream <b>conn</b> down conn's circuit, and
 * remember that we've done so. If this is not a client connection, set the
 * relay end cell's reason for closing as <b>reason</b>.
 *
 * Return -1 if this function has already been called on this conn,
 * else return 0.
 */
int
connection_edge_end(edge_connection_t *conn, uint8_t reason)
{
  char payload[RELAY_PAYLOAD_SIZE];
  size_t payload_len=1;
  circuit_t *circ;
  uint8_t control_reason = reason;

  if (conn->edge_has_sent_end) {
    log_warn(LD_BUG,"(Harmless.) Calling connection_edge_end (reason %d) "
             "on an already ended stream?", reason);
    tor_fragile_assert();
    return -1;
  }

  if (conn->base_.marked_for_close) {
    log_warn(LD_BUG,
             "called on conn that's already marked for close at %s:%d.",
             conn->base_.marked_for_close_file, conn->base_.marked_for_close);
    return 0;
  }

  circ = circuit_get_by_edge_conn(conn);
  if (circ && CIRCUIT_PURPOSE_IS_CLIENT(circ->purpose)) {
    /* If this is a client circuit, don't send the server an informative
     * reason code; it doesn't need to know why the client stream is
     * failing. */
    reason = END_STREAM_REASON_MISC;
  }

  payload[0] = (char)reason;
  if (reason == END_STREAM_REASON_EXITPOLICY &&
      !connection_edge_is_rendezvous_stream(conn)) {
    int addrlen;
    if (tor_addr_family(&conn->base_.addr) == AF_INET) {
      set_uint32(payload+1, tor_addr_to_ipv4n(&conn->base_.addr));
      addrlen = 4;
    } else {
      memcpy(payload+1, tor_addr_to_in6_addr8(&conn->base_.addr), 16);
      addrlen = 16;
    }
    set_uint32(payload+1+addrlen, htonl(dns_clip_ttl(conn->address_ttl)));
    payload_len += 4+addrlen;
  }

  if (circ && !circ->marked_for_close) {
    log_debug(LD_EDGE,"Sending end on conn (fd "TOR_SOCKET_T_FORMAT").",
              conn->base_.s);
    connection_edge_send_command(conn, RELAY_COMMAND_END,
                                 payload, payload_len);
    /* We'll log a warning if the connection was to a hidden service and
     * couldn't be made because the service wasn't available. */
    warn_if_hs_unreachable(conn, control_reason);
  } else {
    log_debug(LD_EDGE,"No circ to send end on conn "
              "(fd "TOR_SOCKET_T_FORMAT").",
              conn->base_.s);
  }

  conn->edge_has_sent_end = 1;
  conn->end_reason = control_reason;
  return 0;
}

/** An error has just occurred on an operation on an edge connection
 * <b>conn</b>. Extract the errno; convert it to an end reason, and send an
 * appropriate relay end cell to the other end of the connection's circuit.
 **/
int
connection_edge_end_errno(edge_connection_t *conn)
{
  uint8_t reason;
  tor_assert(conn);
  reason = errno_to_stream_end_reason(tor_socket_errno(conn->base_.s));
  return connection_edge_end(conn, reason);
}

/** We just wrote some data to <b>conn</b>; act appropriately.
 *
 * (That is, if it's open, consider sending a stream-level sendme cell if we
 * have just flushed enough.)
 */
int
connection_edge_flushed_some(edge_connection_t *conn)
{
  switch (conn->base_.state) {
    case AP_CONN_STATE_OPEN:
    case EXIT_CONN_STATE_OPEN:
      connection_edge_consider_sending_sendme(conn);
      break;
  }
  return 0;
}

/** Connection <b>conn</b> has finished writing and has no bytes left on
 * its outbuf.
 *
 * If it's in state 'open', stop writing, consider responding with a
 * sendme, and return.
 * Otherwise, stop writing and return.
 *
 * If <b>conn</b> is broken, mark it for close and return -1, else
 * return 0.
 */
int
connection_edge_finished_flushing(edge_connection_t *conn)
{
  tor_assert(conn);

  switch (conn->base_.state) {
    case AP_CONN_STATE_OPEN:
    case EXIT_CONN_STATE_OPEN:
      connection_edge_consider_sending_sendme(conn);
      return 0;
    case AP_CONN_STATE_SOCKS_WAIT:
    case AP_CONN_STATE_NATD_WAIT:
    case AP_CONN_STATE_RENDDESC_WAIT:
    case AP_CONN_STATE_CIRCUIT_WAIT:
    case AP_CONN_STATE_CONNECT_WAIT:
    case AP_CONN_STATE_CONTROLLER_WAIT:
    case AP_CONN_STATE_RESOLVE_WAIT:
      return 0;
    default:
      log_warn(LD_BUG, "Called in unexpected state %d.",conn->base_.state);
      tor_fragile_assert();
      return -1;
  }
  return 0;
}

/** Longest size for the relay payload of a RELAY_CONNECTED cell that we're
 * able to generate. */
/* 4 zero bytes; 1 type byte; 16 byte IPv6 address; 4 byte TTL. */
#define MAX_CONNECTED_CELL_PAYLOAD_LEN 25

/** Set the buffer at <b>payload_out</b> -- which must have at least
 * MAX_CONNECTED_CELL_PAYLOAD_LEN bytes available -- to the body of a
 * RELAY_CONNECTED cell indicating that we have connected to <b>addr</b>, and
 * that the name resolution that led us to <b>addr</b> will be valid for
 * <b>ttl</b> seconds. Return -1 on error, or the number of bytes used on
 * success.
*/ STATIC int connected_cell_format_payload(uint8_t *payload_out, const tor_addr_t *addr, uint32_t ttl) { const sa_family_t family = tor_addr_family(addr); int connected_payload_len; /* should be needless */ memset(payload_out, 0, MAX_CONNECTED_CELL_PAYLOAD_LEN); if (family == AF_INET) { set_uint32(payload_out, tor_addr_to_ipv4n(addr)); connected_payload_len = 4; } else if (family == AF_INET6) { set_uint32(payload_out, 0); set_uint8(payload_out + 4, 6); memcpy(payload_out + 5, tor_addr_to_in6_addr8(addr), 16); connected_payload_len = 21; } else { return -1; } set_uint32(payload_out + connected_payload_len, htonl(dns_clip_ttl(ttl))); connected_payload_len += 4; tor_assert(connected_payload_len <= MAX_CONNECTED_CELL_PAYLOAD_LEN); return connected_payload_len; } /** Connected handler for exit connections: start writing pending * data, deliver 'CONNECTED' relay cells as appropriate, and check * any pending data that may have been received. */ int connection_edge_finished_connecting(edge_connection_t *edge_conn) { connection_t *conn; tor_assert(edge_conn); tor_assert(edge_conn->base_.type == CONN_TYPE_EXIT); conn = TO_CONN(edge_conn); tor_assert(conn->state == EXIT_CONN_STATE_CONNECTING); log_info(LD_EXIT,"Exit connection to %s:%u (%s) established.", escaped_safe_str(conn->address), conn->port, safe_str(fmt_and_decorate_addr(&conn->addr))); rep_hist_note_exit_stream_opened(conn->port); conn->state = EXIT_CONN_STATE_OPEN; connection_watch_events(conn, READ_EVENT); /* stop writing, keep reading */ if (connection_get_outbuf_len(conn)) /* in case there are any queued relay * cells */ connection_start_writing(conn); /* deliver a 'connected' relay cell back through the circuit. */ if (connection_edge_is_rendezvous_stream(edge_conn)) { if (connection_edge_send_command(edge_conn, RELAY_COMMAND_CONNECTED, NULL, 0) < 0) return 0; /* circuit is closed, don't continue */ } else { uint8_t connected_payload[MAX_CONNECTED_CELL_PAYLOAD_LEN]; int connected_payload_len = connected_cell_format_payload(connected_payload, &conn->addr, edge_conn->address_ttl); if (connected_payload_len < 0) return -1; if (connection_edge_send_command(edge_conn, RELAY_COMMAND_CONNECTED, (char*)connected_payload, connected_payload_len) < 0) return 0; /* circuit is closed, don't continue */ } tor_assert(edge_conn->package_window > 0); /* in case the server has written anything */ return connection_edge_process_inbuf(edge_conn, 1); } /** A list of all the entry_connection_t * objects that are not marked * for close, and are in AP_CONN_STATE_CIRCUIT_WAIT. * * (Right now, we check in several places to make sure that this list is * correct. When it's incorrect, we'll fix it, and log a BUG message.) */ static smartlist_t *pending_entry_connections = NULL; static int untried_pending_connections = 0; /** Common code to connection_(ap|exit)_about_to_close. */ static void connection_edge_about_to_close(edge_connection_t *edge_conn) { if (!edge_conn->edge_has_sent_end) { connection_t *conn = TO_CONN(edge_conn); log_warn(LD_BUG, "(Harmless.) 
Edge connection (marked at %s:%d) " "hasn't sent end yet?", conn->marked_for_close_file, conn->marked_for_close); tor_fragile_assert(); } } /** Called when we're about to finally unlink and free an AP (client) * connection: perform necessary accounting and cleanup */ void connection_ap_about_to_close(entry_connection_t *entry_conn) { circuit_t *circ; edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(entry_conn); connection_t *conn = ENTRY_TO_CONN(entry_conn); connection_edge_about_to_close(edge_conn); if (entry_conn->socks_request->has_finished == 0) { /* since conn gets removed right after this function finishes, * there's no point trying to send back a reply at this point. */ log_warn(LD_BUG,"Closing stream (marked at %s:%d) without sending" " back a socks reply.", conn->marked_for_close_file, conn->marked_for_close); } if (!edge_conn->end_reason) { log_warn(LD_BUG,"Closing stream (marked at %s:%d) without having" " set end_reason.", conn->marked_for_close_file, conn->marked_for_close); } if (entry_conn->dns_server_request) { log_warn(LD_BUG,"Closing stream (marked at %s:%d) without having" " replied to DNS request.", conn->marked_for_close_file, conn->marked_for_close); dnsserv_reject_request(entry_conn); } if (TO_CONN(edge_conn)->state == AP_CONN_STATE_CIRCUIT_WAIT) { smartlist_remove(pending_entry_connections, entry_conn); } #if 1 /* Check to make sure that this isn't in pending_entry_connections if it * didn't actually belong there. */ if (TO_CONN(edge_conn)->type == CONN_TYPE_AP) { connection_ap_warn_and_unmark_if_pending_circ(entry_conn, "about_to_close"); } #endif control_event_stream_bandwidth(edge_conn); control_event_stream_status(entry_conn, STREAM_EVENT_CLOSED, edge_conn->end_reason); circ = circuit_get_by_edge_conn(edge_conn); if (circ) circuit_detach_stream(circ, edge_conn); } /** Called when we're about to finally unlink and free an exit * connection: perform necessary accounting and cleanup */ void connection_exit_about_to_close(edge_connection_t *edge_conn) { circuit_t *circ; connection_t *conn = TO_CONN(edge_conn); connection_edge_about_to_close(edge_conn); circ = circuit_get_by_edge_conn(edge_conn); if (circ) circuit_detach_stream(circ, edge_conn); if (conn->state == EXIT_CONN_STATE_RESOLVING) { connection_dns_remove(edge_conn); } } /** Define a schedule for how long to wait between retrying * application connections. Rather than waiting a fixed amount of * time between each retry, we wait 10 seconds each for the first * two tries, and 15 seconds for each retry after * that. Hopefully this will improve the expected user experience. */ static int compute_retry_timeout(entry_connection_t *conn) { int timeout = get_options()->CircuitStreamTimeout; if (timeout) /* if our config options override the default, use them */ return timeout; if (conn->num_socks_retries < 2) /* try 0 and try 1 */ return 10; return 15; } /** Find all general-purpose AP streams waiting for a response that sent their * begin/resolve cell too long ago. Detach from their current circuit, and * mark their current circuit as unsuitable for new streams. Then call * connection_ap_handshake_attach_circuit() to attach to a new circuit (if * available) or launch a new one. * * For rendezvous streams, simply give up after SocksTimeout seconds (with no * retry attempt). 
*/ void connection_ap_expire_beginning(void) { edge_connection_t *conn; entry_connection_t *entry_conn; circuit_t *circ; time_t now = time(NULL); const or_options_t *options = get_options(); int severity; int cutoff; int seconds_idle, seconds_since_born; smartlist_t *conns = get_connection_array(); SMARTLIST_FOREACH_BEGIN(conns, connection_t *, base_conn) { if (base_conn->type != CONN_TYPE_AP || base_conn->marked_for_close) continue; entry_conn = TO_ENTRY_CONN(base_conn); conn = ENTRY_TO_EDGE_CONN(entry_conn); /* if it's an internal linked connection, don't yell its status. */ severity = (tor_addr_is_null(&base_conn->addr) && !base_conn->port) ? LOG_INFO : LOG_NOTICE; seconds_idle = (int)( now - base_conn->timestamp_lastread ); seconds_since_born = (int)( now - base_conn->timestamp_created ); if (base_conn->state == AP_CONN_STATE_OPEN) continue; /* We already consider SocksTimeout in * connection_ap_handshake_attach_circuit(), but we need to consider * it here too because controllers that put streams in controller_wait * state never ask Tor to attach the circuit. */ if (AP_CONN_STATE_IS_UNATTACHED(base_conn->state)) { if (seconds_since_born >= options->SocksTimeout) { log_fn(severity, LD_APP, "Tried for %d seconds to get a connection to %s:%d. " "Giving up. (%s)", seconds_since_born, safe_str_client(entry_conn->socks_request->address), entry_conn->socks_request->port, conn_state_to_string(CONN_TYPE_AP, base_conn->state)); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT); } continue; } /* We're in state connect_wait or resolve_wait now -- waiting for a * reply to our relay cell. See if we want to retry/give up. */ cutoff = compute_retry_timeout(entry_conn); if (seconds_idle < cutoff) continue; circ = circuit_get_by_edge_conn(conn); if (!circ) { /* it's vanished? */ log_info(LD_APP,"Conn is waiting (address %s), but lost its circ.", safe_str_client(entry_conn->socks_request->address)); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT); continue; } if (circ->purpose == CIRCUIT_PURPOSE_C_REND_JOINED) { if (seconds_idle >= options->SocksTimeout) { log_fn(severity, LD_REND, "Rend stream is %d seconds late. Giving up on address" " '%s.onion'.", seconds_idle, safe_str_client(entry_conn->socks_request->address)); /* Roll back path bias use state so that we probe the circuit * if nothing else succeeds on it */ pathbias_mark_use_rollback(TO_ORIGIN_CIRCUIT(circ)); connection_edge_end(conn, END_STREAM_REASON_TIMEOUT); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT); } continue; } if (circ->purpose != CIRCUIT_PURPOSE_C_GENERAL && circ->purpose != CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT && circ->purpose != CIRCUIT_PURPOSE_PATH_BIAS_TESTING) { log_warn(LD_BUG, "circuit->purpose == CIRCUIT_PURPOSE_C_GENERAL failed. " "The purpose on the circuit was %s; it was in state %s, " "path_state %s.", circuit_purpose_to_string(circ->purpose), circuit_state_to_string(circ->state), CIRCUIT_IS_ORIGIN(circ) ? pathbias_state_to_string(TO_ORIGIN_CIRCUIT(circ)->path_state) : "none"); } log_fn(cutoff < 15 ? LOG_INFO : severity, LD_APP, "We tried for %d seconds to connect to '%s' using exit %s." " Retrying on a new circuit.", seconds_idle, safe_str_client(entry_conn->socks_request->address), conn->cpath_layer ? 
extend_info_describe(conn->cpath_layer->extend_info): "*unnamed*");
    /* send an end down the circuit */
    connection_edge_end(conn, END_STREAM_REASON_TIMEOUT);
    /* un-mark it as ending, since we're going to reuse it */
    conn->edge_has_sent_end = 0;
    conn->end_reason = 0;
    /* make us not try this circuit again, but allow
     * current streams on it to survive if they can */
    mark_circuit_unusable_for_new_conns(TO_ORIGIN_CIRCUIT(circ));

    /* give our stream another 'cutoff' seconds to try */
    conn->base_.timestamp_lastread += cutoff;
    if (entry_conn->num_socks_retries < 250) /* avoid overflow */
      entry_conn->num_socks_retries++;

    /* move it back into 'pending' state, and try to attach. */
    if (connection_ap_detach_retriable(entry_conn, TO_ORIGIN_CIRCUIT(circ),
                                       END_STREAM_REASON_TIMEOUT)<0) {
      if (!base_conn->marked_for_close)
        connection_mark_unattached_ap(entry_conn,
                                      END_STREAM_REASON_CANT_ATTACH);
    }
  } SMARTLIST_FOREACH_END(base_conn);
}

/**
 * As connection_ap_attach_pending, but first scans the entire connection
 * array to see if any elements are missing.
 */
void
connection_ap_rescan_and_attach_pending(void)
{
  entry_connection_t *entry_conn;
  smartlist_t *conns = get_connection_array();

  if (PREDICT_UNLIKELY(NULL == pending_entry_connections))
    pending_entry_connections = smartlist_new();

  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
    if (conn->marked_for_close ||
        conn->type != CONN_TYPE_AP ||
        conn->state != AP_CONN_STATE_CIRCUIT_WAIT)
      continue;

    entry_conn = TO_ENTRY_CONN(conn);
    tor_assert(entry_conn);
    if (! smartlist_contains(pending_entry_connections, entry_conn)) {
      /* Log the connection itself, not the list pointer. */
      log_warn(LD_BUG, "Found a connection %p that was supposed to be "
               "in pending_entry_connections, but wasn't. No worries; "
               "adding it.", entry_conn);
      untried_pending_connections = 1;
      connection_ap_mark_as_pending_circuit(entry_conn);
    }

  } SMARTLIST_FOREACH_END(conn);

  connection_ap_attach_pending(1);
}

#ifdef DEBUGGING_17659
#define UNMARK() do {                           \
    entry_conn->marked_pending_circ_line = 0;   \
    entry_conn->marked_pending_circ_file = 0;   \
  } while (0)
#else
#define UNMARK() do { } while (0)
#endif

/** Tell any AP streams that are listed as waiting for a new circuit to try
 * again. If there is an available circuit for a stream, attach it. Otherwise,
 * launch a new circuit.
 *
 * If <b>retry</b> is false, only check the list if it contains at least one
 * stream that we have not yet tried to attach to a circuit.
 */
void
connection_ap_attach_pending(int retry)
{
  if (PREDICT_UNLIKELY(!pending_entry_connections)) {
    return;
  }

  if (untried_pending_connections == 0 && !retry)
    return;

  /* Don't allow any modifications to list while we are iterating over
   * it. We'll put streams back on this list if we can't attach them
   * immediately. */
  smartlist_t *pending = pending_entry_connections;
  pending_entry_connections = smartlist_new();

  SMARTLIST_FOREACH_BEGIN(pending, entry_connection_t *, entry_conn) {
    connection_t *conn = ENTRY_TO_CONN(entry_conn);
    tor_assert(conn && entry_conn);
    if (conn->marked_for_close) {
      UNMARK();
      continue;
    }
    if (conn->magic != ENTRY_CONNECTION_MAGIC) {
      log_warn(LD_BUG, "%p has impossible magic value %u.",
               entry_conn, (unsigned)conn->magic);
      UNMARK();
      continue;
    }
    if (conn->state != AP_CONN_STATE_CIRCUIT_WAIT) {
      log_warn(LD_BUG, "%p is no longer in circuit_wait. Its current state "
               "is %s. Why is it on pending_entry_connections?",
               entry_conn,
               conn_state_to_string(conn->type, conn->state));
      UNMARK();
      continue;
    }

    /* Okay, we're through the sanity checks. Try to handle this stream.
*/ if (connection_ap_handshake_attach_circuit(entry_conn) < 0) { if (!conn->marked_for_close) connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_CANT_ATTACH); } if (! conn->marked_for_close && conn->type == CONN_TYPE_AP && conn->state == AP_CONN_STATE_CIRCUIT_WAIT) { /* Is it still waiting for a circuit? If so, we didn't attach it, * so it's still pending. Put it back on the list. */ if (!smartlist_contains(pending_entry_connections, entry_conn)) { smartlist_add(pending_entry_connections, entry_conn); continue; } } /* If we got here, then we either closed the connection, or * we attached it. */ UNMARK(); } SMARTLIST_FOREACH_END(entry_conn); smartlist_free(pending); untried_pending_connections = 0; } /** Mark <b>entry_conn</b> as needing to get attached to a circuit. * * And <b>entry_conn</b> must be in AP_CONN_STATE_CIRCUIT_WAIT, * should not already be pending a circuit. The circuit will get * launched or the connection will get attached the next time we * call connection_ap_attach_pending(). */ void connection_ap_mark_as_pending_circuit_(entry_connection_t *entry_conn, const char *fname, int lineno) { connection_t *conn = ENTRY_TO_CONN(entry_conn); tor_assert(conn->state == AP_CONN_STATE_CIRCUIT_WAIT); tor_assert(conn->magic == ENTRY_CONNECTION_MAGIC); if (conn->marked_for_close) return; if (PREDICT_UNLIKELY(NULL == pending_entry_connections)) pending_entry_connections = smartlist_new(); if (PREDICT_UNLIKELY(smartlist_contains(pending_entry_connections, entry_conn))) { log_warn(LD_BUG, "What?? pending_entry_connections already contains %p! " "(Called from %s:%d.)", entry_conn, fname, lineno); #ifdef DEBUGGING_17659 const char *f2 = entry_conn->marked_pending_circ_file; log_warn(LD_BUG, "(Previously called from %s:%d.)\n", f2 ? f2 : "<NULL>", entry_conn->marked_pending_circ_line); #endif log_backtrace(LOG_WARN, LD_BUG, "To debug, this may help"); return; } #ifdef DEBUGGING_17659 entry_conn->marked_pending_circ_line = (uint16_t) lineno; entry_conn->marked_pending_circ_file = fname; #endif untried_pending_connections = 1; smartlist_add(pending_entry_connections, entry_conn); /* Work-around for bug 19969: we handle pending_entry_connections at * the end of run_main_loop_once(), but in many cases that function will * take a very long time, if ever, to finish its call to event_base_loop(). * * So the fix is to tell it right now that it ought to finish its loop at * its next available opportunity. */ tell_event_loop_to_finish(); } /** Mark <b>entry_conn</b> as no longer waiting for a circuit. */ void connection_ap_mark_as_non_pending_circuit(entry_connection_t *entry_conn) { if (PREDICT_UNLIKELY(NULL == pending_entry_connections)) return; UNMARK(); smartlist_remove(pending_entry_connections, entry_conn); } /* DOCDOC */ void connection_ap_warn_and_unmark_if_pending_circ(entry_connection_t *entry_conn, const char *where) { if (pending_entry_connections && smartlist_contains(pending_entry_connections, entry_conn)) { log_warn(LD_BUG, "What was %p doing in pending_entry_connections in %s?", entry_conn, where); connection_ap_mark_as_non_pending_circuit(entry_conn); } } /** Tell any AP streams that are waiting for a one-hop tunnel to * <b>failed_digest</b> that they are going to fail. */ /* XXXX We should get rid of this function, and instead attach * one-hop streams to circ->p_streams so they get marked in * circuit_mark_for_close like normal p_streams. 
*/ void connection_ap_fail_onehop(const char *failed_digest, cpath_build_state_t *build_state) { entry_connection_t *entry_conn; char digest[DIGEST_LEN]; smartlist_t *conns = get_connection_array(); SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) { if (conn->marked_for_close || conn->type != CONN_TYPE_AP || conn->state != AP_CONN_STATE_CIRCUIT_WAIT) continue; entry_conn = TO_ENTRY_CONN(conn); if (!entry_conn->want_onehop) continue; if (hexdigest_to_digest(entry_conn->chosen_exit_name, digest) < 0 || tor_memneq(digest, failed_digest, DIGEST_LEN)) continue; if (tor_digest_is_zero(digest)) { /* we don't know the digest; have to compare addr:port */ tor_addr_t addr; if (!build_state || !build_state->chosen_exit || !entry_conn->socks_request) { continue; } if (tor_addr_parse(&addr, entry_conn->socks_request->address)<0 || !tor_addr_eq(&build_state->chosen_exit->addr, &addr) || build_state->chosen_exit->port != entry_conn->socks_request->port) continue; } log_info(LD_APP, "Closing one-hop stream to '%s/%s' because the OR conn " "just failed.", entry_conn->chosen_exit_name, entry_conn->socks_request->address); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TIMEOUT); } SMARTLIST_FOREACH_END(conn); } /** A circuit failed to finish on its last hop <b>info</b>. If there * are any streams waiting with this exit node in mind, but they * don't absolutely require it, make them give up on it. */ void circuit_discard_optional_exit_enclaves(extend_info_t *info) { entry_connection_t *entry_conn; const node_t *r1, *r2; smartlist_t *conns = get_connection_array(); SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) { if (conn->marked_for_close || conn->type != CONN_TYPE_AP || conn->state != AP_CONN_STATE_CIRCUIT_WAIT) continue; entry_conn = TO_ENTRY_CONN(conn); if (!entry_conn->chosen_exit_optional && !entry_conn->chosen_exit_retries) continue; r1 = node_get_by_nickname(entry_conn->chosen_exit_name, 0); r2 = node_get_by_id(info->identity_digest); if (!r1 || !r2 || r1 != r2) continue; tor_assert(entry_conn->socks_request); if (entry_conn->chosen_exit_optional) { log_info(LD_APP, "Giving up on enclave exit '%s' for destination %s.", safe_str_client(entry_conn->chosen_exit_name), escaped_safe_str_client(entry_conn->socks_request->address)); entry_conn->chosen_exit_optional = 0; tor_free(entry_conn->chosen_exit_name); /* clears it */ /* if this port is dangerous, warn or reject it now that we don't * think it'll be using an enclave. */ consider_plaintext_ports(entry_conn, entry_conn->socks_request->port); } if (entry_conn->chosen_exit_retries) { if (--entry_conn->chosen_exit_retries == 0) { /* give up! */ clear_trackexithost_mappings(entry_conn->chosen_exit_name); tor_free(entry_conn->chosen_exit_name); /* clears it */ /* if this port is dangerous, warn or reject it now that we don't * think it'll be using an enclave. */ consider_plaintext_ports(entry_conn, entry_conn->socks_request->port); } } } SMARTLIST_FOREACH_END(conn); } /** The AP connection <b>conn</b> has just failed while attaching or * sending a BEGIN or resolving on <b>circ</b>, but another circuit * might work. Detach the circuit, and either reattach it, launch a * new circuit, tell the controller, or give up as appropriate. * * Returns -1 on err, 1 on success, 0 on not-yet-sure. 
*/ int connection_ap_detach_retriable(entry_connection_t *conn, origin_circuit_t *circ, int reason) { control_event_stream_status(conn, STREAM_EVENT_FAILED_RETRIABLE, reason); ENTRY_TO_CONN(conn)->timestamp_lastread = time(NULL); /* Roll back path bias use state so that we probe the circuit * if nothing else succeeds on it */ pathbias_mark_use_rollback(circ); if (conn->pending_optimistic_data) { buf_set_to_copy(&conn->sending_optimistic_data, conn->pending_optimistic_data); } if (!get_options()->LeaveStreamsUnattached || conn->use_begindir) { /* If we're attaching streams ourself, or if this connection is * a tunneled directory connection, then just attach it. */ ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CIRCUIT_WAIT; circuit_detach_stream(TO_CIRCUIT(circ),ENTRY_TO_EDGE_CONN(conn)); connection_ap_mark_as_pending_circuit(conn); } else { CONNECTION_AP_EXPECT_NONPENDING(conn); ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CONTROLLER_WAIT; circuit_detach_stream(TO_CIRCUIT(circ),ENTRY_TO_EDGE_CONN(conn)); } return 0; } /** Check if <b>conn</b> is using a dangerous port. Then warn and/or * reject depending on our config options. */ static int consider_plaintext_ports(entry_connection_t *conn, uint16_t port) { const or_options_t *options = get_options(); int reject = smartlist_contains_int_as_string( options->RejectPlaintextPorts, port); if (smartlist_contains_int_as_string(options->WarnPlaintextPorts, port)) { log_warn(LD_APP, "Application request to port %d: this port is " "commonly used for unencrypted protocols. Please make sure " "you don't send anything you would mind the rest of the " "Internet reading!%s", port, reject ? " Closing." : ""); control_event_client_status(LOG_WARN, "DANGEROUS_PORT PORT=%d RESULT=%s", port, reject ? "REJECT" : "WARN"); } if (reject) { log_info(LD_APP, "Port %d listed in RejectPlaintextPorts. Closing.", port); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } return 0; } /** How many times do we try connecting with an exit configured via * TrackHostExits before concluding that it won't work any more and trying a * different one? */ #define TRACKHOSTEXITS_RETRIES 5 /** Call connection_ap_handshake_rewrite_and_attach() unless a controller * asked us to leave streams unattached. Return 0 in that case. * * See connection_ap_handshake_rewrite_and_attach()'s * documentation for arguments and return value. */ int connection_ap_rewrite_and_attach_if_allowed(entry_connection_t *conn, origin_circuit_t *circ, crypt_path_t *cpath) { const or_options_t *options = get_options(); if (options->LeaveStreamsUnattached) { CONNECTION_AP_EXPECT_NONPENDING(conn); ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CONTROLLER_WAIT; return 0; } return connection_ap_handshake_rewrite_and_attach(conn, circ, cpath); } /* Try to perform any map-based rewriting of the target address in * <b>conn</b>, filling in the fields of <b>out</b> as we go, and modifying * conn->socks_request.address as appropriate. */ STATIC void connection_ap_handshake_rewrite(entry_connection_t *conn, rewrite_result_t *out) { socks_request_t *socks = conn->socks_request; const or_options_t *options = get_options(); tor_addr_t addr_tmp; /* Initialize all the fields of 'out' to reasonable defaults */ out->automap = 0; out->exit_source = ADDRMAPSRC_NONE; out->map_expires = TIME_MAX; out->end_reason = 0; out->should_close = 0; out->orig_address[0] = 0; /* We convert all incoming addresses to lowercase. */ tor_strlower(socks->address); /* Remember the original address. 
*/ strlcpy(out->orig_address, socks->address, sizeof(out->orig_address)); log_debug(LD_APP,"Client asked for %s:%d", safe_str_client(socks->address), socks->port); /* Check for whether this is a .exit address. By default, those are * disallowed when they're coming straight from the client, but you're * allowed to have them in MapAddress commands and so forth. */ if (!strcmpend(socks->address, ".exit") && !options->AllowDotExit) { log_warn(LD_APP, "The \".exit\" notation is disabled in Tor due to " "security risks. Set AllowDotExit in your torrc to enable " "it (at your own risk)."); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); out->end_reason = END_STREAM_REASON_TORPROTOCOL; out->should_close = 1; return; } /* Remember the original address so we can tell the user about what * they actually said, not just what it turned into. */ /* XXX yes, this is the same as out->orig_address above. One is * in the output, and one is in the connection. */ if (! conn->original_dest_address) { /* Is the 'if' necessary here? XXXX */ conn->original_dest_address = tor_strdup(conn->socks_request->address); } /* First, apply MapAddress and MAPADDRESS mappings. We need to do * these only for non-reverse lookups, since they don't exist for those. * We also need to do this before we consider automapping, since we might * e.g. resolve irc.oftc.net into irconionaddress.onion, at which point * we'd need to automap it. */ if (socks->command != SOCKS_COMMAND_RESOLVE_PTR) { const unsigned rewrite_flags = AMR_FLAG_USE_MAPADDRESS; if (addressmap_rewrite(socks->address, sizeof(socks->address), rewrite_flags, &out->map_expires, &out->exit_source)) { control_event_stream_status(conn, STREAM_EVENT_REMAP, REMAP_STREAM_SOURCE_CACHE); } } /* Now see if we need to create or return an existing Hostname->IP * automapping. Automapping happens when we're asked to resolve a * hostname, and AutomapHostsOnResolve is set, and the hostname has a * suffix listed in AutomapHostsSuffixes. It's a handy feature * that lets you have Tor assign e.g. IPv6 addresses for .onion * names, and return them safely from DNSPort. */ if (socks->command == SOCKS_COMMAND_RESOLVE && tor_addr_parse(&addr_tmp, socks->address)<0 && options->AutomapHostsOnResolve) { /* Check the suffix... */ out->automap = addressmap_address_should_automap(socks->address, options); if (out->automap) { /* If we get here, then we should apply an automapping for this. */ const char *new_addr; /* We return an IPv4 address by default, or an IPv6 address if we * are allowed to do so. */ int addr_type = RESOLVED_TYPE_IPV4; if (conn->socks_request->socks_version != 4) { if (!conn->entry_cfg.ipv4_traffic || (conn->entry_cfg.ipv6_traffic && conn->entry_cfg.prefer_ipv6) || conn->entry_cfg.prefer_ipv6_virtaddr) addr_type = RESOLVED_TYPE_IPV6; } /* Okay, register the target address as automapped, and find the new * address we're supposed to give as a resolve answer. (Return a cached * value if we've looked up this address before. */ new_addr = addressmap_register_virtual_address( addr_type, tor_strdup(socks->address)); if (! new_addr) { log_warn(LD_APP, "Unable to automap address %s", escaped_safe_str(socks->address)); out->end_reason = END_STREAM_REASON_INTERNAL; out->should_close = 1; return; } log_info(LD_APP, "Automapping %s to %s", escaped_safe_str_client(socks->address), safe_str_client(new_addr)); strlcpy(socks->address, new_addr, sizeof(socks->address)); } } /* Now handle reverse lookups, if they're in the cache. 
This doesn't * happen too often, since client-side DNS caching is off by default, * and very deprecated. */ if (socks->command == SOCKS_COMMAND_RESOLVE_PTR) { unsigned rewrite_flags = 0; if (conn->entry_cfg.use_cached_ipv4_answers) rewrite_flags |= AMR_FLAG_USE_IPV4_DNS; if (conn->entry_cfg.use_cached_ipv6_answers) rewrite_flags |= AMR_FLAG_USE_IPV6_DNS; if (addressmap_rewrite_reverse(socks->address, sizeof(socks->address), rewrite_flags, &out->map_expires)) { char *result = tor_strdup(socks->address); /* remember _what_ is supposed to have been resolved. */ tor_snprintf(socks->address, sizeof(socks->address), "REVERSE[%s]", out->orig_address); connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_HOSTNAME, strlen(result), (uint8_t*)result, -1, out->map_expires); tor_free(result); out->end_reason = END_STREAM_REASON_DONE | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED; out->should_close = 1; return; } /* Hang on, did we find an answer saying that this is a reverse lookup for * an internal address? If so, we should reject it if we're configured to * do so. */ if (options->ClientDNSRejectInternalAddresses) { /* Don't let clients try to do a reverse lookup on 10.0.0.1. */ tor_addr_t addr; int ok; ok = tor_addr_parse_PTR_name( &addr, socks->address, AF_UNSPEC, 1); if (ok == 1 && tor_addr_is_internal(&addr, 0)) { connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_ERROR, 0, NULL, -1, TIME_MAX); out->end_reason = END_STREAM_REASON_SOCKSPROTOCOL | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED; out->should_close = 1; return; } } } /* If we didn't automap it before, then this is still the address that * came straight from the user, mapped according to any * MapAddress/MAPADDRESS commands. Now apply other mappings, * including previously registered Automap entries (IP back to * hostname), TrackHostExits entries, and client-side DNS cache * entries (if they're turned on). */ if (socks->command != SOCKS_COMMAND_RESOLVE_PTR && !out->automap) { unsigned rewrite_flags = AMR_FLAG_USE_AUTOMAP | AMR_FLAG_USE_TRACKEXIT; addressmap_entry_source_t exit_source2; if (conn->entry_cfg.use_cached_ipv4_answers) rewrite_flags |= AMR_FLAG_USE_IPV4_DNS; if (conn->entry_cfg.use_cached_ipv6_answers) rewrite_flags |= AMR_FLAG_USE_IPV6_DNS; if (addressmap_rewrite(socks->address, sizeof(socks->address), rewrite_flags, &out->map_expires, &exit_source2)) { control_event_stream_status(conn, STREAM_EVENT_REMAP, REMAP_STREAM_SOURCE_CACHE); } if (out->exit_source == ADDRMAPSRC_NONE) { /* If it wasn't a .exit before, maybe it turned into a .exit. Remember * the original source of a .exit. */ out->exit_source = exit_source2; } } /* Check to see whether we're about to use an address in the virtual * range without actually having gotten it from an Automap. */ if (!out->automap && address_is_in_virtual_range(socks->address)) { /* This address was probably handed out by * client_dns_get_unmapped_address, but the mapping was discarded for some * reason. Or the user typed in a virtual address range manually. We * *don't* want to send the address through Tor; that's likely to fail, * and may leak information. */ log_warn(LD_APP,"Missing mapping for virtual address '%s'. Refusing.", safe_str_client(socks->address)); out->end_reason = END_STREAM_REASON_INTERNAL; out->should_close = 1; return; } } /** Connection <b>conn</b> just finished its socks handshake, or the * controller asked us to take care of it. If <b>circ</b> is defined, * then that's where we'll want to attach it. Otherwise we have to * figure it out ourselves. 
* * First, parse whether it's a .exit address, remap it, and so on. Then * if it's for a general circuit, try to attach it to a circuit (or launch * one as needed), else if it's for a rendezvous circuit, fetch a * rendezvous descriptor first (or attach/launch a circuit if the * rendezvous descriptor is already here and fresh enough). * * The stream will exit from the hop * indicated by <b>cpath</b>, or from the last hop in circ's cpath if * <b>cpath</b> is NULL. */ int connection_ap_handshake_rewrite_and_attach(entry_connection_t *conn, origin_circuit_t *circ, crypt_path_t *cpath) { socks_request_t *socks = conn->socks_request; const or_options_t *options = get_options(); connection_t *base_conn = ENTRY_TO_CONN(conn); time_t now = time(NULL); rewrite_result_t rr; /* First we'll do the rewrite part. Let's see if we get a reasonable * answer. */ memset(&rr, 0, sizeof(rr)); connection_ap_handshake_rewrite(conn,&rr); if (rr.should_close) { /* connection_ap_handshake_rewrite told us to close the connection: * either because it sent back an answer, or because it sent back an * error */ connection_mark_unattached_ap(conn, rr.end_reason); if (END_STREAM_REASON_DONE == (rr.end_reason & END_STREAM_REASON_MASK)) return 0; else return -1; } const time_t map_expires = rr.map_expires; const int automap = rr.automap; const addressmap_entry_source_t exit_source = rr.exit_source; /* Now, we parse the address to see if it's an .onion or .exit or * other special address. */ const hostname_type_t addresstype = parse_extended_hostname(socks->address); /* Now see whether the hostname is bogus. This could happen because of an * onion hostname whose format we don't recognize. */ if (addresstype == BAD_HOSTNAME) { control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* If this is a .exit hostname, strip off the .name.exit part, and * see whether we're willing to connect there, and otherwise handle the * .exit address. * * We'll set chosen_exit_name and/or close the connection as appropriate. */ if (addresstype == EXIT_HOSTNAME) { /* If StrictNodes is not set, then .exit overrides ExcludeNodes but * not ExcludeExitNodes. */ routerset_t *excludeset = options->StrictNodes ? options->ExcludeExitNodesUnion_ : options->ExcludeExitNodes; const node_t *node = NULL; /* If this .exit was added by an AUTOMAP, then it came straight from * a user. Make sure that options->AllowDotExit permits that! */ if (exit_source == ADDRMAPSRC_AUTOMAP && !options->AllowDotExit) { /* Whoops; this one is stale. It must have gotten added earlier, * when AllowDotExit was on. */ log_warn(LD_APP,"Stale automapped address for '%s.exit', with " "AllowDotExit disabled. Refusing.", safe_str_client(socks->address)); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* Double-check to make sure there are no .exits coming from * impossible/weird sources. */ if (exit_source == ADDRMAPSRC_DNS || (exit_source == ADDRMAPSRC_NONE && !options->AllowDotExit)) { /* It shouldn't be possible to get a .exit address from any of these * sources. */ log_warn(LD_BUG,"Address '%s.exit', with impossible source for the " ".exit part. Refusing.", safe_str_client(socks->address)); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } tor_assert(!automap); /* Now, find the character before the .(name) part. * (The ".exit" part got stripped off by "parse_extended_hostname"). * * We're going to put the exit name into conn->chosen_exit_name, and * look up a node correspondingly. */ char *s = strrchr(socks->address,'.'); if (s) { /* The address was of the form "(stuff).(name).exit" */ if (s[1] != '\0') { /* Looks like a real .exit one. */ conn->chosen_exit_name = tor_strdup(s+1); node = node_get_by_nickname(conn->chosen_exit_name, 1); if (exit_source == ADDRMAPSRC_TRACKEXIT) { /* We get 5 tries before the addressmap entry expires. */ conn->chosen_exit_retries = TRACKHOSTEXITS_RETRIES; } *s = 0; } else { /* Oops, the address was (stuff)..exit. That's not okay. */ log_warn(LD_APP,"Malformed exit address '%s.exit'. Refusing.", safe_str_client(socks->address)); control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } } else { /* It looks like they just asked for "foo.exit". That's a special * form that means (foo's address).foo.exit. */ conn->chosen_exit_name = tor_strdup(socks->address); node = node_get_by_nickname(conn->chosen_exit_name, 1); if (node) { *socks->address = 0; node_get_address_string(node, socks->address, sizeof(socks->address)); } } /* Now make sure that the chosen exit exists... */ if (!node) { log_warn(LD_APP, "Unrecognized relay in exit address '%s.exit'. Refusing.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* ...and make sure that it isn't excluded. */ if (routerset_contains_node(excludeset, node)) { log_warn(LD_APP, "Excluded relay in exit address '%s.exit'. Refusing.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* XXXX-1090 Should we also allow foo.bar.exit if ExitNodes is set and Bar is not listed in it? I say yes, but our revised manpage branch implies no. */ } /* Now, we handle everything that isn't a .onion address. */ if (addresstype != ONION_HOSTNAME) { /* Not a hidden-service request. It's either a hostname or an IP, * possibly with a .exit that we stripped off. We're going to check * if we're allowed to connect/resolve there, and then launch the * appropriate request. */ /* Check for funny characters in the address. */ if (address_is_invalid_destination(socks->address, 1)) { control_event_client_status(LOG_WARN, "SOCKS_BAD_HOSTNAME HOSTNAME=%s", escaped(socks->address)); log_warn(LD_APP, "Destination '%s' seems to be an invalid hostname. Failing.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } #ifdef ENABLE_TOR2WEB_MODE /* If we're running in Tor2webMode, we don't allow anything BUT .onion * addresses. */ if (options->Tor2webMode) { log_warn(LD_APP, "Refusing to connect to non-hidden-service hostname " "or IP address %s because tor2web mode is enabled.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } #endif /* socks->address is a non-onion hostname or IP address. * If we can't do any non-onion requests, refuse the connection.
* If we have a hostname but can't do DNS, refuse the connection. * If we have an IP address, but we can't use that address family, * refuse the connection. * * If we can do DNS requests, and we can use at least one address family, * then we have to resolve the address first. Then we'll know if it * resolves to a usable address family. */ /* First, check if all non-onion traffic is disabled */ if (!conn->entry_cfg.dns_request && !conn->entry_cfg.ipv4_traffic && !conn->entry_cfg.ipv6_traffic) { log_warn(LD_APP, "Refusing to connect to non-hidden-service hostname " "or IP address %s because Port has OnionTrafficOnly set (or " "NoDNSRequest, NoIPv4Traffic, and NoIPv6Traffic).", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } /* Then check if we have a hostname or IP address, and whether DNS or * the IP address family are permitted. Reject if not. */ tor_addr_t dummy_addr; int socks_family = tor_addr_parse(&dummy_addr, socks->address); /* family will be -1 for a non-onion hostname that's not an IP */ if (socks_family == -1) { if (!conn->entry_cfg.dns_request) { log_warn(LD_APP, "Refusing to connect to hostname %s " "because Port has NoDNSRequest set.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } } else if (socks_family == AF_INET) { if (!conn->entry_cfg.ipv4_traffic) { log_warn(LD_APP, "Refusing to connect to IPv4 address %s because " "Port has NoIPv4Traffic set.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } } else if (socks_family == AF_INET6) { if (!conn->entry_cfg.ipv6_traffic) { log_warn(LD_APP, "Refusing to connect to IPv6 address %s because " "Port has NoIPv6Traffic set.", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } } else { tor_assert_nonfatal_unreached_once(); } /* See if this is a hostname lookup that we can answer immediately. * (For example, an attempt to look up the IP address for an IP address.) */ if (socks->command == SOCKS_COMMAND_RESOLVE) { tor_addr_t answer; /* Reply to resolves immediately if we can. */ if (tor_addr_parse(&answer, socks->address) >= 0) {/* is it an IP? */ /* remember _what_ is supposed to have been resolved. */ strlcpy(socks->address, rr.orig_address, sizeof(socks->address)); connection_ap_handshake_socks_resolved_addr(conn, &answer, -1, map_expires); connection_mark_unattached_ap(conn, END_STREAM_REASON_DONE | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED); return 0; } tor_assert(!automap); rep_hist_note_used_resolve(now); /* help predict this next time */ } else if (socks->command == SOCKS_COMMAND_CONNECT) { /* Now see if this is a connect request that we can reject immediately */ tor_assert(!automap); /* Don't allow connections to port 0. */ if (socks->port == 0) { log_notice(LD_APP,"Application asked to connect to port 0. Refusing."); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* You can't make connections to internal addresses, by default. * Exceptions are begindir requests (where the address is meaningless), * or cases where you've hand-configured a particular exit, thereby * making the local address meaningful. */ if (options->ClientRejectInternalAddresses && !conn->use_begindir && !conn->chosen_exit_name && !circ) { /* If we reach this point then we don't want to allow internal * addresses. Check if we got one. 
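* (That is: either an address literal in a private range such as
* 10.0.0.1, or a hostname that tor_addr_hostname_is_local() considers
* local.)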
*/ tor_addr_t addr; if (tor_addr_hostname_is_local(socks->address) || (tor_addr_parse(&addr, socks->address) >= 0 && tor_addr_is_internal(&addr, 0))) { /* If this is an explicit private address with no chosen exit node, * then we really don't want to try to connect to it. That's * probably an error. */ if (conn->is_transparent_ap) { #define WARN_INTRVL_LOOP 300 static ratelim_t loop_warn_limit = RATELIM_INIT(WARN_INTRVL_LOOP); char *m; if ((m = rate_limit_log(&loop_warn_limit, approx_time()))) { log_warn(LD_NET, "Rejecting request for anonymous connection to private " "address %s on a TransPort or NATDPort. Possible loop " "in your NAT rules?%s", safe_str_client(socks->address), m); tor_free(m); } } else { #define WARN_INTRVL_PRIV 300 static ratelim_t priv_warn_limit = RATELIM_INIT(WARN_INTRVL_PRIV); char *m; if ((m = rate_limit_log(&priv_warn_limit, approx_time()))) { log_warn(LD_NET, "Rejecting SOCKS request for anonymous connection to " "private address %s.%s", safe_str_client(socks->address),m); tor_free(m); } } connection_mark_unattached_ap(conn, END_STREAM_REASON_PRIVATE_ADDR); return -1; } } /* end "if we should check for internal addresses" */ /* Okay. We're still doing a CONNECT, and it wasn't a private * address. Here we do special handling for literal IP addresses, * to see if we should reject this preemptively, and to set up * fields in conn->entry_cfg to tell the exit what AF we want. */ { tor_addr_t addr; /* XXX Duplicate call to tor_addr_parse. */ if (tor_addr_parse(&addr, socks->address) >= 0) { /* If we reach this point, it's an IPv4 or an IPv6 address. */ sa_family_t family = tor_addr_family(&addr); if ((family == AF_INET && ! conn->entry_cfg.ipv4_traffic) || (family == AF_INET6 && ! conn->entry_cfg.ipv6_traffic)) { /* You can't do an IPv4 address on a v6-only socks listener, * or vice versa. */ log_warn(LD_NET, "Rejecting SOCKS request for an IP address " "family that this listener does not support."); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } else if (family == AF_INET6 && socks->socks_version == 4) { /* You can't make a socks4 request to an IPv6 address. Socks4 * doesn't support that. */ log_warn(LD_NET, "Rejecting SOCKS4 request for an IPv6 address."); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } else if (socks->socks_version == 4 && !conn->entry_cfg.ipv4_traffic) { /* You can't do any kind of Socks4 request when IPv4 is forbidden. * * XXX raise this check outside the enclosing block? */ log_warn(LD_NET, "Rejecting SOCKS4 request on a listener with " "no IPv4 traffic supported."); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } else if (family == AF_INET6) { /* Tell the exit: we won't accept any ipv4 connection to an IPv6 * address. */ conn->entry_cfg.ipv4_traffic = 0; } else if (family == AF_INET) { /* Tell the exit: we won't accept any ipv6 connection to an IPv4 * address. */ conn->entry_cfg.ipv6_traffic = 0; } } } /* we never allow IPv6 answers on socks4. (TODO: Is this smart?) */ if (socks->socks_version == 4) conn->entry_cfg.ipv6_traffic = 0; /* Still handling CONNECT. Now, check for exit enclaves. (Which we * don't do on BEGINDIR, or when there is a chosen exit.) * * TODO: Should we remove this? 
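* (Background: an "exit enclave" is a destination that runs a Tor relay
* at its own address, so a stream can exit directly at the destination
* itself.)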
Exit enclaves are nutty and don't * work very well */ if (!conn->use_begindir && !conn->chosen_exit_name && !circ) { /* see if we can find a suitable enclave exit */ const node_t *r = router_find_exact_exit_enclave(socks->address, socks->port); if (r) { log_info(LD_APP, "Redirecting address %s to exit at enclave router %s", safe_str_client(socks->address), node_describe(r)); /* use the hex digest, not nickname, in case there are two routers with this nickname */ conn->chosen_exit_name = tor_strdup(hex_str(r->identity, DIGEST_LEN)); conn->chosen_exit_optional = 1; } } /* Still handling CONNECT: warn or reject if it's using a dangerous * port. */ if (!conn->use_begindir && !conn->chosen_exit_name && !circ) if (consider_plaintext_ports(conn, socks->port) < 0) return -1; /* Remember the port so that we will predict that more requests there will happen in the future. */ if (!conn->use_begindir) { /* help predict this next time */ rep_hist_note_used_port(now, socks->port); } } else if (socks->command == SOCKS_COMMAND_RESOLVE_PTR) { rep_hist_note_used_resolve(now); /* help predict this next time */ /* no extra processing needed */ } else { /* We should only be doing CONNECT, RESOLVE, or RESOLVE_PTR! */ tor_fragile_assert(); } /* Okay. At this point we've set chosen_exit_name if needed, rewritten the * address, and decided not to reject it for any number of reasons. Now * mark the connection as waiting for a circuit, and try to attach it! */ base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT; /* If we were given a circuit to attach to, try to attach. Otherwise, * try to find a good one and attach to that. */ int rv; if (circ) { rv = connection_ap_handshake_attach_chosen_circuit(conn, circ, cpath); } else { /* We'll try to attach it at the next event loop, or whenever * we call connection_ap_attach_pending() */ connection_ap_mark_as_pending_circuit(conn); rv = 0; } /* If the above function returned 0 then we're waiting for a circuit. * if it returned 1, we're attached. Both are okay. But if it returned * -1, there was an error, so make sure the connection is marked, and * return -1. */ if (rv < 0) { if (!base_conn->marked_for_close) connection_mark_unattached_ap(conn, END_STREAM_REASON_CANT_ATTACH); return -1; } return 0; } else { /* If we get here, it's a request for a .onion address! */ tor_assert(!automap); /* If .onion address requests are disabled, refuse the request */ if (!conn->entry_cfg.onion_traffic) { log_warn(LD_APP, "Onion address %s requested from a port with .onion " "disabled", safe_str_client(socks->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_ENTRYPOLICY); return -1; } /* Check whether it's RESOLVE or RESOLVE_PTR. We don't handle those * for hidden service addresses. */ if (SOCKS_COMMAND_IS_RESOLVE(socks->command)) { /* if it's a resolve request, fail it right now, rather than * building all the circuits and then realizing it won't work. */ log_warn(LD_APP, "Resolve requests to hidden services not allowed. Failing."); connection_ap_handshake_socks_resolved(conn,RESOLVED_TYPE_ERROR, 0,NULL,-1,TIME_MAX); connection_mark_unattached_ap(conn, END_STREAM_REASON_SOCKSPROTOCOL | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED); return -1; } /* If we were passed a circuit, then we need to fail. .onion addresses * only work when we launch our own circuits for now. */ if (circ) { log_warn(LD_CONTROL, "Attachstream to a circuit is not " "supported for .onion addresses currently. 
Failing."); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; } /* Look up if we have client authorization configured for this hidden * service. If we do, associate it with the rend_data. */ rend_service_authorization_t *client_auth = rend_client_lookup_service_authorization(socks->address); const uint8_t *cookie = NULL; rend_auth_type_t auth_type = REND_NO_AUTH; if (client_auth) { log_info(LD_REND, "Using previously configured client authorization " "for hidden service request."); auth_type = client_auth->auth_type; cookie = client_auth->descriptor_cookie; } /* Fill in the rend_data field so we can start doing a connection to * a hidden service. */ rend_data_t *rend_data = ENTRY_TO_EDGE_CONN(conn)->rend_data = rend_data_client_create(socks->address, NULL, (char *) cookie, auth_type); if (rend_data == NULL) { return -1; } const char *onion_address = rend_data_get_address(rend_data); log_info(LD_REND,"Got a hidden service request for ID '%s'", safe_str_client(onion_address)); /* Lookup the given onion address. If invalid, stop right now. * Otherwise, we might have it in the cache or not. */ unsigned int refetch_desc = 0; rend_cache_entry_t *entry = NULL; const int rend_cache_lookup_result = rend_cache_lookup_entry(onion_address, -1, &entry); if (rend_cache_lookup_result < 0) { switch (-rend_cache_lookup_result) { case EINVAL: /* We should already have rejected this address! */ log_warn(LD_BUG,"Invalid service name '%s'", safe_str_client(onion_address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return -1; case ENOENT: /* We didn't have this; we should look it up. */ refetch_desc = 1; break; default: log_warn(LD_BUG, "Unknown cache lookup error %d", rend_cache_lookup_result); return -1; } } /* Help predict that we'll want to do hidden service circuits in the * future. We're not sure if it will need a stable circuit yet, but * we know we'll need *something*. */ rep_hist_note_used_internal(now, 0, 1); /* Now we have a descriptor but is it usable or not? If not, refetch. * Also, a fetch could have been requested if the onion address was not * found in the cache previously. */ if (refetch_desc || !rend_client_any_intro_points_usable(entry)) { connection_ap_mark_as_non_pending_circuit(conn); base_conn->state = AP_CONN_STATE_RENDDESC_WAIT; log_info(LD_REND, "Unknown descriptor %s. Fetching.", safe_str_client(onion_address)); rend_client_refetch_v2_renddesc(rend_data); return 0; } /* We have the descriptor! So launch a connection to the HS. */ base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT; log_info(LD_REND, "Descriptor is here. Great."); /* We'll try to attach it at the next event loop, or whenever * we call connection_ap_attach_pending() */ connection_ap_mark_as_pending_circuit(conn); return 0; } return 0; /* unreached but keeps the compiler happy */ } #ifdef TRANS_PF static int pf_socket = -1; int get_pf_socket(void) { int pf; /* This should be opened before dropping privileges. */ if (pf_socket >= 0) return pf_socket; #if defined(OpenBSD) /* only works on OpenBSD */ pf = tor_open_cloexec("/dev/pf", O_RDONLY, 0); #else /* works on NetBSD and FreeBSD */ pf = tor_open_cloexec("/dev/pf", O_RDWR, 0); #endif if (pf < 0) { log_warn(LD_NET, "open(\"/dev/pf\") failed: %s", strerror(errno)); return -1; } pf_socket = pf; return pf_socket; } #endif #if defined(TRANS_NETFILTER) || defined(TRANS_PF) || defined(TRANS_TPROXY) /** Try fill in the address of <b>req</b> from the socket configured * with <b>conn</b>. 
*/ static int destination_from_socket(entry_connection_t *conn, socks_request_t *req) { struct sockaddr_storage orig_dst; socklen_t orig_dst_len = sizeof(orig_dst); tor_addr_t addr; #ifdef TRANS_TPROXY if (get_options()->TransProxyType_parsed == TPT_TPROXY) { if (getsockname(ENTRY_TO_CONN(conn)->s, (struct sockaddr*)&orig_dst, &orig_dst_len) < 0) { int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s); log_warn(LD_NET, "getsockname() failed: %s", tor_socket_strerror(e)); return -1; } goto done; } #endif #ifdef TRANS_NETFILTER int rv = -1; switch (ENTRY_TO_CONN(conn)->socket_family) { #ifdef TRANS_NETFILTER_IPV4 case AF_INET: rv = getsockopt(ENTRY_TO_CONN(conn)->s, SOL_IP, SO_ORIGINAL_DST, (struct sockaddr*)&orig_dst, &orig_dst_len); break; #endif #ifdef TRANS_NETFILTER_IPV6 case AF_INET6: rv = getsockopt(ENTRY_TO_CONN(conn)->s, SOL_IPV6, IP6T_SO_ORIGINAL_DST, (struct sockaddr*)&orig_dst, &orig_dst_len); break; #endif default: log_warn(LD_BUG, "Received transparent data from an unsupported socket family %d", ENTRY_TO_CONN(conn)->socket_family); return -1; } if (rv < 0) { int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s); log_warn(LD_NET, "getsockopt() failed: %s", tor_socket_strerror(e)); return -1; } goto done; #elif defined(TRANS_PF) if (getsockname(ENTRY_TO_CONN(conn)->s, (struct sockaddr*)&orig_dst, &orig_dst_len) < 0) { int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s); log_warn(LD_NET, "getsockname() failed: %s", tor_socket_strerror(e)); return -1; } goto done; #else (void)conn; (void)req; log_warn(LD_BUG, "Unable to determine destination from socket."); return -1; #endif done: tor_addr_from_sockaddr(&addr, (struct sockaddr*)&orig_dst, &req->port); tor_addr_to_str(req->address, &addr, sizeof(req->address), 1); return 0; } #endif #ifdef TRANS_PF static int destination_from_pf(entry_connection_t *conn, socks_request_t *req) { struct sockaddr_storage proxy_addr; socklen_t proxy_addr_len = sizeof(proxy_addr); struct sockaddr *proxy_sa = (struct sockaddr*) &proxy_addr; struct pfioc_natlook pnl; tor_addr_t addr; int pf = -1; if (getsockname(ENTRY_TO_CONN(conn)->s, (struct sockaddr*)&proxy_addr, &proxy_addr_len) < 0) { int e = tor_socket_errno(ENTRY_TO_CONN(conn)->s); log_warn(LD_NET, "getsockname() to determine transocks destination " "failed: %s", tor_socket_strerror(e)); return -1; } #ifdef __FreeBSD__ if (get_options()->TransProxyType_parsed == TPT_IPFW) { /* ipfw(8) is used and in this case getsockname returned the original destination */ if (tor_addr_from_sockaddr(&addr, proxy_sa, &req->port) < 0) { tor_fragile_assert(); return -1; } tor_addr_to_str(req->address, &addr, sizeof(req->address), 0); return 0; } #endif memset(&pnl, 0, sizeof(pnl)); pnl.proto = IPPROTO_TCP; pnl.direction = PF_OUT; if (proxy_sa->sa_family == AF_INET) { struct sockaddr_in *sin = (struct sockaddr_in *)proxy_sa; pnl.af = AF_INET; pnl.saddr.v4.s_addr = tor_addr_to_ipv4n(&ENTRY_TO_CONN(conn)->addr); pnl.sport = htons(ENTRY_TO_CONN(conn)->port); pnl.daddr.v4.s_addr = sin->sin_addr.s_addr; pnl.dport = sin->sin_port; } else if (proxy_sa->sa_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)proxy_sa; pnl.af = AF_INET6; memcpy(&pnl.saddr.v6, tor_addr_to_in6(&ENTRY_TO_CONN(conn)->addr), sizeof(struct in6_addr)); pnl.sport = htons(ENTRY_TO_CONN(conn)->port); memcpy(&pnl.daddr.v6, &sin6->sin6_addr, sizeof(struct in6_addr)); pnl.dport = sin6->sin6_port; } else { log_warn(LD_NET, "getsockname() gave an unexpected address family (%d)", (int)proxy_sa->sa_family); return -1; } pf = get_pf_socket(); if (pf<0) return
-1; if (ioctl(pf, DIOCNATLOOK, &pnl) < 0) { log_warn(LD_NET, "ioctl(DIOCNATLOOK) failed: %s", strerror(errno)); return -1; } if (pnl.af == AF_INET) { tor_addr_from_ipv4n(&addr, pnl.rdaddr.v4.s_addr); } else if (pnl.af == AF_INET6) { tor_addr_from_in6(&addr, &pnl.rdaddr.v6); } else { tor_fragile_assert(); return -1; } tor_addr_to_str(req->address, &addr, sizeof(req->address), 1); req->port = ntohs(pnl.rdport); return 0; } #endif /** Fetch the original destination address and port from a * system-specific interface and put them into a * socks_request_t as if they came from a socks request. * * Return -1 if an error prevents fetching the destination, * else return 0. */ static int connection_ap_get_original_destination(entry_connection_t *conn, socks_request_t *req) { #ifdef TRANS_NETFILTER return destination_from_socket(conn, req); #elif defined(TRANS_PF) const or_options_t *options = get_options(); if (options->TransProxyType_parsed == TPT_PF_DIVERT) return destination_from_socket(conn, req); if (options->TransProxyType_parsed == TPT_DEFAULT || options->TransProxyType_parsed == TPT_IPFW) return destination_from_pf(conn, req); (void)conn; (void)req; log_warn(LD_BUG, "Proxy destination determination mechanism %s unknown.", options->TransProxyType); return -1; #else (void)conn; (void)req; log_warn(LD_BUG, "Called connection_ap_get_original_destination, but no " "transparent proxy method was configured."); return -1; #endif } /** connection_edge_process_inbuf() found a conn in state * socks_wait. See if conn->inbuf has the right bytes to proceed with * the socks handshake. * * If the handshake is complete, send it to * connection_ap_handshake_rewrite_and_attach(). * * Return -1 if an unexpected error with conn occurs (and mark it for close), * else return 0. */ static int connection_ap_handshake_process_socks(entry_connection_t *conn) { socks_request_t *socks; int sockshere; const or_options_t *options = get_options(); int had_reply = 0; connection_t *base_conn = ENTRY_TO_CONN(conn); tor_assert(conn); tor_assert(base_conn->type == CONN_TYPE_AP); tor_assert(base_conn->state == AP_CONN_STATE_SOCKS_WAIT); tor_assert(conn->socks_request); socks = conn->socks_request; log_debug(LD_APP,"entered."); sockshere = fetch_from_buf_socks(base_conn->inbuf, socks, options->TestSocks, options->SafeSocks); if (socks->replylen) { had_reply = 1; connection_write_to_buf((const char*)socks->reply, socks->replylen, base_conn); socks->replylen = 0; if (sockshere == -1) { /* An invalid request just got a reply, no additional * one is necessary. */ socks->has_finished = 1; } } if (sockshere == 0) { log_debug(LD_APP,"socks handshake not all here yet."); return 0; } else if (sockshere == -1) { if (!had_reply) { log_warn(LD_APP,"Fetching socks handshake failed. Closing."); connection_ap_handshake_socks_reply(conn, NULL, 0, END_STREAM_REASON_SOCKSPROTOCOL); } connection_mark_unattached_ap(conn, END_STREAM_REASON_SOCKSPROTOCOL | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED); return -1; } /* else socks handshake is done, continue processing */ if (SOCKS_COMMAND_IS_CONNECT(socks->command)) control_event_stream_status(conn, STREAM_EVENT_NEW, 0); else control_event_stream_status(conn, STREAM_EVENT_NEW_RESOLVE, 0); return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL); } /** connection_init_accepted_conn() found a new trans AP conn. * Get the original destination and send it to * connection_ap_handshake_rewrite_and_attach(). 
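*
* (We fake a completed SOCKS CONNECT request on the stream, so the rest
* of the code can treat a transparently proxied stream exactly like an
* ordinary SOCKS connection.)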
* * Return -1 if an unexpected error with conn (and it should be marked * for close), else return 0. */ int connection_ap_process_transparent(entry_connection_t *conn) { socks_request_t *socks; tor_assert(conn); tor_assert(conn->socks_request); socks = conn->socks_request; /* pretend that a socks handshake completed so we don't try to * send a socks reply down a transparent conn */ socks->command = SOCKS_COMMAND_CONNECT; socks->has_finished = 1; log_debug(LD_APP,"entered."); if (connection_ap_get_original_destination(conn, socks) < 0) { log_warn(LD_APP,"Fetching original destination failed. Closing."); connection_mark_unattached_ap(conn, END_STREAM_REASON_CANT_FETCH_ORIG_DEST); return -1; } /* we have the original destination */ control_event_stream_status(conn, STREAM_EVENT_NEW, 0); return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL); } /** connection_edge_process_inbuf() found a conn in state natd_wait. See if * conn-\>inbuf has the right bytes to proceed. See FreeBSD's libalias(3) and * ProxyEncodeTcpStream() in src/lib/libalias/alias_proxy.c for the encoding * form of the original destination. * * If the original destination is complete, send it to * connection_ap_handshake_rewrite_and_attach(). * * Return -1 if an unexpected error with conn (and it should be marked * for close), else return 0. */ static int connection_ap_process_natd(entry_connection_t *conn) { char tmp_buf[36], *tbuf, *daddr; size_t tlen = 30; int err, port_ok; socks_request_t *socks; tor_assert(conn); tor_assert(ENTRY_TO_CONN(conn)->state == AP_CONN_STATE_NATD_WAIT); tor_assert(conn->socks_request); socks = conn->socks_request; log_debug(LD_APP,"entered."); /* look for LF-terminated "[DEST ip_addr port]" * where ip_addr is a dotted-quad and port is in string form */ err = connection_fetch_from_buf_line(ENTRY_TO_CONN(conn), tmp_buf, &tlen); if (err == 0) return 0; if (err < 0) { log_warn(LD_APP,"NATD handshake failed (DEST too long). Closing"); connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST); return -1; } if (strcmpstart(tmp_buf, "[DEST ")) { log_warn(LD_APP,"NATD handshake was ill-formed; closing. The client " "said: %s", escaped(tmp_buf)); connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST); return -1; } daddr = tbuf = &tmp_buf[0] + 6; /* after end of "[DEST " */ if (!(tbuf = strchr(tbuf, ' '))) { log_warn(LD_APP,"NATD handshake was ill-formed; closing. The client " "said: %s", escaped(tmp_buf)); connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST); return -1; } *tbuf++ = '\0'; /* pretend that a socks handshake completed so we don't try to * send a socks reply down a natd conn */ strlcpy(socks->address, daddr, sizeof(socks->address)); socks->port = (uint16_t) tor_parse_long(tbuf, 10, 1, 65535, &port_ok, &daddr); if (!port_ok) { log_warn(LD_APP,"NATD handshake failed; port %s is ill-formed or out " "of range.", escaped(tbuf)); connection_mark_unattached_ap(conn, END_STREAM_REASON_INVALID_NATD_DEST); return -1; } socks->command = SOCKS_COMMAND_CONNECT; socks->has_finished = 1; control_event_stream_status(conn, STREAM_EVENT_NEW, 0); ENTRY_TO_CONN(conn)->state = AP_CONN_STATE_CIRCUIT_WAIT; return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL); } /** Iterate over the two bytes of stream_id until we get one that is not * already in use; return it. Return 0 if can't get a unique stream_id. 
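*
* (Stream IDs are 16 bits and scoped to their circuit; 0 is reserved as
* "no stream", which is why it can double as the error return here.)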
*/ streamid_t get_unique_stream_id_by_circ(origin_circuit_t *circ) { edge_connection_t *tmpconn; streamid_t test_stream_id; uint32_t attempts=0; again: test_stream_id = circ->next_stream_id++; if (++attempts > 1<<16) { /* Make sure we don't loop forever if all stream_id's are used. */ log_warn(LD_APP,"No unused stream IDs. Failing."); return 0; } if (test_stream_id == 0) goto again; for (tmpconn = circ->p_streams; tmpconn; tmpconn=tmpconn->next_stream) if (tmpconn->stream_id == test_stream_id) goto again; return test_stream_id; } /** Return true iff <b>conn</b> is linked to a circuit and configured to use * an exit that supports optimistic data. */ static int connection_ap_supports_optimistic_data(const entry_connection_t *conn) { const edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn); /* We can only send optimistic data if we're connected to an open general circuit. */ if (edge_conn->on_circuit == NULL || edge_conn->on_circuit->state != CIRCUIT_STATE_OPEN || (edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_GENERAL && edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_REND_JOINED)) return 0; return conn->may_use_optimistic_data; } /** Return a bitmask of BEGIN_FLAG_* flags that we should transmit in the * RELAY_BEGIN cell for <b>ap_conn</b>. */ static uint32_t connection_ap_get_begincell_flags(entry_connection_t *ap_conn) { edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn); const node_t *exitnode = NULL; const crypt_path_t *cpath_layer = edge_conn->cpath_layer; uint32_t flags = 0; /* No flags for begindir */ if (ap_conn->use_begindir) return 0; /* No flags for hidden services. */ if (edge_conn->on_circuit->purpose != CIRCUIT_PURPOSE_C_GENERAL) return 0; /* If only IPv4 is supported, no flags */ if (ap_conn->entry_cfg.ipv4_traffic && !ap_conn->entry_cfg.ipv6_traffic) return 0; if (! cpath_layer || ! cpath_layer->extend_info) return 0; if (!ap_conn->entry_cfg.ipv4_traffic) flags |= BEGIN_FLAG_IPV4_NOT_OK; exitnode = node_get_by_id(cpath_layer->extend_info->identity_digest); if (ap_conn->entry_cfg.ipv6_traffic && exitnode) { tor_addr_t a; tor_addr_make_null(&a, AF_INET6); if (compare_tor_addr_to_node_policy(&a, ap_conn->socks_request->port, exitnode) != ADDR_POLICY_REJECTED) { /* Only say "IPv6 OK" if the exit node supports IPv6. Otherwise there's * no point. */ flags |= BEGIN_FLAG_IPV6_OK; } } if (flags == BEGIN_FLAG_IPV6_OK) { /* When IPv4 and IPv6 are both allowed, consider whether to say we * prefer IPv6. Otherwise there's no point in declaring a preference */ if (ap_conn->entry_cfg.prefer_ipv6) flags |= BEGIN_FLAG_IPV6_PREFERRED; } if (flags == BEGIN_FLAG_IPV4_NOT_OK) { log_warn(LD_EDGE, "I'm about to ask a node for a connection that I " "am telling it to fulfil with neither IPv4 nor IPv6. That's " "not going to work. Did you perhaps ask for an IPv6 address " "on an IPv4Only port, or vice versa?"); } return flags; } /** Write a relay begin cell, using destaddr and destport from ap_conn's * socks_request field, and send it down circ. * * If ap_conn is broken, mark it for close and return -1. Else return 0. 
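*
* The begin cell payload built below is the NUL-terminated string
* "address:port", optionally followed by a four-byte network-order flags
* field when any begin-cell flags are set. For example (an illustrative
* destination, not one taken from this code):
*
*   "www.example.com:80\0" [4-byte flags, optional]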
*/ int connection_ap_handshake_send_begin(entry_connection_t *ap_conn) { char payload[CELL_PAYLOAD_SIZE]; int payload_len; int begin_type; const or_options_t *options = get_options(); origin_circuit_t *circ; edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn); connection_t *base_conn = TO_CONN(edge_conn); tor_assert(edge_conn->on_circuit); circ = TO_ORIGIN_CIRCUIT(edge_conn->on_circuit); tor_assert(base_conn->type == CONN_TYPE_AP); tor_assert(base_conn->state == AP_CONN_STATE_CIRCUIT_WAIT); tor_assert(ap_conn->socks_request); tor_assert(SOCKS_COMMAND_IS_CONNECT(ap_conn->socks_request->command)); edge_conn->stream_id = get_unique_stream_id_by_circ(circ); if (edge_conn->stream_id==0) { /* XXXX+ Instead of closing this stream, we should make it get * retried on another circuit. */ connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL); /* Mark this circuit "unusable for new streams". */ mark_circuit_unusable_for_new_conns(circ); return -1; } /* Set up begin cell flags. */ edge_conn->begincell_flags = connection_ap_get_begincell_flags(ap_conn); tor_snprintf(payload,RELAY_PAYLOAD_SIZE, "%s:%d", (circ->base_.purpose == CIRCUIT_PURPOSE_C_GENERAL) ? ap_conn->socks_request->address : "", ap_conn->socks_request->port); payload_len = (int)strlen(payload)+1; if (payload_len <= RELAY_PAYLOAD_SIZE - 4 && edge_conn->begincell_flags) { set_uint32(payload + payload_len, htonl(edge_conn->begincell_flags)); payload_len += 4; } log_info(LD_APP, "Sending relay cell %d on circ %u to begin stream %d.", (int)ap_conn->use_begindir, (unsigned)circ->base_.n_circ_id, edge_conn->stream_id); begin_type = ap_conn->use_begindir ? RELAY_COMMAND_BEGIN_DIR : RELAY_COMMAND_BEGIN; /* Check that circuits are anonymised, based on their type. */ if (begin_type == RELAY_COMMAND_BEGIN) { /* This connection is a standard OR connection. * Make sure its path length is anonymous, or that we're in a * non-anonymous mode. */ assert_circ_anonymity_ok(circ, options); } else if (begin_type == RELAY_COMMAND_BEGIN_DIR) { /* This connection is a begindir directory connection. * Look at the linked directory connection to access the directory purpose. * If a BEGINDIR connection is ever not linked, that's a bug. */ if (BUG(!base_conn->linked)) { return -1; } connection_t *linked_dir_conn_base = base_conn->linked_conn; /* If the linked connection has been unlinked by other code, we can't send * a begin cell on it. */ if (!linked_dir_conn_base) { return -1; } /* Sensitive directory connections must have an anonymous path length. * Otherwise, directory connections are typically one-hop. * This matches the earlier check for directory connection path anonymity * in directory_initiate_command_rend(). */ if (purpose_needs_anonymity(linked_dir_conn_base->purpose, TO_DIR_CONN(linked_dir_conn_base)->router_purpose, TO_DIR_CONN(linked_dir_conn_base)->requested_resource)) { assert_circ_anonymity_ok(circ, options); } } else { /* This code was written for the two connection types BEGIN and BEGIN_DIR */ tor_assert_unreached(); } if (connection_edge_send_command(edge_conn, begin_type, begin_type == RELAY_COMMAND_BEGIN ? payload : NULL, begin_type == RELAY_COMMAND_BEGIN ? 
payload_len : 0) < 0) return -1; /* circuit is closed, don't continue */ edge_conn->package_window = STREAMWINDOW_START; edge_conn->deliver_window = STREAMWINDOW_START; base_conn->state = AP_CONN_STATE_CONNECT_WAIT; log_info(LD_APP,"Address/port sent, ap socket "TOR_SOCKET_T_FORMAT ", n_circ_id %u", base_conn->s, (unsigned)circ->base_.n_circ_id); control_event_stream_status(ap_conn, STREAM_EVENT_SENT_CONNECT, 0); /* If there's queued-up data, send it now */ if ((connection_get_inbuf_len(base_conn) || ap_conn->sending_optimistic_data) && connection_ap_supports_optimistic_data(ap_conn)) { log_info(LD_APP, "Sending up to %ld + %ld bytes of queued-up data", (long)connection_get_inbuf_len(base_conn), ap_conn->sending_optimistic_data ? (long)buf_datalen(ap_conn->sending_optimistic_data) : 0); if (connection_edge_package_raw_inbuf(edge_conn, 1, NULL) < 0) { connection_mark_for_close(base_conn); } } return 0; } /** Write a relay resolve cell, using destaddr and destport from ap_conn's * socks_request field, and send it down circ. * * If ap_conn is broken, mark it for close and return -1. Else return 0. */ int connection_ap_handshake_send_resolve(entry_connection_t *ap_conn) { int payload_len, command; const char *string_addr; char inaddr_buf[REVERSE_LOOKUP_NAME_BUF_LEN]; origin_circuit_t *circ; edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(ap_conn); connection_t *base_conn = TO_CONN(edge_conn); tor_assert(edge_conn->on_circuit); circ = TO_ORIGIN_CIRCUIT(edge_conn->on_circuit); tor_assert(base_conn->type == CONN_TYPE_AP); tor_assert(base_conn->state == AP_CONN_STATE_CIRCUIT_WAIT); tor_assert(ap_conn->socks_request); tor_assert(circ->base_.purpose == CIRCUIT_PURPOSE_C_GENERAL); command = ap_conn->socks_request->command; tor_assert(SOCKS_COMMAND_IS_RESOLVE(command)); edge_conn->stream_id = get_unique_stream_id_by_circ(circ); if (edge_conn->stream_id==0) { /* XXXX+ Instead of closing this stream, we should make it get * retried on another circuit. */ connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL); /* Mark this circuit "unusable for new streams". */ mark_circuit_unusable_for_new_conns(circ); return -1; } if (command == SOCKS_COMMAND_RESOLVE) { string_addr = ap_conn->socks_request->address; payload_len = (int)strlen(string_addr)+1; } else { /* command == SOCKS_COMMAND_RESOLVE_PTR */ const char *a = ap_conn->socks_request->address; tor_addr_t addr; int r; /* We're doing a reverse lookup. The input could be an IP address, or * could be an .in-addr.arpa or .ip6.arpa address */ r = tor_addr_parse_PTR_name(&addr, a, AF_UNSPEC, 1); if (r <= 0) { log_warn(LD_APP, "Rejecting ill-formed reverse lookup of %s", safe_str_client(a)); connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL); return -1; } r = tor_addr_to_PTR_name(inaddr_buf, sizeof(inaddr_buf), &addr); if (r < 0) { log_warn(LD_BUG, "Couldn't generate reverse lookup hostname of %s", safe_str_client(a)); connection_mark_unattached_ap(ap_conn, END_STREAM_REASON_INTERNAL); return -1; } string_addr = inaddr_buf; payload_len = (int)strlen(inaddr_buf)+1; tor_assert(payload_len <= (int)sizeof(inaddr_buf)); } log_debug(LD_APP, "Sending relay cell to begin stream %d.", edge_conn->stream_id); if (connection_edge_send_command(edge_conn, RELAY_COMMAND_RESOLVE, string_addr, payload_len) < 0) return -1; /* circuit is closed, don't continue */ if (!base_conn->address) { /* This might be unnecessary. 
XXXX */ base_conn->address = tor_addr_to_str_dup(&base_conn->addr); } base_conn->state = AP_CONN_STATE_RESOLVE_WAIT; log_info(LD_APP,"Address sent for resolve, ap socket "TOR_SOCKET_T_FORMAT ", n_circ_id %u", base_conn->s, (unsigned)circ->base_.n_circ_id); control_event_stream_status(ap_conn, STREAM_EVENT_SENT_RESOLVE, 0); return 0; } /** Make an AP connection_t linked to the connection_t <b>partner</b>: make a * new linked connection pair, attach one side to <b>partner</b>, * connection_add it, initialize it to circuit_wait, and call * connection_ap_handshake_attach_circuit(conn) on it. * * Return the newly created end of the linked connection pair, or NULL on * error. */ entry_connection_t * connection_ap_make_link(connection_t *partner, char *address, uint16_t port, const char *digest, int session_group, int isolation_flags, int use_begindir, int want_onehop) { entry_connection_t *conn; connection_t *base_conn; log_info(LD_APP,"Making internal %s tunnel to %s:%d ...", want_onehop ? "direct" : "anonymized", safe_str_client(address), port); conn = entry_connection_new(CONN_TYPE_AP, tor_addr_family(&partner->addr)); base_conn = ENTRY_TO_CONN(conn); base_conn->linked = 1; /* so that we can add it safely below. */ /* populate conn->socks_request */ /* leave version at zero, so the socks_reply is empty */ conn->socks_request->socks_version = 0; conn->socks_request->has_finished = 0; /* waiting for 'connected' */ strlcpy(conn->socks_request->address, address, sizeof(conn->socks_request->address)); conn->socks_request->port = port; conn->socks_request->command = SOCKS_COMMAND_CONNECT; conn->want_onehop = want_onehop; conn->use_begindir = use_begindir; if (use_begindir) { conn->chosen_exit_name = tor_malloc(HEX_DIGEST_LEN+2); conn->chosen_exit_name[0] = '$'; tor_assert(digest); base16_encode(conn->chosen_exit_name+1,HEX_DIGEST_LEN+1, digest, DIGEST_LEN); } /* Populate isolation fields. */ conn->socks_request->listener_type = CONN_TYPE_DIR_LISTENER; conn->original_dest_address = tor_strdup(address); conn->entry_cfg.session_group = session_group; conn->entry_cfg.isolation_flags = isolation_flags; base_conn->address = tor_strdup("(Tor_internal)"); tor_addr_make_unspec(&base_conn->addr); base_conn->port = 0; connection_link_connections(partner, base_conn); if (connection_add(base_conn) < 0) { /* no space, forget it */ connection_free(base_conn); return NULL; } base_conn->state = AP_CONN_STATE_CIRCUIT_WAIT; control_event_stream_status(conn, STREAM_EVENT_NEW, 0); /* attaching to a dirty circuit is fine */ connection_ap_mark_as_pending_circuit(conn); log_info(LD_APP,"... application connection created and linked."); return conn; } /** Notify any interested controller connections about a new hostname resolve * or resolve error. Takes the same arguments as does * connection_ap_handshake_socks_resolved().
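*
* (Note: this helper recomputes <b>expires</b> from <b>ttl</b> as its
* first step, so the <b>expires</b> value passed in is effectively
* ignored.)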
*/ static void tell_controller_about_resolved_result(entry_connection_t *conn, int answer_type, size_t answer_len, const char *answer, int ttl, time_t expires) { expires = time(NULL) + ttl; if (answer_type == RESOLVED_TYPE_IPV4 && answer_len >= 4) { char *cp = tor_dup_ip(ntohl(get_uint32(answer))); control_event_address_mapped(conn->socks_request->address, cp, expires, NULL, 0); tor_free(cp); } else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) { char *cp = tor_strndup(answer, answer_len); control_event_address_mapped(conn->socks_request->address, cp, expires, NULL, 0); tor_free(cp); } else { control_event_address_mapped(conn->socks_request->address, "<error>", time(NULL)+ttl, "error=yes", 0); } } /** * As connection_ap_handshake_socks_resolved, but take a tor_addr_t to send * as the answer. */ void connection_ap_handshake_socks_resolved_addr(entry_connection_t *conn, const tor_addr_t *answer, int ttl, time_t expires) { if (tor_addr_family(answer) == AF_INET) { uint32_t a = tor_addr_to_ipv4n(answer); /* network order */ connection_ap_handshake_socks_resolved(conn,RESOLVED_TYPE_IPV4,4, (uint8_t*)&a, ttl, expires); } else if (tor_addr_family(answer) == AF_INET6) { const uint8_t *a = tor_addr_to_in6_addr8(answer); connection_ap_handshake_socks_resolved(conn,RESOLVED_TYPE_IPV6,16, a, ttl, expires); } else { log_warn(LD_BUG, "Got called with address of unexpected family %d", tor_addr_family(answer)); connection_ap_handshake_socks_resolved(conn, RESOLVED_TYPE_ERROR,0,NULL,-1,-1); } } /** Send an answer to an AP connection that has requested a DNS lookup via * SOCKS. The type should be one of RESOLVED_TYPE_(IPV4|IPV6|HOSTNAME) or -1 * for unreachable; the answer should be in the format specified in the socks * extensions document. <b>ttl</b> is the ttl for the answer, or -1 on * certain errors or for values that didn't come via DNS. <b>expires</b> is * a time when the answer expires, or -1 or TIME_MAX if there's a good TTL. **/ /* XXXX the use of the ttl and expires fields is nutty. Let's make this * interface and those that use it less ugly. */ MOCK_IMPL(void, connection_ap_handshake_socks_resolved,(entry_connection_t *conn, int answer_type, size_t answer_len, const uint8_t *answer, int ttl, time_t expires)) { char buf[384]; size_t replylen; if (ttl >= 0) { if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) { tor_addr_t a; tor_addr_from_ipv4n(&a, get_uint32(answer)); if (! tor_addr_is_null(&a)) { client_dns_set_addressmap(conn, conn->socks_request->address, &a, conn->chosen_exit_name, ttl); } } else if (answer_type == RESOLVED_TYPE_IPV6 && answer_len == 16) { tor_addr_t a; tor_addr_from_ipv6_bytes(&a, (char*)answer); if (! tor_addr_is_null(&a)) { client_dns_set_addressmap(conn, conn->socks_request->address, &a, conn->chosen_exit_name, ttl); } } else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) { char *cp = tor_strndup((char*)answer, answer_len); client_dns_set_reverse_addressmap(conn, conn->socks_request->address, cp, conn->chosen_exit_name, ttl); tor_free(cp); } } if (ENTRY_TO_EDGE_CONN(conn)->is_dns_request) { if (conn->dns_server_request) { /* We had a request on our DNS port: answer it. */ dnsserv_resolved(conn, answer_type, answer_len, (char*)answer, ttl); conn->socks_request->has_finished = 1; return; } else { /* This must be a request from the controller. Since answers to those * requests are not cached, they do not generate an ADDRMAP event on * their own. 
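* So we generate the corresponding controller event ourselves, just
* below.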
*/ tell_controller_about_resolved_result(conn, answer_type, answer_len, (char*)answer, ttl, expires); conn->socks_request->has_finished = 1; return; } /* We shouldn't need to free conn here; it gets marked by the caller. */ } if (conn->socks_request->socks_version == 4) { buf[0] = 0x00; /* version */ if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) { buf[1] = SOCKS4_GRANTED; set_uint16(buf+2, 0); memcpy(buf+4, answer, 4); /* address */ replylen = SOCKS4_NETWORK_LEN; } else { /* "error" */ buf[1] = SOCKS4_REJECT; memset(buf+2, 0, 6); replylen = SOCKS4_NETWORK_LEN; } } else if (conn->socks_request->socks_version == 5) { /* SOCKS5 */ buf[0] = 0x05; /* version */ if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) { buf[1] = SOCKS5_SUCCEEDED; buf[2] = 0; /* reserved */ buf[3] = 0x01; /* IPv4 address type */ memcpy(buf+4, answer, 4); /* address */ set_uint16(buf+8, 0); /* port == 0. */ replylen = 10; } else if (answer_type == RESOLVED_TYPE_IPV6 && answer_len == 16) { buf[1] = SOCKS5_SUCCEEDED; buf[2] = 0; /* reserved */ buf[3] = 0x04; /* IPv6 address type */ memcpy(buf+4, answer, 16); /* address */ set_uint16(buf+20, 0); /* port == 0. */ replylen = 22; } else if (answer_type == RESOLVED_TYPE_HOSTNAME && answer_len < 256) { buf[1] = SOCKS5_SUCCEEDED; buf[2] = 0; /* reserved */ buf[3] = 0x03; /* Domainname address type */ buf[4] = (char)answer_len; memcpy(buf+5, answer, answer_len); /* address */ set_uint16(buf+5+answer_len, 0); /* port == 0. */ replylen = 5+answer_len+2; } else { buf[1] = SOCKS5_HOST_UNREACHABLE; memset(buf+2, 0, 8); replylen = 10; } } else { /* no socks version info; don't send anything back */ return; } connection_ap_handshake_socks_reply(conn, buf, replylen, (answer_type == RESOLVED_TYPE_IPV4 || answer_type == RESOLVED_TYPE_IPV6 || answer_type == RESOLVED_TYPE_HOSTNAME) ? 0 : END_STREAM_REASON_RESOLVEFAILED); } /** Send a socks reply to stream <b>conn</b>, using the appropriate * socks version, etc, and mark <b>conn</b> as completed with SOCKS * handshaking. * * If <b>reply</b> is defined, then write <b>replylen</b> bytes of it to conn * and return, else reply based on <b>endreason</b> (one of * END_STREAM_REASON_*). If <b>reply</b> is undefined, <b>endreason</b> can't * be 0 or REASON_DONE. Send endreason to the controller, if appropriate. */ void connection_ap_handshake_socks_reply(entry_connection_t *conn, char *reply, size_t replylen, int endreason) { char buf[256]; socks5_reply_status_t status = stream_end_reason_to_socks5_response(endreason); tor_assert(conn->socks_request); /* make sure it's an AP stream */ if (!SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command)) { control_event_stream_status(conn, status==SOCKS5_SUCCEEDED ? STREAM_EVENT_SUCCEEDED : STREAM_EVENT_FAILED, endreason); } /* Flag this stream's circuit as having completed a stream successfully * (for path bias) */ if (status == SOCKS5_SUCCEEDED || endreason == END_STREAM_REASON_RESOLVEFAILED || endreason == END_STREAM_REASON_CONNECTREFUSED || endreason == END_STREAM_REASON_CONNRESET || endreason == END_STREAM_REASON_NOROUTE || endreason == END_STREAM_REASON_RESOURCELIMIT) { if (!conn->edge_.on_circuit || !CIRCUIT_IS_ORIGIN(conn->edge_.on_circuit)) { if (endreason != END_STREAM_REASON_RESOLVEFAILED) { log_info(LD_BUG, "No origin circuit for successful SOCKS stream "U64_FORMAT ". 
Reason: %d", U64_PRINTF_ARG(ENTRY_TO_CONN(conn)->global_identifier), endreason); } /* * Else DNS remaps and failed hidden service lookups can send us * here with END_STREAM_REASON_RESOLVEFAILED; ignore it * * Perhaps we could make the test more precise; we can tell hidden * services by conn->edge_.renddata != NULL; anything analogous for * the DNS remap case? */ } else { // XXX: Hrmm. It looks like optimistic data can't go through this // codepath, but someone should probably test it and make sure. // We don't want to mark optimistically opened streams as successful. pathbias_mark_use_success(TO_ORIGIN_CIRCUIT(conn->edge_.on_circuit)); } } if (conn->socks_request->has_finished) { log_warn(LD_BUG, "(Harmless.) duplicate calls to " "connection_ap_handshake_socks_reply."); return; } if (replylen) { /* we already have a reply in mind */ connection_write_to_buf(reply, replylen, ENTRY_TO_CONN(conn)); conn->socks_request->has_finished = 1; return; } if (conn->socks_request->socks_version == 4) { memset(buf,0,SOCKS4_NETWORK_LEN); buf[1] = (status==SOCKS5_SUCCEEDED ? SOCKS4_GRANTED : SOCKS4_REJECT); /* leave version, destport, destip zero */ connection_write_to_buf(buf, SOCKS4_NETWORK_LEN, ENTRY_TO_CONN(conn)); } else if (conn->socks_request->socks_version == 5) { size_t buf_len; memset(buf,0,sizeof(buf)); if (tor_addr_family(&conn->edge_.base_.addr) == AF_INET) { buf[0] = 5; /* version 5 */ buf[1] = (char)status; buf[2] = 0; buf[3] = 1; /* ipv4 addr */ /* 4 bytes for the header, 2 bytes for the port, 4 for the address. */ buf_len = 10; } else { /* AF_INET6. */ buf[0] = 5; /* version 5 */ buf[1] = (char)status; buf[2] = 0; buf[3] = 4; /* ipv6 addr */ /* 4 bytes for the header, 2 bytes for the port, 16 for the address. */ buf_len = 22; } connection_write_to_buf(buf,buf_len,ENTRY_TO_CONN(conn)); } /* If socks_version isn't 4 or 5, don't send anything. * This can happen in the case of AP bridges. */ conn->socks_request->has_finished = 1; return; } /** Read a RELAY_BEGIN or RELAY_BEGINDIR cell from <b>cell</b>, decode it, and * place the result in <b>bcell</b>. On success return 0; on failure return * <0 and set *<b>end_reason_out</b> to the end reason we should send back to * the client. * * Return -1 in the case where want to send a RELAY_END cell, and < -1 when * we don't. **/ STATIC int begin_cell_parse(const cell_t *cell, begin_cell_t *bcell, uint8_t *end_reason_out) { relay_header_t rh; const uint8_t *body, *nul; memset(bcell, 0, sizeof(*bcell)); *end_reason_out = END_STREAM_REASON_MISC; relay_header_unpack(&rh, cell->payload); if (rh.length > RELAY_PAYLOAD_SIZE) { return -2; /*XXXX why not TORPROTOCOL? */ } bcell->stream_id = rh.stream_id; if (rh.command == RELAY_COMMAND_BEGIN_DIR) { bcell->is_begindir = 1; return 0; } else if (rh.command != RELAY_COMMAND_BEGIN) { log_warn(LD_BUG, "Got an unexpected command %d", (int)rh.command); *end_reason_out = END_STREAM_REASON_INTERNAL; return -1; } body = cell->payload + RELAY_HEADER_SIZE; nul = memchr(body, 0, rh.length); if (! nul) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay begin cell has no \\0. Closing."); *end_reason_out = END_STREAM_REASON_TORPROTOCOL; return -1; } if (tor_addr_port_split(LOG_PROTOCOL_WARN, (char*)(body), &bcell->address,&bcell->port)<0) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Unable to parse addr:port in relay begin cell. Closing."); *end_reason_out = END_STREAM_REASON_TORPROTOCOL; return -1; } if (bcell->port == 0) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Missing port in relay begin cell. 
Closing."); tor_free(bcell->address); *end_reason_out = END_STREAM_REASON_TORPROTOCOL; return -1; } if (body + rh.length >= nul + 4) bcell->flags = ntohl(get_uint32(nul+1)); return 0; } /** A relay 'begin' or 'begin_dir' cell has arrived, and either we are * an exit hop for the circuit, or we are the origin and it is a * rendezvous begin. * * Launch a new exit connection and initialize things appropriately. * * If it's a rendezvous stream, call connection_exit_connect() on * it. * * For general streams, call dns_resolve() on it first, and only call * connection_exit_connect() if the dns answer is already known. * * Note that we don't call connection_add() on the new stream! We wait * for connection_exit_connect() to do that. * * Return -(some circuit end reason) if we want to tear down <b>circ</b>. * Else return 0. */ int connection_exit_begin_conn(cell_t *cell, circuit_t *circ) { edge_connection_t *n_stream; relay_header_t rh; char *address = NULL; uint16_t port = 0; or_circuit_t *or_circ = NULL; const or_options_t *options = get_options(); begin_cell_t bcell; int rv; uint8_t end_reason=0; assert_circuit_ok(circ); if (!CIRCUIT_IS_ORIGIN(circ)) or_circ = TO_OR_CIRCUIT(circ); relay_header_unpack(&rh, cell->payload); if (rh.length > RELAY_PAYLOAD_SIZE) return -END_CIRC_REASON_TORPROTOCOL; /* Note: we have to use relay_send_command_from_edge here, not * connection_edge_end or connection_edge_send_command, since those require * that we have a stream connected to a circuit, and we don't connect to a * circuit until we have a pending/successful resolve. */ if (!server_mode(options) && circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay begin cell at non-server. Closing."); relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_EXITPOLICY, NULL); return 0; } rv = begin_cell_parse(cell, &bcell, &end_reason); if (rv < -1) { return -END_CIRC_REASON_TORPROTOCOL; } else if (rv == -1) { tor_free(bcell.address); relay_send_end_cell_from_edge(rh.stream_id, circ, end_reason, NULL); return 0; } if (! bcell.is_begindir) { /* Steal reference */ address = bcell.address; port = bcell.port; if (or_circ && or_circ->p_chan) { if (!options->AllowSingleHopExits && (or_circ->is_first_hop || (!connection_or_digest_is_known_relay( or_circ->p_chan->identity_digest) && should_refuse_unknown_exits(options)))) { /* Don't let clients use us as a single-hop proxy, unless the user * has explicitly allowed that in the config. It attracts attackers * and users who'd be better off with, well, single-hop proxies. */ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Attempt by %s to open a stream %s. Closing.", safe_str(channel_get_canonical_remote_descr(or_circ->p_chan)), or_circ->is_first_hop ? "on first hop of circuit" : "from unknown relay"); relay_send_end_cell_from_edge(rh.stream_id, circ, or_circ->is_first_hop ? END_STREAM_REASON_TORPROTOCOL : END_STREAM_REASON_MISC, NULL); tor_free(address); return 0; } } } else if (rh.command == RELAY_COMMAND_BEGIN_DIR) { if (!directory_permits_begindir_requests(options) || circ->purpose != CIRCUIT_PURPOSE_OR) { relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_NOTDIRECTORY, NULL); return 0; } /* Make sure to get the 'real' address of the previous hop: the * caller might want to know whether the remote IP address has changed, * and we might already have corrected base_.addr[ess] for the relay's * canonical IP address. 
*/ if (or_circ && or_circ->p_chan) address = tor_strdup(channel_get_actual_remote_address(or_circ->p_chan)); else address = tor_strdup("127.0.0.1"); port = 1; /* XXXX This value is never actually used anywhere, and there * isn't "really" a connection here. But we * need to set it to something nonzero. */ } else { log_warn(LD_BUG, "Got an unexpected command %d", (int)rh.command); relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_INTERNAL, NULL); return 0; } if (! options->IPv6Exit) { /* I don't care if you prefer IPv6; I can't give you any. */ bcell.flags &= ~BEGIN_FLAG_IPV6_PREFERRED; /* If you don't want IPv4, I can't help. */ if (bcell.flags & BEGIN_FLAG_IPV4_NOT_OK) { tor_free(address); relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_EXITPOLICY, NULL); return 0; } } log_debug(LD_EXIT,"Creating new exit connection."); /* The 'AF_INET' here is temporary; we might need to change it later in * connection_exit_connect(). */ n_stream = edge_connection_new(CONN_TYPE_EXIT, AF_INET); /* Remember the tunneled request ID in the new edge connection, so that * we can measure download times. */ n_stream->dirreq_id = circ->dirreq_id; n_stream->base_.purpose = EXIT_PURPOSE_CONNECT; n_stream->begincell_flags = bcell.flags; n_stream->stream_id = rh.stream_id; n_stream->base_.port = port; /* leave n_stream->s at -1, because it's not yet valid */ n_stream->package_window = STREAMWINDOW_START; n_stream->deliver_window = STREAMWINDOW_START; if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED) { origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ); log_info(LD_REND,"begin is for rendezvous. configuring stream."); n_stream->base_.address = tor_strdup("(rendezvous)"); n_stream->base_.state = EXIT_CONN_STATE_CONNECTING; n_stream->rend_data = rend_data_dup(origin_circ->rend_data); tor_assert(connection_edge_is_rendezvous_stream(n_stream)); assert_circuit_ok(circ); const int r = rend_service_set_connection_addr_port(n_stream, origin_circ); if (r < 0) { log_info(LD_REND,"Didn't find rendezvous service (port %d)", n_stream->base_.port); /* Send back reason DONE because we want to make hidden service port * scanning harder thus instead of returning that the exit policy * didn't match, which makes it obvious that the port is closed, * return DONE and kill the circuit. That way, a user (malicious or * not) needs one circuit per bad port unless it matches the policy of * the hidden service. */ relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_DONE, origin_circ->cpath->prev); connection_free(TO_CONN(n_stream)); tor_free(address); /* Drop the circuit here since it might be someone deliberately * scanning the hidden service ports. Note that this mitigates port * scanning by adding more work on the attacker side to successfully * scan but does not fully solve it. 
*/ if (r < -1) return END_CIRC_AT_ORIGIN; else return 0; } assert_circuit_ok(circ); log_debug(LD_REND,"Finished assigning addr/port"); n_stream->cpath_layer = origin_circ->cpath->prev; /* link it */ /* add it into the linked list of p_streams on this circuit */ n_stream->next_stream = origin_circ->p_streams; n_stream->on_circuit = circ; origin_circ->p_streams = n_stream; assert_circuit_ok(circ); origin_circ->rend_data->nr_streams++; connection_exit_connect(n_stream); /* For path bias: This circuit was used successfully */ pathbias_mark_use_success(origin_circ); tor_free(address); return 0; } tor_strlower(address); n_stream->base_.address = address; n_stream->base_.state = EXIT_CONN_STATE_RESOLVEFAILED; /* default to failed, change in dns_resolve if it turns out not to fail */ if (we_are_hibernating()) { relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_HIBERNATING, NULL); connection_free(TO_CONN(n_stream)); return 0; } n_stream->on_circuit = circ; if (rh.command == RELAY_COMMAND_BEGIN_DIR) { tor_addr_t tmp_addr; tor_assert(or_circ); if (or_circ->p_chan && channel_get_addr_if_possible(or_circ->p_chan, &tmp_addr)) { tor_addr_copy(&n_stream->base_.addr, &tmp_addr); } return connection_exit_connect_dir(n_stream); } log_debug(LD_EXIT,"about to start the dns_resolve()."); /* send it off to the gethostbyname farm */ switch (dns_resolve(n_stream)) { case 1: /* resolve worked; now n_stream is attached to circ. */ assert_circuit_ok(circ); log_debug(LD_EXIT,"about to call connection_exit_connect()."); connection_exit_connect(n_stream); return 0; case -1: /* resolve failed */ relay_send_end_cell_from_edge(rh.stream_id, circ, END_STREAM_REASON_RESOLVEFAILED, NULL); /* n_stream got freed. don't touch it. */ break; case 0: /* resolve added to pending list */ assert_circuit_ok(circ); break; } return 0; } /** * Called when we receive a RELAY_COMMAND_RESOLVE cell 'cell' along the * circuit <b>circ</b>; * begin resolving the hostname, and (eventually) reply with a RESOLVED cell. */ int connection_exit_begin_resolve(cell_t *cell, or_circuit_t *circ) { edge_connection_t *dummy_conn; relay_header_t rh; assert_circuit_ok(TO_CIRCUIT(circ)); relay_header_unpack(&rh, cell->payload); if (rh.length > RELAY_PAYLOAD_SIZE) return -1; /* This 'dummy_conn' only exists to remember the stream ID * associated with the resolve request; and to make the * implementation of dns.c more uniform. (We really only need to * remember the circuit, the stream ID, and the hostname to be * resolved; but if we didn't store them in a connection like this, * the housekeeping in dns.c would get way more complicated.) */ dummy_conn = edge_connection_new(CONN_TYPE_EXIT, AF_INET); dummy_conn->stream_id = rh.stream_id; dummy_conn->base_.address = tor_strndup( (char*)cell->payload+RELAY_HEADER_SIZE, rh.length); dummy_conn->base_.port = 0; dummy_conn->base_.state = EXIT_CONN_STATE_RESOLVEFAILED; dummy_conn->base_.purpose = EXIT_PURPOSE_RESOLVE; dummy_conn->on_circuit = TO_CIRCUIT(circ); /* send it off to the gethostbyname farm */ switch (dns_resolve(dummy_conn)) { case -1: /* Impossible to resolve; a resolved cell was sent. */ /* Connection freed; don't touch it. */ return 0; case 1: /* The result was cached; a resolved cell was sent. 
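* (dns_resolve() answered synchronously and already sent the RESOLVED
* cell, so the dummy conn can be freed here unless it was marked
* already.)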
*/ if (!dummy_conn->base_.marked_for_close) connection_free(TO_CONN(dummy_conn)); return 0; case 0: /* resolve added to pending list */ assert_circuit_ok(TO_CIRCUIT(circ)); break; } return 0; } /** Helper: Return true and set *<b>why_rejected</b> to an optional clarifying * message iff we do not allow connections to <b>addr</b>:<b>port</b>. */ static int my_exit_policy_rejects(const tor_addr_t *addr, uint16_t port, const char **why_rejected) { if (router_compare_to_my_exit_policy(addr, port)) { *why_rejected = ""; return 1; } else if (tor_addr_family(addr) == AF_INET6 && !get_options()->IPv6Exit) { *why_rejected = " (IPv6 address without IPv6Exit configured)"; return 1; } return 0; } /** Connect to conn's specified addr and port. If it worked, conn * has now been added to the connection_array. * * Send back a connected cell. Include the resolved IP of the destination * address, but <em>only</em> if it's a general exit stream. (Rendezvous * streams must not reveal what IP they connected to.) */ void connection_exit_connect(edge_connection_t *edge_conn) { const tor_addr_t *addr; uint16_t port; connection_t *conn = TO_CONN(edge_conn); int socket_error = 0, result; const char *why_failed_exit_policy = NULL; /* Apply exit policy to non-rendezvous connections. */ if (! connection_edge_is_rendezvous_stream(edge_conn) && my_exit_policy_rejects(&edge_conn->base_.addr, edge_conn->base_.port, &why_failed_exit_policy)) { if (BUG(!why_failed_exit_policy)) why_failed_exit_policy = ""; log_info(LD_EXIT,"%s:%d failed exit policy%s. Closing.", escaped_safe_str_client(conn->address), conn->port, why_failed_exit_policy); connection_edge_end(edge_conn, END_STREAM_REASON_EXITPOLICY); circuit_detach_stream(circuit_get_by_edge_conn(edge_conn), edge_conn); connection_free(conn); return; } #ifdef HAVE_SYS_UN_H if (conn->socket_family != AF_UNIX) { #else { #endif /* defined(HAVE_SYS_UN_H) */ addr = &conn->addr; port = conn->port; if (tor_addr_family(addr) == AF_INET6) conn->socket_family = AF_INET6; log_debug(LD_EXIT, "about to try connecting"); result = connection_connect(conn, conn->address, addr, port, &socket_error); #ifdef HAVE_SYS_UN_H } else { /* * In the AF_UNIX case, we expect to have already had conn->port = 1, * tor_addr_make_unspec(conn->addr) (cf. the way we mark in the incoming * case in connection_handle_listener_read()), and conn->address should * have the socket path to connect to. */ tor_assert(conn->address && strlen(conn->address) > 0); log_debug(LD_EXIT, "about to try connecting"); result = connection_connect_unix(conn, conn->address, &socket_error); #endif /* defined(HAVE_SYS_UN_H) */ } switch (result) { case -1: { int reason = errno_to_stream_end_reason(socket_error); connection_edge_end(edge_conn, reason); circuit_detach_stream(circuit_get_by_edge_conn(edge_conn), edge_conn); connection_free(conn); return; } case 0: conn->state = EXIT_CONN_STATE_CONNECTING; connection_watch_events(conn, READ_EVENT | WRITE_EVENT); /* writable indicates finish; * readable/error indicates broken link in windows-land. */ return; /* case 1: fall through */ } conn->state = EXIT_CONN_STATE_OPEN; if (connection_get_outbuf_len(conn)) { /* in case there are any queued data cells, from e.g. optimistic data */ connection_watch_events(conn, READ_EVENT|WRITE_EVENT); } else { connection_watch_events(conn, READ_EVENT); } /* also, deliver a 'connected' cell back through the circuit. */ if (connection_edge_is_rendezvous_stream(edge_conn)) { /* don't send an address back!
*/ connection_edge_send_command(edge_conn, RELAY_COMMAND_CONNECTED, NULL, 0); } else { /* normal stream */ uint8_t connected_payload[MAX_CONNECTED_CELL_PAYLOAD_LEN]; int connected_payload_len = connected_cell_format_payload(connected_payload, &conn->addr, edge_conn->address_ttl); if (connected_payload_len < 0) { connection_edge_end(edge_conn, END_STREAM_REASON_INTERNAL); circuit_detach_stream(circuit_get_by_edge_conn(edge_conn), edge_conn); connection_free(conn); return; } connection_edge_send_command(edge_conn, RELAY_COMMAND_CONNECTED, (char*)connected_payload, connected_payload_len); } } /** Given an exit conn that should attach to us as a directory server, open a * bridge connection with a linked connection pair, create a new directory * conn, and join them together. Return 0 on success (or if there was an * error we could send back an end cell for). Return -(some circuit end * reason) if the circuit needs to be torn down. Either connects * <b>exitconn</b>, frees it, or marks it, as appropriate. */ static int connection_exit_connect_dir(edge_connection_t *exitconn) { dir_connection_t *dirconn = NULL; or_circuit_t *circ = TO_OR_CIRCUIT(exitconn->on_circuit); log_info(LD_EXIT, "Opening local connection for anonymized directory exit"); exitconn->base_.state = EXIT_CONN_STATE_OPEN; dirconn = dir_connection_new(tor_addr_family(&exitconn->base_.addr)); tor_addr_copy(&dirconn->base_.addr, &exitconn->base_.addr); dirconn->base_.port = 0; dirconn->base_.address = tor_strdup(exitconn->base_.address); dirconn->base_.type = CONN_TYPE_DIR; dirconn->base_.purpose = DIR_PURPOSE_SERVER; dirconn->base_.state = DIR_CONN_STATE_SERVER_COMMAND_WAIT; /* Note that the new dir conn belongs to the same tunneled request as * the edge conn, so that we can measure download times. */ dirconn->dirreq_id = exitconn->dirreq_id; connection_link_connections(TO_CONN(dirconn), TO_CONN(exitconn)); if (connection_add(TO_CONN(exitconn))<0) { connection_edge_end(exitconn, END_STREAM_REASON_RESOURCELIMIT); connection_free(TO_CONN(exitconn)); connection_free(TO_CONN(dirconn)); return 0; } /* link exitconn to circ, now that we know we can use it. */ exitconn->next_stream = circ->n_streams; circ->n_streams = exitconn; if (connection_add(TO_CONN(dirconn))<0) { connection_edge_end(exitconn, END_STREAM_REASON_RESOURCELIMIT); connection_close_immediate(TO_CONN(exitconn)); connection_mark_for_close(TO_CONN(exitconn)); connection_free(TO_CONN(dirconn)); return 0; } connection_start_reading(TO_CONN(dirconn)); connection_start_reading(TO_CONN(exitconn)); if (connection_edge_send_command(exitconn, RELAY_COMMAND_CONNECTED, NULL, 0) < 0) { connection_mark_for_close(TO_CONN(exitconn)); connection_mark_for_close(TO_CONN(dirconn)); return 0; } return 0; } /** Return 1 if <b>conn</b> is a rendezvous stream, or 0 if * it is a general stream. */ int connection_edge_is_rendezvous_stream(const edge_connection_t *conn) { tor_assert(conn); if (conn->rend_data) return 1; return 0; } /** Return 1 if router <b>exit_node</b> is likely to allow stream <b>conn</b> * to exit from it, or 0 if it probably will not allow it. * (We might be uncertain if conn's destination address has not yet been * resolved.) */ int connection_ap_can_use_exit(const entry_connection_t *conn, const node_t *exit_node) { const or_options_t *options = get_options(); tor_assert(conn); tor_assert(conn->socks_request); tor_assert(exit_node); /* If a particular exit node has been requested for the new connection, * make sure the exit node of the existing circuit matches exactly. 
*/ if (conn->chosen_exit_name) { const node_t *chosen_exit = node_get_by_nickname(conn->chosen_exit_name, 1); if (!chosen_exit || tor_memneq(chosen_exit->identity, exit_node->identity, DIGEST_LEN)) { /* doesn't match */ // log_debug(LD_APP,"Requested node '%s', considering node '%s'. No.", // conn->chosen_exit_name, exit->nickname); return 0; } } if (conn->use_begindir) { /* Internal directory fetches do not count as exiting. */ return 1; } if (conn->socks_request->command == SOCKS_COMMAND_CONNECT) { tor_addr_t addr, *addrp = NULL; addr_policy_result_t r; if (0 == tor_addr_parse(&addr, conn->socks_request->address)) { addrp = &addr; } else if (!conn->entry_cfg.ipv4_traffic && conn->entry_cfg.ipv6_traffic) { tor_addr_make_null(&addr, AF_INET6); addrp = &addr; } else if (conn->entry_cfg.ipv4_traffic && !conn->entry_cfg.ipv6_traffic) { tor_addr_make_null(&addr, AF_INET); addrp = &addr; } r = compare_tor_addr_to_node_policy(addrp, conn->socks_request->port, exit_node); if (r == ADDR_POLICY_REJECTED) return 0; /* We know the address, and the exit policy rejects it. */ if (r == ADDR_POLICY_PROBABLY_REJECTED && !conn->chosen_exit_name) return 0; /* We don't know the addr, but the exit policy rejects most * addresses with this port. Since the user didn't ask for * this node, err on the side of caution. */ } else if (SOCKS_COMMAND_IS_RESOLVE(conn->socks_request->command)) { /* Don't send DNS requests to non-exit servers by default. */ if (!conn->chosen_exit_name && node_exit_policy_rejects_all(exit_node)) return 0; } if (routerset_contains_node(options->ExcludeExitNodesUnion_, exit_node)) { /* Not a suitable exit. Refuse it. */ return 0; } return 1; } /** If address is of the form "y.onion" with a well-formed handle y: * Put a NUL after y, lower-case it, and return ONION_HOSTNAME. * * If address is of the form "x.y.onion" with a well-formed handle y: * Drop "x.", put a NUL after y, lower-case it, and return ONION_HOSTNAME. * * If address is of the form "y.onion" with a badly-formed handle y: * Return BAD_HOSTNAME and log a message. * * If address is of the form "y.exit": * Put a NUL after y and return EXIT_HOSTNAME. * * Otherwise: * Return NORMAL_HOSTNAME and change nothing. */ hostname_type_t parse_extended_hostname(char *address) { char *s; char *q; char query[REND_SERVICE_ID_LEN_BASE32+1]; s = strrchr(address,'.'); if (!s) return NORMAL_HOSTNAME; /* no dot, thus normal */ if (!strcmp(s+1,"exit")) { *s = 0; /* NUL-terminate it */ return EXIT_HOSTNAME; /* .exit */ } if (strcmp(s+1,"onion")) return NORMAL_HOSTNAME; /* neither .exit nor .onion, thus normal */ /* so it is .onion */ *s = 0; /* NUL-terminate it */ /* locate a 'sub-domain' component, in order to remove it */ q = strrchr(address, '.'); if (q == address) { goto failed; /* reject sub-domain, as DNS does */ } q = (NULL == q) ? address : q + 1; if (strlcpy(query, q, REND_SERVICE_ID_LEN_BASE32+1) >= REND_SERVICE_ID_LEN_BASE32+1) goto failed; if (q != address) { memmove(address, q, strlen(q) + 1 /* also get \0 */); } if (rend_valid_service_id(query)) { return ONION_HOSTNAME; /* success */ } failed: /* otherwise, restore the previous state and return BAD_HOSTNAME */ *s = '.'; log_warn(LD_APP, "Invalid onion hostname %s; rejecting", safe_str_client(address)); return BAD_HOSTNAME; } /** Return true iff the (possibly NULL) <b>alen</b>-byte chunk of memory at * <b>a</b> is equal to the (possibly NULL) <b>blen</b>-byte chunk of memory * at <b>b</b>.
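 *
 * Illustrative examples (added for exposition; not from the original
 * source):
 *   memeq_opt(NULL, 0, NULL, 0)   => 1  (two absent chunks are equal)
 *   memeq_opt("ab", 2, NULL, 0)   => 0  (absent vs. present)
 *   memeq_opt("ab", 2, "abc", 3)  => 0  (length mismatch)
 *   memeq_opt("ab", 2, "ab", 2)   => 1  (contents compared with
 *                                        tor_memeq(), which runs in
 *                                        data-independent time)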
*/ static int memeq_opt(const char *a, size_t alen, const char *b, size_t blen) { if (a == NULL) { return (b == NULL); } else if (b == NULL) { return 0; } else if (alen != blen) { return 0; } else { return tor_memeq(a, b, alen); } } /** * Return true iff none of the isolation flags and fields in <b>conn</b> * should prevent it from being attached to <b>circ</b>. */ int connection_edge_compatible_with_circuit(const entry_connection_t *conn, const origin_circuit_t *circ) { const uint8_t iso = conn->entry_cfg.isolation_flags; const socks_request_t *sr = conn->socks_request; /* If circ has never been used for an isolated connection, we can * totally use it for this one. */ if (!circ->isolation_values_set) return 1; /* If circ has been used for connections having more than one value * for some field f, it will have the corresponding bit set in * isolation_flags_mixed. If isolation_flags_mixed has any bits * in common with iso, then conn must be isolated from at least * one stream that has been attached to circ. */ if ((iso & circ->isolation_flags_mixed) != 0) { /* For at least one field where conn is isolated, the circuit * already has mixed streams. */ return 0; } if (! conn->original_dest_address) { log_warn(LD_BUG, "Reached connection_edge_compatible_with_circuit without " "having set conn->original_dest_address"); ((entry_connection_t*)conn)->original_dest_address = tor_strdup(conn->socks_request->address); } if ((iso & ISO_STREAM) && (circ->associated_isolated_stream_global_id != ENTRY_TO_CONN(conn)->global_identifier)) return 0; if ((iso & ISO_DESTPORT) && conn->socks_request->port != circ->dest_port) return 0; if ((iso & ISO_DESTADDR) && strcasecmp(conn->original_dest_address, circ->dest_address)) return 0; if ((iso & ISO_SOCKSAUTH) && (! memeq_opt(sr->username, sr->usernamelen, circ->socks_username, circ->socks_username_len) || ! memeq_opt(sr->password, sr->passwordlen, circ->socks_password, circ->socks_password_len))) return 0; if ((iso & ISO_CLIENTPROTO) && (conn->socks_request->listener_type != circ->client_proto_type || conn->socks_request->socks_version != circ->client_proto_socksver)) return 0; if ((iso & ISO_CLIENTADDR) && !tor_addr_eq(&ENTRY_TO_CONN(conn)->addr, &circ->client_addr)) return 0; if ((iso & ISO_SESSIONGRP) && conn->entry_cfg.session_group != circ->session_group) return 0; if ((iso & ISO_NYM_EPOCH) && conn->nym_epoch != circ->nym_epoch) return 0; return 1; } /** * If <b>dry_run</b> is false, update <b>circ</b>'s isolation flags and fields * to reflect having had <b>conn</b> attached to it, and return 0. Otherwise, * if <b>dry_run</b> is true, then make no changes to <b>circ</b>, and return * a bitfield of isolation flags that we would have to set in * isolation_flags_mixed to add <b>conn</b> to <b>circ</b>, or -1 if * <b>circ</b> has had no streams attached to it. */ int connection_edge_update_circuit_isolation(const entry_connection_t *conn, origin_circuit_t *circ, int dry_run) { const socks_request_t *sr = conn->socks_request; if (! 
conn->original_dest_address) { log_warn(LD_BUG, "Reached connection_update_circuit_isolation without " "having set conn->original_dest_address"); ((entry_connection_t*)conn)->original_dest_address = tor_strdup(conn->socks_request->address); } if (!circ->isolation_values_set) { if (dry_run) return -1; circ->associated_isolated_stream_global_id = ENTRY_TO_CONN(conn)->global_identifier; circ->dest_port = conn->socks_request->port; circ->dest_address = tor_strdup(conn->original_dest_address); circ->client_proto_type = conn->socks_request->listener_type; circ->client_proto_socksver = conn->socks_request->socks_version; tor_addr_copy(&circ->client_addr, &ENTRY_TO_CONN(conn)->addr); circ->session_group = conn->entry_cfg.session_group; circ->nym_epoch = conn->nym_epoch; circ->socks_username = sr->username ? tor_memdup(sr->username, sr->usernamelen) : NULL; circ->socks_password = sr->password ? tor_memdup(sr->password, sr->passwordlen) : NULL; circ->socks_username_len = sr->usernamelen; circ->socks_password_len = sr->passwordlen; circ->isolation_values_set = 1; return 0; } else { uint8_t mixed = 0; if (conn->socks_request->port != circ->dest_port) mixed |= ISO_DESTPORT; if (strcasecmp(conn->original_dest_address, circ->dest_address)) mixed |= ISO_DESTADDR; if (!memeq_opt(sr->username, sr->usernamelen, circ->socks_username, circ->socks_username_len) || !memeq_opt(sr->password, sr->passwordlen, circ->socks_password, circ->socks_password_len)) mixed |= ISO_SOCKSAUTH; if ((conn->socks_request->listener_type != circ->client_proto_type || conn->socks_request->socks_version != circ->client_proto_socksver)) mixed |= ISO_CLIENTPROTO; if (!tor_addr_eq(&ENTRY_TO_CONN(conn)->addr, &circ->client_addr)) mixed |= ISO_CLIENTADDR; if (conn->entry_cfg.session_group != circ->session_group) mixed |= ISO_SESSIONGRP; if (conn->nym_epoch != circ->nym_epoch) mixed |= ISO_NYM_EPOCH; if (dry_run) return mixed; if ((mixed & conn->entry_cfg.isolation_flags) != 0) { log_warn(LD_BUG, "Updating a circuit with seemingly incompatible " "isolation flags."); } circ->isolation_flags_mixed |= mixed; return 0; } } /** * Clear the isolation settings on <b>circ</b>. * * This only works on an open circuit that has never had a stream attached to * it, and whose isolation settings are hypothetical. (We set hypothetical * isolation settings on circuits as we're launching them, so that we * know whether they can handle more streams or whether we need to launch * even more circuits. Once the circuit is open, if it turns out that * we no longer have any streams to attach to it, we clear the isolation flags * and data so that other streams can have a chance.) 
*/ void circuit_clear_isolation(origin_circuit_t *circ) { if (circ->isolation_any_streams_attached) { log_warn(LD_BUG, "Tried to clear the isolation status of a dirty circuit"); return; } if (TO_CIRCUIT(circ)->state != CIRCUIT_STATE_OPEN) { log_warn(LD_BUG, "Tried to clear the isolation status of a non-open " "circuit"); return; } circ->isolation_values_set = 0; circ->isolation_flags_mixed = 0; circ->associated_isolated_stream_global_id = 0; circ->client_proto_type = 0; circ->client_proto_socksver = 0; circ->dest_port = 0; tor_addr_make_unspec(&circ->client_addr); tor_free(circ->dest_address); circ->session_group = -1; circ->nym_epoch = 0; if (circ->socks_username) { memwipe(circ->socks_username, 0x11, circ->socks_username_len); tor_free(circ->socks_username); } if (circ->socks_password) { memwipe(circ->socks_password, 0x05, circ->socks_password_len); tor_free(circ->socks_password); } circ->socks_username_len = circ->socks_password_len = 0; } /** Free all storage held in module-scoped variables for connection_edge.c */ void connection_edge_free_all(void) { untried_pending_connections = 0; smartlist_free(pending_entry_connections); pending_entry_connections = NULL; }
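/* Illustrative usage sketch for parse_extended_hostname() above; the
 * hostnames are hypothetical and this block is expository, not part of
 * the original file. The argument buffer is rewritten in place:
 *
 *   char a1[] = "www.example.com";
 *   parse_extended_hostname(a1);  // => NORMAL_HOSTNAME; a1 unchanged
 *
 *   char a2[] = "3g2upl4pq6kufc4m.onion";
 *   parse_extended_hostname(a2);  // => ONION_HOSTNAME;
 *                                 //    a2 == "3g2upl4pq6kufc4m"
 *
 *   char a3[] = "www.3g2upl4pq6kufc4m.onion";
 *   parse_extended_hostname(a3);  // => ONION_HOSTNAME; the "www."
 *                                 //    sub-domain is dropped first
 *
 *   char a4[] = "somerelay.exit";
 *   parse_extended_hostname(a4);  // => EXIT_HOSTNAME; a4 == "somerelay"
 */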
/* * Copyright (c) 2009-2017 Nicira, Inc. * Copyright (c) 2010 Jean Tourrilhes - HP-Labs. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <config.h> #include <errno.h> #include <inttypes.h> #include <stdbool.h> #include <stdlib.h> #include <unistd.h> #include "bitmap.h" #include "bundles.h" #include "byte-order.h" #include "classifier.h" #include "connectivity.h" #include "connmgr.h" #include "coverage.h" #include "dp-packet.h" #include "hash.h" #include "openvswitch/hmap.h" #include "netdev.h" #include "nx-match.h" #include "ofproto.h" #include "ofproto-provider.h" #include "openflow/nicira-ext.h" #include "openflow/openflow.h" #include "openvswitch/dynamic-string.h" #include "openvswitch/meta-flow.h" #include "openvswitch/ofp-actions.h" #include "openvswitch/ofp-errors.h" #include "openvswitch/ofp-msgs.h" #include "openvswitch/ofp-print.h" #include "openvswitch/ofp-util.h" #include "openvswitch/ofpbuf.h" #include "openvswitch/vlog.h" #include "ovs-rcu.h" #include "packets.h" #include "pinsched.h" #include "poll-loop.h" #include "random.h" #include "seq.h" #include "openvswitch/shash.h" #include "simap.h" #include "smap.h" #include "sset.h" #include "timeval.h" #include "tun-metadata.h" #include "unaligned.h" #include "unixctl.h" #include "util.h" VLOG_DEFINE_THIS_MODULE(ofproto); COVERAGE_DEFINE(ofproto_flush); COVERAGE_DEFINE(ofproto_packet_out); COVERAGE_DEFINE(ofproto_queue_req); COVERAGE_DEFINE(ofproto_recv_openflow); COVERAGE_DEFINE(ofproto_reinit_ports); COVERAGE_DEFINE(ofproto_update_port); /* Default fields to use for prefix tries in each flow table, unless something * else is configured. */ const enum mf_field_id default_prefix_fields[2] = { MFF_IPV4_DST, MFF_IPV4_SRC }; /* oftable. */ static void oftable_init(struct oftable *); static void oftable_destroy(struct oftable *); static void oftable_set_name(struct oftable *, const char *name); static enum ofperr evict_rules_from_table(struct oftable *) OVS_REQUIRES(ofproto_mutex); static void oftable_configure_eviction(struct oftable *, unsigned int eviction, const struct mf_subfield *fields, size_t n_fields) OVS_REQUIRES(ofproto_mutex); /* This is the only combination of OpenFlow eviction flags that OVS supports: a * combination of OF1.4+ importance, the remaining lifetime of the flow, and * fairness based on user-specified fields. */ #define OFPROTO_EVICTION_FLAGS \ (OFPTMPEF14_OTHER | OFPTMPEF14_IMPORTANCE | OFPTMPEF14_LIFETIME) /* A set of rules within a single OpenFlow table (oftable) that have the same * values for the oftable's eviction_fields. A rule to be evicted, when one is * needed, is taken from the eviction group that contains the greatest number * of rules. * * An oftable owns any number of eviction groups, each of which contains any * number of rules. * * Membership in an eviction group is imprecise, based on the hash of the * oftable's eviction_fields (in the eviction_group's id_node.hash member). 
* That is, if two rules have different eviction_fields, but those * eviction_fields hash to the same value, then they will belong to the same * eviction_group anyway. * * (When eviction is not enabled on an oftable, we don't track any eviction * groups, to save time and space.) */ struct eviction_group { struct hmap_node id_node; /* In oftable's "eviction_groups_by_id". */ struct heap_node size_node; /* In oftable's "eviction_groups_by_size". */ struct heap rules; /* Contains "struct rule"s. */ }; static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep) OVS_REQUIRES(ofproto_mutex); static uint64_t rule_eviction_priority(struct ofproto *ofproto, struct rule *) OVS_REQUIRES(ofproto_mutex); static void eviction_group_add_rule(struct rule *) OVS_REQUIRES(ofproto_mutex); static void eviction_group_remove_rule(struct rule *) OVS_REQUIRES(ofproto_mutex); static void rule_criteria_init(struct rule_criteria *, uint8_t table_id, const struct match *match, int priority, ovs_version_t version, ovs_be64 cookie, ovs_be64 cookie_mask, ofp_port_t out_port, uint32_t out_group); static void rule_criteria_require_rw(struct rule_criteria *, bool can_write_readonly); static void rule_criteria_destroy(struct rule_criteria *); static enum ofperr collect_rules_loose(struct ofproto *, const struct rule_criteria *, struct rule_collection *); struct learned_cookie { union { /* In struct ofproto's 'learned_cookies' hmap. */ struct hmap_node hmap_node OVS_GUARDED_BY(ofproto_mutex); /* In 'dead_cookies' list when removed from hmap. */ struct ovs_list list_node; } u; /* Key. */ ovs_be64 cookie OVS_GUARDED_BY(ofproto_mutex); uint8_t table_id OVS_GUARDED_BY(ofproto_mutex); /* Number of references from "learn" actions. * * When this drops to 0, all of the flows in 'table_id' with the specified * 'cookie' are deleted. */ int n OVS_GUARDED_BY(ofproto_mutex); }; static const struct ofpact_learn *next_learn_with_delete( const struct rule_actions *, const struct ofpact_learn *start); static void learned_cookies_inc(struct ofproto *, const struct rule_actions *) OVS_REQUIRES(ofproto_mutex); static void learned_cookies_dec(struct ofproto *, const struct rule_actions *, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex); static void learned_cookies_flush(struct ofproto *, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex); /* ofport. */ static void ofport_destroy__(struct ofport *) OVS_EXCLUDED(ofproto_mutex); static void ofport_destroy(struct ofport *, bool del); static bool ofport_is_mtu_overridden(const struct ofproto *, const struct ofport *); static int update_port(struct ofproto *, const char *devname); static int init_ports(struct ofproto *); static void reinit_ports(struct ofproto *); static long long int ofport_get_usage(const struct ofproto *, ofp_port_t ofp_port); static void ofport_set_usage(struct ofproto *, ofp_port_t ofp_port, long long int last_used); static void ofport_remove_usage(struct ofproto *, ofp_port_t ofp_port); /* Ofport usage. * * Keeps track of the currently used and recently used ofport values and is * used to prevent immediate recycling of ofport values. */ struct ofport_usage { struct hmap_node hmap_node; /* In struct ofproto's "ofport_usage" hmap. */ ofp_port_t ofp_port; /* OpenFlow port number. */ long long int last_used; /* Last time the 'ofp_port' was used. LLONG_MAX represents in-use ofports. */ }; /* rule. 
*/ static void ofproto_rule_send_removed(struct rule *) OVS_EXCLUDED(ofproto_mutex); static bool rule_is_readonly(const struct rule *); static void ofproto_rule_insert__(struct ofproto *, struct rule *) OVS_REQUIRES(ofproto_mutex); static void ofproto_rule_remove__(struct ofproto *, struct rule *) OVS_REQUIRES(ofproto_mutex); /* The source of an OpenFlow request. * * A table modification request can be generated externally, via OpenFlow, or * internally through a function call. This structure indicates the source of * an OpenFlow-generated table modification. For an internal flow_mod, it * isn't meaningful and thus supplied as NULL. */ struct openflow_mod_requester { struct ofconn *ofconn; /* Connection on which flow_mod arrived. */ const struct ofp_header *request; }; /* OpenFlow. */ static enum ofperr ofproto_rule_create(struct ofproto *, struct cls_rule *, uint8_t table_id, ovs_be64 new_cookie, uint16_t idle_timeout, uint16_t hard_timeout, enum ofputil_flow_mod_flags flags, uint16_t importance, const struct ofpact *ofpacts, size_t ofpacts_len, uint64_t match_tlv_bitmap, uint64_t ofpacts_tlv_bitmap, struct rule **new_rule) OVS_NO_THREAD_SAFETY_ANALYSIS; static void replace_rule_start(struct ofproto *, struct ofproto_flow_mod *, struct rule *old_rule, struct rule *new_rule) OVS_REQUIRES(ofproto_mutex); static void replace_rule_revert(struct ofproto *, struct rule *old_rule, struct rule *new_rule) OVS_REQUIRES(ofproto_mutex); static void replace_rule_finish(struct ofproto *, struct ofproto_flow_mod *, const struct openflow_mod_requester *, struct rule *old_rule, struct rule *new_rule, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex); static void delete_flows__(struct rule_collection *, enum ofp_flow_removed_reason, const struct openflow_mod_requester *) OVS_REQUIRES(ofproto_mutex); static bool ofproto_group_exists(const struct ofproto *, uint32_t group_id); static void handle_openflow(struct ofconn *, const struct ofpbuf *); static enum ofperr ofproto_flow_mod_init(struct ofproto *, struct ofproto_flow_mod *, const struct ofputil_flow_mod *fm, struct rule *) OVS_EXCLUDED(ofproto_mutex); static enum ofperr ofproto_flow_mod_start(struct ofproto *, struct ofproto_flow_mod *) OVS_REQUIRES(ofproto_mutex); static void ofproto_flow_mod_revert(struct ofproto *, struct ofproto_flow_mod *) OVS_REQUIRES(ofproto_mutex); static void ofproto_flow_mod_finish(struct ofproto *, struct ofproto_flow_mod *, const struct openflow_mod_requester *) OVS_REQUIRES(ofproto_mutex); static enum ofperr handle_flow_mod__(struct ofproto *, const struct ofputil_flow_mod *, const struct openflow_mod_requester *) OVS_EXCLUDED(ofproto_mutex); static void calc_duration(long long int start, long long int now, uint32_t *sec, uint32_t *nsec); /* ofproto. */ static uint64_t pick_datapath_id(const struct ofproto *); static uint64_t pick_fallback_dpid(void); static void ofproto_destroy__(struct ofproto *); static void update_mtu(struct ofproto *, struct ofport *); static void update_mtu_ofproto(struct ofproto *); static void meter_delete(struct ofproto *, uint32_t first, uint32_t last); static void meter_insert_rule(struct rule *); /* unixctl. */ static void ofproto_unixctl_init(void); /* All registered ofproto classes, in probe order. */ static const struct ofproto_class **ofproto_classes; static size_t n_ofproto_classes; static size_t allocated_ofproto_classes; /* Global lock that protects all flow table operations. 
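 *
 * (Annotation convention used throughout this file: functions marked
 * OVS_REQUIRES(ofproto_mutex) must be entered with this mutex already
 * held, while functions marked OVS_EXCLUDED(ofproto_mutex) must be
 * entered without it and acquire it internally when they need it.)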
*/ struct ovs_mutex ofproto_mutex = OVS_MUTEX_INITIALIZER; unsigned ofproto_flow_limit = OFPROTO_FLOW_LIMIT_DEFAULT; unsigned ofproto_max_idle = OFPROTO_MAX_IDLE_DEFAULT; size_t n_handlers, n_revalidators; char *pmd_cpu_mask; /* Map from datapath name to struct ofproto, for use by unixctl commands. */ static struct hmap all_ofprotos = HMAP_INITIALIZER(&all_ofprotos); /* Initial mappings of port to OpenFlow number mappings. */ static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports); static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); /* The default value of true waits for flow restore. */ static bool flow_restore_wait = true; /* Must be called to initialize the ofproto library. * * The caller may pass in 'iface_hints', which contains an shash of * "iface_hint" elements indexed by the interface's name. The provider * may use these hints to describe the startup configuration in order to * reinitialize its state. The caller owns the provided data, so a * provider will make copies of anything required. An ofproto provider * will remove any existing state that is not described by the hint, and * may choose to remove it all. */ void ofproto_init(const struct shash *iface_hints) { struct shash_node *node; size_t i; ofproto_class_register(&ofproto_dpif_class); /* Make a local copy, since we don't own 'iface_hints' elements. */ SHASH_FOR_EACH(node, iface_hints) { const struct iface_hint *orig_hint = node->data; struct iface_hint *new_hint = xmalloc(sizeof *new_hint); const char *br_type = ofproto_normalize_type(orig_hint->br_type); new_hint->br_name = xstrdup(orig_hint->br_name); new_hint->br_type = xstrdup(br_type); new_hint->ofp_port = orig_hint->ofp_port; shash_add(&init_ofp_ports, node->name, new_hint); } for (i = 0; i < n_ofproto_classes; i++) { ofproto_classes[i]->init(&init_ofp_ports); } ofproto_unixctl_init(); } /* 'type' should be a normalized datapath type, as returned by * ofproto_normalize_type(). Returns the corresponding ofproto_class * structure, or a null pointer if there is none registered for 'type'. */ static const struct ofproto_class * ofproto_class_find__(const char *type) { size_t i; for (i = 0; i < n_ofproto_classes; i++) { const struct ofproto_class *class = ofproto_classes[i]; struct sset types; bool found; sset_init(&types); class->enumerate_types(&types); found = sset_contains(&types, type); sset_destroy(&types); if (found) { return class; } } VLOG_WARN("unknown datapath type %s", type); return NULL; } /* Registers a new ofproto class. After successful registration, new ofprotos * of that type can be created using ofproto_create(). */ int ofproto_class_register(const struct ofproto_class *new_class) { size_t i; for (i = 0; i < n_ofproto_classes; i++) { if (ofproto_classes[i] == new_class) { return EEXIST; } } if (n_ofproto_classes >= allocated_ofproto_classes) { ofproto_classes = x2nrealloc(ofproto_classes, &allocated_ofproto_classes, sizeof *ofproto_classes); } ofproto_classes[n_ofproto_classes++] = new_class; return 0; } /* Unregisters a datapath provider. 'type' must have been previously * registered and not currently be in use by any ofprotos. After * unregistration new datapaths of that type cannot be opened using * ofproto_create(). 
*/ int ofproto_class_unregister(const struct ofproto_class *class) { size_t i; for (i = 0; i < n_ofproto_classes; i++) { if (ofproto_classes[i] == class) { for (i++; i < n_ofproto_classes; i++) { ofproto_classes[i - 1] = ofproto_classes[i]; } n_ofproto_classes--; return 0; } } VLOG_WARN("attempted to unregister an ofproto class that is not " "registered"); return EAFNOSUPPORT; } /* Clears 'types' and enumerates all registered ofproto types into it. The * caller must first initialize the sset. */ void ofproto_enumerate_types(struct sset *types) { size_t i; sset_clear(types); for (i = 0; i < n_ofproto_classes; i++) { ofproto_classes[i]->enumerate_types(types); } } /* Returns the fully spelled out name for the given ofproto 'type'. * * Normalized type string can be compared with strcmp(). Unnormalized type * string might be the same even if they have different spellings. */ const char * ofproto_normalize_type(const char *type) { return type && type[0] ? type : "system"; } /* Clears 'names' and enumerates the names of all known created ofprotos with * the given 'type'. The caller must first initialize the sset. Returns 0 if * successful, otherwise a positive errno value. * * Some kinds of datapaths might not be practically enumerable. This is not * considered an error. */ int ofproto_enumerate_names(const char *type, struct sset *names) { const struct ofproto_class *class = ofproto_class_find__(type); return class ? class->enumerate_names(type, names) : EAFNOSUPPORT; } static void ofproto_bump_tables_version(struct ofproto *ofproto) { ++ofproto->tables_version; ofproto->ofproto_class->set_tables_version(ofproto, ofproto->tables_version); } int ofproto_create(const char *datapath_name, const char *datapath_type, struct ofproto **ofprotop) OVS_EXCLUDED(ofproto_mutex) { const struct ofproto_class *class; struct ofproto *ofproto; int error; int i; *ofprotop = NULL; datapath_type = ofproto_normalize_type(datapath_type); class = ofproto_class_find__(datapath_type); if (!class) { VLOG_WARN("could not create datapath %s of unknown type %s", datapath_name, datapath_type); return EAFNOSUPPORT; } ofproto = class->alloc(); if (!ofproto) { VLOG_ERR("failed to allocate datapath %s of type %s", datapath_name, datapath_type); return ENOMEM; } /* Initialize. 
*/ ovs_mutex_lock(&ofproto_mutex); memset(ofproto, 0, sizeof *ofproto); ofproto->ofproto_class = class; ofproto->name = xstrdup(datapath_name); ofproto->type = xstrdup(datapath_type); hmap_insert(&all_ofprotos, &ofproto->hmap_node, hash_string(ofproto->name, 0)); ofproto->datapath_id = 0; ofproto->forward_bpdu = false; ofproto->fallback_dpid = pick_fallback_dpid(); ofproto->mfr_desc = NULL; ofproto->hw_desc = NULL; ofproto->sw_desc = NULL; ofproto->serial_desc = NULL; ofproto->dp_desc = NULL; ofproto->frag_handling = OFPUTIL_FRAG_NORMAL; hmap_init(&ofproto->ports); hmap_init(&ofproto->ofport_usage); shash_init(&ofproto->port_by_name); simap_init(&ofproto->ofp_requests); ofproto->max_ports = ofp_to_u16(OFPP_MAX); ofproto->eviction_group_timer = LLONG_MIN; ofproto->tables = NULL; ofproto->n_tables = 0; ofproto->tables_version = OVS_VERSION_MIN; hindex_init(&ofproto->cookies); hmap_init(&ofproto->learned_cookies); ovs_list_init(&ofproto->expirable); ofproto->connmgr = connmgr_create(ofproto, datapath_name, datapath_name); ofproto->min_mtu = INT_MAX; cmap_init(&ofproto->groups); ovs_mutex_unlock(&ofproto_mutex); ofproto->ogf.types = 0xf; ofproto->ogf.capabilities = OFPGFC_CHAINING | OFPGFC_SELECT_LIVENESS | OFPGFC_SELECT_WEIGHT; for (i = 0; i < 4; i++) { ofproto->ogf.max_groups[i] = OFPG_MAX; ofproto->ogf.ofpacts[i] = (UINT64_C(1) << N_OFPACTS) - 1; } ovsrcu_set(&ofproto->metadata_tab, tun_metadata_alloc(NULL)); ovs_mutex_init(&ofproto->vl_mff_map.mutex); cmap_init(&ofproto->vl_mff_map.cmap); error = ofproto->ofproto_class->construct(ofproto); if (error) { VLOG_ERR("failed to open datapath %s: %s", datapath_name, ovs_strerror(error)); ovs_mutex_lock(&ofproto_mutex); connmgr_destroy(ofproto->connmgr); ofproto->connmgr = NULL; ovs_mutex_unlock(&ofproto_mutex); ofproto_destroy__(ofproto); return error; } /* Check that hidden tables, if any, are at the end. */ ovs_assert(ofproto->n_tables); for (i = 0; i + 1 < ofproto->n_tables; i++) { enum oftable_flags flags = ofproto->tables[i].flags; enum oftable_flags next_flags = ofproto->tables[i + 1].flags; ovs_assert(!(flags & OFTABLE_HIDDEN) || next_flags & OFTABLE_HIDDEN); } ofproto->datapath_id = pick_datapath_id(ofproto); init_ports(ofproto); /* Initialize meters table. */ if (ofproto->ofproto_class->meter_get_features) { ofproto->ofproto_class->meter_get_features(ofproto, &ofproto->meter_features); } else { memset(&ofproto->meter_features, 0, sizeof ofproto->meter_features); } ofproto->meters = xzalloc((ofproto->meter_features.max_meters + 1) * sizeof(struct meter *)); /* Set the initial tables version. */ ofproto_bump_tables_version(ofproto); *ofprotop = ofproto; return 0; } /* Must be called (only) by an ofproto implementation in its constructor * function. See the large comment on 'construct' in struct ofproto_class for * details. */ void ofproto_init_tables(struct ofproto *ofproto, int n_tables) { struct oftable *table; ovs_assert(!ofproto->n_tables); ovs_assert(n_tables >= 1 && n_tables <= 255); ofproto->n_tables = n_tables; ofproto->tables = xmalloc(n_tables * sizeof *ofproto->tables); OFPROTO_FOR_EACH_TABLE (table, ofproto) { oftable_init(table); } } /* To be optionally called (only) by an ofproto implementation in its * constructor function. See the large comment on 'construct' in struct * ofproto_class for details. * * Sets the maximum number of ports to 'max_ports'. The ofproto generic layer * will then ensure that actions passed into the ofproto implementation will * not refer to OpenFlow ports numbered 'max_ports' or higher. 
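 * (Illustrative: with max_ports == 1024, an action that outputs to
 * OpenFlow port 1024 or above would be rejected, while lower-numbered
 * ports remain usable; reserved ports such as OFPP_CONTROLLER are
 * exempt, as noted below.)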
If this * function is not called, there will be no such restriction. * * Reserved ports numbered OFPP_MAX and higher are special and not subject to * the 'max_ports' restriction. */ void ofproto_init_max_ports(struct ofproto *ofproto, uint16_t max_ports) { ovs_assert(max_ports <= ofp_to_u16(OFPP_MAX)); ofproto->max_ports = max_ports; } uint64_t ofproto_get_datapath_id(const struct ofproto *ofproto) { return ofproto->datapath_id; } void ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id) { uint64_t old_dpid = p->datapath_id; p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p); if (p->datapath_id != old_dpid) { /* Force all active connections to reconnect, since there is no way to * notify a controller that the datapath ID has changed. */ ofproto_reconnect_controllers(p); } } void ofproto_set_controllers(struct ofproto *p, const struct ofproto_controller *controllers, size_t n_controllers, uint32_t allowed_versions) { connmgr_set_controllers(p->connmgr, controllers, n_controllers, allowed_versions); } void ofproto_set_fail_mode(struct ofproto *p, enum ofproto_fail_mode fail_mode) { connmgr_set_fail_mode(p->connmgr, fail_mode); } /* Drops the connections between 'ofproto' and all of its controllers, forcing * them to reconnect. */ void ofproto_reconnect_controllers(struct ofproto *ofproto) { connmgr_reconnect(ofproto->connmgr); } /* Sets the 'n' TCP port addresses in 'extras' as ones to which 'ofproto''s * in-band control should guarantee access, in the same way that in-band * control guarantees access to OpenFlow controllers. */ void ofproto_set_extra_in_band_remotes(struct ofproto *ofproto, const struct sockaddr_in *extras, size_t n) { connmgr_set_extra_in_band_remotes(ofproto->connmgr, extras, n); } /* Sets the OpenFlow queue used by flows set up by in-band control on * 'ofproto' to 'queue_id'. If 'queue_id' is negative, then in-band control * flows will use the default queue. */ void ofproto_set_in_band_queue(struct ofproto *ofproto, int queue_id) { connmgr_set_in_band_queue(ofproto->connmgr, queue_id); } /* Sets the number of flows at which eviction from the kernel flow table * will occur. */ void ofproto_set_flow_limit(unsigned limit) { ofproto_flow_limit = limit; } /* Sets the maximum idle time for flows in the datapath before they are * expired. */ void ofproto_set_max_idle(unsigned max_idle) { ofproto_max_idle = max_idle; } /* If forward_bpdu is true, the NORMAL action will forward frames with * reserved (e.g. STP) destination Ethernet addresses. If forward_bpdu is false, * the NORMAL action will drop these frames. */ void ofproto_set_forward_bpdu(struct ofproto *ofproto, bool forward_bpdu) { bool old_val = ofproto->forward_bpdu; ofproto->forward_bpdu = forward_bpdu; if (old_val != ofproto->forward_bpdu) { if (ofproto->ofproto_class->forward_bpdu_changed) { ofproto->ofproto_class->forward_bpdu_changed(ofproto); } } } /* Sets the MAC aging timeout for the OFPP_NORMAL action on 'ofproto' to * 'idle_time', in seconds, and the maximum number of MAC table entries to * 'max_entries'. */ void ofproto_set_mac_table_config(struct ofproto *ofproto, unsigned idle_time, size_t max_entries) { if (ofproto->ofproto_class->set_mac_table_config) { ofproto->ofproto_class->set_mac_table_config(ofproto, idle_time, max_entries); } } /* Multicast snooping configuration. */ /* Configures multicast snooping on 'ofproto' using the settings * defined in 's'. If 's' is NULL, disables multicast snooping. * * Returns 0 if successful, otherwise a positive errno value.
*/ int ofproto_set_mcast_snooping(struct ofproto *ofproto, const struct ofproto_mcast_snooping_settings *s) { return (ofproto->ofproto_class->set_mcast_snooping ? ofproto->ofproto_class->set_mcast_snooping(ofproto, s) : EOPNOTSUPP); } /* Configures multicast snooping flood settings on 'ofp_port' of 'ofproto'. * * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_set_mcast_snooping(struct ofproto *ofproto, void *aux, const struct ofproto_mcast_snooping_port_settings *s) { return (ofproto->ofproto_class->set_mcast_snooping_port ? ofproto->ofproto_class->set_mcast_snooping_port(ofproto, aux, s) : EOPNOTSUPP); } void ofproto_set_cpu_mask(const char *cmask) { free(pmd_cpu_mask); pmd_cpu_mask = nullable_xstrdup(cmask); } void ofproto_set_threads(int n_handlers_, int n_revalidators_) { int threads = MAX(count_cpu_cores(), 2); n_revalidators = MAX(n_revalidators_, 0); n_handlers = MAX(n_handlers_, 0); if (!n_revalidators) { n_revalidators = n_handlers ? MAX(threads - (int) n_handlers, 1) : threads / 4 + 1; } if (!n_handlers) { n_handlers = MAX(threads - (int) n_revalidators, 1); } } void ofproto_set_dp_desc(struct ofproto *p, const char *dp_desc) { free(p->dp_desc); p->dp_desc = nullable_xstrdup(dp_desc); } int ofproto_set_snoops(struct ofproto *ofproto, const struct sset *snoops) { return connmgr_set_snoops(ofproto->connmgr, snoops); } int ofproto_set_netflow(struct ofproto *ofproto, const struct netflow_options *nf_options) { if (nf_options && sset_is_empty(&nf_options->collectors)) { nf_options = NULL; } if (ofproto->ofproto_class->set_netflow) { return ofproto->ofproto_class->set_netflow(ofproto, nf_options); } else { return nf_options ? EOPNOTSUPP : 0; } } int ofproto_set_sflow(struct ofproto *ofproto, const struct ofproto_sflow_options *oso) { if (oso && sset_is_empty(&oso->targets)) { oso = NULL; } if (ofproto->ofproto_class->set_sflow) { return ofproto->ofproto_class->set_sflow(ofproto, oso); } else { return oso ? EOPNOTSUPP : 0; } } int ofproto_set_ipfix(struct ofproto *ofproto, const struct ofproto_ipfix_bridge_exporter_options *bo, const struct ofproto_ipfix_flow_exporter_options *fo, size_t n_fo) { if (ofproto->ofproto_class->set_ipfix) { return ofproto->ofproto_class->set_ipfix(ofproto, bo, fo, n_fo); } else { return (bo || fo) ? 
EOPNOTSUPP : 0; } } static int ofproto_get_ipfix_stats(struct ofproto *ofproto, bool bridge_ipfix, struct ovs_list *replies) { int error; if (ofproto->ofproto_class->get_ipfix_stats) { error = ofproto->ofproto_class->get_ipfix_stats(ofproto, bridge_ipfix, replies); } else { error = EOPNOTSUPP; } return error; } static enum ofperr handle_ipfix_bridge_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ovs_list replies; enum ofperr error; ofpmp_init(&replies, request); error = ofproto_get_ipfix_stats(ofproto, true, &replies); if (!error) { ofconn_send_replies(ofconn, &replies); } else { ofpbuf_list_delete(&replies); } return error; } static enum ofperr handle_ipfix_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ovs_list replies; enum ofperr error; ofpmp_init(&replies, request); error = ofproto_get_ipfix_stats(ofproto, false, &replies); if (!error) { ofconn_send_replies(ofconn, &replies); } else { ofpbuf_list_delete(&replies); } return error; } static enum ofperr handle_nxt_ct_flush_zone(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); const struct nx_zone_id *nzi = ofpmsg_body(oh); if (!is_all_zeros(nzi->zero, sizeof nzi->zero)) { return OFPERR_NXBRC_MUST_BE_ZERO; } uint16_t zone = ntohs(nzi->zone_id); if (ofproto->ofproto_class->ct_flush) { ofproto->ofproto_class->ct_flush(ofproto, &zone); } else { return EOPNOTSUPP; } return 0; } void ofproto_set_flow_restore_wait(bool flow_restore_wait_db) { flow_restore_wait = flow_restore_wait_db; } bool ofproto_get_flow_restore_wait(void) { return flow_restore_wait; } /* Spanning Tree Protocol (STP) configuration. */ /* Configures STP on 'ofproto' using the settings defined in 's'. If * 's' is NULL, disables STP. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_set_stp(struct ofproto *ofproto, const struct ofproto_stp_settings *s) { return (ofproto->ofproto_class->set_stp ? ofproto->ofproto_class->set_stp(ofproto, s) : EOPNOTSUPP); } /* Retrieves STP status of 'ofproto' and stores it in 's'. If the * 'enabled' member of 's' is false, then the other members are not * meaningful. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_get_stp_status(struct ofproto *ofproto, struct ofproto_stp_status *s) { return (ofproto->ofproto_class->get_stp_status ? ofproto->ofproto_class->get_stp_status(ofproto, s) : EOPNOTSUPP); } /* Configures STP on 'ofp_port' of 'ofproto' using the settings defined * in 's'. The caller is responsible for assigning STP port numbers * (using the 'port_num' member in the range of 1 through 255, inclusive) * and ensuring there are no duplicates. If the 's' is NULL, then STP * is disabled on the port. * * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_set_stp(struct ofproto *ofproto, ofp_port_t ofp_port, const struct ofproto_port_stp_settings *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure STP on nonexistent port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } return (ofproto->ofproto_class->set_stp_port ? ofproto->ofproto_class->set_stp_port(ofport, s) : EOPNOTSUPP); } /* Retrieves STP port status of 'ofp_port' on 'ofproto' and stores it in * 's'. If the 'enabled' member in 's' is false, then the other members * are not meaningful. 
* * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_get_stp_status(struct ofproto *ofproto, ofp_port_t ofp_port, struct ofproto_port_stp_status *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN_RL(&rl, "%s: cannot get STP status on nonexistent " "port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } return (ofproto->ofproto_class->get_stp_port_status ? ofproto->ofproto_class->get_stp_port_status(ofport, s) : EOPNOTSUPP); } /* Retrieves STP port statistics of 'ofp_port' on 'ofproto' and stores it in * 's'. If the 'enabled' member in 's' is false, then the other members * are not meaningful. * * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_get_stp_stats(struct ofproto *ofproto, ofp_port_t ofp_port, struct ofproto_port_stp_stats *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN_RL(&rl, "%s: cannot get STP stats on nonexistent " "port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } return (ofproto->ofproto_class->get_stp_port_stats ? ofproto->ofproto_class->get_stp_port_stats(ofport, s) : EOPNOTSUPP); } /* Rapid Spanning Tree Protocol (RSTP) configuration. */ /* Configures RSTP on 'ofproto' using the settings defined in 's'. If * 's' is NULL, disables RSTP. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_set_rstp(struct ofproto *ofproto, const struct ofproto_rstp_settings *s) { if (!ofproto->ofproto_class->set_rstp) { return EOPNOTSUPP; } ofproto->ofproto_class->set_rstp(ofproto, s); return 0; } /* Retrieves RSTP status of 'ofproto' and stores it in 's'. If the * 'enabled' member of 's' is false, then the other members are not * meaningful. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_get_rstp_status(struct ofproto *ofproto, struct ofproto_rstp_status *s) { if (!ofproto->ofproto_class->get_rstp_status) { return EOPNOTSUPP; } ofproto->ofproto_class->get_rstp_status(ofproto, s); return 0; } /* Configures RSTP on 'ofp_port' of 'ofproto' using the settings defined * in 's'. The caller is responsible for assigning RSTP port numbers * (using the 'port_num' member in the range of 1 through 255, inclusive) * and ensuring there are no duplicates. If the 's' is NULL, then RSTP * is disabled on the port. * * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_set_rstp(struct ofproto *ofproto, ofp_port_t ofp_port, const struct ofproto_port_rstp_settings *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure RSTP on nonexistent port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } if (!ofproto->ofproto_class->set_rstp_port) { return EOPNOTSUPP; } ofproto->ofproto_class->set_rstp_port(ofport, s); return 0; } /* Retrieves RSTP port status of 'ofp_port' on 'ofproto' and stores it in * 's'. If the 'enabled' member in 's' is false, then the other members * are not meaningful. 
* * Returns 0 if successful, otherwise a positive errno value.*/ int ofproto_port_get_rstp_status(struct ofproto *ofproto, ofp_port_t ofp_port, struct ofproto_port_rstp_status *s) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN_RL(&rl, "%s: cannot get RSTP status on nonexistent " "port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } if (!ofproto->ofproto_class->get_rstp_port_status) { return EOPNOTSUPP; } ofproto->ofproto_class->get_rstp_port_status(ofport, s); return 0; } /* Queue DSCP configuration. */ /* Registers meta-data associated with the 'n_qdscp' Qualities of Service * 'queues' attached to 'ofport'. This data is not intended to be sufficient * to implement QoS. Instead, it is used to implement features which require * knowledge of what queues exist on a port, and some basic information about * them. * * Returns 0 if successful, otherwise a positive errno value. */ int ofproto_port_set_queues(struct ofproto *ofproto, ofp_port_t ofp_port, const struct ofproto_port_queue *queues, size_t n_queues) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot set queues on nonexistent port %"PRIu32, ofproto->name, ofp_port); return ENODEV; } return (ofproto->ofproto_class->set_queues ? ofproto->ofproto_class->set_queues(ofport, queues, n_queues) : EOPNOTSUPP); } /* LLDP configuration. */ void ofproto_port_set_lldp(struct ofproto *ofproto, ofp_port_t ofp_port, const struct smap *cfg) { struct ofport *ofport; int error; ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure LLDP on nonexistent port %"PRIu32, ofproto->name, ofp_port); return; } error = (ofproto->ofproto_class->set_lldp ? ofproto->ofproto_class->set_lldp(ofport, cfg) : EOPNOTSUPP); if (error) { VLOG_WARN("%s: lldp configuration on port %"PRIu32" (%s) failed (%s)", ofproto->name, ofp_port, netdev_get_name(ofport->netdev), ovs_strerror(error)); } } int ofproto_set_aa(struct ofproto *ofproto, void *aux OVS_UNUSED, const struct aa_settings *s) { if (!ofproto->ofproto_class->set_aa) { return EOPNOTSUPP; } ofproto->ofproto_class->set_aa(ofproto, s); return 0; } int ofproto_aa_mapping_register(struct ofproto *ofproto, void *aux, const struct aa_mapping_settings *s) { if (!ofproto->ofproto_class->aa_mapping_set) { return EOPNOTSUPP; } ofproto->ofproto_class->aa_mapping_set(ofproto, aux, s); return 0; } int ofproto_aa_mapping_unregister(struct ofproto *ofproto, void *aux) { if (!ofproto->ofproto_class->aa_mapping_unset) { return EOPNOTSUPP; } ofproto->ofproto_class->aa_mapping_unset(ofproto, aux); return 0; } int ofproto_aa_vlan_get_queued(struct ofproto *ofproto, struct ovs_list *list) { if (!ofproto->ofproto_class->aa_vlan_get_queued) { return EOPNOTSUPP; } ofproto->ofproto_class->aa_vlan_get_queued(ofproto, list); return 0; } unsigned int ofproto_aa_vlan_get_queue_size(struct ofproto *ofproto) { if (!ofproto->ofproto_class->aa_vlan_get_queue_size) { return EOPNOTSUPP; } return ofproto->ofproto_class->aa_vlan_get_queue_size(ofproto); } /* Connectivity Fault Management configuration. */ /* Clears the CFM configuration from 'ofp_port' on 'ofproto'. */ void ofproto_port_clear_cfm(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); if (ofport && ofproto->ofproto_class->set_cfm) { ofproto->ofproto_class->set_cfm(ofport, NULL); } } /* Configures connectivity fault management on 'ofp_port' in 'ofproto'. 
Takes * basic configuration from the configuration members in 'cfm', and the remote * maintenance point ID from remote_mpid. Ignores the statistics members of * 'cfm'. * * This function has no effect if 'ofproto' does not have a port 'ofp_port'. */ void ofproto_port_set_cfm(struct ofproto *ofproto, ofp_port_t ofp_port, const struct cfm_settings *s) { struct ofport *ofport; int error; ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure CFM on nonexistent port %"PRIu32, ofproto->name, ofp_port); return; } /* XXX: For configuration simplicity, we only support one remote_mpid * outside of the CFM module. It's not clear if this is the correct long * term solution or not. */ error = (ofproto->ofproto_class->set_cfm ? ofproto->ofproto_class->set_cfm(ofport, s) : EOPNOTSUPP); if (error) { VLOG_WARN("%s: CFM configuration on port %"PRIu32" (%s) failed (%s)", ofproto->name, ofp_port, netdev_get_name(ofport->netdev), ovs_strerror(error)); } } /* Configures BFD on 'ofp_port' in 'ofproto'. This function has no effect if * 'ofproto' does not have a port 'ofp_port'. */ void ofproto_port_set_bfd(struct ofproto *ofproto, ofp_port_t ofp_port, const struct smap *cfg) { struct ofport *ofport; int error; ofport = ofproto_get_port(ofproto, ofp_port); if (!ofport) { VLOG_WARN("%s: cannot configure bfd on nonexistent port %"PRIu32, ofproto->name, ofp_port); return; } error = (ofproto->ofproto_class->set_bfd ? ofproto->ofproto_class->set_bfd(ofport, cfg) : EOPNOTSUPP); if (error) { VLOG_WARN("%s: bfd configuration on port %"PRIu32" (%s) failed (%s)", ofproto->name, ofp_port, netdev_get_name(ofport->netdev), ovs_strerror(error)); } } /* Checks the status change of BFD on 'ofport'. * * Returns true if 'ofproto_class' does not support 'bfd_status_changed'. */ bool ofproto_port_bfd_status_changed(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->bfd_status_changed ? ofproto->ofproto_class->bfd_status_changed(ofport) : true); } /* Populates 'status' with the status of BFD on 'ofport'. Returns 0 on * success. Returns a positive errno otherwise. Has no effect if 'ofp_port' * is not an OpenFlow port in 'ofproto'. * * The caller must provide and own '*status'. */ int ofproto_port_get_bfd_status(struct ofproto *ofproto, ofp_port_t ofp_port, struct smap *status) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->get_bfd_status ? ofproto->ofproto_class->get_bfd_status(ofport, status) : EOPNOTSUPP); } /* Checks the status of LACP negotiation for 'ofp_port' within ofproto. * Returns 1 if LACP partner information for 'ofp_port' is up-to-date, * 0 if LACP partner information is not current (generally indicating a * connectivity problem), or -1 if LACP is not enabled on 'ofp_port'. */ int ofproto_port_is_lacp_current(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->port_is_lacp_current ? ofproto->ofproto_class->port_is_lacp_current(ofport) : -1); } int ofproto_port_get_lacp_stats(const struct ofport *port, struct lacp_slave_stats *stats) { struct ofproto *ofproto = port->ofproto; int error; if (ofproto->ofproto_class->port_get_lacp_stats) { error = ofproto->ofproto_class->port_get_lacp_stats(port, stats); } else { error = EOPNOTSUPP; } return error; } /* Bundles. 
*/ /* Registers a "bundle" associated with client data pointer 'aux' in 'ofproto'. * A bundle is the same concept as a Port in OVSDB, that is, it consists of one * or more "slave" devices (Interfaces, in OVSDB) along with a VLAN * configuration plus, if there is more than one slave, a bonding * configuration. * * If 'aux' is already registered then this function updates its configuration * to 's'. Otherwise, this function registers a new bundle. * * Bundles only affect the NXAST_AUTOPATH action and output to the OFPP_NORMAL * port. */ int ofproto_bundle_register(struct ofproto *ofproto, void *aux, const struct ofproto_bundle_settings *s) { return (ofproto->ofproto_class->bundle_set ? ofproto->ofproto_class->bundle_set(ofproto, aux, s) : EOPNOTSUPP); } /* Unregisters the bundle registered on 'ofproto' with auxiliary data 'aux'. * If no such bundle has been registered, this has no effect. */ int ofproto_bundle_unregister(struct ofproto *ofproto, void *aux) { return ofproto_bundle_register(ofproto, aux, NULL); } /* Registers a mirror associated with client data pointer 'aux' in 'ofproto'. * If 'aux' is already registered then this function updates its configuration * to 's'. Otherwise, this function registers a new mirror. */ int ofproto_mirror_register(struct ofproto *ofproto, void *aux, const struct ofproto_mirror_settings *s) { return (ofproto->ofproto_class->mirror_set ? ofproto->ofproto_class->mirror_set(ofproto, aux, s) : EOPNOTSUPP); } /* Unregisters the mirror registered on 'ofproto' with auxiliary data 'aux'. * If no mirror has been registered, this has no effect. */ int ofproto_mirror_unregister(struct ofproto *ofproto, void *aux) { return ofproto_mirror_register(ofproto, aux, NULL); } /* Retrieves statistics from mirror associated with client data pointer * 'aux' in 'ofproto'. Stores packet and byte counts in 'packets' and * 'bytes', respectively. If a particular counters is not supported, * the appropriate argument is set to UINT64_MAX. */ int ofproto_mirror_get_stats(struct ofproto *ofproto, void *aux, uint64_t *packets, uint64_t *bytes) { if (!ofproto->ofproto_class->mirror_get_stats) { *packets = *bytes = UINT64_MAX; return EOPNOTSUPP; } return ofproto->ofproto_class->mirror_get_stats(ofproto, aux, packets, bytes); } /* Configures the VLANs whose bits are set to 1 in 'flood_vlans' as VLANs on * which all packets are flooded, instead of using MAC learning. If * 'flood_vlans' is NULL, then MAC learning applies to all VLANs. * * Flood VLANs affect only the treatment of packets output to the OFPP_NORMAL * port. */ int ofproto_set_flood_vlans(struct ofproto *ofproto, unsigned long *flood_vlans) { return (ofproto->ofproto_class->set_flood_vlans ? ofproto->ofproto_class->set_flood_vlans(ofproto, flood_vlans) : EOPNOTSUPP); } /* Returns true if 'aux' is a registered bundle that is currently in use as the * output for a mirror. */ bool ofproto_is_mirror_output_bundle(const struct ofproto *ofproto, void *aux) { return (ofproto->ofproto_class->is_mirror_output_bundle ? ofproto->ofproto_class->is_mirror_output_bundle(ofproto, aux) : false); } /* Configuration of OpenFlow tables. */ /* Returns the number of OpenFlow tables in 'ofproto'. */ int ofproto_get_n_tables(const struct ofproto *ofproto) { return ofproto->n_tables; } /* Returns the number of Controller visible OpenFlow tables * in 'ofproto'. This number will exclude Hidden tables. * This funtion's return value should be less or equal to that of * ofproto_get_n_tables() . 
*/ uint8_t ofproto_get_n_visible_tables(const struct ofproto *ofproto) { uint8_t n = ofproto->n_tables; /* Count only non-hidden tables in the number of tables. (Hidden tables, * if present, are always at the end.) */ while(n && (ofproto->tables[n - 1].flags & OFTABLE_HIDDEN)) { n--; } return n; } /* Configures the OpenFlow table in 'ofproto' with id 'table_id' with the * settings from 's'. 'table_id' must be in the range 0 through the number of * OpenFlow tables in 'ofproto' minus 1, inclusive. * * For read-only tables, only the name may be configured. */ void ofproto_configure_table(struct ofproto *ofproto, int table_id, const struct ofproto_table_settings *s) { struct oftable *table; ovs_assert(table_id >= 0 && table_id < ofproto->n_tables); table = &ofproto->tables[table_id]; oftable_set_name(table, s->name); if (table->flags & OFTABLE_READONLY) { return; } if (classifier_set_prefix_fields(&table->cls, s->prefix_fields, s->n_prefix_fields)) { /* XXX: Trigger revalidation. */ } ovs_mutex_lock(&ofproto_mutex); unsigned int new_eviction = (s->enable_eviction ? table->eviction | EVICTION_CLIENT : table->eviction & ~EVICTION_CLIENT); oftable_configure_eviction(table, new_eviction, s->groups, s->n_groups); table->max_flows = s->max_flows; evict_rules_from_table(table); ovs_mutex_unlock(&ofproto_mutex); } bool ofproto_has_snoops(const struct ofproto *ofproto) { return connmgr_has_snoops(ofproto->connmgr); } void ofproto_get_snoops(const struct ofproto *ofproto, struct sset *snoops) { connmgr_get_snoops(ofproto->connmgr, snoops); } /* Deletes 'rule' from 'ofproto'. * * Within an ofproto implementation, this function allows an ofproto * implementation to destroy any rules that remain when its ->destruct() * function is called. This function is not suitable for use elsewhere in an * ofproto implementation. * * This function implements steps 4.4 and 4.5 in the section titled "Rule Life * Cycle" in ofproto-provider.h. */ void ofproto_rule_delete(struct ofproto *ofproto, struct rule *rule) OVS_EXCLUDED(ofproto_mutex) { /* This skips the ofmonitor and flow-removed notifications because the * switch is being deleted and any OpenFlow channels have been or soon will * be killed. */ ovs_mutex_lock(&ofproto_mutex); if (rule->state == RULE_INSERTED) { /* Make sure there is no postponed removal of the rule. */ ovs_assert(cls_rule_visible_in_version(&rule->cr, OVS_VERSION_MAX)); if (!classifier_remove(&rule->ofproto->tables[rule->table_id].cls, &rule->cr)) { OVS_NOT_REACHED(); } ofproto_rule_remove__(rule->ofproto, rule); if (ofproto->ofproto_class->rule_delete) { ofproto->ofproto_class->rule_delete(rule); } /* This may not be the last reference to the rule. */ ofproto_rule_unref(rule); } ovs_mutex_unlock(&ofproto_mutex); } static void ofproto_flush__(struct ofproto *ofproto) OVS_EXCLUDED(ofproto_mutex) { struct oftable *table; /* This will flush all datapath flows. */ if (ofproto->ofproto_class->flush) { ofproto->ofproto_class->flush(ofproto); } /* XXX: There is a small race window here, where new datapath flows can be * created by upcall handlers based on the existing flow table. We can not * call ofproto class flush while holding 'ofproto_mutex' to prevent this, * as then we could deadlock on syncing with the handler threads waiting on * the same mutex. 
*/ ovs_mutex_lock(&ofproto_mutex); OFPROTO_FOR_EACH_TABLE (table, ofproto) { struct rule_collection rules; struct rule *rule; if (table->flags & OFTABLE_HIDDEN) { continue; } rule_collection_init(&rules); CLS_FOR_EACH (rule, cr, &table->cls) { rule_collection_add(&rules, rule); } delete_flows__(&rules, OFPRR_DELETE, NULL); } /* XXX: Concurrent handler threads may insert new learned flows based on * learn actions of the now deleted flows right after we release * 'ofproto_mutex'. */ ovs_mutex_unlock(&ofproto_mutex); } static void ofproto_destroy__(struct ofproto *ofproto) OVS_EXCLUDED(ofproto_mutex) { struct oftable *table; cmap_destroy(&ofproto->groups); hmap_remove(&all_ofprotos, &ofproto->hmap_node); free(ofproto->name); free(ofproto->type); free(ofproto->mfr_desc); free(ofproto->hw_desc); free(ofproto->sw_desc); free(ofproto->serial_desc); free(ofproto->dp_desc); hmap_destroy(&ofproto->ports); hmap_destroy(&ofproto->ofport_usage); shash_destroy(&ofproto->port_by_name); simap_destroy(&ofproto->ofp_requests); OFPROTO_FOR_EACH_TABLE (table, ofproto) { oftable_destroy(table); } free(ofproto->tables); ovs_mutex_lock(&ofproto->vl_mff_map.mutex); mf_vl_mff_map_clear(&ofproto->vl_mff_map, true); ovs_mutex_unlock(&ofproto->vl_mff_map.mutex); cmap_destroy(&ofproto->vl_mff_map.cmap); ovs_mutex_destroy(&ofproto->vl_mff_map.mutex); tun_metadata_free(ovsrcu_get_protected(struct tun_table *, &ofproto->metadata_tab)); ovs_assert(hindex_is_empty(&ofproto->cookies)); hindex_destroy(&ofproto->cookies); ovs_assert(hmap_is_empty(&ofproto->learned_cookies)); hmap_destroy(&ofproto->learned_cookies); ofproto->ofproto_class->dealloc(ofproto); } /* Destroying rules is doubly deferred, must have 'ofproto' around for them. * - 1st we defer the removal of the rules from the classifier * - 2nd we defer the actual destruction of the rules. */ static void ofproto_destroy_defer__(struct ofproto *ofproto) OVS_EXCLUDED(ofproto_mutex) { ovsrcu_postpone(ofproto_destroy__, ofproto); } void ofproto_destroy(struct ofproto *p, bool del) OVS_EXCLUDED(ofproto_mutex) { struct ofport *ofport, *next_ofport; struct ofport_usage *usage; if (!p) { return; } if (p->meters) { meter_delete(p, 1, p->meter_features.max_meters); p->meter_features.max_meters = 0; free(p->meters); p->meters = NULL; } ofproto_flush__(p); HMAP_FOR_EACH_SAFE (ofport, next_ofport, hmap_node, &p->ports) { ofport_destroy(ofport, del); } HMAP_FOR_EACH_POP (usage, hmap_node, &p->ofport_usage) { free(usage); } p->ofproto_class->destruct(p, del); /* We should not postpone this because it involves deleting a listening * socket which we may want to reopen soon. 'connmgr' may be used by other * threads only if they take the ofproto_mutex and read a non-NULL * 'ofproto->connmgr'. */ ovs_mutex_lock(&ofproto_mutex); connmgr_destroy(p->connmgr); p->connmgr = NULL; ovs_mutex_unlock(&ofproto_mutex); /* Destroying rules is deferred, must have 'ofproto' around for them. */ ovsrcu_postpone(ofproto_destroy_defer__, p); } /* Destroys the datapath with the respective 'name' and 'type'. With the Linux * kernel datapath, for example, this destroys the datapath in the kernel, and * with the netdev-based datapath, it tears down the data structures that * represent the datapath. * * The datapath should not be currently open as an ofproto. */ int ofproto_delete(const char *name, const char *type) { const struct ofproto_class *class = ofproto_class_find__(type); return (!class ? EAFNOSUPPORT : !class->del ? 
EACCES : class->del(type, name)); } static void process_port_change(struct ofproto *ofproto, int error, char *devname) { if (error == ENOBUFS) { reinit_ports(ofproto); } else if (!error) { update_port(ofproto, devname); free(devname); } } int ofproto_type_run(const char *datapath_type) { const struct ofproto_class *class; int error; datapath_type = ofproto_normalize_type(datapath_type); class = ofproto_class_find__(datapath_type); error = class->type_run ? class->type_run(datapath_type) : 0; if (error && error != EAGAIN) { VLOG_ERR_RL(&rl, "%s: type_run failed (%s)", datapath_type, ovs_strerror(error)); } return error; } void ofproto_type_wait(const char *datapath_type) { const struct ofproto_class *class; datapath_type = ofproto_normalize_type(datapath_type); class = ofproto_class_find__(datapath_type); if (class->type_wait) { class->type_wait(datapath_type); } } int ofproto_run(struct ofproto *p) { int error; uint64_t new_seq; error = p->ofproto_class->run(p); if (error && error != EAGAIN) { VLOG_ERR_RL(&rl, "%s: run failed (%s)", p->name, ovs_strerror(error)); } /* Restore the eviction group heap invariant occasionally. */ if (p->eviction_group_timer < time_msec()) { size_t i; p->eviction_group_timer = time_msec() + 1000; for (i = 0; i < p->n_tables; i++) { struct oftable *table = &p->tables[i]; struct eviction_group *evg; struct rule *rule; if (!table->eviction) { continue; } if (table->n_flows > 100000) { static struct vlog_rate_limit count_rl = VLOG_RATE_LIMIT_INIT(1, 1); VLOG_WARN_RL(&count_rl, "Table %"PRIuSIZE" has an excessive" " number of rules: %d", i, table->n_flows); } ovs_mutex_lock(&ofproto_mutex); CLS_FOR_EACH (rule, cr, &table->cls) { if (rule->idle_timeout || rule->hard_timeout) { if (!rule->eviction_group) { eviction_group_add_rule(rule); } else { heap_raw_change(&rule->evg_node, rule_eviction_priority(p, rule)); } } } HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) { heap_rebuild(&evg->rules); } ovs_mutex_unlock(&ofproto_mutex); } } if (p->ofproto_class->port_poll) { char *devname; while ((error = p->ofproto_class->port_poll(p, &devname)) != EAGAIN) { process_port_change(p, error, devname); } } new_seq = seq_read(connectivity_seq_get()); if (new_seq != p->change_seq) { struct sset devnames; const char *devname; struct ofport *ofport; /* Update OpenFlow port status for any port whose netdev has changed. * * Refreshing a given 'ofport' can cause an arbitrary ofport to be * destroyed, so it's not safe to update ports directly from the * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we * need this two-phase approach. */ sset_init(&devnames); HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { uint64_t port_change_seq; port_change_seq = netdev_get_change_seq(ofport->netdev); if (ofport->change_seq != port_change_seq) { ofport->change_seq = port_change_seq; sset_add(&devnames, netdev_get_name(ofport->netdev)); } } SSET_FOR_EACH (devname, &devnames) { update_port(p, devname); } sset_destroy(&devnames); p->change_seq = new_seq; } connmgr_run(p->connmgr, handle_openflow); return error; } void ofproto_wait(struct ofproto *p) { p->ofproto_class->wait(p); if (p->ofproto_class->port_poll_wait) { p->ofproto_class->port_poll_wait(p); } seq_wait(connectivity_seq_get(), p->change_seq); connmgr_wait(p->connmgr); } bool ofproto_is_alive(const struct ofproto *p) { return connmgr_has_controllers(p->connmgr); } /* Adds some memory usage statistics for 'ofproto' into 'usage', for use with * memory_report(). 
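 *
 * A caller might use it like this (a sketch; the reporting step is left
 * out):
 *
 *     struct simap usage = SIMAP_INITIALIZER(&usage);
 *     ofproto_get_memory_usage(ofproto, &usage);
 *     ...inspect the "ports" and "rules" counters...
 *     simap_destroy(&usage);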
*/ void ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage) { const struct oftable *table; unsigned int n_rules; simap_increase(usage, "ports", hmap_count(&ofproto->ports)); n_rules = 0; OFPROTO_FOR_EACH_TABLE (table, ofproto) { n_rules += table->n_flows; } simap_increase(usage, "rules", n_rules); if (ofproto->ofproto_class->get_memory_usage) { ofproto->ofproto_class->get_memory_usage(ofproto, usage); } connmgr_get_memory_usage(ofproto->connmgr, usage); } void ofproto_type_get_memory_usage(const char *datapath_type, struct simap *usage) { const struct ofproto_class *class; datapath_type = ofproto_normalize_type(datapath_type); class = ofproto_class_find__(datapath_type); if (class && class->type_get_memory_usage) { class->type_get_memory_usage(datapath_type, usage); } } void ofproto_get_ofproto_controller_info(const struct ofproto *ofproto, struct shash *info) { connmgr_get_controller_info(ofproto->connmgr, info); } void ofproto_free_ofproto_controller_info(struct shash *info) { connmgr_free_controller_info(info); } /* Makes a deep copy of 'old' into 'port'. */ void ofproto_port_clone(struct ofproto_port *port, const struct ofproto_port *old) { port->name = xstrdup(old->name); port->type = xstrdup(old->type); port->ofp_port = old->ofp_port; } /* Frees memory allocated to members of 'ofproto_port'. * * Do not call this function on an ofproto_port obtained from * ofproto_port_dump_next(): that function retains ownership of the data in the * ofproto_port. */ void ofproto_port_destroy(struct ofproto_port *ofproto_port) { free(ofproto_port->name); free(ofproto_port->type); } /* Initializes 'dump' to begin dumping the ports in an ofproto. * * This function provides no status indication. An error status for the entire * dump operation is provided when it is completed by calling * ofproto_port_dump_done(). */ void ofproto_port_dump_start(struct ofproto_port_dump *dump, const struct ofproto *ofproto) { dump->ofproto = ofproto; dump->error = ofproto->ofproto_class->port_dump_start(ofproto, &dump->state); } /* Attempts to retrieve another port from 'dump', which must have been created * with ofproto_port_dump_start(). On success, stores a new ofproto_port into * 'port' and returns true. On failure, returns false. * * Failure might indicate an actual error or merely that the last port has been * dumped. An error status for the entire dump operation is provided when it * is completed by calling ofproto_port_dump_done(). * * The ofproto owns the data stored in 'port'. It will remain valid until at * least the next time 'dump' is passed to ofproto_port_dump_next() or * ofproto_port_dump_done(). */ bool ofproto_port_dump_next(struct ofproto_port_dump *dump, struct ofproto_port *port) { const struct ofproto *ofproto = dump->ofproto; if (dump->error) { return false; } dump->error = ofproto->ofproto_class->port_dump_next(ofproto, dump->state, port); if (dump->error) { ofproto->ofproto_class->port_dump_done(ofproto, dump->state); return false; } return true; } /* Completes port table dump operation 'dump', which must have been created * with ofproto_port_dump_start(). Returns 0 if the dump operation was * error-free, otherwise a positive errno value describing the problem. */ int ofproto_port_dump_done(struct ofproto_port_dump *dump) { const struct ofproto *ofproto = dump->ofproto; if (!dump->error) { dump->error = ofproto->ofproto_class->port_dump_done(ofproto, dump->state); } return dump->error == EOF ? 
0 : dump->error; } /* Returns the type to pass to netdev_open() when a datapath of type * 'datapath_type' has a port of type 'port_type', for a few special * cases when a netdev type differs from a port type. For example, when * using the userspace datapath, a port of type "internal" needs to be * opened as "tap". * * Returns either 'type' itself or a string literal, which must not be * freed. */ const char * ofproto_port_open_type(const char *datapath_type, const char *port_type) { const struct ofproto_class *class; datapath_type = ofproto_normalize_type(datapath_type); class = ofproto_class_find__(datapath_type); if (!class) { return port_type; } return (class->port_open_type ? class->port_open_type(datapath_type, port_type) : port_type); } /* Attempts to add 'netdev' as a port on 'ofproto'. If 'ofp_portp' is * non-null and '*ofp_portp' is not OFPP_NONE, attempts to use that as * the port's OpenFlow port number. * * If successful, returns 0 and sets '*ofp_portp' to the new port's * OpenFlow port number (if 'ofp_portp' is non-null). On failure, * returns a positive errno value and sets '*ofp_portp' to OFPP_NONE (if * 'ofp_portp' is non-null). */ int ofproto_port_add(struct ofproto *ofproto, struct netdev *netdev, ofp_port_t *ofp_portp) { ofp_port_t ofp_port = ofp_portp ? *ofp_portp : OFPP_NONE; int error; error = ofproto->ofproto_class->port_add(ofproto, netdev); if (!error) { const char *netdev_name = netdev_get_name(netdev); simap_put(&ofproto->ofp_requests, netdev_name, ofp_to_u16(ofp_port)); error = update_port(ofproto, netdev_name); } if (ofp_portp) { *ofp_portp = OFPP_NONE; if (!error) { struct ofproto_port ofproto_port; error = ofproto_port_query_by_name(ofproto, netdev_get_name(netdev), &ofproto_port); if (!error) { *ofp_portp = ofproto_port.ofp_port; ofproto_port_destroy(&ofproto_port); } } } return error; } /* Looks up a port named 'devname' in 'ofproto'. On success, returns 0 and * initializes '*port' appropriately; on failure, returns a positive errno * value. * * The caller owns the data in 'ofproto_port' and must free it with * ofproto_port_destroy() when it is no longer needed. */ int ofproto_port_query_by_name(const struct ofproto *ofproto, const char *devname, struct ofproto_port *port) { int error; error = ofproto->ofproto_class->port_query_by_name(ofproto, devname, port); if (error) { memset(port, 0, sizeof *port); } return error; } /* Deletes port number 'ofp_port' from the datapath for 'ofproto'. * Returns 0 if successful, otherwise a positive errno. */ int ofproto_port_del(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); const char *name = ofport ? netdev_get_name(ofport->netdev) : "<unknown>"; struct simap_node *ofp_request_node; int error; ofp_request_node = simap_find(&ofproto->ofp_requests, name); if (ofp_request_node) { simap_delete(&ofproto->ofp_requests, ofp_request_node); } error = ofproto->ofproto_class->port_del(ofproto, ofp_port); if (!error && ofport) { /* 'name' is the netdev's name and update_port() is going to close the * netdev. Just in case update_port() refers to 'name' after it * destroys 'ofport', make a copy of it around the update_port() * call. */ char *devname = xstrdup(name); update_port(ofproto, devname); free(devname); } return error; } /* Refreshes datapath configuration of port number 'ofp_port' in 'ofproto'. * * This function has no effect if 'ofproto' does not have a port 'ofp_port'. 
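 *
 * For example (a sketch; "example-option" is a made-up key, not one that any
 * particular datapath necessarily accepts):
 *
 *     struct smap cfg = SMAP_INITIALIZER(&cfg);
 *     smap_add(&cfg, "example-option", "true");
 *     ofproto_port_set_config(ofproto, ofp_port, &cfg);
 *     smap_destroy(&cfg);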
*/
void
ofproto_port_set_config(struct ofproto *ofproto, ofp_port_t ofp_port,
                        const struct smap *cfg)
{
    struct ofport *ofport;
    int error;

    ofport = ofproto_get_port(ofproto, ofp_port);
    if (!ofport) {
        VLOG_WARN("%s: cannot configure datapath on nonexistent port %"PRIu32,
                  ofproto->name, ofp_port);
        return;
    }

    error = (ofproto->ofproto_class->port_set_config
             ? ofproto->ofproto_class->port_set_config(ofport, cfg)
             : EOPNOTSUPP);
    if (error) {
        VLOG_WARN("%s: datapath configuration on port %"PRIu32
                  " (%s) failed (%s)",
                  ofproto->name, ofp_port, netdev_get_name(ofport->netdev),
                  ovs_strerror(error));
    }
}

static void
flow_mod_init(struct ofputil_flow_mod *fm,
              const struct match *match, int priority,
              const struct ofpact *ofpacts, size_t ofpacts_len,
              enum ofp_flow_mod_command command)
{
    *fm = (struct ofputil_flow_mod) {
        .match = *match,
        .priority = priority,
        .table_id = 0,
        .command = command,
        .buffer_id = UINT32_MAX,
        .out_port = OFPP_ANY,
        .out_group = OFPG_ANY,
        .ofpacts = CONST_CAST(struct ofpact *, ofpacts),
        .ofpacts_len = ofpacts_len,
    };
}

static int
simple_flow_mod(struct ofproto *ofproto,
                const struct match *match, int priority,
                const struct ofpact *ofpacts, size_t ofpacts_len,
                enum ofp_flow_mod_command command)
{
    struct ofputil_flow_mod fm;

    flow_mod_init(&fm, match, priority, ofpacts, ofpacts_len, command);

    return handle_flow_mod__(ofproto, &fm, NULL);
}

/* Adds a flow to OpenFlow flow table 0 in 'p' that matches 'match' with the
 * given 'priority' and performs the 'ofpacts_len' bytes of actions in
 * 'ofpacts'.  The new flow will not timeout.
 *
 * If 'priority' is in the range of priorities supported by OpenFlow
 * (0...65535, inclusive) then the flow will be visible to OpenFlow
 * controllers; otherwise, it will be hidden.
 *
 * The caller retains ownership of 'match' and 'ofpacts'.
 *
 * This is a helper function for in-band control and fail-open. */
void
ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
                 int priority, const struct ofpact *ofpacts,
                 size_t ofpacts_len)
    OVS_EXCLUDED(ofproto_mutex)
{
    const struct rule *rule;
    bool must_add;

    /* First do a cheap check whether the rule we're looking for already
     * exists with the actions that we want.  If it does, then we're done. */
    rule = rule_from_cls_rule(classifier_find_match_exactly(
                                  &ofproto->tables[0].cls, match, priority,
                                  OVS_VERSION_MAX));
    if (rule) {
        const struct rule_actions *actions = rule_get_actions(rule);
        must_add = !ofpacts_equal(actions->ofpacts, actions->ofpacts_len,
                                  ofpacts, ofpacts_len);
    } else {
        must_add = true;
    }

    /* If there's no such rule or the rule doesn't have the actions we want,
     * fall back to executing a full flow mod.  We can't optimize this at all
     * because we didn't take enough locks above to ensure that the flow table
     * didn't already change beneath us. */
    if (must_add) {
        simple_flow_mod(ofproto, match, priority, ofpacts, ofpacts_len,
                        OFPFC_MODIFY_STRICT);
    }
}

/* Executes the flow modification specified in 'fm'.  Returns 0 on success, or
 * an OFPERR_* OpenFlow error code on failure.
 *
 * This is a helper function for in-band control and fail-open. */
enum ofperr
ofproto_flow_mod(struct ofproto *ofproto, const struct ofputil_flow_mod *fm)
    OVS_EXCLUDED(ofproto_mutex)
{
    return handle_flow_mod__(ofproto, fm, NULL);
}

/* Searches for a rule with matching criteria exactly equal to 'target' in
 * ofproto's table 0 and, if it finds one, deletes it.
 *
 * This is a helper function for in-band control and fail-open.
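 *
 * For instance (a sketch), deleting a catch-all flow previously installed
 * with ofproto_add_flow() at priority 0:
 *
 *     struct match match;
 *     match_init_catchall(&match);
 *     ofproto_delete_flow(ofproto, &match, 0);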
*/
void
ofproto_delete_flow(struct ofproto *ofproto, const struct match *target,
                    int priority)
    OVS_REQUIRES(ofproto_mutex)
{
    struct classifier *cls = &ofproto->tables[0].cls;
    struct rule *rule;

    /* First do a cheap check whether the rule we're looking for has already
     * been deleted.  If so, then we're done. */
    rule = rule_from_cls_rule(classifier_find_match_exactly(
                                  cls, target, priority, OVS_VERSION_MAX));
    if (!rule) {
        return;
    }

    struct rule_collection rules;

    rule_collection_init(&rules);
    rule_collection_add(&rules, rule);
    delete_flows__(&rules, OFPRR_DELETE, NULL);
    rule_collection_destroy(&rules);
}

/* Delete all of the flows from all of ofproto's flow tables, then reintroduce
 * the flows required by in-band control and fail-open. */
void
ofproto_flush_flows(struct ofproto *ofproto)
{
    COVERAGE_INC(ofproto_flush);
    ofproto_flush__(ofproto);
    connmgr_flushed(ofproto->connmgr);
}

static void
reinit_ports(struct ofproto *p)
{
    struct ofproto_port_dump dump;
    struct sset devnames;
    struct ofport *ofport;
    struct ofproto_port ofproto_port;
    const char *devname;

    COVERAGE_INC(ofproto_reinit_ports);

    sset_init(&devnames);
    HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
        sset_add(&devnames, netdev_get_name(ofport->netdev));
    }
    OFPROTO_PORT_FOR_EACH (&ofproto_port, &dump, p) {
        sset_add(&devnames, ofproto_port.name);
    }

    SSET_FOR_EACH (devname, &devnames) {
        update_port(p, devname);
    }
    sset_destroy(&devnames);
}

static ofp_port_t
alloc_ofp_port(struct ofproto *ofproto, const char *netdev_name)
{
    uint16_t port_idx;

    port_idx = simap_get(&ofproto->ofp_requests, netdev_name);
    port_idx = port_idx ? port_idx : UINT16_MAX;

    if (port_idx >= ofproto->max_ports
        || ofport_get_usage(ofproto, u16_to_ofp(port_idx)) == LLONG_MAX) {
        uint16_t lru_ofport = 0, end_port_no = ofproto->alloc_port_no;
        long long int last_used_at, lru = LLONG_MAX;

        /* Search for a free OpenFlow port number.  We try not to
         * immediately reuse port numbers, to prevent problems due to old
         * flows.
         *
         * We limit the automatically assigned port numbers to the lower half
         * of the port range, to reserve the upper half for assignment by
         * controllers. */
        for (;;) {
            if (++ofproto->alloc_port_no >= MIN(ofproto->max_ports, 32768)) {
                ofproto->alloc_port_no = 1;
            }
            last_used_at = ofport_get_usage(ofproto,
                                        u16_to_ofp(ofproto->alloc_port_no));
            if (!last_used_at) {
                port_idx = ofproto->alloc_port_no;
                break;
            } else if (last_used_at < time_msec() - 60*60*1000) {
                /* If the port with ofport 'ofproto->alloc_port_no' was deleted
                 * more than an hour ago, consider it usable. */
                ofport_remove_usage(ofproto,
                                    u16_to_ofp(ofproto->alloc_port_no));
                port_idx = ofproto->alloc_port_no;
                break;
            } else if (last_used_at < lru) {
                lru = last_used_at;
                lru_ofport = ofproto->alloc_port_no;
            }

            if (ofproto->alloc_port_no == end_port_no) {
                if (lru_ofport) {
                    port_idx = lru_ofport;
                    break;
                }
                return OFPP_NONE;
            }
        }
    }

    ofport_set_usage(ofproto, u16_to_ofp(port_idx), LLONG_MAX);
    return u16_to_ofp(port_idx);
}

static void
dealloc_ofp_port(struct ofproto *ofproto, ofp_port_t ofp_port)
{
    if (ofp_to_u16(ofp_port) < ofproto->max_ports) {
        ofport_set_usage(ofproto, ofp_port, time_msec());
    }
}

/* Opens and returns a netdev for 'ofproto_port' in 'ofproto', or a null
 * pointer if the netdev cannot be opened.  On success, also fills in
 * '*pp'.
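 *
 * On success, the caller takes ownership of the returned netdev and must
 * eventually close it, either directly with netdev_close() or by installing
 * it into an ofport that then owns it.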
*/ static struct netdev * ofport_open(struct ofproto *ofproto, struct ofproto_port *ofproto_port, struct ofputil_phy_port *pp) { enum netdev_flags flags; struct netdev *netdev; int error; error = netdev_open(ofproto_port->name, ofproto_port->type, &netdev); if (error) { VLOG_WARN_RL(&rl, "%s: ignoring port %s (%"PRIu32") because netdev %s " "cannot be opened (%s)", ofproto->name, ofproto_port->name, ofproto_port->ofp_port, ofproto_port->name, ovs_strerror(error)); return NULL; } if (ofproto_port->ofp_port == OFPP_NONE) { if (!strcmp(ofproto->name, ofproto_port->name)) { ofproto_port->ofp_port = OFPP_LOCAL; } else { ofproto_port->ofp_port = alloc_ofp_port(ofproto, ofproto_port->name); } } pp->port_no = ofproto_port->ofp_port; netdev_get_etheraddr(netdev, &pp->hw_addr); ovs_strlcpy(pp->name, ofproto_port->name, sizeof pp->name); netdev_get_flags(netdev, &flags); pp->config = flags & NETDEV_UP ? 0 : OFPUTIL_PC_PORT_DOWN; pp->state = netdev_get_carrier(netdev) ? 0 : OFPUTIL_PS_LINK_DOWN; netdev_get_features(netdev, &pp->curr, &pp->advertised, &pp->supported, &pp->peer); pp->curr_speed = netdev_features_to_bps(pp->curr, 0) / 1000; pp->max_speed = netdev_features_to_bps(pp->supported, 0) / 1000; return netdev; } /* Returns true if most fields of 'a' and 'b' are equal. Differences in name, * port number, and 'config' bits other than OFPUTIL_PC_PORT_DOWN are * disregarded. */ static bool ofport_equal(const struct ofputil_phy_port *a, const struct ofputil_phy_port *b) { return (eth_addr_equals(a->hw_addr, b->hw_addr) && a->state == b->state && !((a->config ^ b->config) & OFPUTIL_PC_PORT_DOWN) && a->curr == b->curr && a->advertised == b->advertised && a->supported == b->supported && a->peer == b->peer && a->curr_speed == b->curr_speed && a->max_speed == b->max_speed); } /* Adds an ofport to 'p' initialized based on the given 'netdev' and 'opp'. * The caller must ensure that 'p' does not have a conflicting ofport (that is, * one with the same name or port number). */ static int ofport_install(struct ofproto *p, struct netdev *netdev, const struct ofputil_phy_port *pp) { const char *netdev_name = netdev_get_name(netdev); struct ofport *ofport; int error; /* Create ofport. */ ofport = p->ofproto_class->port_alloc(); if (!ofport) { error = ENOMEM; goto error; } ofport->ofproto = p; ofport->netdev = netdev; ofport->change_seq = netdev_get_change_seq(netdev); ofport->pp = *pp; ofport->ofp_port = pp->port_no; ofport->created = time_msec(); /* Add port to 'p'. */ hmap_insert(&p->ports, &ofport->hmap_node, hash_ofp_port(ofport->ofp_port)); shash_add(&p->port_by_name, netdev_name, ofport); update_mtu(p, ofport); /* Let the ofproto_class initialize its private data. */ error = p->ofproto_class->port_construct(ofport); if (error) { goto error; } connmgr_send_port_status(p->connmgr, NULL, pp, OFPPR_ADD); return 0; error: VLOG_WARN_RL(&rl, "%s: could not add port %s (%s)", p->name, netdev_name, ovs_strerror(error)); if (ofport) { ofport_destroy__(ofport); } else { netdev_close(netdev); } return error; } /* Removes 'ofport' from 'p' and destroys it. */ static void ofport_remove(struct ofport *ofport) { struct ofproto *p = ofport->ofproto; bool is_mtu_overridden = ofport_is_mtu_overridden(p, ofport); connmgr_send_port_status(ofport->ofproto->connmgr, NULL, &ofport->pp, OFPPR_DELETE); ofport_destroy(ofport, true); if (!is_mtu_overridden) { update_mtu_ofproto(p); } } /* If 'ofproto' contains an ofport named 'name', removes it from 'ofproto' and * destroys it. 
*/ static void ofport_remove_with_name(struct ofproto *ofproto, const char *name) { struct ofport *port = shash_find_data(&ofproto->port_by_name, name); if (port) { ofport_remove(port); } } /* Updates 'port' with new 'pp' description. * * Does not handle a name or port number change. The caller must implement * such a change as a delete followed by an add. */ static void ofport_modified(struct ofport *port, struct ofputil_phy_port *pp) { port->pp.hw_addr = pp->hw_addr; port->pp.config = ((port->pp.config & ~OFPUTIL_PC_PORT_DOWN) | (pp->config & OFPUTIL_PC_PORT_DOWN)); port->pp.state = ((port->pp.state & ~OFPUTIL_PS_LINK_DOWN) | (pp->state & OFPUTIL_PS_LINK_DOWN)); port->pp.curr = pp->curr; port->pp.advertised = pp->advertised; port->pp.supported = pp->supported; port->pp.peer = pp->peer; port->pp.curr_speed = pp->curr_speed; port->pp.max_speed = pp->max_speed; connmgr_send_port_status(port->ofproto->connmgr, NULL, &port->pp, OFPPR_MODIFY); } /* Update OpenFlow 'state' in 'port' and notify controller. */ void ofproto_port_set_state(struct ofport *port, enum ofputil_port_state state) { if (port->pp.state != state) { port->pp.state = state; connmgr_send_port_status(port->ofproto->connmgr, NULL, &port->pp, OFPPR_MODIFY); } } void ofproto_port_unregister(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *port = ofproto_get_port(ofproto, ofp_port); if (port) { if (port->ofproto->ofproto_class->set_stp_port) { port->ofproto->ofproto_class->set_stp_port(port, NULL); } if (port->ofproto->ofproto_class->set_rstp_port) { port->ofproto->ofproto_class->set_rstp_port(port, NULL); } if (port->ofproto->ofproto_class->set_cfm) { port->ofproto->ofproto_class->set_cfm(port, NULL); } if (port->ofproto->ofproto_class->bundle_remove) { port->ofproto->ofproto_class->bundle_remove(port); } } } static void ofport_destroy__(struct ofport *port) { struct ofproto *ofproto = port->ofproto; const char *name = netdev_get_name(port->netdev); hmap_remove(&ofproto->ports, &port->hmap_node); shash_delete(&ofproto->port_by_name, shash_find(&ofproto->port_by_name, name)); netdev_close(port->netdev); ofproto->ofproto_class->port_dealloc(port); } static void ofport_destroy(struct ofport *port, bool del) { if (port) { dealloc_ofp_port(port->ofproto, port->ofp_port); port->ofproto->ofproto_class->port_destruct(port, del); ofport_destroy__(port); } } struct ofport * ofproto_get_port(const struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *port; HMAP_FOR_EACH_IN_BUCKET (port, hmap_node, hash_ofp_port(ofp_port), &ofproto->ports) { if (port->ofp_port == ofp_port) { return port; } } return NULL; } static long long int ofport_get_usage(const struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport_usage *usage; HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port), &ofproto->ofport_usage) { if (usage->ofp_port == ofp_port) { return usage->last_used; } } return 0; } static void ofport_set_usage(struct ofproto *ofproto, ofp_port_t ofp_port, long long int last_used) { struct ofport_usage *usage; HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port), &ofproto->ofport_usage) { if (usage->ofp_port == ofp_port) { usage->last_used = last_used; return; } } ovs_assert(last_used == LLONG_MAX); usage = xmalloc(sizeof *usage); usage->ofp_port = ofp_port; usage->last_used = last_used; hmap_insert(&ofproto->ofport_usage, &usage->hmap_node, hash_ofp_port(ofp_port)); } static void ofport_remove_usage(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport_usage *usage; HMAP_FOR_EACH_IN_BUCKET 
(usage, hmap_node, hash_ofp_port(ofp_port), &ofproto->ofport_usage) { if (usage->ofp_port == ofp_port) { hmap_remove(&ofproto->ofport_usage, &usage->hmap_node); free(usage); break; } } } int ofproto_port_get_stats(const struct ofport *port, struct netdev_stats *stats) { struct ofproto *ofproto = port->ofproto; int error; if (ofproto->ofproto_class->port_get_stats) { error = ofproto->ofproto_class->port_get_stats(port, stats); } else { error = EOPNOTSUPP; } return error; } static int update_port(struct ofproto *ofproto, const char *name) { struct ofproto_port ofproto_port; struct ofputil_phy_port pp; struct netdev *netdev; struct ofport *port; int error = 0; COVERAGE_INC(ofproto_update_port); /* Fetch 'name''s location and properties from the datapath. */ netdev = (!ofproto_port_query_by_name(ofproto, name, &ofproto_port) ? ofport_open(ofproto, &ofproto_port, &pp) : NULL); if (netdev) { port = ofproto_get_port(ofproto, ofproto_port.ofp_port); if (port && !strcmp(netdev_get_name(port->netdev), name)) { struct netdev *old_netdev = port->netdev; /* 'name' hasn't changed location. Any properties changed? */ if (!ofport_equal(&port->pp, &pp)) { ofport_modified(port, &pp); } update_mtu(ofproto, port); /* Install the newly opened netdev in case it has changed. * Don't close the old netdev yet in case port_modified has to * remove a retained reference to it.*/ port->netdev = netdev; port->change_seq = netdev_get_change_seq(netdev); if (port->ofproto->ofproto_class->port_modified) { port->ofproto->ofproto_class->port_modified(port); } netdev_close(old_netdev); } else { /* If 'port' is nonnull then its name differs from 'name' and thus * we should delete it. If we think there's a port named 'name' * then its port number must be wrong now so delete it too. */ if (port) { ofport_remove(port); } ofport_remove_with_name(ofproto, name); error = ofport_install(ofproto, netdev, &pp); } } else { /* Any port named 'name' is gone now. */ ofport_remove_with_name(ofproto, name); } ofproto_port_destroy(&ofproto_port); return error; } static int init_ports(struct ofproto *p) { struct ofproto_port_dump dump; struct ofproto_port ofproto_port; struct shash_node *node, *next; OFPROTO_PORT_FOR_EACH (&ofproto_port, &dump, p) { const char *name = ofproto_port.name; if (shash_find(&p->port_by_name, name)) { VLOG_WARN_RL(&rl, "%s: ignoring duplicate device %s in datapath", p->name, name); } else { struct ofputil_phy_port pp; struct netdev *netdev; /* Check if an OpenFlow port number had been requested. 
*/
            node = shash_find(&init_ofp_ports, name);
            if (node) {
                const struct iface_hint *iface_hint = node->data;
                simap_put(&p->ofp_requests, name,
                          ofp_to_u16(iface_hint->ofp_port));
            }

            netdev = ofport_open(p, &ofproto_port, &pp);
            if (netdev) {
                ofport_install(p, netdev, &pp);
                if (ofp_to_u16(ofproto_port.ofp_port) < p->max_ports) {
                    p->alloc_port_no = MAX(p->alloc_port_no,
                                           ofp_to_u16(ofproto_port.ofp_port));
                }
            }
        }
    }

    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
        struct iface_hint *iface_hint = node->data;

        if (!strcmp(iface_hint->br_name, p->name)) {
            free(iface_hint->br_name);
            free(iface_hint->br_type);
            free(iface_hint);
            shash_delete(&init_ofp_ports, node);
        }
    }

    return 0;
}

static bool
ofport_is_internal_or_patch(const struct ofproto *p, const struct ofport *port)
{
    return !strcmp(netdev_get_type(port->netdev),
                   ofproto_port_open_type(p->type, "internal"))
           || !strcmp(netdev_get_type(port->netdev),
                      ofproto_port_open_type(p->type, "patch"));
}

/* If 'port' is internal or patch and if the user didn't explicitly specify an
 * MTU through the database, we have to override it. */
static bool
ofport_is_mtu_overridden(const struct ofproto *p, const struct ofport *port)
{
    return ofport_is_internal_or_patch(p, port)
           && !netdev_mtu_is_user_config(port->netdev);
}

/* Finds the minimum MTU of all the non-overridden devices attached to 'p'.
 * Returns ETH_PAYLOAD_MAX if 'p' has no non-overridden ports. */
static int
find_min_mtu(struct ofproto *p)
{
    struct ofport *ofport;
    int mtu = 0;

    HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
        struct netdev *netdev = ofport->netdev;
        int dev_mtu;

        /* Skip any overridden port, since that's what we're trying to set. */
        if (ofport_is_mtu_overridden(p, ofport)) {
            continue;
        }

        if (netdev_get_mtu(netdev, &dev_mtu)) {
            continue;
        }
        if (!mtu || dev_mtu < mtu) {
            mtu = dev_mtu;
        }
    }

    return mtu ? mtu : ETH_PAYLOAD_MAX;
}

/* Updates the MTU of all of 'p''s overridden devices to the minimum MTU of
 * its non-overridden ports whenever 'port' is added or changed. */
static void
update_mtu(struct ofproto *p, struct ofport *port)
{
    struct netdev *netdev = port->netdev;
    int dev_mtu;

    if (netdev_get_mtu(netdev, &dev_mtu)) {
        port->mtu = 0;
        return;
    }
    if (ofport_is_mtu_overridden(p, port)) {
        if (dev_mtu > p->min_mtu) {
            if (!netdev_set_mtu(port->netdev, p->min_mtu)) {
                dev_mtu = p->min_mtu;
            }
        }
        port->mtu = dev_mtu;
        return;
    }

    port->mtu = dev_mtu;
    /* For a non-overridden port, find the new minimum MTU. */
    update_mtu_ofproto(p);
}

static void
update_mtu_ofproto(struct ofproto *p)
{
    struct ofport *ofport;
    int old_min = p->min_mtu;

    p->min_mtu = find_min_mtu(p);
    if (p->min_mtu == old_min) {
        return;
    }

    HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
        struct netdev *netdev = ofport->netdev;

        if (ofport_is_mtu_overridden(p, ofport)) {
            if (!netdev_set_mtu(netdev, p->min_mtu)) {
                ofport->mtu = p->min_mtu;
            }
        }
    }
}

static void
ofproto_rule_destroy__(struct rule *rule)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
    rule_actions_destroy(rule_get_actions(rule));
    ovs_mutex_destroy(&rule->mutex);
    rule->ofproto->ofproto_class->rule_dealloc(rule);
}

static void
rule_destroy_cb(struct rule *rule)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Send rule removed if needed.
*/
    if (rule->flags & OFPUTIL_FF_SEND_FLOW_REM
        && rule->removed_reason != OVS_OFPRR_NONE
        && !rule_is_hidden(rule)) {
        ofproto_rule_send_removed(rule);
    }
    rule->ofproto->ofproto_class->rule_destruct(rule);
    mf_vl_mff_unref(&rule->ofproto->vl_mff_map, rule->match_tlv_bitmap);
    mf_vl_mff_unref(&rule->ofproto->vl_mff_map, rule->ofpacts_tlv_bitmap);
    ofproto_rule_destroy__(rule);
}

void
ofproto_rule_ref(struct rule *rule)
{
    if (rule) {
        ovs_refcount_ref(&rule->ref_count);
    }
}

bool
ofproto_rule_try_ref(struct rule *rule)
{
    if (rule) {
        return ovs_refcount_try_ref_rcu(&rule->ref_count);
    }
    return false;
}

/* Decrements 'rule''s ref_count and schedules 'rule' to be destroyed if the
 * ref_count reaches 0.
 *
 * Use of RCU allows short-term use (between RCU quiescent periods) without
 * keeping a reference.  A reference must be taken if the rule needs to
 * stay around across RCU quiescent periods. */
void
ofproto_rule_unref(struct rule *rule)
{
    if (rule && ovs_refcount_unref_relaxed(&rule->ref_count) == 1) {
        ovs_assert(rule->state != RULE_INSERTED);
        ovsrcu_postpone(rule_destroy_cb, rule);
    }
}

static void
remove_rule_rcu__(struct rule *rule)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofproto *ofproto = rule->ofproto;
    struct oftable *table = &ofproto->tables[rule->table_id];

    ovs_assert(!cls_rule_visible_in_version(&rule->cr, OVS_VERSION_MAX));
    if (!classifier_remove(&table->cls, &rule->cr)) {
        OVS_NOT_REACHED();
    }
    if (ofproto->ofproto_class->rule_delete) {
        ofproto->ofproto_class->rule_delete(rule);
    }
    ofproto_rule_unref(rule);
}

static void
remove_rule_rcu(struct rule *rule)
    OVS_EXCLUDED(ofproto_mutex)
{
    ovs_mutex_lock(&ofproto_mutex);
    remove_rule_rcu__(rule);
    ovs_mutex_unlock(&ofproto_mutex);
}

/* Removes and deletes rules from a NULL-terminated array of rule pointers. */
static void
remove_rules_rcu(struct rule **rules)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct rule **orig_rules = rules;

    if (*rules) {
        struct ofproto *ofproto = rules[0]->ofproto;
        unsigned long tables[BITMAP_N_LONGS(256)];
        struct rule *rule;
        size_t table_id;

        memset(tables, 0, sizeof tables);

        ovs_mutex_lock(&ofproto_mutex);
        while ((rule = *rules++)) {
            /* Defer once for each new table.  This defers the subtable
             * cleanup until later, so that when removing a large number of
             * flows the operation is faster.
*/
            if (!bitmap_is_set(tables, rule->table_id)) {
                struct classifier *cls = &ofproto->tables[rule->table_id].cls;

                bitmap_set1(tables, rule->table_id);
                classifier_defer(cls);
            }
            remove_rule_rcu__(rule);
        }

        BITMAP_FOR_EACH_1 (table_id, 256, tables) {
            struct classifier *cls = &ofproto->tables[table_id].cls;

            classifier_publish(cls);
        }
        ovs_mutex_unlock(&ofproto_mutex);
    }
    free(orig_rules);
}

void
ofproto_group_ref(struct ofgroup *group)
{
    if (group) {
        ovs_refcount_ref(&group->ref_count);
    }
}

bool
ofproto_group_try_ref(struct ofgroup *group)
{
    if (group) {
        return ovs_refcount_try_ref_rcu(&group->ref_count);
    }
    return false;
}

static void
group_destroy_cb(struct ofgroup *group)
{
    group->ofproto->ofproto_class->group_destruct(group);
    ofputil_group_properties_destroy(CONST_CAST(struct ofputil_group_props *,
                                                &group->props));
    ofputil_bucket_list_destroy(CONST_CAST(struct ovs_list *,
                                           &group->buckets));
    group->ofproto->ofproto_class->group_dealloc(group);
}

void
ofproto_group_unref(struct ofgroup *group)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    if (group && ovs_refcount_unref_relaxed(&group->ref_count) == 1) {
        ovs_assert(rule_collection_n(&group->rules) == 0);
        ovsrcu_postpone(group_destroy_cb, group);
    }
}

static void
remove_group_rcu__(struct ofgroup *group)
    OVS_REQUIRES(ofproto_mutex)
{
    struct ofproto *ofproto = group->ofproto;

    ovs_assert(!versions_visible_in_version(&group->versions,
                                            OVS_VERSION_MAX));
    cmap_remove(&ofproto->groups, &group->cmap_node,
                hash_int(group->group_id, 0));
    ofproto_group_unref(group);
}

static void
remove_group_rcu(struct ofgroup *group)
    OVS_EXCLUDED(ofproto_mutex)
{
    ovs_mutex_lock(&ofproto_mutex);
    remove_group_rcu__(group);
    ovs_mutex_unlock(&ofproto_mutex);
}

/* Removes and deletes groups from a NULL-terminated array of group
 * pointers. */
static void
remove_groups_rcu(struct ofgroup **groups)
    OVS_EXCLUDED(ofproto_mutex)
{
    ovs_mutex_lock(&ofproto_mutex);
    for (struct ofgroup **g = groups; *g; g++) {
        remove_group_rcu__(*g);
    }
    ovs_mutex_unlock(&ofproto_mutex);
    free(groups);
}

static uint32_t get_provider_meter_id(const struct ofproto *,
                                      uint32_t of_meter_id);

/* Creates and returns a new 'struct rule_actions', whose actions are a copy
 * of the 'ofpacts_len' bytes of 'ofpacts'. */
const struct rule_actions *
rule_actions_create(const struct ofpact *ofpacts, size_t ofpacts_len)
{
    struct rule_actions *actions;

    actions = xmalloc(sizeof *actions + ofpacts_len);
    actions->ofpacts_len = ofpacts_len;
    memcpy(actions->ofpacts, ofpacts, ofpacts_len);
    actions->has_meter = ofpacts_get_meter(ofpacts, ofpacts_len) != 0;
    actions->has_groups =
        (ofpact_find_type_flattened(ofpacts, OFPACT_GROUP,
                                    ofpact_end(ofpacts, ofpacts_len))
         != NULL);
    actions->has_learn_with_delete = (next_learn_with_delete(actions, NULL)
                                      != NULL);

    return actions;
}

/* Free the actions after the RCU quiescent period is reached. */
void
rule_actions_destroy(const struct rule_actions *actions)
{
    if (actions) {
        ovsrcu_postpone(free, CONST_CAST(struct rule_actions *, actions));
    }
}

/* Returns true if 'rule' has an OpenFlow OFPAT_OUTPUT or OFPAT_ENQUEUE action
 * that outputs to 'port' (output to OFPP_FLOOD and OFPP_ALL doesn't count). */
bool
ofproto_rule_has_out_port(const struct rule *rule, ofp_port_t port)
    OVS_REQUIRES(ofproto_mutex)
{
    if (port == OFPP_ANY) {
        return true;
    } else {
        const struct rule_actions *actions = rule_get_actions(rule);
        return ofpacts_output_to_port(actions->ofpacts,
                                      actions->ofpacts_len, port);
    }
}

/* Returns true if 'rule' has a group action whose group ID is 'group_id', or
 * if 'group_id' is OFPG_ANY.
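 *
 * For example, with a 'group_id' of 5, a rule whose actions include
 * "group:5" matches, but a rule that only outputs to a port does not.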
*/ static bool ofproto_rule_has_out_group(const struct rule *rule, uint32_t group_id) OVS_REQUIRES(ofproto_mutex) { if (group_id == OFPG_ANY) { return true; } else { const struct rule_actions *actions = rule_get_actions(rule); return ofpacts_output_to_group(actions->ofpacts, actions->ofpacts_len, group_id); } } static bool rule_is_readonly(const struct rule *rule) { const struct oftable *table = &rule->ofproto->tables[rule->table_id]; return (table->flags & OFTABLE_READONLY) != 0; } static uint32_t hash_learned_cookie(ovs_be64 cookie_, uint8_t table_id) { uint64_t cookie = (OVS_FORCE uint64_t) cookie_; return hash_3words(cookie, cookie >> 32, table_id); } static void learned_cookies_update_one__(struct ofproto *ofproto, const struct ofpact_learn *learn, int delta, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { uint32_t hash = hash_learned_cookie(learn->cookie, learn->table_id); struct learned_cookie *c; HMAP_FOR_EACH_WITH_HASH (c, u.hmap_node, hash, &ofproto->learned_cookies) { if (c->cookie == learn->cookie && c->table_id == learn->table_id) { c->n += delta; ovs_assert(c->n >= 0); if (!c->n) { hmap_remove(&ofproto->learned_cookies, &c->u.hmap_node); ovs_list_push_back(dead_cookies, &c->u.list_node); } return; } } ovs_assert(delta > 0); c = xmalloc(sizeof *c); hmap_insert(&ofproto->learned_cookies, &c->u.hmap_node, hash); c->cookie = learn->cookie; c->table_id = learn->table_id; c->n = delta; } static const struct ofpact_learn * next_learn_with_delete(const struct rule_actions *actions, const struct ofpact_learn *start) { const struct ofpact *pos; for (pos = start ? ofpact_next(&start->ofpact) : actions->ofpacts; pos < ofpact_end(actions->ofpacts, actions->ofpacts_len); pos = ofpact_next(pos)) { if (pos->type == OFPACT_LEARN) { const struct ofpact_learn *learn = ofpact_get_LEARN(pos); if (learn->flags & NX_LEARN_F_DELETE_LEARNED) { return learn; } } } return NULL; } static void learned_cookies_update__(struct ofproto *ofproto, const struct rule_actions *actions, int delta, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { if (actions->has_learn_with_delete) { const struct ofpact_learn *learn; for (learn = next_learn_with_delete(actions, NULL); learn; learn = next_learn_with_delete(actions, learn)) { learned_cookies_update_one__(ofproto, learn, delta, dead_cookies); } } } static void learned_cookies_inc(struct ofproto *ofproto, const struct rule_actions *actions) OVS_REQUIRES(ofproto_mutex) { learned_cookies_update__(ofproto, actions, +1, NULL); } static void learned_cookies_dec(struct ofproto *ofproto, const struct rule_actions *actions, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { learned_cookies_update__(ofproto, actions, -1, dead_cookies); } static void learned_cookies_flush(struct ofproto *ofproto, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { struct learned_cookie *c; LIST_FOR_EACH_POP (c, u.list_node, dead_cookies) { struct rule_criteria criteria; struct rule_collection rules; struct match match; match_init_catchall(&match); rule_criteria_init(&criteria, c->table_id, &match, 0, OVS_VERSION_MAX, c->cookie, OVS_BE64_MAX, OFPP_ANY, OFPG_ANY); rule_criteria_require_rw(&criteria, false); collect_rules_loose(ofproto, &criteria, &rules); rule_criteria_destroy(&criteria); delete_flows__(&rules, OFPRR_DELETE, NULL); free(c); } } static enum ofperr handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh) { ofconn_send_reply(ofconn, make_echo_reply(oh)); return 0; } static void query_tables(struct ofproto *ofproto, struct 
ofputil_table_features **featuresp, struct ofputil_table_stats **statsp) { struct mf_bitmap rw_fields = oxm_writable_fields(); struct mf_bitmap match = oxm_matchable_fields(); struct mf_bitmap mask = oxm_maskable_fields(); struct ofputil_table_features *features; struct ofputil_table_stats *stats; int i; features = *featuresp = xcalloc(ofproto->n_tables, sizeof *features); for (i = 0; i < ofproto->n_tables; i++) { struct ofputil_table_features *f = &features[i]; f->table_id = i; sprintf(f->name, "table%d", i); f->metadata_match = OVS_BE64_MAX; f->metadata_write = OVS_BE64_MAX; atomic_read_relaxed(&ofproto->tables[i].miss_config, &f->miss_config); f->max_entries = 1000000; bool more_tables = false; for (int j = i + 1; j < ofproto->n_tables; j++) { if (!(ofproto->tables[j].flags & OFTABLE_HIDDEN)) { bitmap_set1(f->nonmiss.next, j); more_tables = true; } } f->nonmiss.instructions = (1u << N_OVS_INSTRUCTIONS) - 1; if (!more_tables) { f->nonmiss.instructions &= ~(1u << OVSINST_OFPIT11_GOTO_TABLE); } f->nonmiss.write.ofpacts = (UINT64_C(1) << N_OFPACTS) - 1; f->nonmiss.write.set_fields = rw_fields; f->nonmiss.apply = f->nonmiss.write; f->miss = f->nonmiss; f->match = match; f->mask = mask; f->wildcard = match; } if (statsp) { stats = *statsp = xcalloc(ofproto->n_tables, sizeof *stats); for (i = 0; i < ofproto->n_tables; i++) { struct ofputil_table_stats *s = &stats[i]; s->table_id = i; s->active_count = ofproto->tables[i].n_flows; if (i == 0) { s->active_count -= connmgr_count_hidden_rules( ofproto->connmgr); } } } else { stats = NULL; } ofproto->ofproto_class->query_tables(ofproto, features, stats); for (i = 0; i < ofproto->n_tables; i++) { const struct oftable *table = &ofproto->tables[i]; struct ofputil_table_features *f = &features[i]; if (table->name) { ovs_strzcpy(f->name, table->name, sizeof f->name); } if (table->max_flows < f->max_entries) { f->max_entries = table->max_flows; } } } static void query_switch_features(struct ofproto *ofproto, bool *arp_match_ip, uint64_t *ofpacts) { struct ofputil_table_features *features, *f; *arp_match_ip = false; *ofpacts = 0; query_tables(ofproto, &features, NULL); for (f = features; f < &features[ofproto->n_tables]; f++) { *ofpacts |= f->nonmiss.apply.ofpacts | f->miss.apply.ofpacts; if (bitmap_is_set(f->match.bm, MFF_ARP_SPA) || bitmap_is_set(f->match.bm, MFF_ARP_TPA)) { *arp_match_ip = true; } } free(features); /* Sanity check. 
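Every switch must support the OpenFlow output action, so at
     * least one table's action bitmap is expected to include OFPACT_OUTPUT.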
*/
    ovs_assert(*ofpacts & (UINT64_C(1) << OFPACT_OUTPUT));
}

static enum ofperr
handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_switch_features features;
    struct ofport *port;
    bool arp_match_ip;
    struct ofpbuf *b;

    query_switch_features(ofproto, &arp_match_ip, &features.ofpacts);

    features.datapath_id = ofproto->datapath_id;
    features.n_buffers = 0;
    features.n_tables = ofproto_get_n_visible_tables(ofproto);
    features.capabilities = (OFPUTIL_C_FLOW_STATS | OFPUTIL_C_TABLE_STATS |
                             OFPUTIL_C_PORT_STATS | OFPUTIL_C_QUEUE_STATS |
                             OFPUTIL_C_GROUP_STATS | OFPUTIL_C_BUNDLES);
    if (arp_match_ip) {
        features.capabilities |= OFPUTIL_C_ARP_MATCH_IP;
    }

    /* FIXME: Fill in proper features.auxiliary_id for auxiliary connections */
    features.auxiliary_id = 0;
    b = ofputil_encode_switch_features(&features, ofconn_get_protocol(ofconn),
                                       oh->xid);
    HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
        ofputil_put_switch_features_port(&port->pp, b);
    }

    ofconn_send_reply(ofconn, b);
    return 0;
}

static enum ofperr
handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofputil_switch_config config;
    config.frag = ofconn_get_ofproto(ofconn)->frag_handling;
    config.invalid_ttl_to_controller
        = ofconn_get_invalid_ttl_to_controller(ofconn);
    config.miss_send_len = ofconn_get_miss_send_len(ofconn);

    ofconn_send_reply(ofconn, ofputil_encode_get_config_reply(oh, &config));

    return 0;
}

static enum ofperr
handle_set_config(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_switch_config config;
    enum ofperr error;

    error = ofputil_decode_set_config(oh, &config);
    if (error) {
        return error;
    }

    if (ofconn_get_type(ofconn) != OFCONN_PRIMARY
        || ofconn_get_role(ofconn) != OFPCR12_ROLE_SLAVE) {
        enum ofputil_frag_handling cur = ofproto->frag_handling;
        enum ofputil_frag_handling next = config.frag;

        if (cur != next) {
            if (ofproto->ofproto_class->set_frag_handling(ofproto, next)) {
                ofproto->frag_handling = next;
            } else {
                VLOG_WARN_RL(&rl, "%s: unsupported fragment handling mode %s",
                             ofproto->name,
                             ofputil_frag_handling_to_string(next));
            }
        }
    }
    if (config.invalid_ttl_to_controller >= 0) {
        ofconn_set_invalid_ttl_to_controller(ofconn,
                                         config.invalid_ttl_to_controller);
    }

    ofconn_set_miss_send_len(ofconn, config.miss_send_len);

    return 0;
}

/* Checks whether 'ofconn' is a slave controller.  If so, returns an OpenFlow
 * error message code for the caller to propagate upward.  Otherwise, returns
 * 0. */
static enum ofperr
reject_slave_controller(struct ofconn *ofconn)
{
    if (ofconn_get_type(ofconn) == OFCONN_PRIMARY
        && ofconn_get_role(ofconn) == OFPCR12_ROLE_SLAVE) {
        return OFPERR_OFPBRC_IS_SLAVE;
    } else {
        return 0;
    }
}

/* Checks that the 'ofpacts_len' bytes of actions in 'ofpacts' are appropriate
 * for 'ofproto':
 *
 *    - If they use a meter, then 'ofproto' has that meter configured.
 *
 *    - If they use any groups, then 'ofproto' has those groups configured.
 *
 * Returns 0 if successful, otherwise an OpenFlow error.  Caller must hold
 * 'ofproto_mutex' for the result to be valid also after this function
 * returns.
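 *
 * A caller might validate a flow mod's actions like this (a sketch):
 *
 *     ovs_mutex_lock(&ofproto_mutex);
 *     error = ofproto_check_ofpacts(ofproto, fm->ofpacts, fm->ofpacts_len);
 *     ...act on 'error' while still holding 'ofproto_mutex'...
 *     ovs_mutex_unlock(&ofproto_mutex);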
*/
enum ofperr
ofproto_check_ofpacts(struct ofproto *ofproto,
                      const struct ofpact ofpacts[], size_t ofpacts_len)
    OVS_REQUIRES(ofproto_mutex)
{
    uint32_t mid;

    mid = ofpacts_get_meter(ofpacts, ofpacts_len);
    if (mid && get_provider_meter_id(ofproto, mid) == UINT32_MAX) {
        return OFPERR_OFPMMFC_INVALID_METER;
    }

    const struct ofpact_group *a;
    OFPACT_FOR_EACH_TYPE_FLATTENED (a, GROUP, ofpacts, ofpacts_len) {
        if (!ofproto_group_exists(ofproto, a->group_id)) {
            return OFPERR_OFPBAC_BAD_OUT_GROUP;
        }
    }

    return 0;
}

void
ofproto_packet_out_uninit(struct ofproto_packet_out *opo)
{
    dp_packet_delete(opo->packet);
    opo->packet = NULL;
    free(opo->flow);
    opo->flow = NULL;
    free(opo->ofpacts);
    opo->ofpacts = NULL;
    opo->ofpacts_len = 0;
    ovs_assert(!opo->aux);
}

/* Takes ownership of po->ofpacts, which must have been malloc'ed. */
static enum ofperr
ofproto_packet_out_init(struct ofproto *ofproto,
                        struct ofconn *ofconn,
                        struct ofproto_packet_out *opo,
                        const struct ofputil_packet_out *po)
{
    enum ofperr error;

    if (ofp_to_u16(po->in_port) >= ofproto->max_ports
        && ofp_to_u16(po->in_port) < ofp_to_u16(OFPP_MAX)) {
        return OFPERR_OFPBRC_BAD_PORT;
    }

    /* Get payload. */
    if (po->buffer_id != UINT32_MAX) {
        return OFPERR_OFPBRC_BUFFER_UNKNOWN;
    }

    /* Ensure that the L3 header is 32-bit aligned. */
    opo->packet = dp_packet_clone_data_with_headroom(po->packet,
                                                     po->packet_len, 2);
    /* Store struct flow. */
    opo->flow = xmalloc(sizeof *opo->flow);
    flow_extract(opo->packet, opo->flow);
    opo->flow->in_port.ofp_port = po->in_port;

    /* Check actions like for flow mods.  We pass a 'table_id' of 0 to
     * ofpacts_check_consistency(), which isn't strictly correct because these
     * actions aren't in any table.  This is OK as 'table_id' is only used to
     * check instructions (e.g., goto-table), which can't appear on the action
     * list of a packet-out. */
    error = ofpacts_check_consistency(po->ofpacts, po->ofpacts_len,
                                      opo->flow,
                                      u16_to_ofp(ofproto->max_ports), 0,
                                      ofproto->n_tables,
                                      ofconn_get_protocol(ofconn));
    if (error) {
        dp_packet_delete(opo->packet);
        free(opo->flow);
        return error;
    }

    opo->ofpacts = po->ofpacts;
    opo->ofpacts_len = po->ofpacts_len;

    opo->aux = NULL;
    return 0;
}

static enum ofperr
ofproto_packet_out_start(struct ofproto *ofproto,
                         struct ofproto_packet_out *opo)
    OVS_REQUIRES(ofproto_mutex)
{
    enum ofperr error;

    error = ofproto_check_ofpacts(ofproto, opo->ofpacts, opo->ofpacts_len);
    if (error) {
        return error;
    }

    return ofproto->ofproto_class->packet_xlate(ofproto, opo);
}

static void
ofproto_packet_out_revert(struct ofproto *ofproto,
                          struct ofproto_packet_out *opo)
    OVS_REQUIRES(ofproto_mutex)
{
    ofproto->ofproto_class->packet_xlate_revert(ofproto, opo);
}

static void
ofproto_packet_out_finish(struct ofproto *ofproto,
                          struct ofproto_packet_out *opo)
    OVS_REQUIRES(ofproto_mutex)
{
    ofproto->ofproto_class->packet_execute(ofproto, opo);
}

static enum ofperr
handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *p = ofconn_get_ofproto(ofconn);
    struct ofputil_packet_out po;
    struct ofproto_packet_out opo;
    uint64_t ofpacts_stub[1024 / 8];
    struct ofpbuf ofpacts;
    enum ofperr error;

    COVERAGE_INC(ofproto_packet_out);

    error = reject_slave_controller(ofconn);
    if (error) {
        return error;
    }

    /* Decode message. */
    ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    error = ofputil_decode_packet_out(&po, oh, &ofpacts);
    if (error) {
        ofpbuf_uninit(&ofpacts);
        return error;
    }

    po.ofpacts = ofpbuf_steal_data(&ofpacts); /* Move to heap.
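ofpbuf_steal_data() transfers
     * ownership of the decoded actions to the caller, which is why the
     * error paths below free po.ofpacts.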
*/ error = ofproto_packet_out_init(p, ofconn, &opo, &po); if (error) { free(po.ofpacts); return error; } ovs_mutex_lock(&ofproto_mutex); opo.version = p->tables_version; error = ofproto_packet_out_start(p, &opo); if (!error) { ofproto_packet_out_finish(p, &opo); } ovs_mutex_unlock(&ofproto_mutex); ofproto_packet_out_uninit(&opo); return error; } static enum ofperr handle_nxt_resume(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_packet_in_private pin; enum ofperr error; error = ofputil_decode_packet_in_private(oh, false, ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map, &pin, NULL, NULL); if (error) { return error; } error = (ofproto->ofproto_class->nxt_resume ? ofproto->ofproto_class->nxt_resume(ofproto, &pin) : OFPERR_NXR_NOT_SUPPORTED); ofputil_packet_in_private_destroy(&pin); return error; } static void update_port_config(struct ofconn *ofconn, struct ofport *port, enum ofputil_port_config config, enum ofputil_port_config mask) { enum ofputil_port_config toggle = (config ^ port->pp.config) & mask; if (toggle & OFPUTIL_PC_PORT_DOWN && (config & OFPUTIL_PC_PORT_DOWN ? netdev_turn_flags_off(port->netdev, NETDEV_UP, NULL) : netdev_turn_flags_on(port->netdev, NETDEV_UP, NULL))) { /* We tried to bring the port up or down, but it failed, so don't * update the "down" bit. */ toggle &= ~OFPUTIL_PC_PORT_DOWN; } if (toggle) { enum ofputil_port_config old_config = port->pp.config; port->pp.config ^= toggle; port->ofproto->ofproto_class->port_reconfigured(port, old_config); connmgr_send_port_status(port->ofproto->connmgr, ofconn, &port->pp, OFPPR_MODIFY); } } static enum ofperr port_mod_start(struct ofconn *ofconn, struct ofputil_port_mod *pm, struct ofport **port) { struct ofproto *p = ofconn_get_ofproto(ofconn); *port = ofproto_get_port(p, pm->port_no); if (!*port) { return OFPERR_OFPPMFC_BAD_PORT; } if (!eth_addr_equals((*port)->pp.hw_addr, pm->hw_addr)) { return OFPERR_OFPPMFC_BAD_HW_ADDR; } return 0; } static void port_mod_finish(struct ofconn *ofconn, struct ofputil_port_mod *pm, struct ofport *port) { update_port_config(ofconn, port, pm->config, pm->mask); if (pm->advertise) { netdev_set_advertisements(port->netdev, pm->advertise); } } static enum ofperr handle_port_mod(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_port_mod pm; struct ofport *port; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_port_mod(oh, &pm, false); if (error) { return error; } error = port_mod_start(ofconn, &pm, &port); if (!error) { port_mod_finish(ofconn, &pm, port); } return error; } static enum ofperr handle_desc_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { static const char *default_mfr_desc = "Nicira, Inc."; static const char *default_hw_desc = "Open vSwitch"; static const char *default_sw_desc = VERSION; static const char *default_serial_desc = "None"; static const char *default_dp_desc = "None"; struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofp_desc_stats *ods; struct ofpbuf *msg; msg = ofpraw_alloc_stats_reply(request, 0); ods = ofpbuf_put_zeros(msg, sizeof *ods); ovs_strlcpy(ods->mfr_desc, p->mfr_desc ? p->mfr_desc : default_mfr_desc, sizeof ods->mfr_desc); ovs_strlcpy(ods->hw_desc, p->hw_desc ? p->hw_desc : default_hw_desc, sizeof ods->hw_desc); ovs_strlcpy(ods->sw_desc, p->sw_desc ? p->sw_desc : default_sw_desc, sizeof ods->sw_desc); ovs_strlcpy(ods->serial_num, p->serial_desc ? 
p->serial_desc : default_serial_desc, sizeof ods->serial_num); ovs_strlcpy(ods->dp_desc, p->dp_desc ? p->dp_desc : default_dp_desc, sizeof ods->dp_desc); ofconn_send_reply(ofconn, msg); return 0; } static enum ofperr handle_table_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_table_features *features; struct ofputil_table_stats *stats; struct ofpbuf *reply; size_t i; query_tables(ofproto, &features, &stats); reply = ofputil_encode_table_stats_reply(request); for (i = 0; i < ofproto->n_tables; i++) { if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) { ofputil_append_table_stats_reply(reply, &stats[i], &features[i]); } } ofconn_send_reply(ofconn, reply); free(features); free(stats); return 0; } static enum ofperr handle_table_features_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofpbuf msg = ofpbuf_const_initializer(request, ntohs(request->length)); ofpraw_pull_assert(&msg); if (msg.size || ofpmp_more(request)) { return OFPERR_OFPTFFC_EPERM; } struct ofputil_table_features *features; query_tables(ofproto, &features, NULL); struct ovs_list replies; ofpmp_init(&replies, request); for (size_t i = 0; i < ofproto->n_tables; i++) { if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) { ofputil_append_table_features_reply(&features[i], &replies); } } ofconn_send_replies(ofconn, &replies); free(features); return 0; } /* Returns the vacancy of 'oftable', a number that ranges from 0 (if the table * is full) to 100 (if the table is empty). * * A table without a limit on flows is considered to be empty. */ static uint8_t oftable_vacancy(const struct oftable *t) { return (!t->max_flows ? 100 : t->n_flows >= t->max_flows ? 0 : (t->max_flows - t->n_flows) * 100.0 / t->max_flows); } static void query_table_desc__(struct ofputil_table_desc *td, struct ofproto *ofproto, uint8_t table_id) { const struct oftable *t = &ofproto->tables[table_id]; td->table_id = table_id; td->eviction = (t->eviction & EVICTION_OPENFLOW ? OFPUTIL_TABLE_EVICTION_ON : OFPUTIL_TABLE_EVICTION_OFF); td->eviction_flags = OFPROTO_EVICTION_FLAGS; td->vacancy = (t->vacancy_event ? OFPUTIL_TABLE_VACANCY_ON : OFPUTIL_TABLE_VACANCY_OFF); td->table_vacancy.vacancy_down = t->vacancy_down; td->table_vacancy.vacancy_up = t->vacancy_up; td->table_vacancy.vacancy = oftable_vacancy(t); } /* This function queries the database for dumping table-desc. */ static void query_tables_desc(struct ofproto *ofproto, struct ofputil_table_desc **descp) { struct ofputil_table_desc *table_desc; size_t i; table_desc = *descp = xcalloc(ofproto->n_tables, sizeof *table_desc); for (i = 0; i < ofproto->n_tables; i++) { struct ofputil_table_desc *td = &table_desc[i]; query_table_desc__(td, ofproto, i); } } /* Function to handle dump-table-desc request. 
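 */

/* Worked example for oftable_vacancy() above (invented numbers): with
 * max_flows = 1000 and n_flows = 250, the vacancy is
 * (1000 - 250) * 100.0 / 1000 = 75, i.e. the table is 75% empty.  The two
 * special cases short-circuit the formula: max_flows == 0 (no limit)
 * always reports 100, and n_flows >= max_flows always reports 0.
 */

/*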
*/ static enum ofperr handle_table_desc_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_table_desc *table_desc; struct ovs_list replies; size_t i; query_tables_desc(ofproto, &table_desc); ofpmp_init(&replies, request); for (i = 0; i < ofproto->n_tables; i++) { if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) { ofputil_append_table_desc_reply(&table_desc[i], &replies, request->version); } } ofconn_send_replies(ofconn, &replies); free(table_desc); return 0; } /* This function determines and sends the vacancy event, based on the value * of current vacancy and threshold vacancy. If the current vacancy is less * than or equal to vacancy_down, vacancy up events must be enabled, and when * the current vacancy is greater or equal to vacancy_up, vacancy down events * must be enabled. */ static void send_table_status(struct ofproto *ofproto, uint8_t table_id) { struct oftable *t = &ofproto->tables[table_id]; if (!t->vacancy_event) { return; } uint8_t vacancy = oftable_vacancy(t); enum ofp14_table_reason event; if (vacancy < t->vacancy_down) { event = OFPTR_VACANCY_DOWN; } else if (vacancy > t->vacancy_up) { event = OFPTR_VACANCY_UP; } else { return; } if (event == t->vacancy_event) { struct ofputil_table_desc td; query_table_desc__(&td, ofproto, table_id); connmgr_send_table_status(ofproto->connmgr, &td, event); t->vacancy_event = (event == OFPTR_VACANCY_DOWN ? OFPTR_VACANCY_UP : OFPTR_VACANCY_DOWN); } } static void append_port_stat(struct ofport *port, struct ovs_list *replies) { struct ofputil_port_stats ops = { .port_no = port->pp.port_no }; calc_duration(port->created, time_msec(), &ops.duration_sec, &ops.duration_nsec); /* Intentionally ignore return value, since errors will set * 'stats' to all-1s, which is correct for OpenFlow, and * netdev_get_stats() will log errors. 
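 */

/* Illustration (hypothetical consumer; the field name is an assumption
 * based on struct netdev_stats): OpenFlow encodes "counter not supported"
 * as all-1s, so a reader of this reply can detect it with a check such as:
 *
 *     if (ops.stats.rx_errors == UINT64_MAX) {
 *         ...the port does not track rx_errors...
 *     }
 */

/*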
*/ ofproto_port_get_stats(port, &ops.stats); ofputil_append_port_stat(replies, &ops); } static void handle_port_request(struct ofconn *ofconn, const struct ofp_header *request, ofp_port_t port_no, void (*cb)(struct ofport *, struct ovs_list *replies)) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofport *port; struct ovs_list replies; ofpmp_init(&replies, request); if (port_no != OFPP_ANY) { port = ofproto_get_port(ofproto, port_no); if (port) { cb(port, &replies); } } else { HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) { cb(port, &replies); } } ofconn_send_replies(ofconn, &replies); } static enum ofperr handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { ofp_port_t port_no; enum ofperr error; error = ofputil_decode_port_stats_request(request, &port_no); if (!error) { handle_port_request(ofconn, request, port_no, append_port_stat); } return error; } static void append_port_desc(struct ofport *port, struct ovs_list *replies) { ofputil_append_port_desc_stats_reply(&port->pp, replies); } static enum ofperr handle_port_desc_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { ofp_port_t port_no; enum ofperr error; error = ofputil_decode_port_desc_stats_request(request, &port_no); if (!error) { handle_port_request(ofconn, request, port_no, append_port_desc); } return error; } static uint32_t hash_cookie(ovs_be64 cookie) { return hash_uint64((OVS_FORCE uint64_t)cookie); } static void cookies_insert(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { hindex_insert(&ofproto->cookies, &rule->cookie_node, hash_cookie(rule->flow_cookie)); } static void cookies_remove(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { hindex_remove(&ofproto->cookies, &rule->cookie_node); } static void calc_duration(long long int start, long long int now, uint32_t *sec, uint32_t *nsec) { long long int msecs = now - start; *sec = msecs / 1000; *nsec = (msecs % 1000) * (1000 * 1000); } /* Checks whether 'table_id' is 0xff or a valid table ID in 'ofproto'. Returns * true if 'table_id' is OK, false otherwise. */ static bool check_table_id(const struct ofproto *ofproto, uint8_t table_id) { return table_id == OFPTT_ALL || table_id < ofproto->n_tables; } static struct oftable * next_visible_table(const struct ofproto *ofproto, uint8_t table_id) { struct oftable *table; for (table = &ofproto->tables[table_id]; table < &ofproto->tables[ofproto->n_tables]; table++) { if (!(table->flags & OFTABLE_HIDDEN)) { return table; } } return NULL; } static struct oftable * first_matching_table(const struct ofproto *ofproto, uint8_t table_id) { if (table_id == 0xff) { return next_visible_table(ofproto, 0); } else if (table_id < ofproto->n_tables) { return &ofproto->tables[table_id]; } else { return NULL; } } static struct oftable * next_matching_table(const struct ofproto *ofproto, const struct oftable *table, uint8_t table_id) { return (table_id == 0xff ? next_visible_table(ofproto, (table - ofproto->tables) + 1) : NULL); } /* Assigns TABLE to each oftable, in turn, that matches TABLE_ID in OFPROTO: * * - If TABLE_ID is 0xff, this iterates over every classifier table in * OFPROTO, skipping tables marked OFTABLE_HIDDEN. * * - If TABLE_ID is the number of a table in OFPROTO, then the loop iterates * only once, for that table. (This can be used to access tables marked * OFTABLE_HIDDEN.) * * - Otherwise, TABLE_ID isn't valid for OFPROTO, so the loop won't be * entered at all. 
 *      (Perhaps you should have validated TABLE_ID with check_table_id().)
 *
 * All parameters are evaluated multiple times. */
#define FOR_EACH_MATCHING_TABLE(TABLE, TABLE_ID, OFPROTO)         \
    for ((TABLE) = first_matching_table(OFPROTO, TABLE_ID);       \
         (TABLE) != NULL;                                         \
         (TABLE) = next_matching_table(OFPROTO, TABLE, TABLE_ID))

/* Initializes 'criteria' in a straightforward way based on the other
 * parameters.
 *
 * By default, the criteria include flows that are read-only, on the
 * assumption that the collected flows won't be modified.  Call
 * rule_criteria_require_rw() if flows will be modified.
 *
 * For "loose" matching, the 'priority' parameter is unimportant and may be
 * supplied as 0. */
static void
rule_criteria_init(struct rule_criteria *criteria, uint8_t table_id,
                   const struct match *match, int priority,
                   ovs_version_t version, ovs_be64 cookie,
                   ovs_be64 cookie_mask, ofp_port_t out_port,
                   uint32_t out_group)
{
    criteria->table_id = table_id;
    cls_rule_init(&criteria->cr, match, priority);
    criteria->version = version;
    criteria->cookie = cookie;
    criteria->cookie_mask = cookie_mask;
    criteria->out_port = out_port;
    criteria->out_group = out_group;

    /* We ordinarily want to skip hidden rules, but there has to be a way for
     * code internal to OVS to modify and delete them, so if the criteria
     * specify a priority that can only be for a hidden flow, then allow
     * hidden rules to be selected.  (This doesn't allow OpenFlow clients to
     * meddle with hidden flows because OpenFlow uses only a 16-bit field to
     * specify priority.) */
    criteria->include_hidden = priority > UINT16_MAX;

    /* We assume that the criteria are being used to collect flows for
     * reading but not modification.  Thus, we should collect read-only
     * flows. */
    criteria->include_readonly = true;
}

/* By default, criteria initialized by rule_criteria_init() will match flows
 * that are read-only, on the assumption that the collected flows won't be
 * modified.  Call this function to match only flows that are modifiable.
 *
 * Specify 'can_write_readonly' as false in ordinary circumstances, true if
 * the caller has special privileges that allow it to modify even "read-only"
 * flows. */
static void
rule_criteria_require_rw(struct rule_criteria *criteria,
                         bool can_write_readonly)
{
    criteria->include_readonly = can_write_readonly;
}

static void
rule_criteria_destroy(struct rule_criteria *criteria)
{
    cls_rule_destroy(&criteria->cr);
    criteria->version = OVS_VERSION_NOT_REMOVED; /* Mark as destroyed. */
}

/* Schedules postponed removal of rules, destroys 'rules'. */
static void
remove_rules_postponed(struct rule_collection *rules)
    OVS_REQUIRES(ofproto_mutex)
{
    if (rule_collection_n(rules) > 0) {
        if (rule_collection_n(rules) == 1) {
            ovsrcu_postpone(remove_rule_rcu, rule_collection_rules(rules)[0]);
            rule_collection_init(rules);
        } else {
            ovsrcu_postpone(remove_rules_rcu, rule_collection_detach(rules));
        }
    }
}

/* Schedules postponed removal of groups, destroys 'groups'. */
static void
remove_groups_postponed(struct group_collection *groups)
    OVS_REQUIRES(ofproto_mutex)
{
    if (group_collection_n(groups) > 0) {
        if (group_collection_n(groups) == 1) {
            ovsrcu_postpone(remove_group_rcu,
                            group_collection_groups(groups)[0]);
            group_collection_init(groups);
        } else {
            ovsrcu_postpone(remove_groups_rcu,
                            group_collection_detach(groups));
        }
    }
}

/* Checks whether 'rule' matches 'c' and, if so, adds it to 'rules'.  This
 * function verifies most of the criteria in 'c' itself, but the caller must
 * check 'c->cr' separately.
 *
 * Rules that have already been marked for removal are not collected.
* * Increments '*n_readonly' if 'rule' wasn't added because it's read-only (and * 'c' only includes modifiable rules). */ static void collect_rule(struct rule *rule, const struct rule_criteria *c, struct rule_collection *rules, size_t *n_readonly) OVS_REQUIRES(ofproto_mutex) { if ((c->table_id == rule->table_id || c->table_id == 0xff) && ofproto_rule_has_out_port(rule, c->out_port) && ofproto_rule_has_out_group(rule, c->out_group) && !((rule->flow_cookie ^ c->cookie) & c->cookie_mask) && (!rule_is_hidden(rule) || c->include_hidden) && cls_rule_visible_in_version(&rule->cr, c->version)) { /* Rule matches all the criteria... */ if (!rule_is_readonly(rule) || c->include_readonly) { /* ...add it. */ rule_collection_add(rules, rule); } else { /* ...except it's read-only. */ ++*n_readonly; } } } /* Searches 'ofproto' for rules that match the criteria in 'criteria'. Matches * on classifiers rules are done in the "loose" way required for OpenFlow * OFPFC_MODIFY and OFPFC_DELETE requests. Puts the selected rules on list * 'rules'. * * Returns 0 on success, otherwise an OpenFlow error code. */ static enum ofperr collect_rules_loose(struct ofproto *ofproto, const struct rule_criteria *criteria, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { struct oftable *table; enum ofperr error = 0; size_t n_readonly = 0; rule_collection_init(rules); if (!check_table_id(ofproto, criteria->table_id)) { error = OFPERR_OFPBRC_BAD_TABLE_ID; goto exit; } if (criteria->cookie_mask == OVS_BE64_MAX) { struct rule *rule; HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node, hash_cookie(criteria->cookie), &ofproto->cookies) { if (cls_rule_is_loose_match(&rule->cr, &criteria->cr.match)) { collect_rule(rule, criteria, rules, &n_readonly); } } } else { FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) { struct rule *rule; CLS_FOR_EACH_TARGET (rule, cr, &table->cls, &criteria->cr, criteria->version) { collect_rule(rule, criteria, rules, &n_readonly); } } } exit: if (!error && !rule_collection_n(rules) && n_readonly) { /* We didn't find any rules to modify. We did find some read-only * rules that we're not allowed to modify, so report that. */ error = OFPERR_OFPBRC_EPERM; } if (error) { rule_collection_destroy(rules); } return error; } /* Searches 'ofproto' for rules that match the criteria in 'criteria'. Matches * on classifiers rules are done in the "strict" way required for OpenFlow * OFPFC_MODIFY_STRICT and OFPFC_DELETE_STRICT requests. Puts the selected * rules on list 'rules'. * * Returns 0 on success, otherwise an OpenFlow error code. 
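 */

/* Illustrative contrast between the two collectors (flows invented,
 * ovs-ofctl style):
 *
 *     priority=10,ip,nw_dst=10.0.0.1  actions=...
 *     priority=10,ip,nw_dst=10.0.0.2  actions=...
 *
 * A loose request matching "ip" collects both rules, since loose matching
 * selects every rule at least as specific as the request and ignores
 * priority.  A strict request matching "ip" collects neither: strict
 * matching, below, requires the match and priority to be identical
 * (cls_rule_equal() / classifier_find_rule_exactly()).
 */

/*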
*/ static enum ofperr collect_rules_strict(struct ofproto *ofproto, const struct rule_criteria *criteria, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { struct oftable *table; size_t n_readonly = 0; enum ofperr error = 0; rule_collection_init(rules); if (!check_table_id(ofproto, criteria->table_id)) { error = OFPERR_OFPBRC_BAD_TABLE_ID; goto exit; } if (criteria->cookie_mask == OVS_BE64_MAX) { struct rule *rule; HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node, hash_cookie(criteria->cookie), &ofproto->cookies) { if (cls_rule_equal(&rule->cr, &criteria->cr)) { collect_rule(rule, criteria, rules, &n_readonly); } } } else { FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) { struct rule *rule; rule = rule_from_cls_rule(classifier_find_rule_exactly( &table->cls, &criteria->cr, criteria->version)); if (rule) { collect_rule(rule, criteria, rules, &n_readonly); } } } exit: if (!error && !rule_collection_n(rules) && n_readonly) { /* We didn't find any rules to modify. We did find some read-only * rules that we're not allowed to modify, so report that. */ error = OFPERR_OFPBRC_EPERM; } if (error) { rule_collection_destroy(rules); } return error; } /* Returns 'age_ms' (a duration in milliseconds), converted to seconds and * forced into the range of a uint16_t. */ static int age_secs(long long int age_ms) { return (age_ms < 0 ? 0 : age_ms >= UINT16_MAX * 1000 ? UINT16_MAX : (unsigned int) age_ms / 1000); } static enum ofperr handle_flow_stats_request(struct ofconn *ofconn, const struct ofp_header *request) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_flow_stats_request fsr; struct rule_criteria criteria; struct rule_collection rules; struct ovs_list replies; enum ofperr error; error = ofputil_decode_flow_stats_request(&fsr, request, ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map); if (error) { return error; } rule_criteria_init(&criteria, fsr.table_id, &fsr.match, 0, OVS_VERSION_MAX, fsr.cookie, fsr.cookie_mask, fsr.out_port, fsr.out_group); ovs_mutex_lock(&ofproto_mutex); error = collect_rules_loose(ofproto, &criteria, &rules); rule_criteria_destroy(&criteria); if (!error) { rule_collection_ref(&rules); } ovs_mutex_unlock(&ofproto_mutex); if (error) { return error; } ofpmp_init(&replies, request); struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, &rules) { long long int now = time_msec(); struct ofputil_flow_stats fs; long long int created, used, modified; const struct rule_actions *actions; enum ofputil_flow_mod_flags flags; ovs_mutex_lock(&rule->mutex); fs.cookie = rule->flow_cookie; fs.idle_timeout = rule->idle_timeout; fs.hard_timeout = rule->hard_timeout; fs.importance = rule->importance; created = rule->created; modified = rule->modified; actions = rule_get_actions(rule); flags = rule->flags; ovs_mutex_unlock(&rule->mutex); ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count, &fs.byte_count, &used); minimatch_expand(&rule->cr.match, &fs.match); fs.table_id = rule->table_id; calc_duration(created, now, &fs.duration_sec, &fs.duration_nsec); fs.priority = rule->cr.priority; fs.idle_age = age_secs(now - used); fs.hard_age = age_secs(now - modified); fs.ofpacts = actions->ofpacts; fs.ofpacts_len = actions->ofpacts_len; fs.flags = flags; ofputil_append_flow_stats_reply(&fs, &replies, ofproto_get_tun_tab(ofproto)); } rule_collection_unref(&rules); rule_collection_destroy(&rules); ofconn_send_replies(ofconn, &replies); return 0; } static void flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds 
*results) { uint64_t packet_count, byte_count; const struct rule_actions *actions; long long int created, used; rule->ofproto->ofproto_class->rule_get_stats(rule, &packet_count, &byte_count, &used); ovs_mutex_lock(&rule->mutex); actions = rule_get_actions(rule); created = rule->created; ovs_mutex_unlock(&rule->mutex); if (rule->table_id != 0) { ds_put_format(results, "table_id=%"PRIu8", ", rule->table_id); } ds_put_format(results, "duration=%llds, ", (time_msec() - created) / 1000); ds_put_format(results, "n_packets=%"PRIu64", ", packet_count); ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count); cls_rule_format(&rule->cr, ofproto_get_tun_tab(ofproto), results); ds_put_char(results, ','); ds_put_cstr(results, "actions="); ofpacts_format(actions->ofpacts, actions->ofpacts_len, results); ds_put_cstr(results, "\n"); } /* Adds a pretty-printed description of all flows to 'results', including * hidden flows (e.g., set up by in-band control). */ void ofproto_get_all_flows(struct ofproto *p, struct ds *results) { struct oftable *table; OFPROTO_FOR_EACH_TABLE (table, p) { struct rule *rule; CLS_FOR_EACH (rule, cr, &table->cls) { flow_stats_ds(p, rule, results); } } } /* Obtains the NetFlow engine type and engine ID for 'ofproto' into * '*engine_type' and '*engine_id', respectively. */ void ofproto_get_netflow_ids(const struct ofproto *ofproto, uint8_t *engine_type, uint8_t *engine_id) { ofproto->ofproto_class->get_netflow_ids(ofproto, engine_type, engine_id); } /* Checks the status change of CFM on 'ofport'. * * Returns true if 'ofproto_class' does not support 'cfm_status_changed'. */ bool ofproto_port_cfm_status_changed(struct ofproto *ofproto, ofp_port_t ofp_port) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->cfm_status_changed ? ofproto->ofproto_class->cfm_status_changed(ofport) : true); } /* Checks the status of CFM configured on 'ofp_port' within 'ofproto'. * Returns 0 if the port's CFM status was successfully stored into * '*status'. Returns positive errno if the port did not have CFM * configured. * * The caller must provide and own '*status', and must free 'status->rmps'. * '*status' is indeterminate if the return value is non-zero. */ int ofproto_port_get_cfm_status(const struct ofproto *ofproto, ofp_port_t ofp_port, struct cfm_status *status) { struct ofport *ofport = ofproto_get_port(ofproto, ofp_port); return (ofport && ofproto->ofproto_class->get_cfm_status ? 
ofproto->ofproto_class->get_cfm_status(ofport, status) : EOPNOTSUPP); } static enum ofperr handle_aggregate_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_flow_stats_request request; struct ofputil_aggregate_stats stats; bool unknown_packets, unknown_bytes; struct rule_criteria criteria; struct rule_collection rules; struct ofpbuf *reply; enum ofperr error; error = ofputil_decode_flow_stats_request(&request, oh, ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map); if (error) { return error; } rule_criteria_init(&criteria, request.table_id, &request.match, 0, OVS_VERSION_MAX, request.cookie, request.cookie_mask, request.out_port, request.out_group); ovs_mutex_lock(&ofproto_mutex); error = collect_rules_loose(ofproto, &criteria, &rules); rule_criteria_destroy(&criteria); if (!error) { rule_collection_ref(&rules); } ovs_mutex_unlock(&ofproto_mutex); if (error) { return error; } memset(&stats, 0, sizeof stats); unknown_packets = unknown_bytes = false; struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, &rules) { uint64_t packet_count; uint64_t byte_count; long long int used; ofproto->ofproto_class->rule_get_stats(rule, &packet_count, &byte_count, &used); if (packet_count == UINT64_MAX) { unknown_packets = true; } else { stats.packet_count += packet_count; } if (byte_count == UINT64_MAX) { unknown_bytes = true; } else { stats.byte_count += byte_count; } stats.flow_count++; } if (unknown_packets) { stats.packet_count = UINT64_MAX; } if (unknown_bytes) { stats.byte_count = UINT64_MAX; } rule_collection_unref(&rules); rule_collection_destroy(&rules); reply = ofputil_encode_aggregate_stats_reply(&stats, oh); ofconn_send_reply(ofconn, reply); return 0; } struct queue_stats_cbdata { struct ofport *ofport; struct ovs_list replies; long long int now; }; static void put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id, const struct netdev_queue_stats *stats) { struct ofputil_queue_stats oqs; oqs.port_no = cbdata->ofport->pp.port_no; oqs.queue_id = queue_id; oqs.tx_bytes = stats->tx_bytes; oqs.tx_packets = stats->tx_packets; oqs.tx_errors = stats->tx_errors; if (stats->created != LLONG_MIN) { calc_duration(stats->created, cbdata->now, &oqs.duration_sec, &oqs.duration_nsec); } else { oqs.duration_sec = oqs.duration_nsec = UINT32_MAX; } ofputil_append_queue_stat(&cbdata->replies, &oqs); } static void handle_queue_stats_dump_cb(uint32_t queue_id, struct netdev_queue_stats *stats, void *cbdata_) { struct queue_stats_cbdata *cbdata = cbdata_; put_queue_stats(cbdata, queue_id, stats); } static enum ofperr handle_queue_stats_for_port(struct ofport *port, uint32_t queue_id, struct queue_stats_cbdata *cbdata) { cbdata->ofport = port; if (queue_id == OFPQ_ALL) { netdev_dump_queue_stats(port->netdev, handle_queue_stats_dump_cb, cbdata); } else { struct netdev_queue_stats stats; if (!netdev_get_queue_stats(port->netdev, queue_id, &stats)) { put_queue_stats(cbdata, queue_id, &stats); } else { return OFPERR_OFPQOFC_BAD_QUEUE; } } return 0; } static enum ofperr handle_queue_stats_request(struct ofconn *ofconn, const struct ofp_header *rq) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct queue_stats_cbdata cbdata; struct ofport *port; enum ofperr error; struct ofputil_queue_stats_request oqsr; COVERAGE_INC(ofproto_queue_req); ofpmp_init(&cbdata.replies, rq); cbdata.now = time_msec(); error = ofputil_decode_queue_stats_request(rq, &oqsr); if (error) { return error; } if 
(oqsr.port_no == OFPP_ANY) { error = OFPERR_OFPQOFC_BAD_QUEUE; HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) { if (!handle_queue_stats_for_port(port, oqsr.queue_id, &cbdata)) { error = 0; } } } else { port = ofproto_get_port(ofproto, oqsr.port_no); error = (port ? handle_queue_stats_for_port(port, oqsr.queue_id, &cbdata) : OFPERR_OFPQOFC_BAD_PORT); } if (!error) { ofconn_send_replies(ofconn, &cbdata.replies); } else { ofpbuf_list_delete(&cbdata.replies); } return error; } static enum ofperr evict_rules_from_table(struct oftable *table) OVS_REQUIRES(ofproto_mutex) { enum ofperr error = 0; struct rule_collection rules; unsigned int count = table->n_flows; unsigned int max_flows = table->max_flows; rule_collection_init(&rules); while (count-- > max_flows) { struct rule *rule; if (!choose_rule_to_evict(table, &rule)) { error = OFPERR_OFPFMFC_TABLE_FULL; break; } else { eviction_group_remove_rule(rule); rule_collection_add(&rules, rule); } } delete_flows__(&rules, OFPRR_EVICTION, NULL); return error; } static void get_conjunctions(const struct ofputil_flow_mod *fm, struct cls_conjunction **conjsp, size_t *n_conjsp) { struct cls_conjunction *conjs = NULL; int n_conjs = 0; const struct ofpact *ofpact; OFPACT_FOR_EACH (ofpact, fm->ofpacts, fm->ofpacts_len) { if (ofpact->type == OFPACT_CONJUNCTION) { n_conjs++; } else if (ofpact->type != OFPACT_NOTE) { /* "conjunction" may appear with "note" actions but not with any * other type of actions. */ ovs_assert(!n_conjs); break; } } if (n_conjs) { int i = 0; conjs = xzalloc(n_conjs * sizeof *conjs); OFPACT_FOR_EACH (ofpact, fm->ofpacts, fm->ofpacts_len) { if (ofpact->type == OFPACT_CONJUNCTION) { struct ofpact_conjunction *oc = ofpact_get_CONJUNCTION(ofpact); conjs[i].clause = oc->clause; conjs[i].n_clauses = oc->n_clauses; conjs[i].id = oc->id; i++; } } } *conjsp = conjs; *n_conjsp = n_conjs; } /* add_flow_init(), add_flow_start(), add_flow_revert(), and add_flow_finish() * implement OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT * in which no matching flow already exists in the flow table. * * add_flow_init() creates a new flow according to 'fm' and stores it to 'ofm' * for later reference. If the flow replaces other flow, it will be updated to * match modify semantics later by add_flow_start() (by calling * replace_rule_start()). * * Returns 0 on success, or an OpenFlow error code on failure. * * On successful return the caller must complete the operation by calling * add_flow_start(), and if that succeeds, then either add_flow_finish(), or * add_flow_revert() if the operation needs to be reverted due to a later * failure. */ static enum ofperr add_flow_init(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { struct oftable *table; struct cls_rule cr; uint8_t table_id; enum ofperr error; if (!check_table_id(ofproto, fm->table_id)) { return OFPERR_OFPBRC_BAD_TABLE_ID; } /* Pick table. 
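 */

/* OFPTT_ALL (0xff) here means "let the switch choose a table".  The
 * optional rule_choose_table() hook lets the provider implement its own
 * policy; e.g. a hypothetical provider could steer ARP flows to table 1
 * and everything else to table 0.  Without the hook, table 0 is used.
 */

/*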
*/ if (fm->table_id == 0xff) { if (ofproto->ofproto_class->rule_choose_table) { error = ofproto->ofproto_class->rule_choose_table(ofproto, &fm->match, &table_id); if (error) { return error; } ovs_assert(table_id < ofproto->n_tables); } else { table_id = 0; } } else if (fm->table_id < ofproto->n_tables) { table_id = fm->table_id; } else { return OFPERR_OFPBRC_BAD_TABLE_ID; } table = &ofproto->tables[table_id]; if (table->flags & OFTABLE_READONLY && !(fm->flags & OFPUTIL_FF_NO_READONLY)) { return OFPERR_OFPBRC_EPERM; } if (!(fm->flags & OFPUTIL_FF_HIDDEN_FIELDS) && !match_has_default_hidden_fields(&fm->match)) { VLOG_WARN_RL(&rl, "%s: (add_flow) only internal flows can set " "non-default values to hidden fields", ofproto->name); return OFPERR_OFPBRC_EPERM; } if (!ofm->temp_rule) { cls_rule_init(&cr, &fm->match, fm->priority); /* Allocate new rule. Destroys 'cr'. */ error = ofproto_rule_create(ofproto, &cr, table - ofproto->tables, fm->new_cookie, fm->idle_timeout, fm->hard_timeout, fm->flags, fm->importance, fm->ofpacts, fm->ofpacts_len, fm->match.flow.tunnel.metadata.present.map, fm->ofpacts_tlv_bitmap, &ofm->temp_rule); if (error) { return error; } get_conjunctions(fm, &ofm->conjs, &ofm->n_conjs); } return 0; } /* ofm->temp_rule is consumed only in the successful case. */ static enum ofperr add_flow_start(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule *old_rule = NULL; struct rule *new_rule = ofm->temp_rule; const struct rule_actions *actions = rule_get_actions(new_rule); struct oftable *table = &ofproto->tables[new_rule->table_id]; enum ofperr error; /* Must check actions while holding ofproto_mutex to avoid a race. */ error = ofproto_check_ofpacts(ofproto, actions->ofpacts, actions->ofpacts_len); if (error) { return error; } /* Check for the existence of an identical rule. * This will not return rules earlier marked for removal. */ old_rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &new_rule->cr, ofm->version)); if (!old_rule) { /* Check for overlap, if requested. */ if (new_rule->flags & OFPUTIL_FF_CHECK_OVERLAP && classifier_rule_overlaps(&table->cls, &new_rule->cr, ofm->version)) { return OFPERR_OFPFMFC_OVERLAP; } /* If necessary, evict an existing rule to clear out space. */ if (table->n_flows >= table->max_flows) { if (!choose_rule_to_evict(table, &old_rule)) { return OFPERR_OFPFMFC_TABLE_FULL; } eviction_group_remove_rule(old_rule); /* Marks 'old_rule' as an evicted rule rather than replaced rule. */ old_rule->removed_reason = OFPRR_EVICTION; } } else { ofm->modify_cookie = true; } if (old_rule) { rule_collection_add(&ofm->old_rules, old_rule); } /* Take ownership of the temp_rule. */ rule_collection_add(&ofm->new_rules, new_rule); ofm->temp_rule = NULL; replace_rule_start(ofproto, ofm, old_rule, new_rule); return 0; } /* Revert the effects of add_flow_start(). */ static void add_flow_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule *old_rule = rule_collection_n(&ofm->old_rules) ? rule_collection_rules(&ofm->old_rules)[0] : NULL; struct rule *new_rule = rule_collection_rules(&ofm->new_rules)[0]; replace_rule_revert(ofproto, old_rule, new_rule); } /* To be called after version bump. */ static void add_flow_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { struct rule *old_rule = rule_collection_n(&ofm->old_rules) ? 
        rule_collection_rules(&ofm->old_rules)[0] : NULL;
    struct rule *new_rule = rule_collection_rules(&ofm->new_rules)[0];
    struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies);

    replace_rule_finish(ofproto, ofm, req, old_rule, new_rule, &dead_cookies);
    learned_cookies_flush(ofproto, &dead_cookies);

    if (old_rule) {
        ovsrcu_postpone(remove_rule_rcu, old_rule);
    } else {
        ofmonitor_report(ofproto->connmgr, new_rule, NXFME_ADDED, 0,
                         req ? req->ofconn : NULL,
                         req ? req->request->xid : 0, NULL);

        /* Send Vacancy Events for OF1.4+. */
        send_table_status(ofproto, new_rule->table_id);
    }
}

/* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */

/* Create a new rule.  Note that the rule is NOT inserted into any data
 * structures yet.  Takes ownership of 'cr'.  Only assigns '*new_rule' if
 * successful. */
static enum ofperr
ofproto_rule_create(struct ofproto *ofproto, struct cls_rule *cr,
                    uint8_t table_id, ovs_be64 new_cookie,
                    uint16_t idle_timeout, uint16_t hard_timeout,
                    enum ofputil_flow_mod_flags flags, uint16_t importance,
                    const struct ofpact *ofpacts, size_t ofpacts_len,
                    uint64_t match_tlv_bitmap, uint64_t ofpacts_tlv_bitmap,
                    struct rule **new_rule)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct rule *rule;
    enum ofperr error;

    /* Allocate new rule. */
    rule = ofproto->ofproto_class->rule_alloc();
    if (!rule) {
        cls_rule_destroy(cr);
        VLOG_WARN_RL(&rl, "%s: failed to allocate a rule.", ofproto->name);
        return OFPERR_OFPFMFC_UNKNOWN;
    }

    /* Initialize base state. */
    *CONST_CAST(struct ofproto **, &rule->ofproto) = ofproto;
    cls_rule_move(CONST_CAST(struct cls_rule *, &rule->cr), cr);
    ovs_refcount_init(&rule->ref_count);

    ovs_mutex_init(&rule->mutex);
    ovs_mutex_lock(&rule->mutex);
    *CONST_CAST(ovs_be64 *, &rule->flow_cookie) = new_cookie;
    rule->created = rule->modified = time_msec();
    rule->idle_timeout = idle_timeout;
    rule->hard_timeout = hard_timeout;
    *CONST_CAST(uint16_t *, &rule->importance) = importance;
    rule->removed_reason = OVS_OFPRR_NONE;

    *CONST_CAST(uint8_t *, &rule->table_id) = table_id;
    rule->flags = flags & OFPUTIL_FF_STATE;

    *CONST_CAST(const struct rule_actions **, &rule->actions)
        = rule_actions_create(ofpacts, ofpacts_len);

    ovs_list_init(&rule->meter_list_node);
    rule->eviction_group = NULL;
    rule->monitor_flags = 0;
    rule->add_seqno = 0;
    rule->modify_seqno = 0;
    ovs_list_init(&rule->expirable);
    ovs_mutex_unlock(&rule->mutex);

    /* Construct rule, initializing derived state. */
    error = ofproto->ofproto_class->rule_construct(rule);
    if (error) {
        ofproto_rule_destroy__(rule);
        return error;
    }

    rule->state = RULE_INITIALIZED;
    rule->match_tlv_bitmap = match_tlv_bitmap;
    rule->ofpacts_tlv_bitmap = ofpacts_tlv_bitmap;
    mf_vl_mff_ref(&rule->ofproto->vl_mff_map, match_tlv_bitmap);
    mf_vl_mff_ref(&rule->ofproto->vl_mff_map, ofpacts_tlv_bitmap);

    *new_rule = rule;
    return 0;
}

/* Initialize 'ofm' for a learn action.  If the rule already existed, a
 * reference to that rule is taken; otherwise a new rule is created.  'ofm'
 * keeps the rule reference in either case.  This does not take the global
 * 'ofproto_mutex'. */
enum ofperr
ofproto_flow_mod_init_for_learn(struct ofproto *ofproto,
                                const struct ofputil_flow_mod *fm,
                                struct ofproto_flow_mod *ofm)
    OVS_EXCLUDED(ofproto_mutex)
{
    /* Reject flow mods that do not look like they were generated by a learn
     * action. */
    if (fm->command != OFPFC_MODIFY_STRICT || fm->table_id == OFPTT_ALL
        || fm->flags & OFPUTIL_FF_RESET_COUNTS
        || fm->buffer_id != UINT32_MAX) {
        return OFPERR_OFPFMFC_UNKNOWN;
    }

    /* Check if the rule already exists, and we can get a reference to it.
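 */

/* Context (illustrative flow, invented): a learn action re-submits an
 * identical OFPFC_MODIFY_STRICT every time a packet hits the learning
 * rule, e.g. something along the lines of:
 *
 *     actions=learn(table=1, hard_timeout=60,
 *                   NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],
 *                   output:NXM_OF_IN_PORT[])
 *
 * The lookup below makes such repeated, unchanged flow mods cheap: they
 * only refresh 'modified' and take a rule reference instead of creating a
 * new rule each time.
 */

/*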
*/ struct oftable *table = &ofproto->tables[fm->table_id]; struct rule *rule; rule = rule_from_cls_rule(classifier_find_match_exactly( &table->cls, &fm->match, fm->priority, OVS_VERSION_MAX)); if (rule) { /* Check if the rule's attributes match as well. */ const struct rule_actions *actions; ovs_mutex_lock(&rule->mutex); actions = rule_get_actions(rule); if (rule->idle_timeout == fm->idle_timeout && rule->hard_timeout == fm->hard_timeout && rule->importance == fm->importance && rule->flags == (fm->flags & OFPUTIL_FF_STATE) && (!fm->modify_cookie || (fm->new_cookie == rule->flow_cookie)) && ofpacts_equal(fm->ofpacts, fm->ofpacts_len, actions->ofpacts, actions->ofpacts_len)) { /* Rule already exists and need not change, except for the modified * timestamp. Get a reference to the existing rule. */ ovs_mutex_unlock(&rule->mutex); if (!ofproto_rule_try_ref(rule)) { rule = NULL; /* Pretend it did not exist. */ } } else { ovs_mutex_unlock(&rule->mutex); rule = NULL; } } return ofproto_flow_mod_init(ofproto, ofm, fm, rule); } enum ofperr ofproto_flow_mod_learn_refresh(struct ofproto_flow_mod *ofm) { enum ofperr error = 0; /* ofm->temp_rule is our reference to the learned rule. We have a * reference to an existing rule, if it already was in the classifier, * otherwise we may have a fresh rule that we need to insert. */ struct rule *rule = ofm->temp_rule; if (!rule) { return OFPERR_OFPFMFC_UNKNOWN; } /* Create a new rule if the current one has been removed from the * classifier. We need to do this since RCU does not allow a current rule * to be reinserted before all threads have quiesced. * * It is possible that the rule is removed asynchronously, e.g., right * after we have read the 'rule->state' below. In this case the next time * this function is executed the rule will be reinstated. */ if (rule->state == RULE_REMOVED) { struct cls_rule cr; cls_rule_clone(&cr, &rule->cr); ovs_mutex_lock(&rule->mutex); error = ofproto_rule_create(rule->ofproto, &cr, rule->table_id, rule->flow_cookie, rule->idle_timeout, rule->hard_timeout, rule->flags, rule->importance, rule->actions->ofpacts, rule->actions->ofpacts_len, rule->match_tlv_bitmap, rule->ofpacts_tlv_bitmap, &ofm->temp_rule); ovs_mutex_unlock(&rule->mutex); if (!error) { ofproto_rule_unref(rule); /* Release old reference. */ } } else { /* Refresh the existing rule. */ ovs_mutex_lock(&rule->mutex); rule->modified = time_msec(); ovs_mutex_unlock(&rule->mutex); } return error; } enum ofperr ofproto_flow_mod_learn_start(struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule *rule = ofm->temp_rule; /* ofproto_flow_mod_start() consumes the reference, so we * take a new one. */ ofproto_rule_ref(rule); enum ofperr error = ofproto_flow_mod_start(rule->ofproto, ofm); ofm->temp_rule = rule; return error; } void ofproto_flow_mod_learn_revert(struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule *rule = rule_collection_rules(&ofm->new_rules)[0]; ofproto_flow_mod_revert(rule->ofproto, ofm); } void ofproto_flow_mod_learn_finish(struct ofproto_flow_mod *ofm, struct ofproto *orig_ofproto) OVS_REQUIRES(ofproto_mutex) { struct rule *rule = rule_collection_rules(&ofm->new_rules)[0]; /* If learning on a different bridge, must bump its version * number and flush connmgr afterwards. 
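 */

/* A learn that lands on another bridge typically comes from translation
 * crossing a patch port: the learn action executes while the packet is in
 * the peer bridge's pipeline, so that bridge's tables version and monitors
 * are independent of the caller's and must be bumped and flushed separately
 * (a plausible reading; the exact call paths are outside this file).
 */

/*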
*/ if (rule->ofproto != orig_ofproto) { ofproto_bump_tables_version(rule->ofproto); } ofproto_flow_mod_finish(rule->ofproto, ofm, NULL); if (rule->ofproto != orig_ofproto) { ofmonitor_flush(rule->ofproto->connmgr); } } /* Refresh 'ofm->temp_rule', for which the caller holds a reference, if already * in the classifier, insert it otherwise. If the rule has already been * removed from the classifier, a new rule is created using 'ofm->temp_rule' as * a template and the reference to the old 'ofm->temp_rule' is freed. If * 'keep_ref' is true, then a reference to the current rule is held, otherwise * it is released and 'ofm->temp_rule' is set to NULL. * * Caller needs to be the exclusive owner of 'ofm' as it is being manipulated * during the call. */ enum ofperr ofproto_flow_mod_learn(struct ofproto_flow_mod *ofm, bool keep_ref) OVS_EXCLUDED(ofproto_mutex) { enum ofperr error = ofproto_flow_mod_learn_refresh(ofm); struct rule *rule = ofm->temp_rule; /* Do we need to insert the rule? */ if (!error && rule->state == RULE_INITIALIZED) { ovs_mutex_lock(&ofproto_mutex); ofm->version = rule->ofproto->tables_version + 1; error = ofproto_flow_mod_learn_start(ofm); if (!error) { ofproto_flow_mod_learn_finish(ofm, NULL); } ovs_mutex_unlock(&ofproto_mutex); } if (!keep_ref) { ofproto_rule_unref(rule); ofm->temp_rule = NULL; } return error; } static void replace_rule_start(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, struct rule *old_rule, struct rule *new_rule) { struct oftable *table = &ofproto->tables[new_rule->table_id]; /* 'old_rule' may be either an evicted rule or replaced rule. */ if (old_rule) { /* Copy values from old rule for modify semantics. */ if (old_rule->removed_reason != OFPRR_EVICTION) { bool change_cookie = (ofm->modify_cookie && new_rule->flow_cookie != OVS_BE64_MAX && new_rule->flow_cookie != old_rule->flow_cookie); ovs_mutex_lock(&new_rule->mutex); ovs_mutex_lock(&old_rule->mutex); if (ofm->command != OFPFC_ADD) { new_rule->idle_timeout = old_rule->idle_timeout; new_rule->hard_timeout = old_rule->hard_timeout; *CONST_CAST(uint16_t *, &new_rule->importance) = old_rule->importance; new_rule->flags = old_rule->flags; new_rule->created = old_rule->created; } if (!change_cookie) { *CONST_CAST(ovs_be64 *, &new_rule->flow_cookie) = old_rule->flow_cookie; } ovs_mutex_unlock(&old_rule->mutex); ovs_mutex_unlock(&new_rule->mutex); } /* Mark the old rule for removal in the next version. */ cls_rule_make_invisible_in_version(&old_rule->cr, ofm->version); /* Remove the old rule from data structures. */ ofproto_rule_remove__(ofproto, old_rule); } else { table->n_flows++; } /* Insert flow to ofproto data structures, so that later flow_mods may * relate to it. This is reversible, in case later errors require this to * be reverted. */ ofproto_rule_insert__(ofproto, new_rule); /* Make the new rule visible for classifier lookups only from the next * version. */ classifier_insert(&table->cls, &new_rule->cr, ofm->version, ofm->conjs, ofm->n_conjs); } static void replace_rule_revert(struct ofproto *ofproto, struct rule *old_rule, struct rule *new_rule) { struct oftable *table = &ofproto->tables[new_rule->table_id]; if (old_rule) { if (old_rule->removed_reason == OFPRR_EVICTION) { /* Revert the eviction. */ eviction_group_add_rule(old_rule); } /* Restore the old rule to data structures. */ ofproto_rule_insert__(ofproto, old_rule); /* Restore the original visibility of the old rule. */ cls_rule_restore_visibility(&old_rule->cr); } else { /* Restore table's rule count. 
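 */

/* Each step below undoes one step of replace_rule_start():
 *
 *     start                                 revert
 *     -----                                 ------
 *     eviction_group_remove_rule(old)       eviction_group_add_rule(old)
 *     ofproto_rule_remove__(old)            ofproto_rule_insert__(old)
 *     cls_rule_make_invisible..(old)        cls_rule_restore_visibility(old)
 *     table->n_flows++ (no old rule)        table->n_flows--
 *     classifier_insert(new)                classifier_remove(new)
 */

/*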
*/ table->n_flows--; } /* Remove the new rule immediately. It was never visible to lookups. */ if (!classifier_remove(&table->cls, &new_rule->cr)) { OVS_NOT_REACHED(); } ofproto_rule_remove__(ofproto, new_rule); ofproto_rule_unref(new_rule); } /* Adds the 'new_rule', replacing the 'old_rule'. */ static void replace_rule_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req, struct rule *old_rule, struct rule *new_rule, struct ovs_list *dead_cookies) OVS_REQUIRES(ofproto_mutex) { struct rule *replaced_rule; replaced_rule = (old_rule && old_rule->removed_reason != OFPRR_EVICTION) ? old_rule : NULL; /* Insert the new flow to the ofproto provider. A non-NULL 'replaced_rule' * is a duplicate rule the 'new_rule' is replacing. The provider should * link the packet and byte counts from the old rule to the new one if * 'modify_keep_counts' is 'true'. The 'replaced_rule' will be deleted * right after this call. */ ofproto->ofproto_class->rule_insert(new_rule, replaced_rule, ofm->modify_keep_counts); learned_cookies_inc(ofproto, rule_get_actions(new_rule)); if (old_rule) { const struct rule_actions *old_actions = rule_get_actions(old_rule); const struct rule_actions *new_actions = rule_get_actions(new_rule); learned_cookies_dec(ofproto, old_actions, dead_cookies); if (replaced_rule) { enum nx_flow_update_event event = ofm->command == OFPFC_ADD ? NXFME_ADDED : NXFME_MODIFIED; bool changed_cookie = (new_rule->flow_cookie != old_rule->flow_cookie); bool changed_actions = !ofpacts_equal(new_actions->ofpacts, new_actions->ofpacts_len, old_actions->ofpacts, old_actions->ofpacts_len); if (event != NXFME_MODIFIED || changed_actions || changed_cookie) { ofmonitor_report(ofproto->connmgr, new_rule, event, 0, req ? req->ofconn : NULL, req ? req->request->xid : 0, changed_actions ? old_actions : NULL); } } else { /* XXX: This is slight duplication with delete_flows_finish__() */ ofmonitor_report(ofproto->connmgr, old_rule, NXFME_DELETED, OFPRR_EVICTION, req ? req->ofconn : NULL, req ? req->request->xid : 0, NULL); } } } /* ofm->temp_rule is consumed only in the successful case. */ static enum ofperr modify_flows_start__(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; struct rule_collection *new_rules = &ofm->new_rules; enum ofperr error; if (rule_collection_n(old_rules) > 0) { /* Create a new 'modified' rule for each old rule. */ struct rule *old_rule, *new_rule; const struct rule_actions *actions = rule_get_actions(ofm->temp_rule); /* Must check actions while holding ofproto_mutex to avoid a race. */ error = ofproto_check_ofpacts(ofproto, actions->ofpacts, actions->ofpacts_len); if (error) { return error; } /* Use the temp rule as the first new rule, and as the template for * the rest. */ struct rule *temp = ofm->temp_rule; ofm->temp_rule = NULL; /* We consume the template. */ bool first = true; RULE_COLLECTION_FOR_EACH (old_rule, old_rules) { if (first) { /* The template rule's match is possibly a loose one, so it * must be replaced with the old rule's match so that the new * rule actually replaces the old one. 
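 */

/* Example of why this matters (invented flows): a loose OFPFC_MODIFY of
 * "ip" may collect old rules "ip,nw_dst=10.0.0.1" and "ip,nw_dst=10.0.0.2".
 * Each replacement must carry the old rule's exact match and priority, not
 * the loose "ip" match from the template; otherwise both old rules would
 * collapse into a single "ip" rule.
 */

/*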
*/ cls_rule_destroy(CONST_CAST(struct cls_rule *, &temp->cr)); cls_rule_clone(CONST_CAST(struct cls_rule *, &temp->cr), &old_rule->cr); if (temp->match_tlv_bitmap != old_rule->match_tlv_bitmap) { mf_vl_mff_unref(&temp->ofproto->vl_mff_map, temp->match_tlv_bitmap); temp->match_tlv_bitmap = old_rule->match_tlv_bitmap; mf_vl_mff_ref(&temp->ofproto->vl_mff_map, temp->match_tlv_bitmap); } *CONST_CAST(uint8_t *, &temp->table_id) = old_rule->table_id; rule_collection_add(new_rules, temp); first = false; } else { struct cls_rule cr; cls_rule_clone(&cr, &old_rule->cr); error = ofproto_rule_create(ofproto, &cr, old_rule->table_id, temp->flow_cookie, temp->idle_timeout, temp->hard_timeout, temp->flags, temp->importance, temp->actions->ofpacts, temp->actions->ofpacts_len, old_rule->match_tlv_bitmap, temp->ofpacts_tlv_bitmap, &new_rule); if (!error) { rule_collection_add(new_rules, new_rule); } else { /* Return the template rule in place in the error case. */ ofm->temp_rule = temp; rule_collection_rules(new_rules)[0] = NULL; rule_collection_unref(new_rules); rule_collection_destroy(new_rules); return error; } } } ovs_assert(rule_collection_n(new_rules) == rule_collection_n(old_rules)); RULE_COLLECTIONS_FOR_EACH (old_rule, new_rule, old_rules, new_rules) { replace_rule_start(ofproto, ofm, old_rule, new_rule); } } else if (ofm->modify_may_add_flow) { /* No match, add a new flow, consumes 'temp'. */ error = add_flow_start(ofproto, ofm); } else { /* No flow to modify and may not add a flow. */ ofproto_rule_unref(ofm->temp_rule); ofm->temp_rule = NULL; /* We consume the template. */ error = 0; } return error; } static enum ofperr modify_flows_init_loose(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, 0, OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, OFPP_ANY, OFPG_ANY); rule_criteria_require_rw(&ofm->criteria, (fm->flags & OFPUTIL_FF_NO_READONLY) != 0); /* Must create a new flow in advance for the case that no matches are * found. Also used for template for multiple modified flows. */ add_flow_init(ofproto, ofm, fm); return 0; } /* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code on * failure. */ static enum ofperr modify_flows_start_loose(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; enum ofperr error; error = collect_rules_loose(ofproto, &ofm->criteria, old_rules); if (!error) { error = modify_flows_start__(ofproto, ofm); } if (error) { rule_collection_destroy(old_rules); } return error; } static void modify_flows_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; struct rule_collection *new_rules = &ofm->new_rules; /* Old rules were not changed yet, only need to revert new rules. 
*/ if (rule_collection_n(old_rules) > 0) { struct rule *old_rule, *new_rule; RULE_COLLECTIONS_FOR_EACH (old_rule, new_rule, old_rules, new_rules) { replace_rule_revert(ofproto, old_rule, new_rule); } rule_collection_destroy(new_rules); rule_collection_destroy(old_rules); } } static void modify_flows_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; struct rule_collection *new_rules = &ofm->new_rules; if (rule_collection_n(old_rules) == 0 && rule_collection_n(new_rules) == 1) { add_flow_finish(ofproto, ofm, req); } else if (rule_collection_n(old_rules) > 0) { struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies); ovs_assert(rule_collection_n(new_rules) == rule_collection_n(old_rules)); struct rule *old_rule, *new_rule; RULE_COLLECTIONS_FOR_EACH (old_rule, new_rule, old_rules, new_rules) { replace_rule_finish(ofproto, ofm, req, old_rule, new_rule, &dead_cookies); } learned_cookies_flush(ofproto, &dead_cookies); remove_rules_postponed(old_rules); } } static enum ofperr modify_flow_init_strict(struct ofproto *ofproto OVS_UNUSED, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, fm->priority, OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, OFPP_ANY, OFPG_ANY); rule_criteria_require_rw(&ofm->criteria, (fm->flags & OFPUTIL_FF_NO_READONLY) != 0); /* Must create a new flow in advance for the case that no matches are * found. Also used for template for multiple modified flows. */ add_flow_init(ofproto, ofm, fm); return 0; } /* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error * code on failure. */ static enum ofperr modify_flow_start_strict(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *old_rules = &ofm->old_rules; enum ofperr error; error = collect_rules_strict(ofproto, &ofm->criteria, old_rules); if (!error) { /* collect_rules_strict() can return max 1 rule. */ error = modify_flows_start__(ofproto, ofm); } return error; } /* OFPFC_DELETE implementation. */ static void delete_flows_start__(struct ofproto *ofproto, ovs_version_t version, const struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, rules) { struct oftable *table = &ofproto->tables[rule->table_id]; table->n_flows--; cls_rule_make_invisible_in_version(&rule->cr, version); /* Remove rule from ofproto data structures. */ ofproto_rule_remove__(ofproto, rule); } } static void delete_flows_revert__(struct ofproto *ofproto, const struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, rules) { struct oftable *table = &ofproto->tables[rule->table_id]; /* Add rule back to ofproto data structures. */ ofproto_rule_insert__(ofproto, rule); /* Restore table's rule count. */ table->n_flows++; /* Restore the original visibility of the rule. 
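 */

/* Note: delete_flows_start__() never removed the rule from the classifier;
 * it only made it invisible as of 'version', so readers iterating an older
 * version still found it.  Reverting is therefore just the bookkeeping
 * above plus the visibility flip below; no classifier reinsertion is
 * needed.
 */

/*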
*/ cls_rule_restore_visibility(&rule->cr); } } static void delete_flows_finish__(struct ofproto *ofproto, struct rule_collection *rules, enum ofp_flow_removed_reason reason, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { if (rule_collection_n(rules)) { struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies); struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, rules) { /* This value will be used to send the flow removed message right * before the rule is actually destroyed. */ rule->removed_reason = reason; ofmonitor_report(ofproto->connmgr, rule, NXFME_DELETED, reason, req ? req->ofconn : NULL, req ? req->request->xid : 0, NULL); /* Send Vacancy Event for OF1.4+. */ send_table_status(ofproto, rule->table_id); learned_cookies_dec(ofproto, rule_get_actions(rule), &dead_cookies); } remove_rules_postponed(rules); learned_cookies_flush(ofproto, &dead_cookies); } } /* Deletes the rules listed in 'rules'. * The deleted rules will become invisible to the lookups in the next version. * Destroys 'rules'. */ static void delete_flows__(struct rule_collection *rules, enum ofp_flow_removed_reason reason, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { if (rule_collection_n(rules)) { struct ofproto *ofproto = rule_collection_rules(rules)[0]->ofproto; delete_flows_start__(ofproto, ofproto->tables_version + 1, rules); ofproto_bump_tables_version(ofproto); delete_flows_finish__(ofproto, rules, reason, req); ofmonitor_flush(ofproto->connmgr); } } static enum ofperr delete_flows_init_loose(struct ofproto *ofproto OVS_UNUSED, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, 0, OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, fm->out_port, fm->out_group); rule_criteria_require_rw(&ofm->criteria, (fm->flags & OFPUTIL_FF_NO_READONLY) != 0); return 0; } /* Implements OFPFC_DELETE. */ static enum ofperr delete_flows_start_loose(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *rules = &ofm->old_rules; enum ofperr error; error = collect_rules_loose(ofproto, &ofm->criteria, rules); if (!error) { delete_flows_start__(ofproto, ofm->version, rules); } return error; } static void delete_flows_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { delete_flows_revert__(ofproto, &ofm->old_rules); } static void delete_flows_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { delete_flows_finish__(ofproto, &ofm->old_rules, OFPRR_DELETE, req); } static enum ofperr delete_flows_init_strict(struct ofproto *ofproto OVS_UNUSED, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm) OVS_EXCLUDED(ofproto_mutex) { rule_criteria_init(&ofm->criteria, fm->table_id, &fm->match, fm->priority, OVS_VERSION_MAX, fm->cookie, fm->cookie_mask, fm->out_port, fm->out_group); rule_criteria_require_rw(&ofm->criteria, (fm->flags & OFPUTIL_FF_NO_READONLY) != 0); return 0; } /* Implements OFPFC_DELETE_STRICT. */ static enum ofperr delete_flow_start_strict(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { struct rule_collection *rules = &ofm->old_rules; enum ofperr error; error = collect_rules_strict(ofproto, &ofm->criteria, rules); if (!error) { delete_flows_start__(ofproto, ofm->version, rules); } return error; } /* This may only be called by rule_destroy_cb()! 
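 */

/* Worked example for the calc_duration() call below (invented numbers):
 * for a rule created 2500 ms ago, calc_duration() yields duration_sec = 2
 * and duration_nsec = 500 * 1000 * 1000, i.e. 2.5 s split across the two
 * OpenFlow duration fields.
 */

/*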
*/ static void ofproto_rule_send_removed(struct rule *rule) OVS_EXCLUDED(ofproto_mutex) { struct ofputil_flow_removed fr; long long int used; minimatch_expand(&rule->cr.match, &fr.match); fr.priority = rule->cr.priority; /* Synchronize with connmgr_destroy() calls to prevent connmgr disappearing * while we use it. */ ovs_mutex_lock(&ofproto_mutex); struct connmgr *connmgr = rule->ofproto->connmgr; if (!connmgr) { ovs_mutex_unlock(&ofproto_mutex); return; } fr.cookie = rule->flow_cookie; fr.reason = rule->removed_reason; fr.table_id = rule->table_id; calc_duration(rule->created, time_msec(), &fr.duration_sec, &fr.duration_nsec); ovs_mutex_lock(&rule->mutex); fr.idle_timeout = rule->idle_timeout; fr.hard_timeout = rule->hard_timeout; ovs_mutex_unlock(&rule->mutex); rule->ofproto->ofproto_class->rule_get_stats(rule, &fr.packet_count, &fr.byte_count, &used); connmgr_send_flow_removed(connmgr, &fr); ovs_mutex_unlock(&ofproto_mutex); } /* Sends an OpenFlow "flow removed" message with the given 'reason' (either * OFPRR_HARD_TIMEOUT or OFPRR_IDLE_TIMEOUT), and then removes 'rule' from its * ofproto. * * ofproto implementation ->run() functions should use this function to expire * OpenFlow flows. */ void ofproto_rule_expire(struct rule *rule, uint8_t reason) OVS_REQUIRES(ofproto_mutex) { struct rule_collection rules; rule_collection_init(&rules); rule_collection_add(&rules, rule); delete_flows__(&rules, reason, NULL); } /* Reduces '*timeout' to no more than 'max'. A value of zero in either case * means "infinite". */ static void reduce_timeout(uint16_t max, uint16_t *timeout) { if (max && (!*timeout || *timeout > max)) { *timeout = max; } } /* If 'idle_timeout' is nonzero, and 'rule' has no idle timeout or an idle * timeout greater than 'idle_timeout', lowers 'rule''s idle timeout to * 'idle_timeout' seconds. Similarly for 'hard_timeout'. * * Suitable for implementing OFPACT_FIN_TIMEOUT. 
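 */

/* Behavior of reduce_timeout() as used below (invented values):
 *
 *     max    timeout in    timeout out
 *     ---    ----------    -----------
 *      10     0 (inf.)         10
 *      10     5                 5
 *      10    30                10
 *       0    30                30   (max == 0 means "no cap")
 */

/*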
*/ void ofproto_rule_reduce_timeouts__(struct rule *rule, uint16_t idle_timeout, uint16_t hard_timeout) OVS_REQUIRES(ofproto_mutex) OVS_EXCLUDED(rule->mutex) { if (!idle_timeout && !hard_timeout) { return; } if (ovs_list_is_empty(&rule->expirable)) { ovs_list_insert(&rule->ofproto->expirable, &rule->expirable); } ovs_mutex_lock(&rule->mutex); reduce_timeout(idle_timeout, &rule->idle_timeout); reduce_timeout(hard_timeout, &rule->hard_timeout); ovs_mutex_unlock(&rule->mutex); } void ofproto_rule_reduce_timeouts(struct rule *rule, uint16_t idle_timeout, uint16_t hard_timeout) OVS_EXCLUDED(ofproto_mutex, rule->mutex) { if (!idle_timeout && !hard_timeout) { return; } ovs_mutex_lock(&ofproto_mutex); if (ovs_list_is_empty(&rule->expirable)) { ovs_list_insert(&rule->ofproto->expirable, &rule->expirable); } ovs_mutex_unlock(&ofproto_mutex); ovs_mutex_lock(&rule->mutex); reduce_timeout(idle_timeout, &rule->idle_timeout); reduce_timeout(hard_timeout, &rule->hard_timeout); ovs_mutex_unlock(&rule->mutex); } static enum ofperr handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_flow_mod fm; uint64_t ofpacts_stub[1024 / 8]; struct ofpbuf ofpacts; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); error = ofputil_decode_flow_mod(&fm, oh, ofconn_get_protocol(ofconn), ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map, &ofpacts, u16_to_ofp(ofproto->max_ports), ofproto->n_tables); if (!error) { struct openflow_mod_requester req = { ofconn, oh }; error = handle_flow_mod__(ofproto, &fm, &req); } ofpbuf_uninit(&ofpacts); return error; } static enum ofperr handle_flow_mod__(struct ofproto *ofproto, const struct ofputil_flow_mod *fm, const struct openflow_mod_requester *req) OVS_EXCLUDED(ofproto_mutex) { struct ofproto_flow_mod ofm; enum ofperr error; error = ofproto_flow_mod_init(ofproto, &ofm, fm, NULL); if (error) { return error; } ovs_mutex_lock(&ofproto_mutex); ofm.version = ofproto->tables_version + 1; error = ofproto_flow_mod_start(ofproto, &ofm); if (!error) { ofproto_bump_tables_version(ofproto); ofproto_flow_mod_finish(ofproto, &ofm, req); ofmonitor_flush(ofproto->connmgr); } ovs_mutex_unlock(&ofproto_mutex); return error; } static enum ofperr handle_role_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_role_request request; struct ofputil_role_request reply; struct ofpbuf *buf; enum ofperr error; error = ofputil_decode_role_message(oh, &request); if (error) { return error; } if (request.role != OFPCR12_ROLE_NOCHANGE) { if (request.role != OFPCR12_ROLE_EQUAL && request.have_generation_id && !ofconn_set_master_election_id(ofconn, request.generation_id)) { return OFPERR_OFPRRFC_STALE; } ofconn_set_role(ofconn, request.role); } reply.role = ofconn_get_role(ofconn); reply.have_generation_id = ofconn_get_master_election_id( ofconn, &reply.generation_id); buf = ofputil_encode_role_reply(oh, &reply); ofconn_send_reply(ofconn, buf); return 0; } static enum ofperr handle_nxt_flow_mod_table_id(struct ofconn *ofconn, const struct ofp_header *oh) { const struct nx_flow_mod_table_id *msg = ofpmsg_body(oh); enum ofputil_protocol cur, next; cur = ofconn_get_protocol(ofconn); next = ofputil_protocol_set_tid(cur, msg->set != 0); ofconn_set_protocol(ofconn, next); return 0; } static enum ofperr handle_nxt_set_flow_format(struct ofconn *ofconn, const struct ofp_header *oh) { const 
struct nx_set_flow_format *msg = ofpmsg_body(oh); enum ofputil_protocol cur, next; enum ofputil_protocol next_base; next_base = ofputil_nx_flow_format_to_protocol(ntohl(msg->format)); if (!next_base) { return OFPERR_OFPBRC_EPERM; } cur = ofconn_get_protocol(ofconn); next = ofputil_protocol_set_base(cur, next_base); ofconn_set_protocol(ofconn, next); return 0; } static enum ofperr handle_nxt_set_packet_in_format(struct ofconn *ofconn, const struct ofp_header *oh) { const struct nx_set_packet_in_format *msg = ofpmsg_body(oh); uint32_t format; format = ntohl(msg->format); if (!ofputil_packet_in_format_is_valid(format)) { return OFPERR_OFPBRC_EPERM; } ofconn_set_packet_in_format(ofconn, format); return 0; } static enum ofperr handle_nxt_set_async_config(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_async_cfg basis = ofconn_get_async_config(ofconn); struct ofputil_async_cfg ac; enum ofperr error; error = ofputil_decode_set_async_config(oh, false, &basis, &ac); if (error) { return error; } ofconn_set_async_config(ofconn, &ac); if (ofconn_get_type(ofconn) == OFCONN_SERVICE && !ofconn_get_miss_send_len(ofconn)) { ofconn_set_miss_send_len(ofconn, OFP_DEFAULT_MISS_SEND_LEN); } return 0; } static enum ofperr handle_nxt_get_async_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn); ofconn_send_reply(ofconn, ofputil_encode_get_async_reply(oh, &ac)); return 0; } static enum ofperr handle_nxt_set_controller_id(struct ofconn *ofconn, const struct ofp_header *oh) { const struct nx_controller_id *nci = ofpmsg_body(oh); if (!is_all_zeros(nci->zero, sizeof nci->zero)) { return OFPERR_NXBRC_MUST_BE_ZERO; } ofconn_set_controller_id(ofconn, ntohs(nci->controller_id)); return 0; } static enum ofperr handle_barrier_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofpbuf *buf; buf = ofpraw_alloc_reply((oh->version == OFP10_VERSION ? OFPRAW_OFPT10_BARRIER_REPLY : OFPRAW_OFPT11_BARRIER_REPLY), oh, 0); ofconn_send_reply(ofconn, buf); return 0; } static void ofproto_compose_flow_refresh_update(const struct rule *rule, enum nx_flow_monitor_flags flags, struct ovs_list *msgs, const struct tun_table *tun_table) OVS_REQUIRES(ofproto_mutex) { const struct rule_actions *actions; struct ofputil_flow_update fu; fu.event = (flags & (NXFMF_INITIAL | NXFMF_ADD) ? NXFME_ADDED : NXFME_MODIFIED); fu.reason = 0; ovs_mutex_lock(&rule->mutex); fu.idle_timeout = rule->idle_timeout; fu.hard_timeout = rule->hard_timeout; ovs_mutex_unlock(&rule->mutex); fu.table_id = rule->table_id; fu.cookie = rule->flow_cookie; minimatch_expand(&rule->cr.match, &fu.match); fu.priority = rule->cr.priority; actions = flags & NXFMF_ACTIONS ? rule_get_actions(rule) : NULL; fu.ofpacts = actions ? actions->ofpacts : NULL; fu.ofpacts_len = actions ? 
actions->ofpacts_len : 0; if (ovs_list_is_empty(msgs)) { ofputil_start_flow_update(msgs); } ofputil_append_flow_update(&fu, msgs, tun_table); } void ofmonitor_compose_refresh_updates(struct rule_collection *rules, struct ovs_list *msgs) OVS_REQUIRES(ofproto_mutex) { struct rule *rule; RULE_COLLECTION_FOR_EACH (rule, rules) { enum nx_flow_monitor_flags flags = rule->monitor_flags; rule->monitor_flags = 0; ofproto_compose_flow_refresh_update(rule, flags, msgs, ofproto_get_tun_tab(rule->ofproto)); } } static void ofproto_collect_ofmonitor_refresh_rule(const struct ofmonitor *m, struct rule *rule, uint64_t seqno, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { enum nx_flow_monitor_flags update; if (rule_is_hidden(rule)) { return; } if (!ofproto_rule_has_out_port(rule, m->out_port)) { return; } if (seqno) { if (rule->add_seqno > seqno) { update = NXFMF_ADD | NXFMF_MODIFY; } else if (rule->modify_seqno > seqno) { update = NXFMF_MODIFY; } else { return; } if (!(m->flags & update)) { return; } } else { update = NXFMF_INITIAL; } if (!rule->monitor_flags) { rule_collection_add(rules, rule); } rule->monitor_flags |= update | (m->flags & NXFMF_ACTIONS); } static void ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m, uint64_t seqno, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { const struct ofproto *ofproto = ofconn_get_ofproto(m->ofconn); const struct oftable *table; struct cls_rule target; cls_rule_init_from_minimatch(&target, &m->match, 0); FOR_EACH_MATCHING_TABLE (table, m->table_id, ofproto) { struct rule *rule; CLS_FOR_EACH_TARGET (rule, cr, &table->cls, &target, OVS_VERSION_MAX) { ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules); } } cls_rule_destroy(&target); } static void ofproto_collect_ofmonitor_initial_rules(struct ofmonitor *m, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { if (m->flags & NXFMF_INITIAL) { ofproto_collect_ofmonitor_refresh_rules(m, 0, rules); } } void ofmonitor_collect_resume_rules(struct ofmonitor *m, uint64_t seqno, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { ofproto_collect_ofmonitor_refresh_rules(m, seqno, rules); } static enum ofperr flow_monitor_delete(struct ofconn *ofconn, uint32_t id) OVS_REQUIRES(ofproto_mutex) { struct ofmonitor *m; enum ofperr error; m = ofmonitor_lookup(ofconn, id); if (m) { ofmonitor_destroy(m); error = 0; } else { error = OFPERR_OFPMOFC_UNKNOWN_MONITOR; } return error; } static enum ofperr handle_flow_monitor_request(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); struct ofmonitor **monitors = NULL; size_t allocated_monitors = 0; size_t n_monitors = 0; enum ofperr error; ovs_mutex_lock(&ofproto_mutex); for (;;) { struct ofputil_flow_monitor_request request; struct ofmonitor *m; int retval; retval = ofputil_decode_flow_monitor_request(&request, &b); if (retval == EOF) { break; } else if (retval) { error = retval; goto error; } if (request.table_id != 0xff && request.table_id >= ofproto->n_tables) { error = OFPERR_OFPBRC_BAD_TABLE_ID; goto error; } error = ofmonitor_create(&request, ofconn, &m); if (error) { goto error; } if (n_monitors >= allocated_monitors) { monitors = x2nrealloc(monitors, &allocated_monitors, sizeof *monitors); } monitors[n_monitors++] = m; } struct rule_collection rules; rule_collection_init(&rules); for (size_t i = 0; i < n_monitors; i++) { 
ofproto_collect_ofmonitor_initial_rules(monitors[i], &rules); } struct ovs_list replies; ofpmp_init(&replies, oh); ofmonitor_compose_refresh_updates(&rules, &replies); ovs_mutex_unlock(&ofproto_mutex); rule_collection_destroy(&rules); ofconn_send_replies(ofconn, &replies); free(monitors); return 0; error: for (size_t i = 0; i < n_monitors; i++) { ofmonitor_destroy(monitors[i]); } free(monitors); ovs_mutex_unlock(&ofproto_mutex); return error; } static enum ofperr handle_flow_monitor_cancel(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { enum ofperr error; uint32_t id; id = ofputil_decode_flow_monitor_cancel(oh); ovs_mutex_lock(&ofproto_mutex); error = flow_monitor_delete(ofconn, id); ovs_mutex_unlock(&ofproto_mutex); return error; } /* Meters implementation. * * Meter table entry, indexed by the OpenFlow meter_id. * 'created' is used to compute the duration for meter stats. * The 'rules' list is needed so that we can delete the dependent rules when * the meter table entry is deleted. * 'provider_meter_id' is for the provider's private use. */ struct meter { long long int created; /* Time created. */ struct ovs_list rules; /* List of "struct rule_dpif"s. */ ofproto_meter_id provider_meter_id; uint16_t flags; /* Meter flags. */ uint16_t n_bands; /* Number of meter bands. */ struct ofputil_meter_band *bands; }; /* * This is used in instruction validation at flow set-up time, * because flows may not use non-existent meters. * Return value of UINT32_MAX signifies an invalid meter. */ static uint32_t get_provider_meter_id(const struct ofproto *ofproto, uint32_t of_meter_id) { if (of_meter_id && of_meter_id <= ofproto->meter_features.max_meters) { const struct meter *meter = ofproto->meters[of_meter_id]; if (meter) { return meter->provider_meter_id.uint32; } } return UINT32_MAX; } /* Finds the meter invoked by 'rule''s actions and adds 'rule' to the meter's * list of rules. */ static void meter_insert_rule(struct rule *rule) { const struct rule_actions *a = rule_get_actions(rule); uint32_t meter_id = ofpacts_get_meter(a->ofpacts, a->ofpacts_len); struct meter *meter = rule->ofproto->meters[meter_id]; ovs_list_insert(&meter->rules, &rule->meter_list_node); } static void meter_update(struct meter *meter, const struct ofputil_meter_config *config) { free(meter->bands); meter->flags = config->flags; meter->n_bands = config->n_bands; meter->bands = xmemdup(config->bands, config->n_bands * sizeof *meter->bands); } static struct meter * meter_create(const struct ofputil_meter_config *config, ofproto_meter_id provider_meter_id) { struct meter *meter; meter = xzalloc(sizeof *meter); meter->provider_meter_id = provider_meter_id; meter->created = time_msec(); ovs_list_init(&meter->rules); meter_update(meter, config); return meter; } static void meter_delete(struct ofproto *ofproto, uint32_t first, uint32_t last) OVS_REQUIRES(ofproto_mutex) { for (uint32_t mid = first; mid <= last; ++mid) { struct meter *meter = ofproto->meters[mid]; if (meter) { /* First delete the rules that use this meter. 
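 * (Per the OpenFlow 1.3 specification, deleting a meter also deletes all
 * flows that use it, so the dependent flows are removed below with reason
 * OFPRR_METER_DELETE before the provider's meter state is torn down.)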
*/ if (!ovs_list_is_empty(&meter->rules)) { struct rule_collection rules; struct rule *rule; rule_collection_init(&rules); LIST_FOR_EACH (rule, meter_list_node, &meter->rules) { rule_collection_add(&rules, rule); } delete_flows__(&rules, OFPRR_METER_DELETE, NULL); } ofproto->meters[mid] = NULL; ofproto->ofproto_class->meter_del(ofproto, meter->provider_meter_id); free(meter->bands); free(meter); } } } static enum ofperr handle_add_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm) { ofproto_meter_id provider_meter_id = { UINT32_MAX }; struct meter **meterp = &ofproto->meters[mm->meter.meter_id]; enum ofperr error; if (*meterp) { return OFPERR_OFPMMFC_METER_EXISTS; } error = ofproto->ofproto_class->meter_set(ofproto, &provider_meter_id, &mm->meter); if (!error) { ovs_assert(provider_meter_id.uint32 != UINT32_MAX); *meterp = meter_create(&mm->meter, provider_meter_id); } return error; } static enum ofperr handle_modify_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm) { struct meter *meter = ofproto->meters[mm->meter.meter_id]; enum ofperr error; uint32_t provider_meter_id; if (!meter) { return OFPERR_OFPMMFC_UNKNOWN_METER; } provider_meter_id = meter->provider_meter_id.uint32; error = ofproto->ofproto_class->meter_set(ofproto, &meter->provider_meter_id, &mm->meter); ovs_assert(meter->provider_meter_id.uint32 == provider_meter_id); if (!error) { meter_update(meter, &mm->meter); } return error; } static enum ofperr handle_delete_meter(struct ofconn *ofconn, struct ofputil_meter_mod *mm) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); uint32_t meter_id = mm->meter.meter_id; enum ofperr error = 0; uint32_t first, last; if (meter_id == OFPM13_ALL) { first = 1; last = ofproto->meter_features.max_meters; } else { if (!meter_id || meter_id > ofproto->meter_features.max_meters) { return 0; } first = last = meter_id; } /* Delete the meters. */ ovs_mutex_lock(&ofproto_mutex); meter_delete(ofproto, first, last); ovs_mutex_unlock(&ofproto_mutex); return error; } static enum ofperr handle_meter_mod(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_meter_mod mm; uint64_t bands_stub[256 / 8]; struct ofpbuf bands; uint32_t meter_id; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } ofpbuf_use_stub(&bands, bands_stub, sizeof bands_stub); error = ofputil_decode_meter_mod(oh, &mm, &bands); if (error) { goto exit_free_bands; } meter_id = mm.meter.meter_id; if (mm.command != OFPMC13_DELETE) { /* Fails also when meters are not implemented by the provider. 
*/ if (meter_id == 0 || meter_id > OFPM13_MAX) { error = OFPERR_OFPMMFC_INVALID_METER; goto exit_free_bands; } else if (meter_id > ofproto->meter_features.max_meters) { error = OFPERR_OFPMMFC_OUT_OF_METERS; goto exit_free_bands; } if (mm.meter.n_bands > ofproto->meter_features.max_bands) { error = OFPERR_OFPMMFC_OUT_OF_BANDS; goto exit_free_bands; } } switch (mm.command) { case OFPMC13_ADD: error = handle_add_meter(ofproto, &mm); break; case OFPMC13_MODIFY: error = handle_modify_meter(ofproto, &mm); break; case OFPMC13_DELETE: error = handle_delete_meter(ofconn, &mm); break; default: error = OFPERR_OFPMMFC_BAD_COMMAND; break; } if (!error) { struct ofputil_requestforward rf; rf.xid = oh->xid; rf.reason = OFPRFR_METER_MOD; rf.meter_mod = &mm; connmgr_send_requestforward(ofproto->connmgr, ofconn, &rf); } exit_free_bands: ofpbuf_uninit(&bands); return error; } static enum ofperr handle_meter_features_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_meter_features features; struct ofpbuf *b; if (ofproto->ofproto_class->meter_get_features) { ofproto->ofproto_class->meter_get_features(ofproto, &features); } else { memset(&features, 0, sizeof features); } b = ofputil_encode_meter_features_reply(&features, request); ofconn_send_reply(ofconn, b); return 0; } static enum ofperr handle_meter_request(struct ofconn *ofconn, const struct ofp_header *request, enum ofptype type) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ovs_list replies; uint64_t bands_stub[256 / 8]; struct ofpbuf bands; uint32_t meter_id, first, last; ofputil_decode_meter_request(request, &meter_id); if (meter_id == OFPM13_ALL) { first = 1; last = ofproto->meter_features.max_meters; } else { if (!meter_id || meter_id > ofproto->meter_features.max_meters || !ofproto->meters[meter_id]) { return OFPERR_OFPMMFC_UNKNOWN_METER; } first = last = meter_id; } ofpbuf_use_stub(&bands, bands_stub, sizeof bands_stub); ofpmp_init(&replies, request); for (meter_id = first; meter_id <= last; ++meter_id) { struct meter *meter = ofproto->meters[meter_id]; if (!meter) { continue; /* Skip non-existing meters. */ } if (type == OFPTYPE_METER_STATS_REQUEST) { struct ofputil_meter_stats stats; stats.meter_id = meter_id; /* Provider sets the packet and byte counts, we do the rest. */ stats.flow_count = ovs_list_size(&meter->rules); calc_duration(meter->created, time_msec(), &stats.duration_sec, &stats.duration_nsec); stats.n_bands = meter->n_bands; ofpbuf_clear(&bands); stats.bands = ofpbuf_put_uninit(&bands, meter->n_bands * sizeof *stats.bands); if (!ofproto->ofproto_class->meter_get(ofproto, meter->provider_meter_id, &stats)) { ofputil_append_meter_stats(&replies, &stats); } } else { /* type == OFPTYPE_METER_CONFIG_REQUEST */ struct ofputil_meter_config config; config.meter_id = meter_id; config.flags = meter->flags; config.n_bands = meter->n_bands; config.bands = meter->bands; ofputil_append_meter_config(&replies, &config); } } ofconn_send_replies(ofconn, &replies); ofpbuf_uninit(&bands); return 0; } /* Returned group is RCU protected. 
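 * That is, the returned pointer remains valid only until the current RCU
 * quiescent period ends; a caller that needs the group for longer must take
 * a reference, as ofproto_group_lookup() below does when 'take_ref' is set.
 * An illustrative (hypothetical) caller:
 *
 *     struct ofgroup *g = ofproto_group_lookup(ofproto, group_id,
 *                                              version, true);
 *     if (g) {
 *         ...use 'g' for as long as necessary...
 *         ofproto_group_unref(g);
 *     }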
*/ static struct ofgroup * ofproto_group_lookup__(const struct ofproto *ofproto, uint32_t group_id, ovs_version_t version) { struct ofgroup *group; CMAP_FOR_EACH_WITH_HASH (group, cmap_node, hash_int(group_id, 0), &ofproto->groups) { if (group->group_id == group_id && versions_visible_in_version(&group->versions, version)) { return group; } } return NULL; } /* If the group exists, this function increments the group's reference count. * * Make sure to call ofproto_group_unref() after no longer needing to maintain * a reference to the group. */ struct ofgroup * ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id, ovs_version_t version, bool take_ref) { struct ofgroup *group; group = ofproto_group_lookup__(ofproto, group_id, version); if (group && take_ref) { /* Not holding a lock, so it is possible that another thread releases * the last reference just before we manage to get one. */ return ofproto_group_try_ref(group) ? group : NULL; } return group; } /* Caller should hold 'ofproto_mutex' if it is important that the * group is not removed by someone else. */ static bool ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id) { return ofproto_group_lookup__(ofproto, group_id, OVS_VERSION_MAX) != NULL; } static void group_add_rule(struct ofgroup *group, struct rule *rule) { rule_collection_add(&group->rules, rule); } static void group_remove_rule(struct ofgroup *group, struct rule *rule) { rule_collection_remove(&group->rules, rule); } static void append_group_stats(struct ofgroup *group, struct ovs_list *replies) OVS_REQUIRES(ofproto_mutex) { struct ofputil_group_stats ogs; const struct ofproto *ofproto = group->ofproto; long long int now = time_msec(); int error; ogs.bucket_stats = xmalloc(group->n_buckets * sizeof *ogs.bucket_stats); /* Provider sets the packet and byte counts, we do the rest. */ ogs.ref_count = rule_collection_n(&group->rules); ogs.n_buckets = group->n_buckets; error = (ofproto->ofproto_class->group_get_stats ? ofproto->ofproto_class->group_get_stats(group, &ogs) : EOPNOTSUPP); if (error) { ogs.packet_count = UINT64_MAX; ogs.byte_count = UINT64_MAX; memset(ogs.bucket_stats, 0xff, ogs.n_buckets * sizeof *ogs.bucket_stats); } ogs.group_id = group->group_id; calc_duration(group->created, now, &ogs.duration_sec, &ogs.duration_nsec); ofputil_append_group_stats(replies, &ogs); free(ogs.bucket_stats); } static void handle_group_request(struct ofconn *ofconn, const struct ofp_header *request, uint32_t group_id, void (*cb)(struct ofgroup *, struct ovs_list *replies)) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofgroup *group; struct ovs_list replies; ofpmp_init(&replies, request); /* Must exclude modifications to guarantee safe iteration over the groups. 
*/ ovs_mutex_lock(&ofproto_mutex); if (group_id == OFPG_ALL) { CMAP_FOR_EACH (group, cmap_node, &ofproto->groups) { if (versions_visible_in_version(&group->versions, OVS_VERSION_MAX)) { cb(group, &replies); } } } else { group = ofproto_group_lookup__(ofproto, group_id, OVS_VERSION_MAX); if (group) { cb(group, &replies); } } ovs_mutex_unlock(&ofproto_mutex); ofconn_send_replies(ofconn, &replies); } static enum ofperr handle_group_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { uint32_t group_id; enum ofperr error; error = ofputil_decode_group_stats_request(request, &group_id); if (error) { return error; } handle_group_request(ofconn, request, group_id, append_group_stats); return 0; } static void append_group_desc(struct ofgroup *group, struct ovs_list *replies) { struct ofputil_group_desc gds; gds.group_id = group->group_id; gds.type = group->type; gds.props = group->props; ofputil_append_group_desc_reply(&gds, &group->buckets, replies); } static enum ofperr handle_group_desc_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { handle_group_request(ofconn, request, ofputil_decode_group_desc_request(request), append_group_desc); return 0; } static enum ofperr handle_group_features_stats_request(struct ofconn *ofconn, const struct ofp_header *request) { struct ofproto *p = ofconn_get_ofproto(ofconn); struct ofpbuf *msg; msg = ofputil_encode_group_features_reply(&p->ogf, request); if (msg) { ofconn_send_reply(ofconn, msg); } return 0; } static void put_queue_get_config_reply(struct ofport *port, uint32_t queue, struct ovs_list *replies) { struct ofputil_queue_config qc; /* None of the existing queues have compatible properties, so we hard-code * omitting min_rate and max_rate. */ qc.port = port->ofp_port; qc.queue = queue; qc.min_rate = UINT16_MAX; qc.max_rate = UINT16_MAX; ofputil_append_queue_get_config_reply(&qc, replies); } static int handle_queue_get_config_request_for_port(struct ofport *port, uint32_t queue, struct ovs_list *replies) { struct smap details = SMAP_INITIALIZER(&details); if (queue != OFPQ_ALL) { int error = netdev_get_queue(port->netdev, queue, &details); switch (error) { case 0: put_queue_get_config_reply(port, queue, replies); break; case EOPNOTSUPP: case EINVAL: return OFPERR_OFPQOFC_BAD_QUEUE; default: return OFPERR_NXQOFC_QUEUE_ERROR; } } else { struct netdev_queue_dump queue_dump; uint32_t queue_id; NETDEV_QUEUE_FOR_EACH (&queue_id, &details, &queue_dump, port->netdev) { put_queue_get_config_reply(port, queue_id, replies); } } smap_destroy(&details); return 0; } static enum ofperr handle_queue_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ovs_list replies; struct ofport *port; ofp_port_t req_port; uint32_t req_queue; enum ofperr error; error = ofputil_decode_queue_get_config_request(oh, &req_port, &req_queue); if (error) { return error; } ofputil_start_queue_get_config_reply(oh, &replies); if (req_port == OFPP_ANY) { error = OFPERR_OFPQOFC_BAD_QUEUE; HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) { if (!handle_queue_get_config_request_for_port(port, req_queue, &replies)) { error = 0; } } } else { port = ofproto_get_port(ofproto, req_port); error = (port ? 
handle_queue_get_config_request_for_port(port, req_queue, &replies) : OFPERR_OFPQOFC_BAD_PORT); } if (!error) { ofconn_send_replies(ofconn, &replies); } else { ofpbuf_list_delete(&replies); } return error; } /* Allocates, initializes, and constructs a new group in 'ofproto', obtaining * all the attributes for it from 'gm', and stores a pointer to it in * '*ofgroup'. Makes the new group visible from the flow table starting from * 'version'. * * Returns 0 if successful, otherwise an error code. If there is an error then * '*ofgroup' is indeterminate upon return. */ static enum ofperr init_group(struct ofproto *ofproto, const struct ofputil_group_mod *gm, ovs_version_t version, struct ofgroup **ofgroup) { enum ofperr error; const long long int now = time_msec(); if (gm->group_id > OFPG_MAX) { return OFPERR_OFPGMFC_INVALID_GROUP; } if (gm->type > OFPGT11_FF) { return OFPERR_OFPGMFC_BAD_TYPE; } *ofgroup = ofproto->ofproto_class->group_alloc(); if (!*ofgroup) { VLOG_WARN_RL(&rl, "%s: failed to allocate group", ofproto->name); return OFPERR_OFPGMFC_OUT_OF_GROUPS; } *CONST_CAST(struct ofproto **, &(*ofgroup)->ofproto) = ofproto; *CONST_CAST(uint32_t *, &((*ofgroup)->group_id)) = gm->group_id; *CONST_CAST(enum ofp11_group_type *, &(*ofgroup)->type) = gm->type; *CONST_CAST(long long int *, &((*ofgroup)->created)) = now; *CONST_CAST(long long int *, &((*ofgroup)->modified)) = now; ovs_refcount_init(&(*ofgroup)->ref_count); (*ofgroup)->being_deleted = false; ovs_list_init(CONST_CAST(struct ovs_list *, &(*ofgroup)->buckets)); ofputil_bucket_clone_list(CONST_CAST(struct ovs_list *, &(*ofgroup)->buckets), &gm->buckets, NULL); *CONST_CAST(uint32_t *, &(*ofgroup)->n_buckets) = ovs_list_size(&(*ofgroup)->buckets); ofputil_group_properties_copy(CONST_CAST(struct ofputil_group_props *, &(*ofgroup)->props), &gm->props); rule_collection_init(&(*ofgroup)->rules); /* Make group visible from 'version'. */ (*ofgroup)->versions = VERSIONS_INITIALIZER(version, OVS_VERSION_NOT_REMOVED); /* Construct called BEFORE any locks are held. */ error = ofproto->ofproto_class->group_construct(*ofgroup); if (error) { ofputil_group_properties_destroy(CONST_CAST(struct ofputil_group_props *, &(*ofgroup)->props)); ofputil_bucket_list_destroy(CONST_CAST(struct ovs_list *, &(*ofgroup)->buckets)); ofproto->ofproto_class->group_dealloc(*ofgroup); } return error; } /* Implements the OFPGC11_ADD operation specified by 'gm', adding a group to * 'ofproto''s group table. Returns 0 on success or an OpenFlow error code on * failure. */ static enum ofperr add_group_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { enum ofperr error; if (ofproto_group_exists(ofproto, ogm->gm.group_id)) { return OFPERR_OFPGMFC_GROUP_EXISTS; } if (ofproto->n_groups[ogm->gm.type] >= ofproto->ogf.max_groups[ogm->gm.type]) { return OFPERR_OFPGMFC_OUT_OF_GROUPS; } /* Allocate new group and initialize it. */ error = init_group(ofproto, &ogm->gm, ogm->version, &ogm->new_group); if (!error) { /* Insert new group. */ cmap_insert(&ofproto->groups, &ogm->new_group->cmap_node, hash_int(ogm->new_group->group_id, 0)); ofproto->n_groups[ogm->new_group->type]++; } return error; } /* Adds all of the buckets from 'ofgroup' to 'new_ofgroup'. The buckets * already in 'new_ofgroup' will be placed just after the (copy of the) bucket * in 'ofgroup' with bucket ID 'command_bucket_id'. Special * 'command_bucket_id' values OFPG15_BUCKET_FIRST and OFPG15_BUCKET_LAST are * also honored. 
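 *
 * A worked example (all bucket ids made up): if 'ofgroup' holds buckets
 * [1, 2, 3] and the group mod carries new buckets [10, 11], then
 * 'command_bucket_id' == 2 yields [1, 2, 10, 11, 3], OFPG15_BUCKET_FIRST
 * yields [10, 11, 1, 2, 3], and OFPG15_BUCKET_LAST yields
 * [1, 2, 3, 10, 11].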
*/ static enum ofperr copy_buckets_for_insert_bucket(const struct ofgroup *ofgroup, struct ofgroup *new_ofgroup, uint32_t command_bucket_id) { struct ofputil_bucket *last = NULL; if (command_bucket_id <= OFPG15_BUCKET_MAX) { /* Check here to ensure that a bucket corresponding to * command_bucket_id exists in the old bucket list. * * The subsequent search of 'new_ofgroup' below covers * both buckets in the old bucket list and buckets added * by the insert buckets group mod message this function processes. */ if (!ofputil_bucket_find(&ofgroup->buckets, command_bucket_id)) { return OFPERR_OFPGMFC_UNKNOWN_BUCKET; } if (!ovs_list_is_empty(&new_ofgroup->buckets)) { last = ofputil_bucket_list_back(&new_ofgroup->buckets); } } ofputil_bucket_clone_list(CONST_CAST(struct ovs_list *, &new_ofgroup->buckets), &ofgroup->buckets, NULL); if (ofputil_bucket_check_duplicate_id(&new_ofgroup->buckets)) { VLOG_INFO_RL(&rl, "Duplicate bucket id"); return OFPERR_OFPGMFC_BUCKET_EXISTS; } /* Rearrange list according to command_bucket_id */ if (command_bucket_id == OFPG15_BUCKET_LAST) { if (!ovs_list_is_empty(&ofgroup->buckets)) { struct ofputil_bucket *new_first; const struct ofputil_bucket *first; first = ofputil_bucket_list_front(&ofgroup->buckets); new_first = ofputil_bucket_find(&new_ofgroup->buckets, first->bucket_id); ovs_list_splice(new_ofgroup->buckets.next, &new_first->list_node, CONST_CAST(struct ovs_list *, &new_ofgroup->buckets)); } } else if (command_bucket_id <= OFPG15_BUCKET_MAX && last) { struct ofputil_bucket *after; /* Presence of the bucket is checked above, so 'after' should never be * NULL. */ after = ofputil_bucket_find(&new_ofgroup->buckets, command_bucket_id); ovs_list_splice(after->list_node.next, new_ofgroup->buckets.next, last->list_node.next); } return 0; } /* Appends a copy of all the buckets from 'ofgroup' to 'new_ofgroup', * with the exception of the bucket whose bucket id is 'command_bucket_id'. * Special 'command_bucket_id' values OFPG15_BUCKET_FIRST, OFPG15_BUCKET_LAST * and OFPG15_BUCKET_ALL are also honored. */ static enum ofperr copy_buckets_for_remove_bucket(const struct ofgroup *ofgroup, struct ofgroup *new_ofgroup, uint32_t command_bucket_id) { const struct ofputil_bucket *skip = NULL; if (command_bucket_id == OFPG15_BUCKET_ALL) { return 0; } if (command_bucket_id == OFPG15_BUCKET_FIRST) { if (!ovs_list_is_empty(&ofgroup->buckets)) { skip = ofputil_bucket_list_front(&ofgroup->buckets); } } else if (command_bucket_id == OFPG15_BUCKET_LAST) { if (!ovs_list_is_empty(&ofgroup->buckets)) { skip = ofputil_bucket_list_back(&ofgroup->buckets); } } else { skip = ofputil_bucket_find(&ofgroup->buckets, command_bucket_id); if (!skip) { return OFPERR_OFPGMFC_UNKNOWN_BUCKET; } } ofputil_bucket_clone_list(CONST_CAST(struct ovs_list *, &new_ofgroup->buckets), &ofgroup->buckets, skip); return 0; } /* Implements OFPGC11_MODIFY, OFPGC15_INSERT_BUCKET and * OFPGC15_REMOVE_BUCKET. Returns 0 on success or an OpenFlow error code * on failure. * * Note that the group is re-created and then replaces the old group in * ofproto's ofgroup hash map. Thus, the group is never altered while users of * the xlate module hold a pointer to the group. */ static enum ofperr modify_group_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { struct ofgroup *old_group; /* Modified group. 
*/ struct ofgroup *new_group; enum ofperr error; old_group = ofproto_group_lookup__(ofproto, ogm->gm.group_id, OVS_VERSION_MAX); if (!old_group) { return OFPERR_OFPGMFC_UNKNOWN_GROUP; } /* Inserting or deleting a bucket should not change the group's type or * properties, so change the group mod so that these aspects match the old * group. (See EXT-570.) */ if (ogm->gm.command == OFPGC15_INSERT_BUCKET || ogm->gm.command == OFPGC15_REMOVE_BUCKET) { ogm->gm.type = old_group->type; ofputil_group_properties_destroy(&ogm->gm.props); ofputil_group_properties_copy(&ogm->gm.props, &old_group->props); } if (old_group->type != ogm->gm.type && (ofproto->n_groups[ogm->gm.type] >= ofproto->ogf.max_groups[ogm->gm.type])) { return OFPERR_OFPGMFC_OUT_OF_GROUPS; } error = init_group(ofproto, &ogm->gm, ogm->version, &ogm->new_group); if (error) { return error; } new_group = ogm->new_group; /* Manipulate bucket list for bucket commands */ if (ogm->gm.command == OFPGC15_INSERT_BUCKET) { error = copy_buckets_for_insert_bucket(old_group, new_group, ogm->gm.command_bucket_id); } else if (ogm->gm.command == OFPGC15_REMOVE_BUCKET) { error = copy_buckets_for_remove_bucket(old_group, new_group, ogm->gm.command_bucket_id); } if (error) { goto out; } /* The group creation time does not change during modification. */ *CONST_CAST(long long int *, &(new_group->created)) = old_group->created; *CONST_CAST(long long int *, &(new_group->modified)) = time_msec(); group_collection_add(&ogm->old_groups, old_group); /* Mark the old group for deletion. */ versions_set_remove_version(&old_group->versions, ogm->version); /* Insert replacement group. */ cmap_insert(&ofproto->groups, &new_group->cmap_node, hash_int(new_group->group_id, 0)); /* Transfer rules. */ rule_collection_move(&new_group->rules, &old_group->rules); if (old_group->type != new_group->type) { ofproto->n_groups[old_group->type]--; ofproto->n_groups[new_group->type]++; } return 0; out: ofproto_group_unref(new_group); return error; } /* Implements the OFPGC11_ADD_OR_MOD command which creates the group when it does not * exist yet and modifies it otherwise. */ static enum ofperr add_or_modify_group_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { enum ofperr error; if (!ofproto_group_exists(ofproto, ogm->gm.group_id)) { error = add_group_start(ofproto, ogm); } else { error = modify_group_start(ofproto, ogm); } return error; } static void delete_group_start(struct ofproto *ofproto, ovs_version_t version, struct group_collection *groups, struct ofgroup *group) OVS_REQUIRES(ofproto_mutex) { /* Makes flow deletion code leave the rule pointers in 'group->rules' * intact, so that we can later refer to the rules deleted due to the group * deletion. Rule pointers will be removed from all other groups, if any, * so we will never try to delete the same rule twice. */ group->being_deleted = true; /* Mark all the referring flows for deletion. */ delete_flows_start__(ofproto, version, &group->rules); group_collection_add(groups, group); versions_set_remove_version(&group->versions, version); ofproto->n_groups[group->type]--; } static void delete_group_finish(struct ofproto *ofproto, struct ofgroup *group) OVS_REQUIRES(ofproto_mutex) { /* Finish deletion of all flow entries containing this group in a group * action. */ delete_flows_finish__(ofproto, &group->rules, OFPRR_GROUP_DELETE, NULL); /* Group removal is postponed by the caller. */ } /* Implements OFPGC11_DELETE. 
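 * With 'group_id' == OFPG_ALL this removes every group visible in
 * 'ogm->version'; otherwise only the named group, if it exists.  Deleting a
 * nonexistent group is treated as a no-op rather than an error, which is
 * why this command, unlike the others, cannot fail.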
*/ static void delete_groups_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { struct ofgroup *group; if (ogm->gm.group_id == OFPG_ALL) { CMAP_FOR_EACH (group, cmap_node, &ofproto->groups) { if (versions_visible_in_version(&group->versions, ogm->version)) { delete_group_start(ofproto, ogm->version, &ogm->old_groups, group); } } } else { group = ofproto_group_lookup__(ofproto, ogm->gm.group_id, ogm->version); if (group) { delete_group_start(ofproto, ogm->version, &ogm->old_groups, group); } } } static enum ofperr ofproto_group_mod_start(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { enum ofperr error; ogm->new_group = NULL; group_collection_init(&ogm->old_groups); switch (ogm->gm.command) { case OFPGC11_ADD: error = add_group_start(ofproto, ogm); break; case OFPGC11_MODIFY: error = modify_group_start(ofproto, ogm); break; case OFPGC11_ADD_OR_MOD: error = add_or_modify_group_start(ofproto, ogm); break; case OFPGC11_DELETE: delete_groups_start(ofproto, ogm); error = 0; break; case OFPGC15_INSERT_BUCKET: error = modify_group_start(ofproto, ogm); break; case OFPGC15_REMOVE_BUCKET: error = modify_group_start(ofproto, ogm); break; default: if (ogm->gm.command > OFPGC11_DELETE) { VLOG_INFO_RL(&rl, "%s: Invalid group_mod command type %d", ofproto->name, ogm->gm.command); } error = OFPERR_OFPGMFC_BAD_COMMAND; break; } return error; } static void ofproto_group_mod_revert(struct ofproto *ofproto, struct ofproto_group_mod *ogm) OVS_REQUIRES(ofproto_mutex) { struct ofgroup *new_group = ogm->new_group; struct ofgroup *old_group; /* Restore replaced or deleted groups. */ GROUP_COLLECTION_FOR_EACH (old_group, &ogm->old_groups) { ofproto->n_groups[old_group->type]++; if (new_group) { ovs_assert(group_collection_n(&ogm->old_groups) == 1); /* Transfer rules back. */ rule_collection_move(&old_group->rules, &new_group->rules); } else { old_group->being_deleted = false; /* Revert rule deletion. */ delete_flows_revert__(ofproto, &old_group->rules); } /* Restore visibility. */ versions_set_remove_version(&old_group->versions, OVS_VERSION_NOT_REMOVED); } if (new_group) { /* Remove the new group immediately. It was never visible to * lookups. */ cmap_remove(&ofproto->groups, &new_group->cmap_node, hash_int(new_group->group_id, 0)); ofproto->n_groups[new_group->type]--; ofproto_group_unref(new_group); } } static void ofproto_group_mod_finish(struct ofproto *ofproto, struct ofproto_group_mod *ogm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { struct ofgroup *new_group = ogm->new_group; struct ofgroup *old_group; if (new_group && group_collection_n(&ogm->old_groups) && ofproto->ofproto_class->group_modify) { /* Modify a group. */ ovs_assert(group_collection_n(&ogm->old_groups) == 1); /* XXX: OK to lose old group's stats? */ ofproto->ofproto_class->group_modify(new_group); } /* Delete old groups. */ GROUP_COLLECTION_FOR_EACH(old_group, &ogm->old_groups) { delete_group_finish(ofproto, old_group); } remove_groups_postponed(&ogm->old_groups); if (req) { struct ofputil_requestforward rf; rf.xid = req->request->xid; rf.reason = OFPRFR_GROUP_MOD; rf.group_mod = &ogm->gm; connmgr_send_requestforward(ofproto->connmgr, req->ofconn, &rf); } } /* Delete all groups from 'ofproto'. * * This is intended for use within an ofproto provider's 'destruct' * function. 
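 *
 * A sketch of a hypothetical provider call site ('my_destruct' and the
 * surrounding details are made up for illustration):
 *
 *     static void
 *     my_destruct(struct ofproto *ofproto)
 *     {
 *         ofproto_group_delete_all(ofproto);
 *         ...release provider-private state...
 *     }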
*/ void ofproto_group_delete_all(struct ofproto *ofproto) OVS_EXCLUDED(ofproto_mutex) { struct ofproto_group_mod ogm; ogm.gm.command = OFPGC11_DELETE; ogm.gm.group_id = OFPG_ALL; ovs_mutex_lock(&ofproto_mutex); ogm.version = ofproto->tables_version + 1; ofproto_group_mod_start(ofproto, &ogm); ofproto_bump_tables_version(ofproto); ofproto_group_mod_finish(ofproto, &ogm, NULL); ovs_mutex_unlock(&ofproto_mutex); } static enum ofperr handle_group_mod(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofproto_group_mod ogm; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_group_mod(oh, &ogm.gm); if (error) { return error; } ovs_mutex_lock(&ofproto_mutex); ogm.version = ofproto->tables_version + 1; error = ofproto_group_mod_start(ofproto, &ogm); if (!error) { struct openflow_mod_requester req = { ofconn, oh }; ofproto_bump_tables_version(ofproto); ofproto_group_mod_finish(ofproto, &ogm, &req); ofmonitor_flush(ofproto->connmgr); } ovs_mutex_unlock(&ofproto_mutex); ofputil_uninit_group_mod(&ogm.gm); return error; } enum ofputil_table_miss ofproto_table_get_miss_config(const struct ofproto *ofproto, uint8_t table_id) { enum ofputil_table_miss miss; atomic_read_relaxed(&ofproto->tables[table_id].miss_config, &miss); return miss; } static void table_mod__(struct oftable *oftable, const struct ofputil_table_mod *tm) { if (tm->miss == OFPUTIL_TABLE_MISS_DEFAULT) { /* This is how an OFPT_TABLE_MOD decodes if it doesn't specify any * table-miss configuration (because the protocol used doesn't have * such a concept), so there's nothing to do. */ } else { atomic_store_relaxed(&oftable->miss_config, tm->miss); } unsigned int new_eviction = oftable->eviction; if (tm->eviction == OFPUTIL_TABLE_EVICTION_ON) { new_eviction |= EVICTION_OPENFLOW; } else if (tm->eviction == OFPUTIL_TABLE_EVICTION_OFF) { new_eviction &= ~EVICTION_OPENFLOW; } if (new_eviction != oftable->eviction) { ovs_mutex_lock(&ofproto_mutex); oftable_configure_eviction(oftable, new_eviction, oftable->eviction_fields, oftable->n_eviction_fields); ovs_mutex_unlock(&ofproto_mutex); } if (tm->vacancy != OFPUTIL_TABLE_VACANCY_DEFAULT) { ovs_mutex_lock(&ofproto_mutex); oftable->vacancy_down = tm->table_vacancy.vacancy_down; oftable->vacancy_up = tm->table_vacancy.vacancy_up; if (tm->vacancy == OFPUTIL_TABLE_VACANCY_OFF) { oftable->vacancy_event = 0; } else if (!oftable->vacancy_event) { uint8_t vacancy = oftable_vacancy(oftable); oftable->vacancy_event = (vacancy < oftable->vacancy_up ? OFPTR_VACANCY_UP : OFPTR_VACANCY_DOWN); } ovs_mutex_unlock(&ofproto_mutex); } } static enum ofperr table_mod(struct ofproto *ofproto, const struct ofputil_table_mod *tm) { if (!check_table_id(ofproto, tm->table_id)) { return OFPERR_OFPTMFC_BAD_TABLE; } /* Don't allow the eviction flags to be changed (except to the only fixed * value that OVS supports). OF1.4 says this is normal: "The * OFPTMPT_EVICTION property usually cannot be modified using a * OFP_TABLE_MOD request, because the eviction mechanism is switch * defined". 
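 * Accordingly, the check below accepts only UINT32_MAX, meaning "leave the
 * eviction flags alone", or exactly OFPROTO_EVICTION_FLAGS, the single
 * combination that OVS implements.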
*/ if (tm->eviction_flags != UINT32_MAX && tm->eviction_flags != OFPROTO_EVICTION_FLAGS) { return OFPERR_OFPTMFC_BAD_CONFIG; } if (tm->table_id == OFPTT_ALL) { struct oftable *oftable; OFPROTO_FOR_EACH_TABLE (oftable, ofproto) { if (!(oftable->flags & (OFTABLE_HIDDEN | OFTABLE_READONLY))) { table_mod__(oftable, tm); } } } else { struct oftable *oftable = &ofproto->tables[tm->table_id]; if (oftable->flags & OFTABLE_READONLY) { return OFPERR_OFPTMFC_EPERM; } table_mod__(oftable, tm); } return 0; } static enum ofperr handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_table_mod tm; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_table_mod(oh, &tm); if (error) { return error; } return table_mod(ofproto, &tm); } /* Free resources that may be allocated by ofproto_flow_mod_init(). */ void ofproto_flow_mod_uninit(struct ofproto_flow_mod *ofm) { if (ofm->temp_rule) { ofproto_rule_unref(ofm->temp_rule); ofm->temp_rule = NULL; } if (ofm->criteria.version != OVS_VERSION_NOT_REMOVED) { rule_criteria_destroy(&ofm->criteria); } if (ofm->conjs) { free(ofm->conjs); ofm->conjs = NULL; ofm->n_conjs = 0; } } /* Initializes 'ofm' with 'ofproto', 'fm', and 'rule'. 'rule' may be null, but * if it is nonnull then the caller must own a reference to it, which on * success is transferred to 'ofm' and on failure is unreffed. */ static enum ofperr ofproto_flow_mod_init(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct ofputil_flow_mod *fm, struct rule *rule) OVS_EXCLUDED(ofproto_mutex) { enum ofperr error; /* Forward flow mod fields we need later. */ ofm->command = fm->command; ofm->modify_cookie = fm->modify_cookie; ofm->modify_may_add_flow = (fm->new_cookie != OVS_BE64_MAX && fm->cookie_mask == htonll(0)); /* Old flags must be kept when modifying a flow, but we still must * honor the reset counts flag if present in the flow mod. */ ofm->modify_keep_counts = !(fm->flags & OFPUTIL_FF_RESET_COUNTS); /* Initialize state needed by ofproto_flow_mod_uninit(). 
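 * Every field set here has a matching release in that function: 'temp_rule'
 * is unreffed, 'criteria' is destroyed once its version is set, and 'conjs'
 * is freed, so every error path can simply call it once.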
*/ ofm->temp_rule = rule; ofm->criteria.version = OVS_VERSION_NOT_REMOVED; ofm->conjs = NULL; ofm->n_conjs = 0; bool check_buffer_id = false; switch (ofm->command) { case OFPFC_ADD: check_buffer_id = true; error = add_flow_init(ofproto, ofm, fm); break; case OFPFC_MODIFY: check_buffer_id = true; error = modify_flows_init_loose(ofproto, ofm, fm); break; case OFPFC_MODIFY_STRICT: check_buffer_id = true; error = modify_flow_init_strict(ofproto, ofm, fm); break; case OFPFC_DELETE: error = delete_flows_init_loose(ofproto, ofm, fm); break; case OFPFC_DELETE_STRICT: error = delete_flows_init_strict(ofproto, ofm, fm); break; default: error = OFPERR_OFPFMFC_BAD_COMMAND; break; } if (!error && check_buffer_id && fm->buffer_id != UINT32_MAX) { error = OFPERR_OFPBRC_BUFFER_UNKNOWN; } if (error) { ofproto_flow_mod_uninit(ofm); } return error; } static enum ofperr ofproto_flow_mod_start(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { enum ofperr error; rule_collection_init(&ofm->old_rules); rule_collection_init(&ofm->new_rules); switch (ofm->command) { case OFPFC_ADD: error = add_flow_start(ofproto, ofm); break; case OFPFC_MODIFY: error = modify_flows_start_loose(ofproto, ofm); break; case OFPFC_MODIFY_STRICT: error = modify_flow_start_strict(ofproto, ofm); break; case OFPFC_DELETE: error = delete_flows_start_loose(ofproto, ofm); break; case OFPFC_DELETE_STRICT: error = delete_flow_start_strict(ofproto, ofm); break; default: OVS_NOT_REACHED(); } /* Release resources not needed after start. */ ofproto_flow_mod_uninit(ofm); if (error) { rule_collection_destroy(&ofm->old_rules); rule_collection_destroy(&ofm->new_rules); } return error; } static void ofproto_flow_mod_revert(struct ofproto *ofproto, struct ofproto_flow_mod *ofm) OVS_REQUIRES(ofproto_mutex) { switch (ofm->command) { case OFPFC_ADD: add_flow_revert(ofproto, ofm); break; case OFPFC_MODIFY: case OFPFC_MODIFY_STRICT: modify_flows_revert(ofproto, ofm); break; case OFPFC_DELETE: case OFPFC_DELETE_STRICT: delete_flows_revert(ofproto, ofm); break; default: break; } rule_collection_destroy(&ofm->old_rules); rule_collection_destroy(&ofm->new_rules); } static void ofproto_flow_mod_finish(struct ofproto *ofproto, struct ofproto_flow_mod *ofm, const struct openflow_mod_requester *req) OVS_REQUIRES(ofproto_mutex) { switch (ofm->command) { case OFPFC_ADD: add_flow_finish(ofproto, ofm, req); break; case OFPFC_MODIFY: case OFPFC_MODIFY_STRICT: modify_flows_finish(ofproto, ofm, req); break; case OFPFC_DELETE: case OFPFC_DELETE_STRICT: delete_flows_finish(ofproto, ofm, req); break; default: break; } rule_collection_destroy(&ofm->old_rules); rule_collection_destroy(&ofm->new_rules); if (req) { ofconn_report_flow_mod(req->ofconn, ofm->command); } } /* Commit phases (all while locking ofproto_mutex): * * 1. Begin: Gather resources and make changes visible in the next version. * - Mark affected rules for removal in the next version. * - Create new replacement rules, make visible in the next * version. * - Do not send any events or notifications. * * 2. Revert: Fail if any errors are found. After this point no errors are * possible. No visible changes were made, so rollback is minimal (remove * added invisible rules, restore visibility of rules marked for removal). * * 3. Finish: Make the changes visible for lookups. Insert replacement rules into * the ofproto provider. Remove replaced and deleted rules from ofproto data * structures, and schedule postponed removal of deleted rules from the * classifier. Send notifications, buffered packets, etc. 
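 *
 * The single-message case in handle_flow_mod__() above is a condensed
 * illustration of the pattern (error handling elided, 'ofm' already
 * initialized):
 *
 *     ovs_mutex_lock(&ofproto_mutex);
 *     ofm.version = ofproto->tables_version + 1;         => 1. Begin
 *     error = ofproto_flow_mod_start(ofproto, &ofm);
 *     if (!error) {
 *         ofproto_bump_tables_version(ofproto);          => publish
 *         ofproto_flow_mod_finish(ofproto, &ofm, req);   => 3. Finish
 *     }
 *     ovs_mutex_unlock(&ofproto_mutex);
 *
 * Bundles (do_bundle_commit() below) additionally use the revert phase (2.)
 * to undo the messages already started when a later one fails.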
*/ static enum ofperr do_bundle_commit(struct ofconn *ofconn, uint32_t id, uint16_t flags) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); ovs_version_t version = ofproto->tables_version + 1; struct ofp_bundle *bundle; struct ofp_bundle_entry *be; enum ofperr error; bundle = ofconn_get_bundle(ofconn, id); if (!bundle) { return OFPERR_OFPBFC_BAD_ID; } if (bundle->flags != flags) { error = OFPERR_OFPBFC_BAD_FLAGS; } else { bool prev_is_port_mod = false; error = 0; ovs_mutex_lock(&ofproto_mutex); /* 1. Begin. */ LIST_FOR_EACH (be, node, &bundle->msg_list) { if (be->type == OFPTYPE_PORT_MOD) { /* Our port mods are not atomic. */ if (flags & OFPBF_ATOMIC) { error = OFPERR_OFPBFC_MSG_FAILED; } else { prev_is_port_mod = true; error = port_mod_start(ofconn, &be->opm.pm, &be->opm.port); } } else { /* Flow & group mods between port mods are applied as a single * version, but the versions are published only after we know * the commit is successful. */ if (prev_is_port_mod) { prev_is_port_mod = false; ++version; } if (be->type == OFPTYPE_FLOW_MOD) { /* Store the version in which the changes should take * effect. */ be->ofm.version = version; error = ofproto_flow_mod_start(ofproto, &be->ofm); } else if (be->type == OFPTYPE_GROUP_MOD) { /* Store the version in which the changes should take * effect. */ be->ogm.version = version; error = ofproto_group_mod_start(ofproto, &be->ogm); } else if (be->type == OFPTYPE_PACKET_OUT) { be->opo.version = version; error = ofproto_packet_out_start(ofproto, &be->opo); } else { OVS_NOT_REACHED(); } } if (error) { break; } } if (error) { /* Send error referring to the original message. */ ofconn_send_error(ofconn, &be->ofp_msg, error); error = OFPERR_OFPBFC_MSG_FAILED; /* 2. Revert. Undo all the changes made above. */ LIST_FOR_EACH_REVERSE_CONTINUE(be, node, &bundle->msg_list) { if (be->type == OFPTYPE_FLOW_MOD) { ofproto_flow_mod_revert(ofproto, &be->ofm); } else if (be->type == OFPTYPE_GROUP_MOD) { ofproto_group_mod_revert(ofproto, &be->ogm); } else if (be->type == OFPTYPE_PACKET_OUT) { ofproto_packet_out_revert(ofproto, &be->opo); } /* Nothing needs to be reverted for a port mod. */ } } else { /* 3. Finish. */ LIST_FOR_EACH (be, node, &bundle->msg_list) { if (be->type == OFPTYPE_PORT_MOD) { /* Perform the actual port mod. This is not atomic, i.e., * the effects will be immediately seen by upcall * processing regardless of the lookup version. It should * be noted that port configuration changes can originate * also from OVSDB changes asynchronously to all upcall * processing. */ port_mod_finish(ofconn, &be->opm.pm, be->opm.port); } else { version = (be->type == OFPTYPE_FLOW_MOD) ? be->ofm.version : (be->type == OFPTYPE_GROUP_MOD) ? be->ogm.version : (be->type == OFPTYPE_PACKET_OUT) ? be->opo.version : version; /* Bump the lookup version to that of the current * message. This makes all the changes in the bundle at * this version visible to lookups at once. 
*/ if (ofproto->tables_version < version) { ofproto->tables_version = version; ofproto->ofproto_class->set_tables_version( ofproto, ofproto->tables_version); } struct openflow_mod_requester req = { ofconn, &be->ofp_msg }; if (be->type == OFPTYPE_FLOW_MOD) { ofproto_flow_mod_finish(ofproto, &be->ofm, &req); } else if (be->type == OFPTYPE_GROUP_MOD) { ofproto_group_mod_finish(ofproto, &be->ogm, &req); } else if (be->type == OFPTYPE_PACKET_OUT) { ofproto_packet_out_finish(ofproto, &be->opo); } } } } ofmonitor_flush(ofproto->connmgr); ovs_mutex_unlock(&ofproto_mutex); } /* The bundle is discarded regardless the outcome. */ ofp_bundle_remove__(ofconn, bundle); return error; } static enum ofperr handle_bundle_control(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofputil_bundle_ctrl_msg bctrl; struct ofputil_bundle_ctrl_msg reply; struct ofpbuf *buf; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_bundle_ctrl(oh, &bctrl); if (error) { return error; } reply.flags = 0; reply.bundle_id = bctrl.bundle_id; switch (bctrl.type) { case OFPBCT_OPEN_REQUEST: error = ofp_bundle_open(ofconn, bctrl.bundle_id, bctrl.flags, oh); reply.type = OFPBCT_OPEN_REPLY; break; case OFPBCT_CLOSE_REQUEST: error = ofp_bundle_close(ofconn, bctrl.bundle_id, bctrl.flags); reply.type = OFPBCT_CLOSE_REPLY; break; case OFPBCT_COMMIT_REQUEST: error = do_bundle_commit(ofconn, bctrl.bundle_id, bctrl.flags); reply.type = OFPBCT_COMMIT_REPLY; break; case OFPBCT_DISCARD_REQUEST: error = ofp_bundle_discard(ofconn, bctrl.bundle_id); reply.type = OFPBCT_DISCARD_REPLY; break; case OFPBCT_OPEN_REPLY: case OFPBCT_CLOSE_REPLY: case OFPBCT_COMMIT_REPLY: case OFPBCT_DISCARD_REPLY: return OFPERR_OFPBFC_BAD_TYPE; break; } if (!error) { buf = ofputil_encode_bundle_ctrl_reply(oh, &reply); ofconn_send_reply(ofconn, buf); } return error; } static enum ofperr handle_bundle_add(struct ofconn *ofconn, const struct ofp_header *oh) OVS_EXCLUDED(ofproto_mutex) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); enum ofperr error; struct ofputil_bundle_add_msg badd; struct ofp_bundle_entry *bmsg; enum ofptype type; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_bundle_add(oh, &badd, &type); if (error) { return error; } bmsg = ofp_bundle_entry_alloc(type, badd.msg); struct ofpbuf ofpacts; uint64_t ofpacts_stub[1024 / 8]; ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); if (type == OFPTYPE_PORT_MOD) { error = ofputil_decode_port_mod(badd.msg, &bmsg->opm.pm, false); } else if (type == OFPTYPE_FLOW_MOD) { struct ofputil_flow_mod fm; error = ofputil_decode_flow_mod(&fm, badd.msg, ofconn_get_protocol(ofconn), ofproto_get_tun_tab(ofproto), &ofproto->vl_mff_map, &ofpacts, u16_to_ofp(ofproto->max_ports), ofproto->n_tables); if (!error) { error = ofproto_flow_mod_init(ofproto, &bmsg->ofm, &fm, NULL); } } else if (type == OFPTYPE_GROUP_MOD) { error = ofputil_decode_group_mod(badd.msg, &bmsg->ogm.gm); } else if (type == OFPTYPE_PACKET_OUT) { struct ofputil_packet_out po; COVERAGE_INC(ofproto_packet_out); /* Decode message. */ error = ofputil_decode_packet_out(&po, badd.msg, &ofpacts); if (!error) { po.ofpacts = ofpbuf_steal_data(&ofpacts); /* Move to heap. 
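 * ('po.ofpacts' would otherwise keep pointing into the 'ofpacts' stub
 * storage, which dies when this handler returns, while the bundle entry
 * must live until the bundle is committed or discarded.)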
*/ error = ofproto_packet_out_init(ofproto, ofconn, &bmsg->opo, &po); } } else { OVS_NOT_REACHED(); } ofpbuf_uninit(&ofpacts); if (!error) { error = ofp_bundle_add_message(ofconn, badd.bundle_id, badd.flags, bmsg, oh); } if (error) { ofp_bundle_entry_free(bmsg); } return error; } static enum ofperr handle_tlv_table_mod(struct ofconn *ofconn, const struct ofp_header *oh) { struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct tun_table *old_tab, *new_tab; struct ofputil_tlv_table_mod ttm; enum ofperr error; error = reject_slave_controller(ofconn); if (error) { return error; } error = ofputil_decode_tlv_table_mod(oh, &ttm); if (error) { return error; } old_tab = ovsrcu_get_protected(struct tun_table *, &ofproto->metadata_tab); error = tun_metadata_table_mod(&ttm, old_tab, &new_tab); if (!error) { ovs_mutex_lock(&ofproto->vl_mff_map.mutex); error = mf_vl_mff_map_mod_from_tun_metadata(&ofproto->vl_mff_map, &ttm); ovs_mutex_unlock(&ofproto->vl_mff_map.mutex); if (!error) { ovsrcu_set(&ofproto->metadata_tab, new_tab); tun_metadata_postpone_free(old_tab); } } ofputil_uninit_tlv_table(&ttm.mappings); return error; } static enum ofperr handle_tlv_table_request(struct ofconn *ofconn, const struct ofp_header *oh) { const struct ofproto *ofproto = ofconn_get_ofproto(ofconn); struct ofputil_tlv_table_reply ttr; struct ofpbuf *b; tun_metadata_table_request(ofproto_get_tun_tab(ofproto), &ttr); b = ofputil_encode_tlv_table_reply(oh, &ttr); ofputil_uninit_tlv_table(&ttr.mappings); ofconn_send_reply(ofconn, b); return 0; } static enum ofperr handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) OVS_EXCLUDED(ofproto_mutex) { const struct ofp_header *oh = msg->data; enum ofptype type; enum ofperr error; error = ofptype_decode(&type, oh); if (error) { return error; } if (oh->version >= OFP13_VERSION && ofpmsg_is_stat_request(oh) && ofpmp_more(oh)) { /* We have no buffer implementation for multipart requests. * Report overflow for requests that consist of multiple * messages. */ return OFPERR_OFPBRC_MULTIPART_BUFFER_OVERFLOW; } switch (type) { /* OpenFlow requests. */ case OFPTYPE_ECHO_REQUEST: return handle_echo_request(ofconn, oh); case OFPTYPE_FEATURES_REQUEST: return handle_features_request(ofconn, oh); case OFPTYPE_GET_CONFIG_REQUEST: return handle_get_config_request(ofconn, oh); case OFPTYPE_SET_CONFIG: return handle_set_config(ofconn, oh); case OFPTYPE_PACKET_OUT: return handle_packet_out(ofconn, oh); case OFPTYPE_PORT_MOD: return handle_port_mod(ofconn, oh); case OFPTYPE_FLOW_MOD: return handle_flow_mod(ofconn, oh); case OFPTYPE_GROUP_MOD: return handle_group_mod(ofconn, oh); case OFPTYPE_TABLE_MOD: return handle_table_mod(ofconn, oh); case OFPTYPE_METER_MOD: return handle_meter_mod(ofconn, oh); case OFPTYPE_BARRIER_REQUEST: return handle_barrier_request(ofconn, oh); case OFPTYPE_ROLE_REQUEST: return handle_role_request(ofconn, oh); /* OpenFlow replies. */ case OFPTYPE_ECHO_REPLY: return 0; /* Nicira extension requests. */ case OFPTYPE_FLOW_MOD_TABLE_ID: return handle_nxt_flow_mod_table_id(ofconn, oh); case OFPTYPE_SET_FLOW_FORMAT: return handle_nxt_set_flow_format(ofconn, oh); case OFPTYPE_SET_PACKET_IN_FORMAT: return handle_nxt_set_packet_in_format(ofconn, oh); case OFPTYPE_SET_CONTROLLER_ID: return handle_nxt_set_controller_id(ofconn, oh); case OFPTYPE_FLOW_AGE: /* Nothing to do. 
*/ return 0; case OFPTYPE_FLOW_MONITOR_CANCEL: return handle_flow_monitor_cancel(ofconn, oh); case OFPTYPE_SET_ASYNC_CONFIG: return handle_nxt_set_async_config(ofconn, oh); case OFPTYPE_GET_ASYNC_REQUEST: return handle_nxt_get_async_request(ofconn, oh); case OFPTYPE_NXT_RESUME: return handle_nxt_resume(ofconn, oh); /* Statistics requests. */ case OFPTYPE_DESC_STATS_REQUEST: return handle_desc_stats_request(ofconn, oh); case OFPTYPE_FLOW_STATS_REQUEST: return handle_flow_stats_request(ofconn, oh); case OFPTYPE_AGGREGATE_STATS_REQUEST: return handle_aggregate_stats_request(ofconn, oh); case OFPTYPE_TABLE_STATS_REQUEST: return handle_table_stats_request(ofconn, oh); case OFPTYPE_TABLE_FEATURES_STATS_REQUEST: return handle_table_features_request(ofconn, oh); case OFPTYPE_TABLE_DESC_REQUEST: return handle_table_desc_request(ofconn, oh); case OFPTYPE_PORT_STATS_REQUEST: return handle_port_stats_request(ofconn, oh); case OFPTYPE_QUEUE_STATS_REQUEST: return handle_queue_stats_request(ofconn, oh); case OFPTYPE_PORT_DESC_STATS_REQUEST: return handle_port_desc_stats_request(ofconn, oh); case OFPTYPE_FLOW_MONITOR_STATS_REQUEST: return handle_flow_monitor_request(ofconn, oh); case OFPTYPE_METER_STATS_REQUEST: case OFPTYPE_METER_CONFIG_STATS_REQUEST: return handle_meter_request(ofconn, oh, type); case OFPTYPE_METER_FEATURES_STATS_REQUEST: return handle_meter_features_request(ofconn, oh); case OFPTYPE_GROUP_STATS_REQUEST: return handle_group_stats_request(ofconn, oh); case OFPTYPE_GROUP_DESC_STATS_REQUEST: return handle_group_desc_stats_request(ofconn, oh); case OFPTYPE_GROUP_FEATURES_STATS_REQUEST: return handle_group_features_stats_request(ofconn, oh); case OFPTYPE_QUEUE_GET_CONFIG_REQUEST: return handle_queue_get_config_request(ofconn, oh); case OFPTYPE_BUNDLE_CONTROL: return handle_bundle_control(ofconn, oh); case OFPTYPE_BUNDLE_ADD_MESSAGE: return handle_bundle_add(ofconn, oh); case OFPTYPE_NXT_TLV_TABLE_MOD: return handle_tlv_table_mod(ofconn, oh); case OFPTYPE_NXT_TLV_TABLE_REQUEST: return handle_tlv_table_request(ofconn, oh); case OFPTYPE_IPFIX_BRIDGE_STATS_REQUEST: return handle_ipfix_bridge_stats_request(ofconn, oh); case OFPTYPE_IPFIX_FLOW_STATS_REQUEST: return handle_ipfix_flow_stats_request(ofconn, oh); case OFPTYPE_CT_FLUSH_ZONE: return handle_nxt_ct_flush_zone(ofconn, oh); case OFPTYPE_HELLO: case OFPTYPE_ERROR: case OFPTYPE_FEATURES_REPLY: case OFPTYPE_GET_CONFIG_REPLY: case OFPTYPE_PACKET_IN: case OFPTYPE_FLOW_REMOVED: case OFPTYPE_PORT_STATUS: case OFPTYPE_BARRIER_REPLY: case OFPTYPE_QUEUE_GET_CONFIG_REPLY: case OFPTYPE_DESC_STATS_REPLY: case OFPTYPE_FLOW_STATS_REPLY: case OFPTYPE_QUEUE_STATS_REPLY: case OFPTYPE_PORT_STATS_REPLY: case OFPTYPE_TABLE_STATS_REPLY: case OFPTYPE_AGGREGATE_STATS_REPLY: case OFPTYPE_PORT_DESC_STATS_REPLY: case OFPTYPE_ROLE_REPLY: case OFPTYPE_FLOW_MONITOR_PAUSED: case OFPTYPE_FLOW_MONITOR_RESUMED: case OFPTYPE_FLOW_MONITOR_STATS_REPLY: case OFPTYPE_GET_ASYNC_REPLY: case OFPTYPE_GROUP_STATS_REPLY: case OFPTYPE_GROUP_DESC_STATS_REPLY: case OFPTYPE_GROUP_FEATURES_STATS_REPLY: case OFPTYPE_METER_STATS_REPLY: case OFPTYPE_METER_CONFIG_STATS_REPLY: case OFPTYPE_METER_FEATURES_STATS_REPLY: case OFPTYPE_TABLE_FEATURES_STATS_REPLY: case OFPTYPE_TABLE_DESC_REPLY: case OFPTYPE_ROLE_STATUS: case OFPTYPE_REQUESTFORWARD: case OFPTYPE_TABLE_STATUS: case OFPTYPE_NXT_TLV_TABLE_REPLY: case OFPTYPE_IPFIX_BRIDGE_STATS_REPLY: case OFPTYPE_IPFIX_FLOW_STATS_REPLY: default: if (ofpmsg_is_stat_request(oh)) { return OFPERR_OFPBRC_BAD_STAT; } else { return OFPERR_OFPBRC_BAD_TYPE; } } } 
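/* Decodes and dispatches 'ofp_msg' via handle_openflow__(), replying to
 * 'ofconn' with an OpenFlow error message if the handler reports one. */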
static void handle_openflow(struct ofconn *ofconn, const struct ofpbuf *ofp_msg) OVS_EXCLUDED(ofproto_mutex) { enum ofperr error = handle_openflow__(ofconn, ofp_msg); if (error) { ofconn_send_error(ofconn, ofp_msg->data, error); } COVERAGE_INC(ofproto_recv_openflow); } static uint64_t pick_datapath_id(const struct ofproto *ofproto) { const struct ofport *port; port = ofproto_get_port(ofproto, OFPP_LOCAL); if (port) { struct eth_addr ea; int error; error = netdev_get_etheraddr(port->netdev, &ea); if (!error) { return eth_addr_to_uint64(ea); } VLOG_WARN("%s: could not get MAC address for %s (%s)", ofproto->name, netdev_get_name(port->netdev), ovs_strerror(error)); } return ofproto->fallback_dpid; } static uint64_t pick_fallback_dpid(void) { struct eth_addr ea; eth_addr_nicira_random(&ea); return eth_addr_to_uint64(ea); } /* Table overflow policy. */ /* Chooses and updates 'rulep' with a rule to evict from 'table'. Sets 'rulep' * to NULL if the table is not configured to evict rules or if the table * contains no evictable rules. (Rules with a readlock on their evict rwlock, * or with no timeouts are not evictable.) */ static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep) OVS_REQUIRES(ofproto_mutex) { struct eviction_group *evg; *rulep = NULL; if (!table->eviction) { return false; } /* In the common case, the outer and inner loops here will each be entered * exactly once: * * - The inner loop normally "return"s in its first iteration. If the * eviction group has any evictable rules, then it always returns in * some iteration. * * - The outer loop only iterates more than once if the largest eviction * group has no evictable rules. * * - The outer loop can exit only if table's 'max_flows' is all filled up * by unevictable rules. */ HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) { struct rule *rule; HEAP_FOR_EACH (rule, evg_node, &evg->rules) { *rulep = rule; return true; } } return false; } /* Eviction groups. */ /* Returns the priority to use for an eviction_group that contains 'n_rules' * rules. The priority contains low-order random bits to ensure that eviction * groups with the same number of rules are prioritized randomly. */ static uint32_t eviction_group_priority(size_t n_rules) { uint16_t size = MIN(UINT16_MAX, n_rules); return (size << 16) | random_uint16(); } /* Updates 'evg', an eviction_group within 'table', following a change that * adds or removes rules in 'evg'. */ static void eviction_group_resized(struct oftable *table, struct eviction_group *evg) OVS_REQUIRES(ofproto_mutex) { heap_change(&table->eviction_groups_by_size, &evg->size_node, eviction_group_priority(heap_count(&evg->rules))); } /* Destroys 'evg', an eviction_group within 'table': * * - Removes all the rules, if any, from 'evg'. (It doesn't destroy the * rules themselves, just removes them from the eviction group.) * * - Removes 'evg' from 'table'. * * - Frees 'evg'. */ static void eviction_group_destroy(struct oftable *table, struct eviction_group *evg) OVS_REQUIRES(ofproto_mutex) { while (!heap_is_empty(&evg->rules)) { struct rule *rule; rule = CONTAINER_OF(heap_pop(&evg->rules), struct rule, evg_node); rule->eviction_group = NULL; } hmap_remove(&table->eviction_groups_by_id, &evg->id_node); heap_remove(&table->eviction_groups_by_size, &evg->size_node); heap_destroy(&evg->rules); free(evg); } /* Removes 'rule' from its eviction group, if any. 
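 *
 * If the removal leaves the eviction group empty, the group itself is
 * destroyed; otherwise the group's position in the size-ordered heap is
 * recomputed.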
*/ static void eviction_group_remove_rule(struct rule *rule) OVS_REQUIRES(ofproto_mutex) { if (rule->eviction_group) { struct oftable *table = &rule->ofproto->tables[rule->table_id]; struct eviction_group *evg = rule->eviction_group; rule->eviction_group = NULL; heap_remove(&evg->rules, &rule->evg_node); if (heap_is_empty(&evg->rules)) { eviction_group_destroy(table, evg); } else { eviction_group_resized(table, evg); } } } /* Hashes the 'rule''s values for the eviction_fields of 'rule''s table, and * returns the hash value. */ static uint32_t eviction_group_hash_rule(struct rule *rule) OVS_REQUIRES(ofproto_mutex) { struct oftable *table = &rule->ofproto->tables[rule->table_id]; const struct mf_subfield *sf; struct flow flow; uint32_t hash; hash = table->eviction_group_id_basis; miniflow_expand(rule->cr.match.flow, &flow); for (sf = table->eviction_fields; sf < &table->eviction_fields[table->n_eviction_fields]; sf++) { if (mf_are_prereqs_ok(sf->field, &flow, NULL)) { union mf_value value; mf_get_value(sf->field, &flow, &value); if (sf->ofs) { bitwise_zero(&value, sf->field->n_bytes, 0, sf->ofs); } if (sf->ofs + sf->n_bits < sf->field->n_bytes * 8) { unsigned int start = sf->ofs + sf->n_bits; bitwise_zero(&value, sf->field->n_bytes, start, sf->field->n_bytes * 8 - start); } hash = hash_bytes(&value, sf->field->n_bytes, hash); } else { hash = hash_int(hash, 0); } } return hash; } /* Returns an eviction group within 'table' with the given 'id', creating one * if necessary. */ static struct eviction_group * eviction_group_find(struct oftable *table, uint32_t id) OVS_REQUIRES(ofproto_mutex) { struct eviction_group *evg; HMAP_FOR_EACH_WITH_HASH (evg, id_node, id, &table->eviction_groups_by_id) { return evg; } evg = xmalloc(sizeof *evg); hmap_insert(&table->eviction_groups_by_id, &evg->id_node, id); heap_insert(&table->eviction_groups_by_size, &evg->size_node, eviction_group_priority(0)); heap_init(&evg->rules); return evg; } /* Returns an eviction priority for 'rule'. The return value should be * interpreted so that higher priorities make a rule a more attractive * candidate for eviction. */ static uint64_t rule_eviction_priority(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { /* Calculate absolute time when this flow will expire. If it will never * expire, then return 0 to make it unevictable. */ long long int expiration = LLONG_MAX; if (rule->hard_timeout) { /* 'modified' needs protection even when we hold 'ofproto_mutex'. */ ovs_mutex_lock(&rule->mutex); long long int modified = rule->modified; ovs_mutex_unlock(&rule->mutex); expiration = modified + rule->hard_timeout * 1000; } if (rule->idle_timeout) { uint64_t packets, bytes; long long int used; long long int idle_expiration; ofproto->ofproto_class->rule_get_stats(rule, &packets, &bytes, &used); idle_expiration = used + rule->idle_timeout * 1000; expiration = MIN(expiration, idle_expiration); } if (expiration == LLONG_MAX) { return 0; } /* Calculate the time of expiration as a number of (approximate) seconds * after program startup. * * This should work OK for program runs that last UINT32_MAX seconds or * less. Therefore, please restart OVS at least once every 136 years. */ uint32_t expiration_ofs = (expiration >> 10) - (time_boot_msec() >> 10); /* Combine expiration time with OpenFlow "importance" to form a single * priority value. 
We want flows with relatively low "importance" to be * evicted before even considering expiration time, so put "importance" in * the most significant bits and expiration time in the least significant * bits. * * Small 'priority' should be evicted before those with large 'priority'. * The caller expects the opposite convention (a large return value being * more attractive for eviction) so we invert it before returning. */ uint64_t priority = ((uint64_t) rule->importance << 32) + expiration_ofs; return UINT64_MAX - priority; } /* Adds 'rule' to an appropriate eviction group for its oftable's * configuration. Does nothing if 'rule''s oftable doesn't have eviction * enabled, or if 'rule' is a permanent rule (one that will never expire on its * own). * * The caller must ensure that 'rule' is not already in an eviction group. */ static void eviction_group_add_rule(struct rule *rule) OVS_REQUIRES(ofproto_mutex) { struct ofproto *ofproto = rule->ofproto; struct oftable *table = &ofproto->tables[rule->table_id]; bool has_timeout; /* Timeouts may be modified only when holding 'ofproto_mutex'. We have it * so no additional protection is needed. */ has_timeout = rule->hard_timeout || rule->idle_timeout; if (table->eviction && has_timeout) { struct eviction_group *evg; evg = eviction_group_find(table, eviction_group_hash_rule(rule)); rule->eviction_group = evg; heap_insert(&evg->rules, &rule->evg_node, rule_eviction_priority(ofproto, rule)); eviction_group_resized(table, evg); } } /* oftables. */ /* Initializes 'table'. */ static void oftable_init(struct oftable *table) { memset(table, 0, sizeof *table); classifier_init(&table->cls, flow_segment_u64s); table->max_flows = UINT_MAX; table->n_flows = 0; hmap_init(&table->eviction_groups_by_id); heap_init(&table->eviction_groups_by_size); atomic_init(&table->miss_config, OFPUTIL_TABLE_MISS_DEFAULT); classifier_set_prefix_fields(&table->cls, default_prefix_fields, ARRAY_SIZE(default_prefix_fields)); atomic_init(&table->n_matched, 0); atomic_init(&table->n_missed, 0); } /* Destroys 'table', including its classifier and eviction groups. * * The caller is responsible for freeing 'table' itself. */ static void oftable_destroy(struct oftable *table) { ovs_assert(classifier_is_empty(&table->cls)); ovs_mutex_lock(&ofproto_mutex); oftable_configure_eviction(table, 0, NULL, 0); ovs_mutex_unlock(&ofproto_mutex); hmap_destroy(&table->eviction_groups_by_id); heap_destroy(&table->eviction_groups_by_size); classifier_destroy(&table->cls); free(table->name); } /* Changes the name of 'table' to 'name'. If 'name' is NULL or the empty * string, then 'table' will use its default name. * * This only affects the name exposed for a table exposed through the OpenFlow * OFPST_TABLE (as printed by "ovs-ofctl dump-tables"). */ static void oftable_set_name(struct oftable *table, const char *name) { if (name && name[0]) { int len = strnlen(name, OFP_MAX_TABLE_NAME_LEN); if (!table->name || strncmp(name, table->name, len)) { free(table->name); table->name = xmemdup0(name, len); } } else { free(table->name); table->name = NULL; } } /* oftables support a choice of two policies when adding a rule would cause the * number of flows in the table to exceed the configured maximum number: either * they can refuse to add the new flow or they can evict some existing flow. * This function configures the latter policy on 'table', with fairness based * on the values of the 'n_fields' fields specified in 'fields'. (Specifying * 'n_fields' as 0 disables fairness.) 
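 *
 * When the configuration changes, the existing eviction groups are torn
 * down and, if eviction remains enabled, every rule already in 'table' is
 * re-bucketed into freshly built groups.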
*/ static void oftable_configure_eviction(struct oftable *table, unsigned int eviction, const struct mf_subfield *fields, size_t n_fields) OVS_REQUIRES(ofproto_mutex) { struct rule *rule; if ((table->eviction != 0) == (eviction != 0) && n_fields == table->n_eviction_fields && (!n_fields || !memcmp(fields, table->eviction_fields, n_fields * sizeof *fields))) { /* The set of eviction fields did not change. If 'eviction' changed, * it remains nonzero, so that we can just update table->eviction * without fussing with the eviction groups. */ table->eviction = eviction; return; } /* Destroy existing eviction groups, then destroy and recreate data * structures to recover memory. */ struct eviction_group *evg, *next; HMAP_FOR_EACH_SAFE (evg, next, id_node, &table->eviction_groups_by_id) { eviction_group_destroy(table, evg); } hmap_destroy(&table->eviction_groups_by_id); hmap_init(&table->eviction_groups_by_id); heap_destroy(&table->eviction_groups_by_size); heap_init(&table->eviction_groups_by_size); /* Replace eviction groups by the new ones, if there is a change. Free the * old fields only after allocating the new ones, because 'fields == * table->eviction_fields' is possible. */ struct mf_subfield *old_fields = table->eviction_fields; table->n_eviction_fields = n_fields; table->eviction_fields = (fields ? xmemdup(fields, n_fields * sizeof *fields) : NULL); free(old_fields); /* Add the new eviction groups, if enabled. */ table->eviction = eviction; if (table->eviction) { table->eviction_group_id_basis = random_uint32(); CLS_FOR_EACH (rule, cr, &table->cls) { eviction_group_add_rule(rule); } } } /* Inserts 'rule' into the ofproto data structures BEFORE the caller has inserted * it into the classifier. */ static void ofproto_rule_insert__(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { const struct rule_actions *actions = rule_get_actions(rule); /* A rule may not be reinserted. */ ovs_assert(rule->state != RULE_INSERTED); if (rule->hard_timeout || rule->idle_timeout) { ovs_list_insert(&ofproto->expirable, &rule->expirable); } cookies_insert(ofproto, rule); eviction_group_add_rule(rule); if (actions->has_meter) { meter_insert_rule(rule); } if (actions->has_groups) { const struct ofpact_group *a; OFPACT_FOR_EACH_TYPE_FLATTENED (a, GROUP, actions->ofpacts, actions->ofpacts_len) { struct ofgroup *group; group = ofproto_group_lookup(ofproto, a->group_id, OVS_VERSION_MAX, false); ovs_assert(group != NULL); group_add_rule(group, rule); } } rule->state = RULE_INSERTED; } /* Removes 'rule' from the ofproto data structures. Caller may have deferred * the removal from the classifier. */ static void ofproto_rule_remove__(struct ofproto *ofproto, struct rule *rule) OVS_REQUIRES(ofproto_mutex) { ovs_assert(rule->state == RULE_INSERTED); cookies_remove(ofproto, rule); eviction_group_remove_rule(rule); if (!ovs_list_is_empty(&rule->expirable)) { ovs_list_remove(&rule->expirable); } if (!ovs_list_is_empty(&rule->meter_list_node)) { ovs_list_remove(&rule->meter_list_node); ovs_list_init(&rule->meter_list_node); } /* Remove the rule from any groups, except from the group that is being * deleted, if any.
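 *
 * Group membership is derived from the rule's actions, mirroring the group
 * bookkeeping in ofproto_rule_insert__() above.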
*/ const struct rule_actions *actions = rule_get_actions(rule); if (actions->has_groups) { const struct ofpact_group *a; OFPACT_FOR_EACH_TYPE_FLATTENED(a, GROUP, actions->ofpacts, actions->ofpacts_len) { struct ofgroup *group; group = ofproto_group_lookup(ofproto, a->group_id, OVS_VERSION_MAX, false); ovs_assert(group); /* Leave the rule for the group that is being deleted, if any, * as we still need the list of rules for clean-up. */ if (!group->being_deleted) { group_remove_rule(group, rule); } } } rule->state = RULE_REMOVED; } /* unixctl commands. */ struct ofproto * ofproto_lookup(const char *name) { struct ofproto *ofproto; HMAP_FOR_EACH_WITH_HASH (ofproto, hmap_node, hash_string(name, 0), &all_ofprotos) { if (!strcmp(ofproto->name, name)) { return ofproto; } } return NULL; } static void ofproto_unixctl_list(struct unixctl_conn *conn, int argc OVS_UNUSED, const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED) { struct ofproto *ofproto; struct ds results; ds_init(&results); HMAP_FOR_EACH (ofproto, hmap_node, &all_ofprotos) { ds_put_format(&results, "%s\n", ofproto->name); } unixctl_command_reply(conn, ds_cstr(&results)); ds_destroy(&results); } static void ofproto_unixctl_init(void) { static bool registered; if (registered) { return; } registered = true; unixctl_command_register("ofproto/list", "", 0, 0, ofproto_unixctl_list, NULL); }
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/delegate.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/timer.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" #include "magick/xwindow-private.h" /* Constant declaration. 
*/ const char BackgroundColor[] = "#ffffff", /* white */ BorderColor[] = "#dfdfdf", /* gray */ DefaultTileFrame[] = "15x15+3+3", DefaultTileGeometry[] = "120x120+4+3>", DefaultTileLabel[] = "%f\n%G\n%b", ForegroundColor[] = "#000", /* black */ LoadImageTag[] = "Load/Image", LoadImagesTag[] = "Load/Images", MatteColor[] = "#bdbdbd", /* gray */ PSDensityGeometry[] = "72.0x72.0", PSPageGeometry[] = "612x792", SaveImageTag[] = "Save/Image", SaveImagesTag[] = "Save/Images", TransparentColor[] = "#00000000"; /* transparent black */ const double DefaultResolution = 72.0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. % % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireMagickMemory(sizeof(*image)); if (image == (Image *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(image,0,sizeof(*image)); /* Initialize Image structure. */ (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; image->blur=1.0; InitializeExceptionInfo(&image->exception); (void) QueryColorDatabase(BackgroundColor,&image->background_color, &image->exception); (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception); (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception); (void) QueryColorDatabase(TransparentColor,&image->transparent_color, &image->exception); GetTimerInfo(&image->timer); image->ping=MagickFalse; image->cache=AcquirePixelCache(0); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=time((time_t *) NULL); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AllocateSemaphoreInfo(); image->signature=MagickSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MaxTextExtent); (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->matte_color=image_info->matte_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); (void) SyncImageSettings(image_info,image); option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (image->delay > (size_t) floor(geometry_info.rho+0.5)) image->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (image->delay < (size_t) floor(geometry_info.rho+0.5)) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else image->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
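% The caller is responsible for freeing the returned structure with
% DestroyImageInfo() when it is no longer needed.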
% % The format of the AcquireImageInfo method is: % % ImageInfo *AcquireImageInfo(void) % */ MagickExport ImageInfo *AcquireImageInfo(void) { ImageInfo *image_info; image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info)); if (image_info == (ImageInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetImageInfo(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % The format of the AcquireNextImage method is: % % void AcquireNextImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % */ MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image) { /* Allocate image structure. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->next=AcquireImage(image_info); if (GetNextImageInList(image) == (Image *) NULL) return; (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename, MaxTextExtent); if (image_info != (ImageInfo *) NULL) (void) CopyMagickString(GetNextImageInList(image)->filename, image_info->filename,MaxTextExtent); DestroyBlob(GetNextImageInList(image)); image->next->blob=ReferenceBlob(image->blob); image->next->endian=image->endian; image->next->scene=image->scene+1; image->next->previous=image; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A p p e n d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AppendImages() takes all images from the current image pointer to the end % of the image list and appends them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting now affects how the image is justified in the % final image. % % The format of the AppendImages method is: % % Image *AppendImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AppendImages(const Image *images, const MagickBooleanType stack,ExceptionInfo *exception) { #define AppendImageTag "Append/Image" CacheView *append_view; Image *append_image; MagickBooleanType homogeneous_colorspace, matte, status; MagickOffsetType n; RectangleInfo geometry; register const Image *next; size_t depth, height, number_images, width; ssize_t x_offset, y, y_offset; /* Compute maximum area of appended area.
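    For example (illustrative sizes): stacking a 100x50 and an 80x70 image
    top-to-bottom yields a 100x120 canvas (width is the maximum, heights are
    summed); appending them left-to-right yields 180x70 (widths are summed,
    height is the maximum).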
*/ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); matte=images->matte; number_images=1; width=images->columns; height=images->rows; depth=images->depth; homogeneous_colorspace=MagickTrue; next=GetNextImageInList(images); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->depth > depth) depth=next->depth; if (next->colorspace != images->colorspace) homogeneous_colorspace=MagickFalse; if (next->matte != MagickFalse) matte=MagickTrue; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; continue; } width+=next->columns; if (next->rows > height) height=next->rows; } /* Append images. */ append_image=CloneImage(images,width,height,MagickTrue,exception); if (append_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(append_image,DirectClass) == MagickFalse) { InheritException(exception,&append_image->exception); append_image=DestroyImage(append_image); return((Image *) NULL); } if (homogeneous_colorspace == MagickFalse) (void) SetImageColorspace(append_image,sRGBColorspace); append_image->depth=depth; append_image->matte=matte; append_image->page=images->page; (void) SetImageBackgroundColor(append_image); status=MagickTrue; x_offset=0; y_offset=0; next=images; append_view=AcquireAuthenticCacheView(append_image,exception); for (n=0; n < (MagickOffsetType) number_images; n++) { CacheView *image_view; MagickBooleanType proceed; SetGeometry(append_image,&geometry); GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry); if (stack != MagickFalse) x_offset-=geometry.x; else y_offset-=geometry.y; image_view=AcquireVirtualCacheView(next,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(next,next,next->rows,1) #endif for (y=0; y < (ssize_t) next->rows; y++) { MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict append_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset, next->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); append_indexes=GetCacheViewAuthenticIndexQueue(append_view); for (x=0; x < (ssize_t) next->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (next->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if ((next->colorspace == CMYKColorspace) && (append_image->colorspace == CMYKColorspace)) SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x)); p++; q++; } sync=SyncCacheViewAuthenticPixels(append_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (stack == MagickFalse) { x_offset+=(ssize_t) next->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) next->rows; } proceed=SetImageProgress(append_image,AppendImageTag,n,number_images); if (proceed == MagickFalse) 
break; next=GetNextImageInList(next); } append_view=DestroyCacheView(append_view); if (status == MagickFalse) append_image=DestroyImage(append_image); return(append_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a t c h I m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CatchImageException() returns if no exceptions are found in the image % sequence, otherwise it determines the most severe exception and reports % it as a warning or error depending on the severity. % % The format of the CatchImageException method is: % % ExceptionType CatchImageException(Image *image) % % A description of each parameter follows: % % o image: An image sequence. % */ MagickExport ExceptionType CatchImageException(Image *image) { ExceptionInfo *exception; ExceptionType severity; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=AcquireExceptionInfo(); GetImageException(image,exception); CatchException(exception); severity=exception->severity; exception=DestroyExceptionInfo(exception); return(severity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipImagePath() sets the image clip mask based any clipping path information % if it exists. % % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. 
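%
%  For example, ClipImage() below simply selects the first clipping path:
%
%    (void) ClipImagePath(image,"#1",MagickTrue);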
% */ MagickExport MagickBooleanType ClipImage(Image *image) { return(ClipImagePath(image,"#1",MagickTrue)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(&image->exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent); (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent); clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask); if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse); (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageClipMask(image,clip_mask); clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows are 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType detach, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { double scale; Image *clone_image; size_t length; /* Clone the image.
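    When 'detach' is MagickFalse the clone shares (reference counts) the
    parent's blob; when MagickTrue it receives a fresh blob and is unlinked
    from the parent's image list.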
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image)); if (clone_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickSignature; clone_image->storage_class=image->storage_class; clone_image->channels=image->channels; clone_image->colorspace=image->colorspace; clone_image->matte=image->matte; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; if (image->colormap != (PixelPacket *) NULL) { /* Allocate and copy the image colormap. */ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelPacket *) NULL) { clone_image=DestroyImage(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) CopyMagickMemory(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); InitializeExceptionInfo(&clone_image->exception); InheritException(&clone_image->exception,&image->exception); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MaxTextExtent); (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent); (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent); clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); clone_image->clip_mask=NewImageList(); clone_image->mask=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AllocateSemaphoreInfo(); if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); if (image->clip_mask != (Image *) NULL) clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue, exception); if (image->mask != (Image *) NULL) clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } if ((columns == image->columns) && (rows == image->rows)) { if (image->clip_mask != (Image *) NULL) 
clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue, exception); if (image->mask != (Image *) NULL) clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception); } scale=1.0; if (image->columns != 0) scale=(double) columns/(double) image->columns; clone_image->page.width=(size_t) floor(scale*image->page.width+0.5); clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5); clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5); scale=1.0; if (image->rows != 0) scale=(double) rows/(double) image->rows; clone_image->page.height=(size_t) floor(scale*image->page.height+0.5); clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5); clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5); clone_image->cache=ClonePixelCache(image->cache); if (SetImageExtent(clone_image,columns,rows) == MagickFalse) { InheritException(exception,&clone_image->exception); clone_image=DestroyImage(clone_image); } return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info) { ImageInfo *clone_info; clone_info=AcquireImageInfo(); if (image_info == (ImageInfo *) NULL) return(clone_info); clone_info->compression=image_info->compression; clone_info->temporary=image_info->temporary; clone_info->adjoin=image_info->adjoin; clone_info->antialias=image_info->antialias; clone_info->scene=image_info->scene; clone_info->number_scenes=image_info->number_scenes; clone_info->depth=image_info->depth; (void) CloneString(&clone_info->size,image_info->size); (void) CloneString(&clone_info->extract,image_info->extract); (void) CloneString(&clone_info->scenes,image_info->scenes); (void) CloneString(&clone_info->page,image_info->page); clone_info->interlace=image_info->interlace; clone_info->endian=image_info->endian; clone_info->units=image_info->units; clone_info->quality=image_info->quality; (void) CloneString(&clone_info->sampling_factor,image_info->sampling_factor); (void) CloneString(&clone_info->server_name,image_info->server_name); (void) CloneString(&clone_info->font,image_info->font); (void) CloneString(&clone_info->texture,image_info->texture); (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->pen=image_info->pen; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->matte_color=image_info->matte_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colors=image_info->colors; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->preview_type=image_info->preview_type; clone_info->group=image_info->group; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; (void) 
CloneString(&clone_info->view,image_info->view); (void) CloneString(&clone_info->authenticate,image_info->authenticate); (void) CloneImageOptions(clone_info,image_info); clone_info->progress_monitor=image_info->progress_monitor; clone_info->client_data=image_info->client_data; clone_info->cache=image_info->cache; if (image_info->cache != (void *) NULL) clone_info->cache=ReferencePixelCache(image_info->cache); if (image_info->profile != (void *) NULL) clone_info->profile=(void *) CloneStringInfo((StringInfo *) image_info->profile); SetImageInfoFile(clone_info,image_info->file); SetImageInfoBlob(clone_info,image_info->blob,image_info->length); clone_info->stream=image_info->stream; clone_info->virtual_pixel_method=image_info->virtual_pixel_method; (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent); (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent); (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent); (void) CopyMagickString(clone_info->filename,image_info->filename, MaxTextExtent); clone_info->subimage=image_info->scene; /* deprecated */ clone_info->subrange=image_info->number_scenes; /* deprecated */ clone_info->channel=image_info->channel; clone_info->debug=IsEventLogging(); clone_info->signature=image_info->signature; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o p y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CopyImagePixels() copies pixels from the source image as defined by the % geometry the destination image at the specified offset. % % The format of the CopyImagePixels method is: % % MagickBooleanType CopyImagePixels(Image *image,const Image *source_image, % const RectangleInfo *geometry,const OffsetInfo *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the destination image. % % o source_image: the source image. % % o geometry: define the dimensions of the source pixel rectangle. % % o offset: define the offset in the destination image. % % o exception: return the highest severity exception. % */ MagickExport MagickBooleanType CopyImagePixels(Image *image, const Image *source_image,const RectangleInfo *geometry, const OffsetInfo *offset,ExceptionInfo *exception) { #define CopyImageTag "Copy/Image" CacheView *image_view, *source_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(source_image != (Image *) NULL); assert(geometry != (RectangleInfo *) NULL); assert(offset != (OffsetInfo *) NULL); if ((offset->x < 0) || (offset->y < 0) || ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) || ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows)) ThrowBinaryException(OptionError,"GeometryDoesNotContainImage", image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); /* Copy image pixels. 
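    The geometry and offset were bounds-checked above, so each row can be
    copied without further clipping; rows are processed in parallel when
    OpenMP support is compiled in.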
*/ status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,image,geometry->height,1) #endif for (y=0; y < (ssize_t) geometry->height; y++) { register const IndexPacket *magick_restrict source_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y, geometry->width,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } source_indexes=GetCacheViewVirtualIndexQueue(source_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) geometry->width; x++) { *q=(*p); if (image->colorspace == CMYKColorspace) indexes[x]=source_indexes[x]; p++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CopyImagePixels) #endif proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); source_view=DestroyCacheView(source_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImage() dereferences an image, deallocating memory associated with % the image if the reference count becomes zero. % % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. 
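    This point is reached only once the reference count has dropped to zero.
    DestroyImage() always returns NULL in this implementation, so callers
    conventionally write image=DestroyImage(image) to clear their pointer.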
*/ DestroyImagePixels(image); if (image->clip_mask != (Image *) NULL) image->clip_mask=DestroyImage(image->clip_mask); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelPacket *) NULL) image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info*) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); DestroyBlob(image); (void) ClearExceptionInfo(&image->exception,MagickTrue); if (image->semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&image->semaphore); image->signature=(~MagickSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates memory associated with an ImageInfo % structure. % % The format of the DestroyImageInfo method is: % % ImageInfo *DestroyImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); if (image_info->scenes != (char *) NULL) image_info->scenes=DestroyString(image_info->scenes); if (image_info->page != (char *) NULL) image_info->page=DestroyString(image_info->page); if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); if (image_info->view != (char *) NULL) image_info->view=DestroyString(image_info->view); if (image_info->authenticate != (char *) NULL) image_info->authenticate=DestroyString( image_info->authenticate); DestroyImageOptions(image_info); if (image_info->cache != (void *) NULL) image_info->cache=DestroyPixelCache(image_info->cache); if (image_info->profile != (StringInfo *) NULL) image_info->profile=(void *) DestroyStringInfo((StringInfo *) image_info->profile); image_info->signature=(~MagickSignature); image_info=(ImageInfo *) RelinquishMagickMemory(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s a s s o c i a t e I m a g e S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
DisassociateImageStream() disassociates the image stream. It checks if the % blob of the specified image is referenced by other images. If the reference % count is higher than 1, a new blob is assigned to the specified image. % % The format of the DisassociateImageStream method is: % % void DisassociateImageStream(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DisassociateImageStream(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); DisassociateBlob(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C l i p M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageClipMask() returns the clip path associated with the image. % % The format of the GetImageClipMask method is: % % Image *GetImageClipMask(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *GetImageClipMask(const Image *image, ExceptionInfo *exception) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (image->clip_mask == (Image *) NULL) return((Image *) NULL); return(CloneImage(image->clip_mask,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageException() traverses an image sequence and returns any % error more severe than noted by the exception parameter. % % The format of the GetImageException method is: % % void GetImageException(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to a list of one or more images. % % o exception: return the highest severity exception. % */ MagickExport void GetImageException(Image *image,ExceptionInfo *exception) { register Image *next; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->exception.severity == UndefinedException) continue; if (next->exception.severity > exception->severity) InheritException(exception,&next->exception); next->exception.severity=UndefinedException; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfo() initializes image_info to default values. % % The format of the GetImageInfo method is: % % void GetImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport void GetImageInfo(ImageInfo *image_info) { char *synchronize; ExceptionInfo *exception; /* File and image dimension members.
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info != (ImageInfo *) NULL); (void) ResetMagickMemory(image_info,0,sizeof(*image_info)); image_info->adjoin=MagickTrue; image_info->interlace=NoInterlace; image_info->channel=DefaultChannels; image_info->quality=UndefinedCompressionQuality; image_info->antialias=MagickTrue; image_info->dither=MagickTrue; synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { image_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } exception=AcquireExceptionInfo(); (void) QueryColorDatabase(BackgroundColor,&image_info->background_color, exception); (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception); (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception); (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color, exception); exception=DestroyExceptionInfo(exception); image_info->debug=IsEventLogging(); image_info->signature=MagickSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfoFile() returns the image info file member. % % The format of the GetImageInfoFile method is: % % FILE *GetImageInfoFile(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info) { return(image_info->file); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMask() returns the mask associated with the image. % % The format of the GetImageMask method is: % % Image *GetImageMask(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (image->mask == (Image *) NULL) return((Image *) NULL); return(CloneImage(image->mask,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannels() returns the number of pixel channels associated with the % specified image. % % The format of the GetChannels method is: % % size_t GetImageChannels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport size_t GetImageChannels(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(image->channels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e R e f e r e n c e C o u n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageReferenceCount() returns the image reference count. 
%
%  The format of the GetImageReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the boundaries
%  of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p r e t I m a g e F i l e n a m e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image
%  filename.  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o format: A filename describing the format to use to write the numeric
%      argument. Only the first numeric format identifier is replaced.
%
%    o value: Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  size_t
    length;

  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          value;

        value=(ssize_t) strtol(q,&q,10);
        (void) value;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t) (MaxTextExtent-
          (p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
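          A hypothetical sketch: with format "frame_%[filename:label].png"
          this branch substitutes the image's "filename:label" property,
          artifact, or option, while the 'd', 'o', and 'x' cases above handle
          numeric forms such as "image_%03d.png" (value 7 yields
          "image_007.png").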
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
#if 0
  /* FUTURE: remove this code. -- Anthony  29 April 2012
     Removed as GetMagickProperty() will never match a "filename:" string
     as this is not a 'known' image property.
  */
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          value=GetMagickProperty(image_info,image,pattern);
        else
#endif
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) && (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),value,(size_t)
          (MaxTextExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
%  0..65535).
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
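%
%  A hypothetical caller sketch (image and exception are assumed to be
%  valid):
%
%      if (IsHighDynamicRangeImage(image,exception) != MagickFalse)
%        (void) FormatLocaleFile(stdout,"HDR image\n");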
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->signature != MagickSignature)
      return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue if any pixel in the image has been
%  altered since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
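%
%  Note: as the implementation shows, the test is not limited to pixels; a
%  change to any frame's magick or filename relative to the first frame also
%  reports MagickTrue.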
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);  /* guard a failed clone before dropping a reference */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w M a g i c k I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
%        const size_t height,const MagickPixelPacket *background)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
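%
%  A minimal caller sketch (image_info and exception are assumed to be
%  valid, and QueryMagickColor() is assumed to fill the packet):
%
%      MagickPixelPacket
%        background;
%
%      (void) QueryMagickColor("white",&background,exception);
%      image=NewMagickImage(image_info,640,480,&background);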
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const MagickPixelPacket *background) { CacheView *image_view; ExceptionInfo *exception; Image *image; ssize_t y; MagickBooleanType status; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickSignature); assert(background != (const MagickPixelPacket *) NULL); image=AcquireImage(image_info); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->matte=background->matte; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,background,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. % % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. 
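%
%  For example (sketch): a page string of "0x0+0+0" clears the canvas size
%  and offset, while "+10+20" repositions only the page origin.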
% */ MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page) { MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseAbsoluteGeometry(page,&geometry); if ((flags & WidthValue) != 0) { if ((flags & HeightValue) == 0) geometry.height=geometry.width; image->page.width=geometry.width; image->page.height=geometry.height; } if ((flags & AspectValue) != 0) { if ((flags & XValue) != 0) image->page.x+=geometry.x; if ((flags & YValue) != 0) image->page.y+=geometry.y; } else { if ((flags & XValue) != 0) { image->page.x=geometry.x; if ((image->page.width == 0) && (geometry.x > 0)) image->page.width=image->columns+geometry.x; } if ((flags & YValue) != 0) { image->page.y=geometry.y; if ((image->page.height == 0) && (geometry.y > 0)) image->page.height=image->rows+geometry.y; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e B a c k g r o u n d C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageBackgroundColor() initializes the image pixels to the image % background color. The background color is defined by the background_color % member of the image structure. % % The format of the SetImage method is: % % MagickBooleanType SetImageBackgroundColor(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType SetImageBackgroundColor(Image *image) { CacheView *image_view; ExceptionInfo *exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if ((IsPixelGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) TransformImageColorspace(image,RGBColorspace); if ((image->background_color.opacity != OpaqueOpacity) && (image->matte == MagickFalse)) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; pixel.opacity=OpaqueOpacity; SetPixelPacket(image,&background,&pixel,&index); /* Set image background color. 
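    The packed background pixel (and the CMYK index, when applicable)
    computed above is replicated across every row by the loop below.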
*/ status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) *q++=pixel; if (image->colorspace == CMYKColorspace) { register IndexPacket *magick_restrict indexes; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannels() sets the number of pixels channels associated with the % image. % % The format of the SetImageChannels method is: % % MagickBooleanType SetImageChannels(Image *image,const size_t channels) % % A description of each parameter follows: % % o image: the image. % % o channels: The number of pixel channels. % */ MagickExport MagickBooleanType SetImageChannels(Image *image, const size_t channels) { image->channels=channels; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColor() set the entire image canvas to the specified color. % % The format of the SetImageColor method is: % % MagickBooleanType SetImageColor(Image *image, % const MagickPixelPacket *color) % % A description of each parameter follows: % % o image: the image. % % o background: the image color. 
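%
%  A hypothetical caller sketch:
%
%      MagickPixelPacket
%        color;
%
%      (void) QueryMagickColor("none",&color,&image->exception);
%      (void) SetImageColor(image,&color);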
% */ MagickExport MagickBooleanType SetImageColor(Image *image, const MagickPixelPacket *color) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); assert(color != (const MagickPixelPacket *) NULL); image->colorspace=color->colorspace; image->matte=color->matte; image->fuzz=color->fuzz; image->depth=color->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,color,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageStorageClass() sets the image class: DirectClass for true color % images or PseudoClass for colormapped images. % % The format of the SetImageStorageClass method is: % % MagickBooleanType SetImageStorageClass(Image *image, % const ClassType storage_class) % % A description of each parameter follows: % % o image: the image. % % o storage_class: The image class. % */ MagickExport MagickBooleanType SetImageStorageClass(Image *image, const ClassType storage_class) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->storage_class=storage_class; return(SyncImagePixelCache(image,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C l i p M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageClipMask() associates a clip path with the image. The clip path % must be the same dimensions as the image. Set any pixel component of % the clip path to TransparentOpacity to prevent that corresponding image % pixel component from being updated when SyncAuthenticPixels() is applied. % % The format of the SetImageClipMask method is: % % MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask) % % A description of each parameter follows: % % o image: the image. % % o clip_mask: the image clip path. 
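%
%  Note: as the implementation shows, passing a NULL clip mask removes any
%  existing clip path, e.g. (void) SetImageClipMask(image,(Image *) NULL).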
% */ MagickExport MagickBooleanType SetImageClipMask(Image *image, const Image *clip_mask) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (clip_mask != (const Image *) NULL) if ((clip_mask->columns != image->columns) || (clip_mask->rows != image->rows)) ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename); if (image->clip_mask != (Image *) NULL) image->clip_mask=DestroyImage(image->clip_mask); image->clip_mask=NewImageList(); if (clip_mask == (Image *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception); if (image->clip_mask == (Image *) NULL) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageExtent() sets the image size (i.e. columns & rows). % % The format of the SetImageExtent method is: % % MagickBooleanType SetImageExtent(Image *image,const size_t columns, % const size_t rows) % % A description of each parameter follows: % % o image: the image. % % o columns: The image width in pixels. % % o rows: The image height in pixels. % */ MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns, const size_t rows) { if ((columns == 0) || (rows == 0)) ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename); image->columns=columns; image->rows=rows; if (image->depth > (8*sizeof(MagickSizeType))) ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename); return(SyncImagePixelCache(image,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfo() initializes the `magick' field of the ImageInfo structure. % It is set to a type of image format based on the prefix or suffix of the % filename. For example, `ps:image' returns PS indicating a Postscript image. % JPEG is returned for this filename: `image.jpg'. The filename prefix has % precendence over the suffix. Use an optional index enclosed in brackets % after a file name to specify a desired scene of a multi-resolution image % format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value % indicates success. % % The format of the SetImageInfo method is: % % MagickBooleanType SetImageInfo(ImageInfo *image_info, % const unsigned int frames,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o frames: the number of images you intend to write. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info, const unsigned int frames,ExceptionInfo *exception) { char extension[MaxTextExtent], filename[MaxTextExtent], magic[MaxTextExtent], *q, subimage[MaxTextExtent]; const MagicInfo *magic_info; const MagickInfo *magick_info; ExceptionInfo *sans_exception; Image *image; MagickBooleanType status; register const char *p; ssize_t count; unsigned char magick[2*MaxTextExtent]; /* Look for 'image.format' in filename. 
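    Illustrative forms, as described above: "image.jpg" selects a coder by
    suffix, "ps:image" selects one by prefix, and "img0001.pcd[4]" also
    requests scene 4 of a multi-resolution image.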
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); *subimage='\0'; GetPathComponent(image_info->filename,SubimagePath,subimage); if (*subimage != '\0') { /* Look for scene specification (e.g. img0001.pcd[4]). */ if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse) { if (IsGeometry(subimage) != MagickFalse) (void) CloneString(&image_info->extract,subimage); } else { size_t first, last; (void) CloneString(&image_info->scenes,subimage); image_info->scene=StringToUnsignedLong(image_info->scenes); image_info->number_scenes=image_info->scene; p=image_info->scenes; for (q=(char *) image_info->scenes; *q != '\0'; p++) { while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; first=(size_t) strtol(p,&q,10); last=first; while (isspace((int) ((unsigned char) *q)) != 0) q++; if (*q == '-') last=(size_t) strtol(q+1,&q,10); if (first > last) Swap(first,last); if (first < image_info->scene) image_info->scene=first; if (last > image_info->number_scenes) image_info->number_scenes=last; p=q; } image_info->number_scenes-=image_info->scene-1; image_info->subimage=image_info->scene; image_info->subrange=image_info->number_scenes; } } *extension='\0'; if (*image_info->magick == '\0') GetPathComponent(image_info->filename,ExtensionPath,extension); #if defined(MAGICKCORE_ZLIB_DELEGATE) if (*extension != '\0') if ((LocaleCompare(extension,"gz") == 0) || (LocaleCompare(extension,"Z") == 0) || (LocaleCompare(extension,"svgz") == 0) || (LocaleCompare(extension,"wmz") == 0)) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif #if defined(MAGICKCORE_BZLIB_DELEGATE) if (*extension != '\0') if (LocaleCompare(extension,"bz2") == 0) { char path[MaxTextExtent]; (void) CopyMagickString(path,image_info->filename,MaxTextExtent); path[strlen(path)-strlen(extension)-1]='\0'; GetPathComponent(path,ExtensionPath,extension); } #endif image_info->affirm=MagickFalse; sans_exception=AcquireExceptionInfo(); if (*extension != '\0') { MagickFormatType format_type; register ssize_t i; static const char *format_type_formats[] = { "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "LAUNCH", "MPEG:DECODE", "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY", "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL }; /* User specified image format. */ (void) CopyMagickString(magic,extension,MaxTextExtent); LocaleUpper(magic); /* Look for explicit image formats. */ format_type=UndefinedFormatType; i=0; while ((format_type == UndefinedFormatType) && (format_type_formats[i] != (char *) NULL)) { if ((*magic == *format_type_formats[i]) && (LocaleCompare(magic,format_type_formats[i]) == 0)) format_type=ExplicitFormatType; i++; } magick_info=GetMagickInfo(magic,sans_exception); if ((magick_info != (const MagickInfo *) NULL) && (magick_info->format_type != UndefinedFormatType)) format_type=magick_info->format_type; if (format_type == UndefinedFormatType) (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); else if (format_type == ExplicitFormatType) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); } if (LocaleCompare(magic,"RGB") == 0) image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */ } /* Look for explicit 'format:image' in filename. 
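    For example (a sketch), "png:logo" upper-cases to the PNG coder and, if
    the coder or a delegate is known, sets image_info->affirm so the prefix
    overrides any suffix or magic-byte detection.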
*/ *magic='\0'; GetPathComponent(image_info->filename,MagickPath,magic); if (*magic == '\0') { (void) CopyMagickString(magic,image_info->magick,MaxTextExtent); magick_info=GetMagickInfo(magic,sans_exception); GetPathComponent(image_info->filename,CanonicalPath,filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); } else { const DelegateInfo *delegate_info; /* User specified image format. */ LocaleUpper(magic); magick_info=GetMagickInfo(magic,sans_exception); delegate_info=GetDelegateInfo(magic,"*",sans_exception); if (delegate_info == (const DelegateInfo *) NULL) delegate_info=GetDelegateInfo("*",magic,sans_exception); if (((magick_info != (const MagickInfo *) NULL) || (delegate_info != (const DelegateInfo *) NULL)) && (IsMagickConflict(magic) == MagickFalse)) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MaxTextExtent); GetPathComponent(image_info->filename,CanonicalPath,filename); (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); } } sans_exception=DestroyExceptionInfo(sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; if ((image_info->adjoin != MagickFalse) && (frames > 1)) { /* Test for multiple image support (e.g. image%02d.png). */ (void) InterpretImageFilename(image_info,(Image *) NULL, image_info->filename,(int) image_info->scene,filename); if ((LocaleCompare(filename,image_info->filename) != 0) && (strchr(filename,'%') == (char *) NULL)) image_info->adjoin=MagickFalse; } if ((image_info->adjoin != MagickFalse) && (frames > 0)) { /* Some image formats do not support multiple frames per file. */ magick_info=GetMagickInfo(magic,exception); if (magick_info != (const MagickInfo *) NULL) if (GetMagickAdjoin(magick_info) == MagickFalse) image_info->adjoin=MagickFalse; } if (image_info->affirm != MagickFalse) return(MagickTrue); if (frames == 0) { /* Determine the image format from the first few bytes of the file. */ image=AcquireImage(image_info); (void) CopyMagickString(image->filename,image_info->filename, MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } if ((IsBlobSeekable(image) == MagickFalse) || (IsBlobExempt(image) != MagickFalse)) { /* Copy standard input or pipe to temporary file. */ *filename='\0'; status=ImageToFile(image,filename,exception); (void) CloseBlob(image); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } SetImageInfoFile(image_info,(FILE *) NULL); (void) CopyMagickString(image->filename,filename,MaxTextExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } (void) CopyMagickString(image_info->filename,filename,MaxTextExtent); image_info->temporary=MagickTrue; } (void) ResetMagickMemory(magick,0,sizeof(magick)); count=ReadBlob(image,2*MaxTextExtent,magick); (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR); (void) CloseBlob(image); image=DestroyImage(image); /* Check magic.xml configuration file. 
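    The 2*MaxTextExtent header bytes read above are compared against the
    signatures loaded from magic.xml; a match (e.g. "\211PNG", assuming the
    usual configured signatures) determines image_info->magick below.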
*/ sans_exception=AcquireExceptionInfo(); magic_info=GetMagicInfo(magick,(size_t) count,sans_exception); if ((magic_info != (const MagicInfo *) NULL) && (GetMagicName(magic_info) != (char *) NULL)) { (void) CopyMagickString(image_info->magick,GetMagicName(magic_info), MaxTextExtent); magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); return(MagickTrue); } magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoBlob() sets the image info blob member. % % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. % */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const Image *mask) % % A description of each parameter follows: % % o image: the image. % % o mask: the image mask. 
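%
%  Note: as with SetImageClipMask(), a NULL mask removes the association,
%  e.g. (void) SetImageMask(image,(Image *) NULL).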
% */ MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (mask != (const Image *) NULL) if ((mask->columns != image->columns) || (mask->rows != image->rows)) ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); image->mask=NewImageList(); if (mask == (Image *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception); if (image->mask == (Image *) NULL) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e O p a c i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageOpacity() sets the opacity levels of the image. % % The format of the SetImageOpacity method is: % % MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o opacity: the level of transparency: 0 is fully opaque and QuantumRange is % fully transparent. % */ MagickExport MagickBooleanType SetImageOpacity(Image *image, const Quantum opacity) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); image->matte=MagickTrue; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelOpacity(q,opacity); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageVirtualPixelMethod() sets the "virtual pixels" method for the % image and returns the previous setting. A virtual pixel is any pixel access % that is outside the boundaries of the image cache. % % The format of the SetImageVirtualPixelMethod() method is: % % VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image, % const VirtualPixelMethod virtual_pixel_method) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. 
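%
%  A save-and-restore sketch (hypothetical caller):
%
%      VirtualPixelMethod
%        previous;
%
%      previous=SetImageVirtualPixelMethod(image,BlackVirtualPixelMethod);
%      ... access pixels beyond the canvas boundaries ...
%      (void) SetImageVirtualPixelMethod(image,previous);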
% */ MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image, const VirtualPixelMethod virtual_pixel_method) { assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(SetPixelCacheVirtualMethod(image,virtual_pixel_method)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S m u s h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SmushImages() takes all images from the current image pointer to the end % of the image list and smushes them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting now effects how the image is justified in the % final image. % % The format of the SmushImages method is: % % Image *SmushImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o offset: minimum distance in pixels between images. % % o exception: return any errors or warnings in this structure. % */ static ssize_t SmushXGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *left_view, *right_view; const Image *left_image, *right_image; RectangleInfo left_geometry, right_geometry; register const PixelPacket *p; register ssize_t i, y; size_t gap; ssize_t x; if (images->previous == (Image *) NULL) return(0); right_image=images; SetGeometry(smush_image,&right_geometry); GravityAdjustGeometry(right_image->columns,right_image->rows, right_image->gravity,&right_geometry); left_image=images->previous; SetGeometry(smush_image,&left_geometry); GravityAdjustGeometry(left_image->columns,left_image->rows, left_image->gravity,&left_geometry); gap=right_image->columns; left_view=AcquireVirtualCacheView(left_image,exception); right_view=AcquireVirtualCacheView(right_image,exception); for (y=0; y < (ssize_t) smush_image->rows; y++) { for (x=(ssize_t) left_image->columns-1; x > 0; x--) { p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((left_image->columns-x-1) >= gap)) break; } i=(ssize_t) left_image->columns-x-1; for (x=0; x < (ssize_t) right_image->columns; x++) { p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1, exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((x+i) >= (ssize_t) gap)) break; } if ((x+i) < (ssize_t) gap) gap=(size_t) (x+i); } right_view=DestroyCacheView(right_view); left_view=DestroyCacheView(left_view); if (y < (ssize_t) smush_image->rows) return(offset); return((ssize_t) gap-offset); } static ssize_t SmushYGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *bottom_view, *top_view; const Image *bottom_image, *top_image; RectangleInfo bottom_geometry, top_geometry; register const PixelPacket *p; register ssize_t i, x; size_t gap; ssize_t y; if (images->previous == (Image *) NULL) return(0); bottom_image=images; SetGeometry(smush_image,&bottom_geometry); GravityAdjustGeometry(bottom_image->columns,bottom_image->rows, bottom_image->gravity,&bottom_geometry); top_image=images->previous; 
SetGeometry(smush_image,&top_geometry); GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity, &top_geometry); gap=bottom_image->rows; top_view=AcquireVirtualCacheView(top_image,exception); bottom_view=AcquireVirtualCacheView(bottom_image,exception); for (x=0; x < (ssize_t) smush_image->columns; x++) { for (y=(ssize_t) top_image->rows-1; y > 0; y--) { p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((top_image->rows-y-1) >= gap)) break; } i=(ssize_t) top_image->rows-y-1; for (y=0; y < (ssize_t) bottom_image->rows; y++) { p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1, exception); if ((p == (const PixelPacket *) NULL) || (GetPixelOpacity(p) != TransparentOpacity) || ((y+i) >= (ssize_t) gap)) break; } if ((y+i) < (ssize_t) gap) gap=(size_t) (y+i); } bottom_view=DestroyCacheView(bottom_view); top_view=DestroyCacheView(top_view); if (x < (ssize_t) smush_image->columns) return(offset); return((ssize_t) gap-offset); } MagickExport Image *SmushImages(const Image *images, const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception) { #define SmushImageTag "Smush/Image" CacheView *smush_view; const Image *image; Image *smush_image; MagickBooleanType matte, proceed, status; MagickOffsetType n; RectangleInfo geometry; register const Image *next; size_t height, number_images, width; ssize_t x_offset, y_offset; /* Compute maximum area of smushed area. */ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=images; matte=image->matte; number_images=1; width=image->columns; height=image->rows; next=GetNextImageInList(image); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->matte != MagickFalse) matte=MagickTrue; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; if (next->previous != (Image *) NULL) height+=offset; continue; } width+=next->columns; if (next->previous != (Image *) NULL) width+=offset; if (next->rows > height) height=next->rows; } /* Smush images. 
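    Each frame is composited at an offset pulled back by the measured
    transparent gap (SmushYGap for stacked images, SmushXGap for rows),
    less the caller's minimum offset.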
*/ smush_image=CloneImage(image,width,height,MagickTrue,exception); if (smush_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse) { InheritException(exception,&smush_image->exception); smush_image=DestroyImage(smush_image); return((Image *) NULL); } smush_image->matte=matte; (void) SetImageBackgroundColor(smush_image); status=MagickTrue; x_offset=0; y_offset=0; smush_view=AcquireVirtualCacheView(smush_image,exception); for (n=0; n < (MagickOffsetType) number_images; n++) { SetGeometry(smush_image,&geometry); GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry); if (stack != MagickFalse) { x_offset-=geometry.x; y_offset-=SmushYGap(smush_image,image,offset,exception); } else { x_offset-=SmushXGap(smush_image,image,offset,exception); y_offset-=geometry.y; } status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset); proceed=SetImageProgress(image,SmushImageTag,n,number_images); if (proceed == MagickFalse) break; if (stack == MagickFalse) { x_offset+=(ssize_t) image->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) image->rows; } image=GetNextImageInList(image); } if (stack == MagickFalse) smush_image->columns=(size_t) x_offset; else smush_image->rows=(size_t) y_offset; smush_view=DestroyCacheView(smush_view); if (status == MagickFalse) smush_image=DestroyImage(smush_image); return(smush_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t r i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StripImage() strips an image of all profiles and comments. % % The format of the StripImage method is: % % MagickBooleanType StripImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType StripImage(Image *image) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); DestroyImageProfiles(image); (void) DeleteImageProperty(image,"comment"); (void) DeleteImageProperty(image,"date:create"); (void) DeleteImageProperty(image,"date:modify"); status=SetImageArtifact(image,"png:exclude-chunk", "bKGD,cHRM,EXIF,gAMA,iCCP,iTXt,sRGB,tEXt,zCCP,zTXt,date"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImage() initializes the red, green, and blue intensities of each pixel % as defined by the colormap index. % % The format of the SyncImage method is: % % MagickBooleanType SyncImage(Image *image) % % A description of each parameter follows: % % o image: the image. 
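%
%  For example, after directly editing the colormap of a PseudoClass image
%  (sketch):
%
%      image->colormap[0]=image->background_color;
%      (void) SyncImage(image);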
% */ static inline IndexPacket PushColormapIndex(Image *image, const size_t index,MagickBooleanType *range_exception) { if (index < image->colors) return((IndexPacket) index); *range_exception=MagickTrue; return((IndexPacket) 0); } MagickExport MagickBooleanType SyncImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType range_exception, status, taint; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (image->ping != MagickFalse) return(MagickTrue); if (image->storage_class != PseudoClass) return(MagickFalse); assert(image->colormap != (PixelPacket *) NULL); range_exception=MagickFalse; status=MagickTrue; taint=image->taint; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(range_exception,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket index; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x), &range_exception); if (image->matte == MagickFalse) SetPixelRgb(q,image->colormap+(ssize_t) index) else SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->taint=taint; if ((image->ping == MagickFalse) && (range_exception != MagickFalse)) (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e S e t t i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageSettings() syncs image_info options into per-image attributes. % % The format of the SyncImageSettings method is: % % MagickBooleanType SyncImageSettings(const ImageInfo *image_info, % Image *image) % MagickBooleanType SyncImagesSettings(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. 
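%
%  A hypothetical caller sketch:
%
%      (void) SetImageOption(image_info,"quality","85");
%      (void) SyncImageSettings(image_info,image);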
% */ MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info, Image *images) { Image *image; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) (void) SyncImageSettings(image_info,image); (void) DeleteImageOption(image_info,"page"); return(MagickTrue); } MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info, Image *image) { char property[MaxTextExtent]; const char *option, *value; GeometryInfo geometry_info; MagickStatusType flags; ResolutionType units; /* Sync image options. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); option=GetImageOption(image_info,"background"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->background_color, &image->exception); option=GetImageOption(image_info,"bias"); if (option != (const char *) NULL) image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"black-point-compensation"); if (option != (const char *) NULL) image->black_point_compensation=(MagickBooleanType) ParseCommandOption( MagickBooleanOptions,MagickFalse,option); option=GetImageOption(image_info,"blue-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x; } option=GetImageOption(image_info,"bordercolor"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->border_color,&image->exception); option=GetImageOption(image_info,"colors"); if (option != (const char *) NULL) image->colors=StringToUnsignedLong(option); option=GetImageOption(image_info,"compose"); if (option != (const char *) NULL) image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,option); option=GetImageOption(image_info,"compress"); if (option != (const char *) NULL) image->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,option); option=GetImageOption(image_info,"debug"); if (option != (const char *) NULL) image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"density"); if (option != (const char *) NULL) { GeometryInfo geometry_info; /* Set image density. 
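    For example (sketch), a density of "300" yields a 300x300 resolution,
    while "300x150" sets x_resolution=300 and y_resolution=150, per the
    rho/sigma results of ParseGeometry() below.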
*/ flags=ParseGeometry(option,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } option=GetImageOption(image_info,"depth"); if (option != (const char *) NULL) image->depth=StringToUnsignedLong(option); option=GetImageOption(image_info,"endian"); if (option != (const char *) NULL) image->endian=(EndianType) ParseCommandOption(MagickEndianOptions, MagickFalse,option); option=GetImageOption(image_info,"filter"); if (option != (const char *) NULL) image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions, MagickFalse,option); option=GetImageOption(image_info,"fuzz"); if (option != (const char *) NULL) image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0); option=GetImageOption(image_info,"gravity"); if (option != (const char *) NULL) image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(image_info,"green-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y=image->chromaticity.green_primary.x; } option=GetImageOption(image_info,"intensity"); if (option != (const char *) NULL) image->intensity=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,option); option=GetImageOption(image_info,"intent"); if (option != (const char *) NULL) image->rendering_intent=(RenderingIntent) ParseCommandOption( MagickIntentOptions,MagickFalse,option); option=GetImageOption(image_info,"interlace"); if (option != (const char *) NULL) image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions, MagickFalse,option); option=GetImageOption(image_info,"interpolate"); if (option != (const char *) NULL) image->interpolate=(InterpolatePixelMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,option); option=GetImageOption(image_info,"loop"); if (option != (const char *) NULL) image->iterations=StringToUnsignedLong(option); option=GetImageOption(image_info,"mattecolor"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->matte_color,&image->exception); option=GetImageOption(image_info,"orient"); if (option != (const char *) NULL) image->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,option); option=GetImageOption(image_info,"page"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) image->quality=StringToUnsignedLong(option); option=GetImageOption(image_info,"red-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; image->chromaticity.red_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.red_primary.y=image->chromaticity.red_primary.x; } if (image_info->quality != UndefinedCompressionQuality) image->quality=image_info->quality; option=GetImageOption(image_info,"scene"); if (option != (const char *) NULL) image->scene=StringToUnsignedLong(option); option=GetImageOption(image_info,"taint"); if (option != (const char *) NULL) image->taint=(MagickBooleanType) 
ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"tile-offset"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"transparent-color"); if (option != (const char *) NULL) (void) QueryColorDatabase(option,&image->transparent_color, &image->exception); option=GetImageOption(image_info,"type"); if (option != (const char *) NULL) image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, option); option=GetImageOption(image_info,"units"); if (option != (const char *) NULL) units=(ResolutionType) ParseCommandOption(MagickResolutionOptions, MagickFalse,option); else units = image_info->units; if (units != UndefinedResolution) { if (image->units != units) switch (image->units) { case PixelsPerInchResolution: { if (units == PixelsPerCentimeterResolution) { image->x_resolution/=2.54; image->y_resolution/=2.54; } break; } case PixelsPerCentimeterResolution: { if (units == PixelsPerInchResolution) { image->x_resolution=(double) ((size_t) (100.0*2.54* image->x_resolution+0.5))/100.0; image->y_resolution=(double) ((size_t) (100.0*2.54* image->y_resolution+0.5))/100.0; } break; } default: break; } image->units=units; } option=GetImageOption(image_info,"white-point"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y=image->chromaticity.white_point.x; } ResetImageOptionIterator(image_info); for (option=GetNextImageOption(image_info); option != (const char *) NULL; ) { value=GetImageOption(image_info,option); if (value != (const char *) NULL) { (void) FormatLocaleString(property,MaxTextExtent,"%s",option); (void) SetImageArtifact(image,property,value); } option=GetNextImageOption(image_info); } return(MagickTrue); }
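/*
  Editor's illustrative sketch (not part of the original source; the
  filename and option value are placeholders): a typical caller decodes an
  image sequence and then folds the global ImageInfo options into every
  frame with SyncImagesSettings(), much as the command-line utilities do:

    ImageInfo *image_info = AcquireImageInfo();
    ExceptionInfo *exception = AcquireExceptionInfo();
    (void) CopyMagickString(image_info->filename, "input.png", MaxTextExtent);
    (void) SetImageOption(image_info, "gravity", "Center");
    Image *images = ReadImage(image_info, exception);
    if (images != (Image *) NULL)
      (void) SyncImagesSettings(image_info, images);
*/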
./CrossVul/dataset_final_sorted/CWE-617/c/bad_3398_0
crossvul-cpp_data_good_3397_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M PPPP CCCC % % MM MM P P C % % M M M PPPP C % % M M P C % % M M P CCCC % % % % % % Read/Write Magick Persistent Cache Image Format % % % % Software Design % % Cristy % % March 2000 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/constitute.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/utility.h" #include "MagickCore/version-private.h" /* Forward declarations. */ static MagickBooleanType WriteMPCImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M P C % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMPC() returns MagickTrue if the image format type, identified by the % magick string, is a Magick Persistent Cache image. % % The format of the IsMPC method is: % % MagickBooleanType IsMPC(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsMPC(const unsigned char *magick,const size_t length) { if (length < 14) return(MagickFalse); if (LocaleNCompare((const char *) magick,"id=MagickCache",14) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d C A C H E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadMPCImage() reads a Magick Persistent Cache image file and returns % it. It allocates the memory necessary for the new Image structure and % returns a pointer to the new image. 
% % The format of the ReadMPCImage method is: % % Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception) % % Decompression code contributed by Kyle Shorter. % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception) { char cache_filename[MagickPathExtent], id[MagickPathExtent], keyword[MagickPathExtent], *options; const unsigned char *p; GeometryInfo geometry_info; Image *image; int c; LinkedListInfo *profiles; MagickBooleanType status; MagickOffsetType offset; MagickStatusType flags; register ssize_t i; size_t depth, length; ssize_t count; StringInfo *profile; unsigned int signature; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) CopyMagickString(cache_filename,image->filename,MagickPathExtent); AppendImageFormat("cache",cache_filename); c=ReadBlobByte(image); if (c == EOF) { image=DestroyImage(image); return((Image *) NULL); } *id='\0'; (void) ResetMagickMemory(keyword,0,sizeof(keyword)); offset=0; do { /* Decode image header; header terminates one character beyond a ':'. */ profiles=(LinkedListInfo *) NULL; length=MagickPathExtent; options=AcquireString((char *) NULL); signature=GetMagickSignature((const StringInfo *) NULL); image->depth=8; image->compression=NoCompression; while ((isgraph(c) != MagickFalse) && (c != (int) ':')) { register char *p; if (c == (int) '{') { char *comment; /* Read comment-- any text between { }. */ length=MagickPathExtent; comment=AcquireString((char *) NULL); for (p=comment; comment != (char *) NULL; p++) { c=ReadBlobByte(image); if (c == (int) '\\') c=ReadBlobByte(image); else if ((c == EOF) || (c == (int) '}')) break; if ((size_t) (p-comment+1) >= length) { *p='\0'; length<<=1; comment=(char *) ResizeQuantumMemory(comment,length+ MagickPathExtent,sizeof(*comment)); if (comment == (char *) NULL) break; p=comment+strlen(comment); } *p=(char) c; } if (comment == (char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); *p='\0'; (void) SetImageProperty(image,"comment",comment,exception); comment=DestroyString(comment); c=ReadBlobByte(image); } else if (isalnum(c) != MagickFalse) { /* Get the keyword. */ length=MagickPathExtent; p=keyword; do { if (c == (int) '=') break; if ((size_t) (p-keyword) < (MagickPathExtent-1)) *p++=(char) c; c=ReadBlobByte(image); } while (c != EOF); *p='\0'; p=options; while (isspace((int) ((unsigned char) c)) != 0) c=ReadBlobByte(image); if (c == (int) '=') { /* Get the keyword value. 
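A value either runs to the next whitespace or, when it opens with '{', to the matching '}'; a backslash escapes a literal '}' inside a braced value.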
*/ c=ReadBlobByte(image); while ((c != (int) '}') && (c != EOF)) { if ((size_t) (p-options+1) >= length) { *p='\0'; length<<=1; options=(char *) ResizeQuantumMemory(options,length+ MagickPathExtent,sizeof(*options)); if (options == (char *) NULL) break; p=options+strlen(options); } *p++=(char) c; c=ReadBlobByte(image); if (c == '\\') { c=ReadBlobByte(image); if (c == (int) '}') { *p++=(char) c; c=ReadBlobByte(image); } } if (*options != '{') if (isspace((int) ((unsigned char) c)) != 0) break; } if (options == (char *) NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } *p='\0'; if (*options == '{') (void) CopyMagickString(options,options+1,strlen(options)); /* Assign a value to the specified keyword. */ switch (*keyword) { case 'a': case 'A': { if (LocaleCompare(keyword,"alpha-trait") == 0) { ssize_t alpha_trait; alpha_trait=ParseCommandOption(MagickPixelTraitOptions, MagickFalse,options); if (alpha_trait < 0) break; image->alpha_trait=(PixelTrait) alpha_trait; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'b': case 'B': { if (LocaleCompare(keyword,"background-color") == 0) { (void) QueryColorCompliance(options,AllCompliance, &image->background_color,exception); break; } if (LocaleCompare(keyword,"blue-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y= image->chromaticity.blue_primary.x; break; } if (LocaleCompare(keyword,"border-color") == 0) { (void) QueryColorCompliance(options,AllCompliance, &image->border_color,exception); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'c': case 'C': { if (LocaleCompare(keyword,"class") == 0) { ssize_t storage_class; storage_class=ParseCommandOption(MagickClassOptions, MagickFalse,options); if (storage_class < 0) break; image->storage_class=(ClassType) storage_class; break; } if (LocaleCompare(keyword,"colors") == 0) { image->colors=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"colorspace") == 0) { ssize_t colorspace; colorspace=ParseCommandOption(MagickColorspaceOptions, MagickFalse,options); if (colorspace < 0) break; image->colorspace=(ColorspaceType) colorspace; break; } if (LocaleCompare(keyword,"compression") == 0) { ssize_t compression; compression=ParseCommandOption(MagickCompressOptions, MagickFalse,options); if (compression < 0) break; image->compression=(CompressionType) compression; break; } if (LocaleCompare(keyword,"columns") == 0) { image->columns=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'd': case 'D': { if (LocaleCompare(keyword,"delay") == 0) { image->delay=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"depth") == 0) { image->depth=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"dispose") == 0) { ssize_t dispose; dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse, options); if (dispose < 0) break; image->dispose=(DisposeType) dispose; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'e': case 'E': { if (LocaleCompare(keyword,"endian") == 0) { ssize_t endian; endian=ParseCommandOption(MagickEndianOptions,MagickFalse, options); if (endian < 0) break; image->endian=(EndianType) endian; break; } if (LocaleCompare(keyword,"error") == 0) { 
image->error.mean_error_per_pixel=StringToDouble(options, (char **) NULL); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'g': case 'G': { if (LocaleCompare(keyword,"gamma") == 0) { image->gamma=StringToDouble(options,(char **) NULL); break; } if (LocaleCompare(keyword,"green-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y= image->chromaticity.green_primary.x; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'i': case 'I': { if (LocaleCompare(keyword,"id") == 0) { (void) CopyMagickString(id,options,MagickPathExtent); break; } if (LocaleCompare(keyword,"iterations") == 0) { image->iterations=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'm': case 'M': { if (LocaleCompare(keyword,"magick-signature") == 0) { signature=(unsigned int) StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"mattecolor") == 0) { (void) QueryColorCompliance(options,AllCompliance, &image->matte_color,exception); break; } if (LocaleCompare(keyword,"maximum-error") == 0) { image->error.normalized_maximum_error=StringToDouble( options,(char **) NULL); break; } if (LocaleCompare(keyword,"mean-error") == 0) { image->error.normalized_mean_error=StringToDouble(options, (char **) NULL); break; } if (LocaleCompare(keyword,"montage") == 0) { (void) CloneString(&image->montage,options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'n': case 'N': { if (LocaleCompare(keyword,"number-channels") == 0) { image->number_channels=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"number-meta-channels") == 0) { image->number_meta_channels=StringToUnsignedLong(options); if (image->number_meta_channels > MaxPixelChannels) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); break; } break; } case 'o': case 'O': { if (LocaleCompare(keyword,"orientation") == 0) { ssize_t orientation; orientation=ParseCommandOption(MagickOrientationOptions, MagickFalse,options); if (orientation < 0) break; image->orientation=(OrientationType) orientation; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'p': case 'P': { if (LocaleCompare(keyword,"page") == 0) { char *geometry; geometry=GetPageGeometry(options); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); break; } if (LocaleCompare(keyword,"pixel-intensity") == 0) { ssize_t intensity; intensity=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,options); if (intensity < 0) break; image->intensity=(PixelIntensityMethod) intensity; break; } if ((LocaleNCompare(keyword,"profile:",8) == 0) || (LocaleNCompare(keyword,"profile-",8) == 0)) { if (profiles == (LinkedListInfo *) NULL) profiles=NewLinkedList(0); (void) AppendValueToLinkedList(profiles, AcquireString(keyword+8)); profile=BlobToStringInfo((const void *) NULL,(size_t) StringToLong(options)); if (profile == (StringInfo *) NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); (void) SetImageProfile(image,keyword+8,profile,exception); profile=DestroyStringInfo(profile); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'q': case 'Q': { if (LocaleCompare(keyword,"quality") == 0) { image->quality=StringToUnsignedLong(options); break; 
} (void) SetImageProperty(image,keyword,options,exception); break; } case 'r': case 'R': { if (LocaleCompare(keyword,"red-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; if ((flags & SigmaValue) != 0) image->chromaticity.red_primary.y=geometry_info.sigma; break; } if (LocaleCompare(keyword,"rendering-intent") == 0) { ssize_t rendering_intent; rendering_intent=ParseCommandOption(MagickIntentOptions, MagickFalse,options); if (rendering_intent < 0) break; image->rendering_intent=(RenderingIntent) rendering_intent; break; } if (LocaleCompare(keyword,"resolution") == 0) { flags=ParseGeometry(options,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; break; } if (LocaleCompare(keyword,"rows") == 0) { image->rows=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 's': case 'S': { if (LocaleCompare(keyword,"scene") == 0) { image->scene=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 't': case 'T': { if (LocaleCompare(keyword,"ticks-per-second") == 0) { image->ticks_per_second=(ssize_t) StringToLong(options); break; } if (LocaleCompare(keyword,"tile-offset") == 0) { char *geometry; geometry=GetPageGeometry(options); (void) ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } if (LocaleCompare(keyword,"type") == 0) { ssize_t type; type=ParseCommandOption(MagickTypeOptions,MagickFalse, options); if (type < 0) break; image->type=(ImageType) type; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'u': case 'U': { if (LocaleCompare(keyword,"units") == 0) { ssize_t units; units=ParseCommandOption(MagickResolutionOptions, MagickFalse,options); if (units < 0) break; image->units=(ResolutionType) units; break; } (void) SetImageProperty(image,keyword,options,exception); break; } case 'w': case 'W': { if (LocaleCompare(keyword,"white-point") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y= image->chromaticity.white_point.x; break; } (void) SetImageProperty(image,keyword,options,exception); break; } default: { (void) SetImageProperty(image,keyword,options,exception); break; } } } else c=ReadBlobByte(image); while (isspace((int) ((unsigned char) c)) != 0) c=ReadBlobByte(image); } options=DestroyString(options); (void) ReadBlobByte(image); /* Verify that required image information is defined. */ if ((LocaleCompare(id,"MagickCache") != 0) || (image->storage_class == UndefinedClass) || (image->compression == UndefinedCompression) || (image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (signature != GetMagickSignature((const StringInfo *) NULL)) ThrowReaderException(CacheError,"IncompatibleAPI"); if (image->montage != (char *) NULL) { register char *p; /* Image directory. */ length=MagickPathExtent; image->directory=AcquireString((char *) NULL); p=image->directory; do { *p='\0'; if ((strlen(image->directory)+MagickPathExtent) >= length) { /* Allocate more memory for the image directory. 
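The directory is an open-ended, NUL-terminated list of tile names, so the buffer is doubled whenever fewer than MagickPathExtent bytes remain.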
*/ length<<=1; image->directory=(char *) ResizeQuantumMemory(image->directory, length+MagickPathExtent,sizeof(*image->directory)); if (image->directory == (char *) NULL) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); p=image->directory+strlen(image->directory); } c=ReadBlobByte(image); *p++=(char) c; } while (c != (int) '\0'); } if (profiles != (LinkedListInfo *) NULL) { const char *name; const StringInfo *profile; register unsigned char *p; /* Read image profiles. */ ResetLinkedListIterator(profiles); name=(const char *) GetNextValueInLinkedList(profiles); while (name != (const char *) NULL) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { p=GetStringInfoDatum(profile); count=ReadBlob(image,GetStringInfoLength(profile),p); } name=(const char *) GetNextValueInLinkedList(profiles); } profiles=DestroyLinkedList(profiles,RelinquishMagickMemory); } depth=GetImageQuantumDepth(image,MagickFalse); if (image->storage_class == PseudoClass) { /* Create image colormap. */ image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1, sizeof(*image->colormap)); if (image->colormap == (PixelInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->colors != 0) { size_t packet_size; unsigned char *colormap; /* Read image colormap from file. */ packet_size=(size_t) (3UL*depth/8UL); colormap=(unsigned char *) AcquireQuantumMemory(image->colors, packet_size*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,packet_size*image->colors,colormap); if (count != (ssize_t) (packet_size*image->colors)) { colormap=(unsigned char *) RelinquishMagickMemory(colormap); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } p=colormap; switch (depth) { default: colormap=(unsigned char *) RelinquishMagickMemory(colormap); ThrowReaderException(CorruptImageError, "ImageDepthNotSupported"); case 8: { unsigned char pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushCharPixel(p,&pixel); image->colormap[i].red=ScaleCharToQuantum(pixel); p=PushCharPixel(p,&pixel); image->colormap[i].green=ScaleCharToQuantum(pixel); p=PushCharPixel(p,&pixel); image->colormap[i].blue=ScaleCharToQuantum(pixel); } break; } case 16: { unsigned short pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushShortPixel(MSBEndian,p,&pixel); image->colormap[i].red=ScaleShortToQuantum(pixel); p=PushShortPixel(MSBEndian,p,&pixel); image->colormap[i].green=ScaleShortToQuantum(pixel); p=PushShortPixel(MSBEndian,p,&pixel); image->colormap[i].blue=ScaleShortToQuantum(pixel); } break; } case 32: { unsigned int pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushLongPixel(MSBEndian,p,&pixel); image->colormap[i].red=ScaleLongToQuantum(pixel); p=PushLongPixel(MSBEndian,p,&pixel); image->colormap[i].green=ScaleLongToQuantum(pixel); p=PushLongPixel(MSBEndian,p,&pixel); image->colormap[i].blue=ScaleLongToQuantum(pixel); } break; } } colormap=(unsigned char *) RelinquishMagickMemory(colormap); } } if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) || (AcquireMagickResource(HeightResource,image->rows) == MagickFalse)) 
ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit"); /* Attach persistent pixel cache. */ status=PersistPixelCache(image,cache_filename,MagickTrue,&offset,exception); if (status == MagickFalse) ThrowReaderException(CacheError,"UnableToPersistPixelCache"); /* Proceed to next image. */ do { c=ReadBlobByte(image); } while ((isgraph(c) == MagickFalse) && (c != EOF)); if (c != EOF) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (c != EOF); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r M P C I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterMPCImage() adds properties for the Cache image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterMPCImage method is: % % size_t RegisterMPCImage(void) % */ ModuleExport size_t RegisterMPCImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("MPC","CACHE", "Magick Persistent Cache image format"); entry->flags|=CoderStealthFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("MPC","MPC","Magick Persistent Cache image format"); entry->decoder=(DecodeImageHandler *) ReadMPCImage; entry->encoder=(EncodeImageHandler *) WriteMPCImage; entry->magick=(IsImageFormatHandler *) IsMPC; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r M P C I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterMPCImage() removes format registrations made by the % MPC module from the list of supported formats. % % The format of the UnregisterMPCImage method is: % % UnregisterMPCImage(void) % */ ModuleExport void UnregisterMPCImage(void) { (void) UnregisterMagickInfo("CACHE"); (void) UnregisterMagickInfo("MPC"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e M P C I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteMPCImage() writes an Magick Persistent Cache image to a file. % % The format of the WriteMPCImage method is: % % MagickBooleanType WriteMPCImage(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WriteMPCImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { char buffer[MagickPathExtent], cache_filename[MagickPathExtent]; const char *property, *value; MagickBooleanType status; MagickOffsetType offset, scene; register ssize_t i; size_t depth; /* Open persistent cache. 
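Only the image metadata is written to the ".mpc" blob opened here; the raw pixels are persisted separately to the companion ".cache" file whose name is derived below with AppendImageFormat().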
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) CopyMagickString(cache_filename,image->filename,MagickPathExtent); AppendImageFormat("cache",cache_filename); scene=0; offset=0; do { /* Write persistent cache meta-information. */ depth=GetImageQuantumDepth(image,MagickTrue); if ((image->storage_class == PseudoClass) && (image->colors > (size_t) (GetQuantumRange(image->depth)+1))) (void) SetImageStorageClass(image,DirectClass,exception); (void) WriteBlobString(image,"id=MagickCache\n"); (void) FormatLocaleString(buffer,MagickPathExtent,"magick-signature=%u\n", GetMagickSignature((const StringInfo *) NULL)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "class=%s colors=%.20g alpha-trait=%s\n",CommandOptionToMnemonic( MagickClassOptions,image->storage_class),(double) image->colors, CommandOptionToMnemonic(MagickPixelTraitOptions,(ssize_t) image->alpha_trait)); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "number-channels=%.20g number-meta-channels=%.20g\n", (double) image->number_channels,(double) image->number_meta_channels); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "columns=%.20g rows=%.20g depth=%.20g\n",(double) image->columns, (double) image->rows,(double) image->depth); (void) WriteBlobString(image,buffer); if (image->type != UndefinedType) { (void) FormatLocaleString(buffer,MagickPathExtent,"type=%s\n", CommandOptionToMnemonic(MagickTypeOptions,image->type)); (void) WriteBlobString(image,buffer); } (void) FormatLocaleString(buffer,MagickPathExtent,"colorspace=%s\n", CommandOptionToMnemonic(MagickColorspaceOptions,image->colorspace)); (void) WriteBlobString(image,buffer); if (image->intensity != UndefinedPixelIntensityMethod) { (void) FormatLocaleString(buffer,MagickPathExtent, "pixel-intensity=%s\n",CommandOptionToMnemonic( MagickPixelIntensityOptions,image->intensity)); (void) WriteBlobString(image,buffer); } if (image->endian != UndefinedEndian) { (void) FormatLocaleString(buffer,MagickPathExtent,"endian=%s\n", CommandOptionToMnemonic(MagickEndianOptions,image->endian)); (void) WriteBlobString(image,buffer); } if (image->compression != UndefinedCompression) { (void) FormatLocaleString(buffer,MagickPathExtent, "compression=%s quality=%.20g\n",CommandOptionToMnemonic( MagickCompressOptions,image->compression),(double) image->quality); (void) WriteBlobString(image,buffer); } if (image->units != UndefinedResolution) { (void) FormatLocaleString(buffer,MagickPathExtent,"units=%s\n", CommandOptionToMnemonic(MagickResolutionOptions,image->units)); (void) WriteBlobString(image,buffer); } if ((image->resolution.x != 0) || (image->resolution.y != 0)) { (void) FormatLocaleString(buffer,MagickPathExtent, "resolution=%gx%g\n",image->resolution.x,image->resolution.y); (void) WriteBlobString(image,buffer); } if ((image->page.width != 0) || (image->page.height != 0)) { (void) FormatLocaleString(buffer,MagickPathExtent, "page=%.20gx%.20g%+.20g%+.20g\n",(double) image->page.width,(double) image->page.height,(double) 
image->page.x,(double) image->page.y); (void) WriteBlobString(image,buffer); } else if ((image->page.x != 0) || (image->page.y != 0)) { (void) FormatLocaleString(buffer,MagickPathExtent,"page=%+ld%+ld\n", (long) image->page.x,(long) image->page.y); (void) WriteBlobString(image,buffer); } if ((image->tile_offset.x != 0) || (image->tile_offset.y != 0)) { (void) FormatLocaleString(buffer,MagickPathExtent, "tile-offset=%+ld%+ld\n",(long) image->tile_offset.x,(long) image->tile_offset.y); (void) WriteBlobString(image,buffer); } if ((GetNextImageInList(image) != (Image *) NULL) || (GetPreviousImageInList(image) != (Image *) NULL)) { if (image->scene == 0) (void) FormatLocaleString(buffer,MagickPathExtent, "iterations=%.20g delay=%.20g ticks-per-second=%.20g\n",(double) image->iterations,(double) image->delay,(double) image->ticks_per_second); else (void) FormatLocaleString(buffer,MagickPathExtent,"scene=%.20g " "iterations=%.20g delay=%.20g ticks-per-second=%.20g\n", (double) image->scene,(double) image->iterations,(double) image->delay,(double) image->ticks_per_second); (void) WriteBlobString(image,buffer); } else { if (image->scene != 0) { (void) FormatLocaleString(buffer,MagickPathExtent,"scene=%.20g\n", (double) image->scene); (void) WriteBlobString(image,buffer); } if (image->iterations != 0) { (void) FormatLocaleString(buffer,MagickPathExtent, "iterations=%.20g\n",(double) image->iterations); (void) WriteBlobString(image,buffer); } if (image->delay != 0) { (void) FormatLocaleString(buffer,MagickPathExtent,"delay=%.20g\n", (double) image->delay); (void) WriteBlobString(image,buffer); } if (image->ticks_per_second != UndefinedTicksPerSecond) { (void) FormatLocaleString(buffer,MagickPathExtent, "ticks-per-second=%.20g\n",(double) image->ticks_per_second); (void) WriteBlobString(image,buffer); } } if (image->gravity != UndefinedGravity) { (void) FormatLocaleString(buffer,MagickPathExtent,"gravity=%s\n", CommandOptionToMnemonic(MagickGravityOptions,image->gravity)); (void) WriteBlobString(image,buffer); } if (image->dispose != UndefinedDispose) { (void) FormatLocaleString(buffer,MagickPathExtent,"dispose=%s\n", CommandOptionToMnemonic(MagickDisposeOptions,image->dispose)); (void) WriteBlobString(image,buffer); } if (image->rendering_intent != UndefinedIntent) { (void) FormatLocaleString(buffer,MagickPathExtent, "rendering-intent=%s\n",CommandOptionToMnemonic(MagickIntentOptions, image->rendering_intent)); (void) WriteBlobString(image,buffer); } if (image->gamma != 0.0) { (void) FormatLocaleString(buffer,MagickPathExtent,"gamma=%g\n", image->gamma); (void) WriteBlobString(image,buffer); } if (image->chromaticity.white_point.x != 0.0) { /* Note chromaticity points. 
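Primaries and the white point are recorded only when a white point has been set; a zero white point is treated as unset.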
*/ (void) FormatLocaleString(buffer,MagickPathExtent,"red-primary=" "%g,%g green-primary=%g,%g blue-primary=%g,%g\n", image->chromaticity.red_primary.x,image->chromaticity.red_primary.y, image->chromaticity.green_primary.x, image->chromaticity.green_primary.y, image->chromaticity.blue_primary.x, image->chromaticity.blue_primary.y); (void) WriteBlobString(image,buffer); (void) FormatLocaleString(buffer,MagickPathExtent, "white-point=%g,%g\n",image->chromaticity.white_point.x, image->chromaticity.white_point.y); (void) WriteBlobString(image,buffer); } if (image->orientation != UndefinedOrientation) { (void) FormatLocaleString(buffer,MagickPathExtent, "orientation=%s\n",CommandOptionToMnemonic(MagickOrientationOptions, image->orientation)); (void) WriteBlobString(image,buffer); } if (image->profiles != (void *) NULL) { const char *name; const StringInfo *profile; /* Generic profile. */ ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { (void) FormatLocaleString(buffer,MagickPathExtent, "profile:%s=%.20g\n",name,(double) GetStringInfoLength(profile)); (void) WriteBlobString(image,buffer); } name=GetNextImageProfile(image); } } if (image->montage != (char *) NULL) { (void) FormatLocaleString(buffer,MagickPathExtent,"montage=%s\n", image->montage); (void) WriteBlobString(image,buffer); } ResetImagePropertyIterator(image); property=GetNextImageProperty(image); while (property != (const char *) NULL) { (void) FormatLocaleString(buffer,MagickPathExtent,"%s=",property); (void) WriteBlobString(image,buffer); value=GetImageProperty(image,property,exception); if (value != (const char *) NULL) { size_t length; length=strlen(value); for (i=0; i < (ssize_t) length; i++) if (isspace((int) ((unsigned char) value[i])) != 0) break; if ((i == (ssize_t) length) && (i != 0)) (void) WriteBlob(image,length,(const unsigned char *) value); else { (void) WriteBlobByte(image,'{'); if (strchr(value,'}') == (char *) NULL) (void) WriteBlob(image,length,(const unsigned char *) value); else for (i=0; i < (ssize_t) length; i++) { if (value[i] == (int) '}') (void) WriteBlobByte(image,'\\'); (void) WriteBlobByte(image,value[i]); } (void) WriteBlobByte(image,'}'); } } (void) WriteBlobByte(image,'\n'); property=GetNextImageProperty(image); } (void) WriteBlobString(image,"\f\n:\032"); if (image->montage != (char *) NULL) { /* Write montage tile directory. */ if (image->directory != (char *) NULL) (void) WriteBlobString(image,image->directory); (void) WriteBlobByte(image,'\0'); } if (image->profiles != 0) { const char *name; const StringInfo *profile; /* Write image profiles. */ ResetImageProfileIterator(image); name=GetNextImageProfile(image); while (name != (const char *) NULL) { profile=GetImageProfile(image,name); (void) WriteBlob(image,GetStringInfoLength(profile), GetStringInfoDatum(profile)); name=GetNextImageProfile(image); } } if (image->storage_class == PseudoClass) { size_t packet_size; unsigned char *colormap, *q; /* Allocate colormap. */ packet_size=(size_t) (3UL*depth/8UL); colormap=(unsigned char *) AcquireQuantumMemory(image->colors, packet_size*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) return(MagickFalse); /* Write colormap to file. 
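Each entry is packed big-endian at the image quantum depth (8, 16, or 32 bits per channel), mirroring the layout the reader above unpacks with PushCharPixel(), PushShortPixel(), and PushLongPixel().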
*/ q=colormap; for (i=0; i < (ssize_t) image->colors; i++) { switch (depth) { default: ThrowWriterException(CorruptImageError,"ImageDepthNotSupported"); case 32: { unsigned int pixel; pixel=ScaleQuantumToLong(image->colormap[i].red); q=PopLongPixel(MSBEndian,pixel,q); pixel=ScaleQuantumToLong(image->colormap[i].green); q=PopLongPixel(MSBEndian,pixel,q); pixel=ScaleQuantumToLong(image->colormap[i].blue); q=PopLongPixel(MSBEndian,pixel,q); break; } case 16: { unsigned short pixel; pixel=ScaleQuantumToShort(image->colormap[i].red); q=PopShortPixel(MSBEndian,pixel,q); pixel=ScaleQuantumToShort(image->colormap[i].green); q=PopShortPixel(MSBEndian,pixel,q); pixel=ScaleQuantumToShort(image->colormap[i].blue); q=PopShortPixel(MSBEndian,pixel,q); break; } case 8: { unsigned char pixel; pixel=(unsigned char) ScaleQuantumToChar(image->colormap[i].red); q=PopCharPixel(pixel,q); pixel=(unsigned char) ScaleQuantumToChar( image->colormap[i].green); q=PopCharPixel(pixel,q); pixel=(unsigned char) ScaleQuantumToChar(image->colormap[i].blue); q=PopCharPixel(pixel,q); break; } } } (void) WriteBlob(image,packet_size*image->colors,colormap); colormap=(unsigned char *) RelinquishMagickMemory(colormap); } /* Initialize persistent pixel cache. */ status=PersistPixelCache(image,cache_filename,MagickFalse,&offset, exception); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { status=image->progress_monitor(SaveImagesTag,scene, GetImageListLength(image),image->client_data); if (status == MagickFalse) break; } scene++; } while (image_info->adjoin != MagickFalse); (void) CloseBlob(image); return(status); }
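/*
  Editor's illustrative sketch (not part of the coder; "image.mpc" is a
  placeholder name): because MPC persists its pixels in the side ".cache"
  file, reading the image back is an ordinary ReadImage() call and the
  pixel cache is re-attached by PersistPixelCache() rather than decoded:

    ImageInfo *image_info = AcquireImageInfo();
    ExceptionInfo *exception = AcquireExceptionInfo();
    (void) CopyMagickString(image_info->filename, "image.mpc",
      MagickPathExtent);
    Image *image = ReadImage(image_info, exception);
*/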
./CrossVul/dataset_final_sorted/CWE-617/c/good_3397_0
crossvul-cpp_data_good_2524_0
/* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include "irq.h" #include "mmu.h" #include "cpuid.h" #include "lapic.h" #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include <linux/mod_devicetable.h> #include <linux/trace_events.h> #include <linux/slab.h> #include <linux/tboot.h> #include <linux/hrtimer.h> #include <linux/frame.h> #include "kvm_cache_regs.h" #include "x86.h" #include <asm/cpu.h> #include <asm/io.h> #include <asm/desc.h> #include <asm/vmx.h> #include <asm/virtext.h> #include <asm/mce.h> #include <asm/fpu/internal.h> #include <asm/perf_event.h> #include <asm/debugreg.h> #include <asm/kexec.h> #include <asm/apic.h> #include <asm/irq_remapping.h> #include <asm/mmu_context.h> #include "trace.h" #include "pmu.h" #define __ex(x) __kvm_handle_fault_on_reboot(x) #define __ex_clear(x, reg) \ ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); static const struct x86_cpu_id vmx_cpu_id[] = { X86_FEATURE_MATCH(X86_FEATURE_VMX), {} }; MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id); static bool __read_mostly enable_vpid = 1; module_param_named(vpid, enable_vpid, bool, 0444); static bool __read_mostly flexpriority_enabled = 1; module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); static bool __read_mostly enable_ept = 1; module_param_named(ept, enable_ept, bool, S_IRUGO); static bool __read_mostly enable_unrestricted_guest = 1; module_param_named(unrestricted_guest, enable_unrestricted_guest, bool, S_IRUGO); static bool __read_mostly enable_ept_ad_bits = 1; module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO); static bool __read_mostly emulate_invalid_guest_state = true; module_param(emulate_invalid_guest_state, bool, S_IRUGO); static bool __read_mostly fasteoi = 1; module_param(fasteoi, bool, S_IRUGO); static bool __read_mostly enable_apicv = 1; module_param(enable_apicv, bool, S_IRUGO); static bool __read_mostly enable_shadow_vmcs = 1; module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); /* * If nested=1, nested virtualization is supported, i.e., guests may use * VMX and be a hypervisor for its own guests. If nested=0, guests may not * use VMX instructions. */ static bool __read_mostly nested = 0; module_param(nested, bool, S_IRUGO); static u64 __read_mostly host_xss; static bool __read_mostly enable_pml = 1; module_param_named(pml, enable_pml, bool, S_IRUGO); #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL /* Guest_tsc -> host_tsc conversion requires 64-bit division. 
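On a 32-bit host that division is not available, which is why the preemption_timer module parameter is only exposed under CONFIG_X86_64 below.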
*/ static int __read_mostly cpu_preemption_timer_multi; static bool __read_mostly enable_preemption_timer = 1; #ifdef CONFIG_X86_64 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); #endif #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE) #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) #define KVM_CR4_GUEST_OWNED_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD) #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 /* * Hyper-V requires all of these, so mark them as supported even though * they are just treated the same as all-context. */ #define VMX_VPID_EXTENT_SUPPORTED_MASK \ (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \ VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \ VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT) /* * These 2 parameters are used to config the controls for Pause-Loop Exiting: * ple_gap: upper bound on the amount of time between two successive * executions of PAUSE in a loop. Also indicate if ple enabled. * According to test, this time is usually smaller than 128 cycles. * ple_window: upper bound on the amount of time a guest is allowed to execute * in a PAUSE loop. Tests indicate that most spinlocks are held for * less than 2^12 cycles * Time is measured based on a counter that runs at the same rate as the TSC, * refer SDM volume 3b section 21.6.13 & 22.1.3. */ #define KVM_VMX_DEFAULT_PLE_GAP 128 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \ INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP; module_param(ple_gap, int, S_IRUGO); static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; module_param(ple_window, int, S_IRUGO); /* Default doubles per-vcpu window every exit. */ static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW; module_param(ple_window_grow, int, S_IRUGO); /* Default resets per-vcpu window every exit to ple_window. */ static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK; module_param(ple_window_shrink, int, S_IRUGO); /* Default is to compute the maximum so we can never overflow. */ static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; module_param(ple_window_max, int, S_IRUGO); extern const ulong vmx_return; #define NR_AUTOLOAD_MSRS 8 #define VMCS02_POOL_SIZE 1 struct vmcs { u32 revision_id; u32 abort; char data[0]; }; /* * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs * loaded on this CPU (so we can clear them if the CPU goes down). */ struct loaded_vmcs { struct vmcs *vmcs; struct vmcs *shadow_vmcs; int cpu; bool launched; bool nmi_known_unmasked; struct list_head loaded_vmcss_on_cpu_link; }; struct shared_msr_entry { unsigned index; u64 data; u64 mask; }; /* * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a * single nested guest (L2), hence the name vmcs12. 
Any VMX implementation has * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is * stored in guest memory specified by VMPTRLD, but is opaque to the guest, * which must access it using VMREAD/VMWRITE/VMCLEAR instructions. * More than one of these structures may exist, if L1 runs multiple L2 guests. * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the * underlying hardware which will be used to run L2. * This structure is packed to ensure that its layout is identical across * machines (necessary for live migration). * If there are changes in this struct, VMCS12_REVISION must be changed. */ typedef u64 natural_width; struct __packed vmcs12 { /* According to the Intel spec, a VMCS region must start with the * following two fields. Then follow implementation-specific data. */ u32 revision_id; u32 abort; u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ u32 padding[7]; /* room for future expansion */ u64 io_bitmap_a; u64 io_bitmap_b; u64 msr_bitmap; u64 vm_exit_msr_store_addr; u64 vm_exit_msr_load_addr; u64 vm_entry_msr_load_addr; u64 tsc_offset; u64 virtual_apic_page_addr; u64 apic_access_addr; u64 posted_intr_desc_addr; u64 vm_function_control; u64 ept_pointer; u64 eoi_exit_bitmap0; u64 eoi_exit_bitmap1; u64 eoi_exit_bitmap2; u64 eoi_exit_bitmap3; u64 eptp_list_address; u64 xss_exit_bitmap; u64 guest_physical_address; u64 vmcs_link_pointer; u64 pml_address; u64 guest_ia32_debugctl; u64 guest_ia32_pat; u64 guest_ia32_efer; u64 guest_ia32_perf_global_ctrl; u64 guest_pdptr0; u64 guest_pdptr1; u64 guest_pdptr2; u64 guest_pdptr3; u64 guest_bndcfgs; u64 host_ia32_pat; u64 host_ia32_efer; u64 host_ia32_perf_global_ctrl; u64 padding64[8]; /* room for future expansion */ /* * To allow migration of L1 (complete with its L2 guests) between * machines of different natural widths (32 or 64 bit), we cannot have * unsigned long fields with no explicit size. We use u64 (aliased * natural_width) instead. Luckily, x86 is little-endian. 
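* (Little-endian layout keeps the low-order 32 bits of each u64 at the field's base offset, so 32-bit natural-width accesses still land on the significant half of the value.)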
*/ natural_width cr0_guest_host_mask; natural_width cr4_guest_host_mask; natural_width cr0_read_shadow; natural_width cr4_read_shadow; natural_width cr3_target_value0; natural_width cr3_target_value1; natural_width cr3_target_value2; natural_width cr3_target_value3; natural_width exit_qualification; natural_width guest_linear_address; natural_width guest_cr0; natural_width guest_cr3; natural_width guest_cr4; natural_width guest_es_base; natural_width guest_cs_base; natural_width guest_ss_base; natural_width guest_ds_base; natural_width guest_fs_base; natural_width guest_gs_base; natural_width guest_ldtr_base; natural_width guest_tr_base; natural_width guest_gdtr_base; natural_width guest_idtr_base; natural_width guest_dr7; natural_width guest_rsp; natural_width guest_rip; natural_width guest_rflags; natural_width guest_pending_dbg_exceptions; natural_width guest_sysenter_esp; natural_width guest_sysenter_eip; natural_width host_cr0; natural_width host_cr3; natural_width host_cr4; natural_width host_fs_base; natural_width host_gs_base; natural_width host_tr_base; natural_width host_gdtr_base; natural_width host_idtr_base; natural_width host_ia32_sysenter_esp; natural_width host_ia32_sysenter_eip; natural_width host_rsp; natural_width host_rip; natural_width paddingl[8]; /* room for future expansion */ u32 pin_based_vm_exec_control; u32 cpu_based_vm_exec_control; u32 exception_bitmap; u32 page_fault_error_code_mask; u32 page_fault_error_code_match; u32 cr3_target_count; u32 vm_exit_controls; u32 vm_exit_msr_store_count; u32 vm_exit_msr_load_count; u32 vm_entry_controls; u32 vm_entry_msr_load_count; u32 vm_entry_intr_info_field; u32 vm_entry_exception_error_code; u32 vm_entry_instruction_len; u32 tpr_threshold; u32 secondary_vm_exec_control; u32 vm_instruction_error; u32 vm_exit_reason; u32 vm_exit_intr_info; u32 vm_exit_intr_error_code; u32 idt_vectoring_info_field; u32 idt_vectoring_error_code; u32 vm_exit_instruction_len; u32 vmx_instruction_info; u32 guest_es_limit; u32 guest_cs_limit; u32 guest_ss_limit; u32 guest_ds_limit; u32 guest_fs_limit; u32 guest_gs_limit; u32 guest_ldtr_limit; u32 guest_tr_limit; u32 guest_gdtr_limit; u32 guest_idtr_limit; u32 guest_es_ar_bytes; u32 guest_cs_ar_bytes; u32 guest_ss_ar_bytes; u32 guest_ds_ar_bytes; u32 guest_fs_ar_bytes; u32 guest_gs_ar_bytes; u32 guest_ldtr_ar_bytes; u32 guest_tr_ar_bytes; u32 guest_interruptibility_info; u32 guest_activity_state; u32 guest_sysenter_cs; u32 host_ia32_sysenter_cs; u32 vmx_preemption_timer_value; u32 padding32[7]; /* room for future expansion */ u16 virtual_processor_id; u16 posted_intr_nv; u16 guest_es_selector; u16 guest_cs_selector; u16 guest_ss_selector; u16 guest_ds_selector; u16 guest_fs_selector; u16 guest_gs_selector; u16 guest_ldtr_selector; u16 guest_tr_selector; u16 guest_intr_status; u16 guest_pml_index; u16 host_es_selector; u16 host_cs_selector; u16 host_ss_selector; u16 host_ds_selector; u16 host_fs_selector; u16 host_gs_selector; u16 host_tr_selector; }; /* * VMCS12_REVISION is an arbitrary id that should be changed if the content or * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and * VMPTRLD verifies that the VMCS region that L1 is loading contains this id. */ #define VMCS12_REVISION 0x11e57ed0 /* * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region * and any VMCS region. Although only sizeof(struct vmcs12) are used by the * current implementation, 4K are reserved to avoid future complications. 
*/ #define VMCS12_SIZE 0x1000 /* Used to remember the last vmcs02 used for some recently used vmcs12s */ struct vmcs02_list { struct list_head list; gpa_t vmptr; struct loaded_vmcs vmcs02; }; /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. */ struct nested_vmx { /* Has the level1 guest done vmxon? */ bool vmxon; gpa_t vmxon_ptr; bool pml_full; /* The guest-physical address of the current VMCS L1 keeps for L2 */ gpa_t current_vmptr; /* * Cache of the guest's VMCS, existing outside of guest memory. * Loaded from guest memory during VMPTRLD. Flushed to guest * memory during VMCLEAR and VMPTRLD. */ struct vmcs12 *cached_vmcs12; /* * Indicates if the shadow vmcs must be updated with the * data hold by vmcs12 */ bool sync_shadow_vmcs; /* vmcs02_list cache of VMCSs recently used to run L2 guests */ struct list_head vmcs02_pool; int vmcs02_num; bool change_vmcs01_virtual_x2apic_mode; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; /* * Guest pages referred to in vmcs02 with host-physical pointers, so * we must keep them pinned while L2 runs. */ struct page *apic_access_page; struct page *virtual_apic_page; struct page *pi_desc_page; struct pi_desc *pi_desc; bool pi_pending; u16 posted_intr_nv; unsigned long *msr_bitmap; struct hrtimer preemption_timer; bool preemption_timer_expired; /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */ u64 vmcs01_debugctl; u16 vpid02; u16 last_vpid; /* * We only store the "true" versions of the VMX capability MSRs. We * generate the "non-true" versions by setting the must-be-1 bits * according to the SDM. */ u32 nested_vmx_procbased_ctls_low; u32 nested_vmx_procbased_ctls_high; u32 nested_vmx_secondary_ctls_low; u32 nested_vmx_secondary_ctls_high; u32 nested_vmx_pinbased_ctls_low; u32 nested_vmx_pinbased_ctls_high; u32 nested_vmx_exit_ctls_low; u32 nested_vmx_exit_ctls_high; u32 nested_vmx_entry_ctls_low; u32 nested_vmx_entry_ctls_high; u32 nested_vmx_misc_low; u32 nested_vmx_misc_high; u32 nested_vmx_ept_caps; u32 nested_vmx_vpid_caps; u64 nested_vmx_basic; u64 nested_vmx_cr0_fixed0; u64 nested_vmx_cr0_fixed1; u64 nested_vmx_cr4_fixed0; u64 nested_vmx_cr4_fixed1; u64 nested_vmx_vmcs_enum; u64 nested_vmx_vmfunc_controls; }; #define POSTED_INTR_ON 0 #define POSTED_INTR_SN 1 /* Posted-Interrupt Descriptor */ struct pi_desc { u32 pir[8]; /* Posted interrupt requested */ union { struct { /* bit 256 - Outstanding Notification */ u16 on : 1, /* bit 257 - Suppress Notification */ sn : 1, /* bit 271:258 - Reserved */ rsvd_1 : 14; /* bit 279:272 - Notification Vector */ u8 nv; /* bit 287:280 - Reserved */ u8 rsvd_2; /* bit 319:288 - Notification Destination */ u32 ndst; }; u64 control; }; u32 rsvd[6]; } __aligned(64); static bool pi_test_and_set_on(struct pi_desc *pi_desc) { return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static bool pi_test_and_clear_on(struct pi_desc *pi_desc) { return test_and_clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) { return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); } static inline void pi_clear_sn(struct pi_desc *pi_desc) { return clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } static inline void pi_set_sn(struct pi_desc *pi_desc) { return set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } static inline void pi_clear_on(struct pi_desc *pi_desc) { 
clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static inline int pi_test_on(struct pi_desc *pi_desc) { return test_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control); } static inline int pi_test_sn(struct pi_desc *pi_desc) { return test_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } struct vcpu_vmx { struct kvm_vcpu vcpu; unsigned long host_rsp; u8 fail; u32 exit_intr_info; u32 idt_vectoring_info; ulong rflags; struct shared_msr_entry *guest_msrs; int nmsrs; int save_nmsrs; unsigned long host_idt_base; #ifdef CONFIG_X86_64 u64 msr_host_kernel_gs_base; u64 msr_guest_kernel_gs_base; #endif u32 vm_entry_controls_shadow; u32 vm_exit_controls_shadow; u32 secondary_exec_control; /* * loaded_vmcs points to the VMCS currently used in this vcpu. For a * non-nested (L1) guest, it always points to vmcs01. For a nested * guest (L2), it points to a different VMCS. */ struct loaded_vmcs vmcs01; struct loaded_vmcs *loaded_vmcs; bool __launched; /* temporary, used in vmx_vcpu_run */ struct msr_autoload { unsigned nr; struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; struct vmx_msr_entry host[NR_AUTOLOAD_MSRS]; } msr_autoload; struct { int loaded; u16 fs_sel, gs_sel, ldt_sel; #ifdef CONFIG_X86_64 u16 ds_sel, es_sel; #endif int gs_ldt_reload_needed; int fs_reload_needed; u64 msr_host_bndcfgs; unsigned long vmcs_host_cr3; /* May not match real cr3 */ unsigned long vmcs_host_cr4; /* May not match real cr4 */ } host_state; struct { int vm86_active; ulong save_rflags; struct kvm_segment segs[8]; } rmode; struct { u32 bitmask; /* 4 bits per segment (1 bit per field) */ struct kvm_save_segment { u16 selector; unsigned long base; u32 limit; u32 ar; } seg[8]; } segment_cache; int vpid; bool emulation_required; u32 exit_reason; /* Posted interrupt descriptor */ struct pi_desc pi_desc; /* Support for a guest hypervisor (nested VMX) */ struct nested_vmx nested; /* Dynamic PLE window. */ int ple_window; bool ple_window_dirty; /* Support for PML */ #define PML_ENTITY_NUM 512 struct page *pml_pg; /* apic deadline value in host tsc */ u64 hv_deadline_tsc; u64 current_tsc_ratio; u32 host_pkru; /* * Only bits masked by msr_ia32_feature_control_valid_bits can be set in * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included * in msr_ia32_feature_control_valid_bits. */ u64 msr_ia32_feature_control; u64 msr_ia32_feature_control_valid_bits; }; enum segment_cache_field { SEG_FIELD_SEL = 0, SEG_FIELD_BASE = 1, SEG_FIELD_LIMIT = 2, SEG_FIELD_AR = 3, SEG_FIELD_NR = 4 }; static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) { return container_of(vcpu, struct vcpu_vmx, vcpu); } static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) { return &(to_vmx(vcpu)->pi_desc); } #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x) #define FIELD(number, name) [number] = VMCS12_OFFSET(name) #define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \ [number##_HIGH] = VMCS12_OFFSET(name)+4 static unsigned long shadow_read_only_fields[] = { /* * We do NOT shadow fields that are modified when L0 * traps and emulates any vmx instruction (e.g. VMPTRLD, * VMXON...) executed by L1. * For example, VM_INSTRUCTION_ERROR is read * by L1 if a vmx instruction fails (part of the error path). * Note the code assumes this logic. If for some reason * we start shadowing these fields then we need to * force a shadow sync when L0 emulates vmx instructions * (e.g. 
force a sync if VM_INSTRUCTION_ERROR is modified * by nested_vmx_failValid) */ VM_EXIT_REASON, VM_EXIT_INTR_INFO, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_INFO_FIELD, IDT_VECTORING_ERROR_CODE, VM_EXIT_INTR_ERROR_CODE, EXIT_QUALIFICATION, GUEST_LINEAR_ADDRESS, GUEST_PHYSICAL_ADDRESS }; static int max_shadow_read_only_fields = ARRAY_SIZE(shadow_read_only_fields); static unsigned long shadow_read_write_fields[] = { TPR_THRESHOLD, GUEST_RIP, GUEST_RSP, GUEST_CR0, GUEST_CR3, GUEST_CR4, GUEST_INTERRUPTIBILITY_INFO, GUEST_RFLAGS, GUEST_CS_SELECTOR, GUEST_CS_AR_BYTES, GUEST_CS_LIMIT, GUEST_CS_BASE, GUEST_ES_BASE, GUEST_BNDCFGS, CR0_GUEST_HOST_MASK, CR0_READ_SHADOW, CR4_READ_SHADOW, TSC_OFFSET, EXCEPTION_BITMAP, CPU_BASED_VM_EXEC_CONTROL, VM_ENTRY_EXCEPTION_ERROR_CODE, VM_ENTRY_INTR_INFO_FIELD, VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE, HOST_FS_BASE, HOST_GS_BASE, HOST_FS_SELECTOR, HOST_GS_SELECTOR }; static int max_shadow_read_write_fields = ARRAY_SIZE(shadow_read_write_fields); static const unsigned short vmcs_field_to_offset_table[] = { FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id), FIELD(POSTED_INTR_NV, posted_intr_nv), FIELD(GUEST_ES_SELECTOR, guest_es_selector), FIELD(GUEST_CS_SELECTOR, guest_cs_selector), FIELD(GUEST_SS_SELECTOR, guest_ss_selector), FIELD(GUEST_DS_SELECTOR, guest_ds_selector), FIELD(GUEST_FS_SELECTOR, guest_fs_selector), FIELD(GUEST_GS_SELECTOR, guest_gs_selector), FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector), FIELD(GUEST_TR_SELECTOR, guest_tr_selector), FIELD(GUEST_INTR_STATUS, guest_intr_status), FIELD(GUEST_PML_INDEX, guest_pml_index), FIELD(HOST_ES_SELECTOR, host_es_selector), FIELD(HOST_CS_SELECTOR, host_cs_selector), FIELD(HOST_SS_SELECTOR, host_ss_selector), FIELD(HOST_DS_SELECTOR, host_ds_selector), FIELD(HOST_FS_SELECTOR, host_fs_selector), FIELD(HOST_GS_SELECTOR, host_gs_selector), FIELD(HOST_TR_SELECTOR, host_tr_selector), FIELD64(IO_BITMAP_A, io_bitmap_a), FIELD64(IO_BITMAP_B, io_bitmap_b), FIELD64(MSR_BITMAP, msr_bitmap), FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr), FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr), FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr), FIELD64(TSC_OFFSET, tsc_offset), FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr), FIELD64(APIC_ACCESS_ADDR, apic_access_addr), FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr), FIELD64(VM_FUNCTION_CONTROL, vm_function_control), FIELD64(EPT_POINTER, ept_pointer), FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0), FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1), FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2), FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3), FIELD64(EPTP_LIST_ADDRESS, eptp_list_address), FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap), FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), FIELD64(PML_ADDRESS, pml_address), FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), FIELD64(GUEST_IA32_PAT, guest_ia32_pat), FIELD64(GUEST_IA32_EFER, guest_ia32_efer), FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl), FIELD64(GUEST_PDPTR0, guest_pdptr0), FIELD64(GUEST_PDPTR1, guest_pdptr1), FIELD64(GUEST_PDPTR2, guest_pdptr2), FIELD64(GUEST_PDPTR3, guest_pdptr3), FIELD64(GUEST_BNDCFGS, guest_bndcfgs), FIELD64(HOST_IA32_PAT, host_ia32_pat), FIELD64(HOST_IA32_EFER, host_ia32_efer), FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl), FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control), FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control), FIELD(EXCEPTION_BITMAP, 
exception_bitmap), FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask), FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match), FIELD(CR3_TARGET_COUNT, cr3_target_count), FIELD(VM_EXIT_CONTROLS, vm_exit_controls), FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count), FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count), FIELD(VM_ENTRY_CONTROLS, vm_entry_controls), FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count), FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field), FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code), FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len), FIELD(TPR_THRESHOLD, tpr_threshold), FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control), FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error), FIELD(VM_EXIT_REASON, vm_exit_reason), FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info), FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code), FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field), FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code), FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len), FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info), FIELD(GUEST_ES_LIMIT, guest_es_limit), FIELD(GUEST_CS_LIMIT, guest_cs_limit), FIELD(GUEST_SS_LIMIT, guest_ss_limit), FIELD(GUEST_DS_LIMIT, guest_ds_limit), FIELD(GUEST_FS_LIMIT, guest_fs_limit), FIELD(GUEST_GS_LIMIT, guest_gs_limit), FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit), FIELD(GUEST_TR_LIMIT, guest_tr_limit), FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit), FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit), FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes), FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes), FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes), FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes), FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes), FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes), FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes), FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes), FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info), FIELD(GUEST_ACTIVITY_STATE, guest_activity_state), FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs), FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs), FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value), FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask), FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask), FIELD(CR0_READ_SHADOW, cr0_read_shadow), FIELD(CR4_READ_SHADOW, cr4_read_shadow), FIELD(CR3_TARGET_VALUE0, cr3_target_value0), FIELD(CR3_TARGET_VALUE1, cr3_target_value1), FIELD(CR3_TARGET_VALUE2, cr3_target_value2), FIELD(CR3_TARGET_VALUE3, cr3_target_value3), FIELD(EXIT_QUALIFICATION, exit_qualification), FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address), FIELD(GUEST_CR0, guest_cr0), FIELD(GUEST_CR3, guest_cr3), FIELD(GUEST_CR4, guest_cr4), FIELD(GUEST_ES_BASE, guest_es_base), FIELD(GUEST_CS_BASE, guest_cs_base), FIELD(GUEST_SS_BASE, guest_ss_base), FIELD(GUEST_DS_BASE, guest_ds_base), FIELD(GUEST_FS_BASE, guest_fs_base), FIELD(GUEST_GS_BASE, guest_gs_base), FIELD(GUEST_LDTR_BASE, guest_ldtr_base), FIELD(GUEST_TR_BASE, guest_tr_base), FIELD(GUEST_GDTR_BASE, guest_gdtr_base), FIELD(GUEST_IDTR_BASE, guest_idtr_base), FIELD(GUEST_DR7, guest_dr7), FIELD(GUEST_RSP, guest_rsp), FIELD(GUEST_RIP, guest_rip), FIELD(GUEST_RFLAGS, guest_rflags), FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions), FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp), FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip), FIELD(HOST_CR0, host_cr0), FIELD(HOST_CR3, host_cr3), FIELD(HOST_CR4, host_cr4), FIELD(HOST_FS_BASE, host_fs_base), FIELD(HOST_GS_BASE, host_gs_base), 
FIELD(HOST_TR_BASE, host_tr_base), FIELD(HOST_GDTR_BASE, host_gdtr_base), FIELD(HOST_IDTR_BASE, host_idtr_base), FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp), FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip), FIELD(HOST_RSP, host_rsp), FIELD(HOST_RIP, host_rip), }; static inline short vmcs_field_to_offset(unsigned long field) { BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || vmcs_field_to_offset_table[field] == 0) return -ENOENT; return vmcs_field_to_offset_table[field]; } static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) { return to_vmx(vcpu)->nested.cached_vmcs12; } static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu); static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa); static bool vmx_xsaves_supported(void); static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); static bool guest_state_valid(struct kvm_vcpu *vcpu); static u32 vmx_segment_access_rights(struct kvm_segment *var); static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx); static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); static int alloc_identity_pagetable(struct kvm *kvm); static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu); static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); /* * We maintain a per-CPU linked-list of VMCSs loaded on that CPU. This is needed * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it. */ static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu); /* * We maintain a per-CPU linked-list of vCPUs, so in wakeup_handler() we * can find which vCPU should be woken up. 
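 *
 * (Illustrative note, not part of the original comment: a vCPU that
 * blocks while posted interrupts are in use parks itself on the list of
 * the CPU it blocked on and switches its notification vector to
 * POSTED_INTR_WAKEUP_VECTOR, so the handler for that vector only has to
 * walk its own CPU's list to find the vCPU to wake.)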
*/ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); enum { VMX_IO_BITMAP_A, VMX_IO_BITMAP_B, VMX_MSR_BITMAP_LEGACY, VMX_MSR_BITMAP_LONGMODE, VMX_MSR_BITMAP_LEGACY_X2APIC_APICV, VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV, VMX_MSR_BITMAP_LEGACY_X2APIC, VMX_MSR_BITMAP_LONGMODE_X2APIC, VMX_VMREAD_BITMAP, VMX_VMWRITE_BITMAP, VMX_BITMAP_NR }; static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; #define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A]) #define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B]) #define vmx_msr_bitmap_legacy (vmx_bitmap[VMX_MSR_BITMAP_LEGACY]) #define vmx_msr_bitmap_longmode (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE]) #define vmx_msr_bitmap_legacy_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV]) #define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV]) #define vmx_msr_bitmap_legacy_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC]) #define vmx_msr_bitmap_longmode_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC]) #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) static bool cpu_has_load_ia32_efer; static bool cpu_has_load_perf_global_ctrl; static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); static DEFINE_SPINLOCK(vmx_vpid_lock); static struct vmcs_config { int size; int order; u32 basic_cap; u32 revision_id; u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; } vmcs_config; static struct vmx_capability { u32 ept; u32 vpid; } vmx_capability; #define VMX_SEGMENT_FIELD(seg) \ [VCPU_SREG_##seg] = { \ .selector = GUEST_##seg##_SELECTOR, \ .base = GUEST_##seg##_BASE, \ .limit = GUEST_##seg##_LIMIT, \ .ar_bytes = GUEST_##seg##_AR_BYTES, \ } static const struct kvm_vmx_segment_field { unsigned selector; unsigned base; unsigned limit; unsigned ar_bytes; } kvm_vmx_segment_fields[] = { VMX_SEGMENT_FIELD(CS), VMX_SEGMENT_FIELD(DS), VMX_SEGMENT_FIELD(ES), VMX_SEGMENT_FIELD(FS), VMX_SEGMENT_FIELD(GS), VMX_SEGMENT_FIELD(SS), VMX_SEGMENT_FIELD(TR), VMX_SEGMENT_FIELD(LDTR), }; static u64 host_efer; static void ept_save_pdptrs(struct kvm_vcpu *vcpu); /* * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it * away by decrementing the array size. 
*/ static const u32 vmx_msr_index[] = { #ifdef CONFIG_X86_64 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, #endif MSR_EFER, MSR_TSC_AUX, MSR_STAR, }; static inline bool is_exception_n(u32 intr_info, u8 vector) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK); } static inline bool is_debug(u32 intr_info) { return is_exception_n(intr_info, DB_VECTOR); } static inline bool is_breakpoint(u32 intr_info) { return is_exception_n(intr_info, BP_VECTOR); } static inline bool is_page_fault(u32 intr_info) { return is_exception_n(intr_info, PF_VECTOR); } static inline bool is_no_device(u32 intr_info) { return is_exception_n(intr_info, NM_VECTOR); } static inline bool is_invalid_opcode(u32 intr_info) { return is_exception_n(intr_info, UD_VECTOR); } static inline bool is_external_interrupt(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); } static inline bool is_machine_check(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); } static inline bool cpu_has_vmx_msr_bitmap(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; } static inline bool cpu_has_vmx_tpr_shadow(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW; } static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu) { return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu); } static inline bool cpu_has_secondary_exec_ctrls(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; } static inline bool cpu_has_vmx_virtualize_apic_accesses(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; } static inline bool cpu_has_vmx_virtualize_x2apic_mode(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; } static inline bool cpu_has_vmx_apic_register_virt(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_APIC_REGISTER_VIRT; } static inline bool cpu_has_vmx_virtual_intr_delivery(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; } /* * Comment's format: document - errata name - stepping - processor name. 
* Refer from * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp */ static u32 vmx_preemption_cpu_tfms[] = { /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */ 0x000206E6, /* 323056.pdf - AAX65 - C2 - Xeon L3406 */ /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */ /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */ 0x00020652, /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */ 0x00020655, /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */ /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */ /* * 320767.pdf - AAP86 - B1 - * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile */ 0x000106E5, /* 321333.pdf - AAM126 - C0 - Xeon 3500 */ 0x000106A0, /* 321333.pdf - AAM126 - C1 - Xeon 3500 */ 0x000106A1, /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */ 0x000106A4, /* 321333.pdf - AAM126 - D0 - Xeon 3500 */ /* 321324.pdf - AAK139 - D0 - Xeon 5500 */ /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */ 0x000106A5, }; static inline bool cpu_has_broken_vmx_preemption_timer(void) { u32 eax = cpuid_eax(0x00000001), i; /* Clear the reserved bits */ eax &= ~(0x3U << 14 | 0xfU << 28); for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++) if (eax == vmx_preemption_cpu_tfms[i]) return true; return false; } static inline bool cpu_has_vmx_preemption_timer(void) { return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VMX_PREEMPTION_TIMER; } static inline bool cpu_has_vmx_posted_intr(void) { return IS_ENABLED(CONFIG_X86_LOCAL_APIC) && vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR; } static inline bool cpu_has_vmx_apicv(void) { return cpu_has_vmx_apic_register_virt() && cpu_has_vmx_virtual_intr_delivery() && cpu_has_vmx_posted_intr(); } static inline bool cpu_has_vmx_flexpriority(void) { return cpu_has_vmx_tpr_shadow() && cpu_has_vmx_virtualize_apic_accesses(); } static inline bool cpu_has_vmx_ept_execute_only(void) { return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT; } static inline bool cpu_has_vmx_ept_2m_page(void) { return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_1g_page(void) { return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT; } static inline bool cpu_has_vmx_ept_4levels(void) { return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; } static inline bool cpu_has_vmx_ept_mt_wb(void) { return vmx_capability.ept & VMX_EPTP_WB_BIT; } static inline bool cpu_has_vmx_ept_5levels(void) { return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT; } static inline bool cpu_has_vmx_ept_ad_bits(void) { return vmx_capability.ept & VMX_EPT_AD_BIT; } static inline bool cpu_has_vmx_invept_context(void) { return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT; } static inline bool cpu_has_vmx_invept_global(void) { return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT; } static inline bool cpu_has_vmx_invvpid_single(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT; } static inline bool cpu_has_vmx_invvpid_global(void) { return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; } static inline bool cpu_has_vmx_invvpid(void) { return vmx_capability.vpid & VMX_VPID_INVVPID_BIT; } static inline bool cpu_has_vmx_ept(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_EPT; } static inline bool cpu_has_vmx_unrestricted_guest(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_UNRESTRICTED_GUEST; } static inline bool cpu_has_vmx_ple(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & 
SECONDARY_EXEC_PAUSE_LOOP_EXITING; } static inline bool cpu_has_vmx_basic_inout(void) { return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT); } static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu) { return flexpriority_enabled && lapic_in_kernel(vcpu); } static inline bool cpu_has_vmx_vpid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_VPID; } static inline bool cpu_has_vmx_rdtscp(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_RDTSCP; } static inline bool cpu_has_vmx_invpcid(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_INVPCID; } static inline bool cpu_has_vmx_wbinvd_exit(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_WBINVD_EXITING; } static inline bool cpu_has_vmx_shadow_vmcs(void) { u64 vmx_msr; rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); /* check if the cpu supports writing r/o exit information fields */ if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS)) return false; return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_SHADOW_VMCS; } static inline bool cpu_has_vmx_pml(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML; } static inline bool cpu_has_vmx_tsc_scaling(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_TSC_SCALING; } static inline bool cpu_has_vmx_vmfunc(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_VMFUNC; } static inline bool report_flexpriority(void) { return flexpriority_enabled; } static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu) { return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low); } static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit) { return vmcs12->cpu_based_vm_exec_control & bit; } static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit) { return (vmcs12->cpu_based_vm_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && (vmcs12->secondary_vm_exec_control & bit); } static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; } static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER; } static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); } static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); } static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML); } static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); } static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID); } static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT); } static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); } static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12) { return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR; } static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12) { return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC); } static inline bool nested_cpu_has_eptp_switching(struct vmcs12 
*vmcs12) { return nested_cpu_has_vmfunc(vmcs12) && (vmcs12->vm_function_control & VMX_VMFUNC_EPTP_SWITCHING); } static inline bool is_nmi(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); } static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification); static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification); static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { int i; for (i = 0; i < vmx->nmsrs; ++i) if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) return i; return -1; } static inline void __invvpid(int ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; u64 rsvd : 48; u64 gva; } operand = { vpid, 0, gva }; asm volatile (__ex(ASM_VMX_INVVPID) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:" : : "a"(&operand), "c"(ext) : "cc", "memory"); } static inline void __invept(int ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; } operand = {eptp, gpa}; asm volatile (__ex(ASM_VMX_INVEPT) /* CF==1 or ZF==1 --> rc = -1 */ "; ja 1f ; ud2 ; 1:\n" : : "a" (&operand), "c" (ext) : "cc", "memory"); } static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) { int i; i = __find_msr_index(vmx, msr); if (i >= 0) return &vmx->guest_msrs[i]; return NULL; } static void vmcs_clear(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); u8 error; asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0" : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", vmcs, phys_addr); } static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs) { vmcs_clear(loaded_vmcs->vmcs); if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) vmcs_clear(loaded_vmcs->shadow_vmcs); loaded_vmcs->cpu = -1; loaded_vmcs->launched = 0; } static void vmcs_load(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); u8 error; asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0" : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr) : "cc", "memory"); if (error) printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n", vmcs, phys_addr); } #ifdef CONFIG_KEXEC_CORE /* * This bitmap is used to indicate whether the vmclear * operation is enabled on all cpus. All disabled by * default. 
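 *
 * (Sketch of the idea, not part of the original comment: the kdump
 * crash path cannot take locks, so each CPU instead publishes "my list
 * may be VMCLEARed" through this bitmap. crash_vmclear_local_loaded_vmcss()
 * below walks the local list only while the bit for that CPU is set,
 * and the bit is cleared around every update of the list.)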
*/ static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE; static inline void crash_enable_local_vmclear(int cpu) { cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap); } static inline void crash_disable_local_vmclear(int cpu) { cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap); } static inline int crash_local_vmclear_enabled(int cpu) { return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap); } static void crash_vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v; if (!crash_local_vmclear_enabled(cpu)) return; list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) vmcs_clear(v->vmcs); } #else static inline void crash_enable_local_vmclear(int cpu) { } static inline void crash_disable_local_vmclear(int cpu) { } #endif /* CONFIG_KEXEC_CORE */ static void __loaded_vmcs_clear(void *arg) { struct loaded_vmcs *loaded_vmcs = arg; int cpu = raw_smp_processor_id(); if (loaded_vmcs->cpu != cpu) return; /* vcpu migration can race with cpu offline */ if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) per_cpu(current_vmcs, cpu) = NULL; crash_disable_local_vmclear(cpu); list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); /* * Ensure that the update to loaded_vmcs->loaded_vmcss_on_cpu_link * is visible before loaded_vmcs->cpu is set to -1 in * loaded_vmcs_init(). Otherwise, another CPU could see cpu == -1 * first and add the vmcs to its percpu list before it is deleted * here. */ smp_wmb(); loaded_vmcs_init(loaded_vmcs); crash_enable_local_vmclear(cpu); } static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs) { int cpu = loaded_vmcs->cpu; if (cpu != -1) smp_call_function_single(cpu, __loaded_vmcs_clear, loaded_vmcs, 1); } static inline void vpid_sync_vcpu_single(int vpid) { if (vpid == 0) return; if (cpu_has_vmx_invvpid_single()) __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0); } static inline void vpid_sync_vcpu_global(void) { if (cpu_has_vmx_invvpid_global()) __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0); } static inline void vpid_sync_context(int vpid) { if (cpu_has_vmx_invvpid_single()) vpid_sync_vcpu_single(vpid); else vpid_sync_vcpu_global(); } static inline void ept_sync_global(void) { if (cpu_has_vmx_invept_global()) __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0); } static inline void ept_sync_context(u64 eptp) { if (enable_ept) { if (cpu_has_vmx_invept_context()) __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0); else ept_sync_global(); } } static __always_inline void vmcs_check16(unsigned long field) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, "16-bit accessor invalid for 64-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, "16-bit accessor invalid for 64-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, "16-bit accessor invalid for 32-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, "16-bit accessor invalid for natural width field"); } static __always_inline void vmcs_check32(unsigned long field) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, "32-bit accessor invalid for 16-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, "32-bit accessor invalid for natural width field"); } static __always_inline void vmcs_check64(unsigned long field) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, "64-bit accessor invalid for 16-bit field"); 
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, "64-bit accessor invalid for 64-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, "64-bit accessor invalid for 32-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000, "64-bit accessor invalid for natural width field"); } static __always_inline void vmcs_checkl(unsigned long field) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0, "Natural width accessor invalid for 16-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000, "Natural width accessor invalid for 64-bit field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001, "Natural width accessor invalid for 64-bit high field"); BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000, "Natural width accessor invalid for 32-bit field"); } static __always_inline unsigned long __vmcs_readl(unsigned long field) { unsigned long value; asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0") : "=a"(value) : "d"(field) : "cc"); return value; } static __always_inline u16 vmcs_read16(unsigned long field) { vmcs_check16(field); return __vmcs_readl(field); } static __always_inline u32 vmcs_read32(unsigned long field) { vmcs_check32(field); return __vmcs_readl(field); } static __always_inline u64 vmcs_read64(unsigned long field) { vmcs_check64(field); #ifdef CONFIG_X86_64 return __vmcs_readl(field); #else return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32); #endif } static __always_inline unsigned long vmcs_readl(unsigned long field) { vmcs_checkl(field); return __vmcs_readl(field); } static noinline void vmwrite_error(unsigned long field, unsigned long value) { printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); dump_stack(); } static __always_inline void __vmcs_writel(unsigned long field, unsigned long value) { u8 error; asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0" : "=q"(error) : "a"(value), "d"(field) : "cc"); if (unlikely(error)) vmwrite_error(field, value); } static __always_inline void vmcs_write16(unsigned long field, u16 value) { vmcs_check16(field); __vmcs_writel(field, value); } static __always_inline void vmcs_write32(unsigned long field, u32 value) { vmcs_check32(field); __vmcs_writel(field, value); } static __always_inline void vmcs_write64(unsigned long field, u64 value) { vmcs_check64(field); __vmcs_writel(field, value); #ifndef CONFIG_X86_64 asm volatile (""); __vmcs_writel(field+1, value >> 32); #endif } static __always_inline void vmcs_writel(unsigned long field, unsigned long value) { vmcs_checkl(field); __vmcs_writel(field, value); } static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_clear_bits does not support 64-bit fields"); __vmcs_writel(field, __vmcs_readl(field) & ~mask); } static __always_inline void vmcs_set_bits(unsigned long field, u32 mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_set_bits does not support 64-bit fields"); __vmcs_writel(field, __vmcs_readl(field) | mask); } static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx) { vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS); } static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_ENTRY_CONTROLS, 
val); vmx->vm_entry_controls_shadow = val; } static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_entry_controls_shadow != val) vm_entry_controls_init(vmx, val); } static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_entry_controls_shadow; } static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); } static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); } static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx) { vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS); } static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) { vmcs_write32(VM_EXIT_CONTROLS, val); vmx->vm_exit_controls_shadow = val; } static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) { if (vmx->vm_exit_controls_shadow != val) vm_exit_controls_init(vmx, val); } static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) { return vmx->vm_exit_controls_shadow; } static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); } static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) { vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); } static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) { vmx->segment_cache.bitmask = 0; } static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, unsigned field) { bool ret; u32 mask = 1 << (seg * SEG_FIELD_NR + field); if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); vmx->segment_cache.bitmask = 0; } ret = vmx->segment_cache.bitmask & mask; vmx->segment_cache.bitmask |= mask; return ret; } static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) { u16 *p = &vmx->segment_cache.seg[seg].selector; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); return *p; } static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) { ulong *p = &vmx->segment_cache.seg[seg].base; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); return *p; } static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].limit; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); return *p; } static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) { u32 *p = &vmx->segment_cache.seg[seg].ar; if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); return *p; } static void update_exception_bitmap(struct kvm_vcpu *vcpu) { u32 eb; eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR); if ((vcpu->guest_debug & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) eb |= 1u << BP_VECTOR; if (to_vmx(vcpu)->rmode.vm86_active) eb = ~0; if (enable_ept) eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ /* When we are running a nested L2 guest and L1 specified for it a * certain exception bitmap, we must trap the same exceptions and pass * them to L1. When running L2, we will only handle the exceptions * specified above if L1 did not want them. 
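 *
 * (Worked example, not part of the original comment: if L1 sets only
 * the #GP bit in vmcs12->exception_bitmap, the merge below yields the
 * L0 set plus (1u << GP_VECTOR), so an L2 #GP still exits to L0 first
 * and is then reflected to L1, while an exception absent from both
 * bitmaps, e.g. #DE, is delivered directly to L2 by the hardware.)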
*/ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; vmcs_write32(EXCEPTION_BITMAP, eb); } static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit) { vm_entry_controls_clearbit(vmx, entry); vm_exit_controls_clearbit(vmx, exit); } static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { clear_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); return; } break; } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == m->nr) return; --m->nr; m->guest[i] = m->guest[m->nr]; m->host[i] = m->host[m->nr]; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, unsigned long entry, unsigned long exit, unsigned long guest_val_vmcs, unsigned long host_val_vmcs, u64 guest_val, u64 host_val) { vmcs_write64(guest_val_vmcs, guest_val); vmcs_write64(host_val_vmcs, host_val); vm_entry_controls_setbit(vmx, entry); vm_exit_controls_setbit(vmx, exit); } static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, u64 guest_val, u64 host_val) { unsigned i; struct msr_autoload *m = &vmx->msr_autoload; switch (msr) { case MSR_EFER: if (cpu_has_load_ia32_efer) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER, GUEST_IA32_EFER, HOST_IA32_EFER, guest_val, host_val); return; } break; case MSR_CORE_PERF_GLOBAL_CTRL: if (cpu_has_load_perf_global_ctrl) { add_atomic_switch_msr_special(vmx, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, GUEST_IA32_PERF_GLOBAL_CTRL, HOST_IA32_PERF_GLOBAL_CTRL, guest_val, host_val); return; } break; case MSR_IA32_PEBS_ENABLE: /* PEBS needs a quiescent period after being disabled (to write * a record). Disabling PEBS through VMX MSR swapping doesn't * provide that period, so a CPU could write host's record into * guest's memory. */ wrmsrl(MSR_IA32_PEBS_ENABLE, 0); } for (i = 0; i < m->nr; ++i) if (m->guest[i].index == msr) break; if (i == NR_AUTOLOAD_MSRS) { printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; } else if (i == m->nr) { ++m->nr; vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); } m->guest[i].index = msr; m->guest[i].value = guest_val; m->host[i].index = msr; m->host[i].value = host_val; } static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) { u64 guest_efer = vmx->vcpu.arch.efer; u64 ignore_bits = 0; if (!enable_ept) { /* * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing * host CPUID is more efficient than testing guest CPUID * or CR4. Host SMEP is anyway a requirement for guest SMEP. */ if (boot_cpu_has(X86_FEATURE_SMEP)) guest_efer |= EFER_NX; else if (!(guest_efer & EFER_NX)) ignore_bits |= EFER_NX; } /* * LMA and LME handled by hardware; SCE meaningless outside long mode. 
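 *
 * (Illustrative note, not part of the original comment: a bit set in
 * ignore_bits means "the guest may observe a value different from the
 * host's without a real MSR switch". In the shared-MSR path below the
 * ignored bits are copied from host_efer and masked out of the restore
 * mask, so e.g. a guest/host difference in EFER_SCE alone never forces
 * use of the atomic-switch slot.)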
*/ ignore_bits |= EFER_SCE; #ifdef CONFIG_X86_64 ignore_bits |= EFER_LMA | EFER_LME; /* SCE is meaningful only in long mode on Intel */ if (guest_efer & EFER_LMA) ignore_bits &= ~(u64)EFER_SCE; #endif clear_atomic_switch_msr(vmx, MSR_EFER); /* * On EPT, we can't emulate NX, so we must switch EFER atomically. * On CPUs that support "load IA32_EFER", always switch EFER * atomically, since it's faster than switching it manually. */ if (cpu_has_load_ia32_efer || (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { if (!(guest_efer & EFER_LMA)) guest_efer &= ~EFER_LME; if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); return false; } else { guest_efer &= ~ignore_bits; guest_efer |= host_efer & ignore_bits; vmx->guest_msrs[efer_offset].data = guest_efer; vmx->guest_msrs[efer_offset].mask = ~ignore_bits; return true; } } #ifdef CONFIG_X86_32 /* * On 32-bit kernels, VM exits still load the FS and GS bases from the * VMCS rather than the segment table. KVM uses this helper to figure * out the current bases to poke them into the VMCS before entry. */ static unsigned long segment_base(u16 selector) { struct desc_struct *table; unsigned long v; if (!(selector & ~SEGMENT_RPL_MASK)) return 0; table = get_current_gdt_ro(); if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) { u16 ldt_selector = kvm_read_ldt(); if (!(ldt_selector & ~SEGMENT_RPL_MASK)) return 0; table = (struct desc_struct *)segment_base(ldt_selector); } v = get_desc_base(&table[selector >> 3]); return v; } #endif static void vmx_save_host_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int i; if (vmx->host_state.loaded) return; vmx->host_state.loaded = 1; /* * Set host fs and gs selectors. Unfortunately, 22.2.3 does not * allow segment selectors with cpl > 0 or ti == 1. 
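 *
 * (Sketch for clarity, not part of the original comment: the low three
 * bits of a selector are RPL[1:0] and TI, so the "(sel & 7) == 0"
 * checks below accept a selector only when RPL == 0 and TI == 0;
 * anything else is written to the VMCS as 0 and reloaded by hand on
 * the way back to the host.)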
*/ vmx->host_state.ldt_sel = kvm_read_ldt(); vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; savesegment(fs, vmx->host_state.fs_sel); if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); vmx->host_state.fs_reload_needed = 0; } else { vmcs_write16(HOST_FS_SELECTOR, 0); vmx->host_state.fs_reload_needed = 1; } savesegment(gs, vmx->host_state.gs_sel); if (!(vmx->host_state.gs_sel & 7)) vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); else { vmcs_write16(HOST_GS_SELECTOR, 0); vmx->host_state.gs_ldt_reload_needed = 1; } #ifdef CONFIG_X86_64 savesegment(ds, vmx->host_state.ds_sel); savesegment(es, vmx->host_state.es_sel); #endif #ifdef CONFIG_X86_64 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); #else vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); #endif #ifdef CONFIG_X86_64 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); if (is_long_mode(&vmx->vcpu)) wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (boot_cpu_has(X86_FEATURE_MPX)) rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); for (i = 0; i < vmx->save_nmsrs; ++i) kvm_set_shared_msr(vmx->guest_msrs[i].index, vmx->guest_msrs[i].data, vmx->guest_msrs[i].mask); } static void __vmx_load_host_state(struct vcpu_vmx *vmx) { if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); #endif if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); #ifdef CONFIG_X86_64 load_gs_index(vmx->host_state.gs_sel); #else loadsegment(gs, vmx->host_state.gs_sel); #endif } if (vmx->host_state.fs_reload_needed) loadsegment(fs, vmx->host_state.fs_sel); #ifdef CONFIG_X86_64 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) { loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } #endif invalidate_tss_limit(); #ifdef CONFIG_X86_64 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif if (vmx->host_state.msr_host_bndcfgs) wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); load_fixmap_gdt(raw_smp_processor_id()); } static void vmx_load_host_state(struct vcpu_vmx *vmx) { preempt_disable(); __vmx_load_host_state(vmx); preempt_enable(); } static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); struct pi_desc old, new; unsigned int dest; if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || !kvm_vcpu_apicv_active(vcpu)) return; do { old.control = new.control = pi_desc->control; /* * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there * are two possible cases: * 1. After running 'pre_block', context switch * happened. For this case, 'sn' was set in * vmx_vcpu_put(), so we need to clear it here. * 2. After running 'pre_block', we were blocked, * and woken up by some other guy. For this case, * we don't need to do anything, 'pi_post_block' * will do everything for us. However, we cannot * check whether it is case #1 or case #2 here * (maybe, not needed), so we also clear sn here, * I think it is not a big deal. 
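 *
 * (Illustrative example, not part of the original comment: on a
 * migration from CPU 2 to CPU 5 in xAPIC mode, the cmpxchg loop below
 * republishes the descriptor with ndst = (5 << 8) & 0xFF00,
 * nv = POSTED_INTR_VECTOR and sn cleared, so devices may post directly
 * to the new CPU.)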
if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) { if (vcpu->cpu != cpu) { dest = cpu_physical_id(cpu); if (x2apic_enabled()) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; } /* set 'NV' to 'notification vector' */ new.nv = POSTED_INTR_VECTOR; } /* Allow posting non-urgent interrupts */ new.sn = 0; } while (cmpxchg(&pi_desc->control, old.control, new.control) != old.control); } static void decache_tsc_multiplier(struct vcpu_vmx *vmx) { vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio; vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); } /* * Switches to specified vcpu, until a matching vcpu_put(), but assumes * vcpu mutex is already taken. */ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); bool already_loaded = vmx->loaded_vmcs->cpu == cpu; if (!already_loaded) { loaded_vmcs_clear(vmx->loaded_vmcs); local_irq_disable(); crash_disable_local_vmclear(cpu); /* * The read of loaded_vmcs->cpu must happen before fetching * loaded_vmcs->loaded_vmcss_on_cpu_link. * See the comments in __loaded_vmcs_clear(). */ smp_rmb(); list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, &per_cpu(loaded_vmcss_on_cpu, cpu)); crash_enable_local_vmclear(cpu); local_irq_enable(); } if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; vmcs_load(vmx->loaded_vmcs->vmcs); } if (!already_loaded) { void *gdt = get_current_gdt_ro(); unsigned long sysenter_esp; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); /* * Linux uses per-cpu TSS and GDT, so set these when switching * processors. See 22.2.4. */ vmcs_writel(HOST_TR_BASE, (unsigned long)this_cpu_ptr(&cpu_tss)); vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */ /* * VM exits change the host TR limit to 0x67 after a VM * exit. This is okay, since 0x67 covers everything except * the IO bitmap and we have code to handle the IO bitmap * being lost after a VM exit. */ BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67); rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ vmx->loaded_vmcs->cpu = cpu; } /* Setup TSC multiplier */ if (kvm_has_tsc_control && vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) decache_tsc_multiplier(vmx); vmx_vcpu_pi_load(vcpu, cpu); vmx->host_pkru = read_pkru(); } static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || !kvm_vcpu_apicv_active(vcpu)) return; /* Set SN when the vCPU is preempted */ if (vcpu->preempted) pi_set_sn(pi_desc); } static void vmx_vcpu_put(struct kvm_vcpu *vcpu) { vmx_vcpu_pi_put(vcpu); __vmx_load_host_state(to_vmx(vcpu)); } static bool emulation_required(struct kvm_vcpu *vcpu) { return emulate_invalid_guest_state && !guest_state_valid(vcpu); } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); /* * Return the cr0 value that a nested guest would read. This is a combination * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by * its hypervisor (cr0_read_shadow). 
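 *
 * (Worked example, not part of the original comment: with
 * cr0_guest_host_mask = X86_CR0_TS, guest_cr0 = PE|PG and
 * cr0_read_shadow = TS, nested_read_cr0() below returns
 * ((PE|PG) & ~TS) | (TS & TS) = PE|PG|TS -- guest-owned bits come from
 * guest_cr0, L1-intercepted bits from the read shadow.)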
*/ static inline unsigned long nested_read_cr0(struct vmcs12 *fields) { return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) | (fields->cr0_read_shadow & fields->cr0_guest_host_mask); } static inline unsigned long nested_read_cr4(struct vmcs12 *fields) { return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) | (fields->cr4_read_shadow & fields->cr4_guest_host_mask); } static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) { unsigned long rflags, save_rflags; if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); rflags = vmcs_readl(GUEST_RFLAGS); if (to_vmx(vcpu)->rmode.vm86_active) { rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; save_rflags = to_vmx(vcpu)->rmode.save_rflags; rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; } to_vmx(vcpu)->rflags = rflags; } return to_vmx(vcpu)->rflags; } static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { unsigned long old_rflags = vmx_get_rflags(vcpu); __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); to_vmx(vcpu)->rflags = rflags; if (to_vmx(vcpu)->rmode.vm86_active) { to_vmx(vcpu)->rmode.save_rflags = rflags; rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; } vmcs_writel(GUEST_RFLAGS, rflags); if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM) to_vmx(vcpu)->emulation_required = emulation_required(vcpu); } static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) { u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); int ret = 0; if (interruptibility & GUEST_INTR_STATE_STI) ret |= KVM_X86_SHADOW_INT_STI; if (interruptibility & GUEST_INTR_STATE_MOV_SS) ret |= KVM_X86_SHADOW_INT_MOV_SS; return ret; } static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) { u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); u32 interruptibility = interruptibility_old; interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); if (mask & KVM_X86_SHADOW_INT_MOV_SS) interruptibility |= GUEST_INTR_STATE_MOV_SS; else if (mask & KVM_X86_SHADOW_INT_STI) interruptibility |= GUEST_INTR_STATE_STI; if ((interruptibility != interruptibility_old)) vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); } static void skip_emulated_instruction(struct kvm_vcpu *vcpu) { unsigned long rip; rip = kvm_rip_read(vcpu); rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_rip_write(vcpu, rip); /* skipping an emulated instruction also counts */ vmx_set_interrupt_shadow(vcpu, 0); } static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, unsigned long exit_qual) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned int nr = vcpu->arch.exception.nr; u32 intr_info = nr | INTR_INFO_VALID_MASK; if (vcpu->arch.exception.has_error_code) { vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; intr_info |= INTR_INFO_DELIVER_CODE_MASK; } if (kvm_exception_is_soft(nr)) intr_info |= INTR_TYPE_SOFT_EXCEPTION; else intr_info |= INTR_TYPE_HARD_EXCEPTION; if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && vmx_get_nmi_mask(vcpu)) intr_info |= INTR_INFO_UNBLOCK_NMI; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); } /* * KVM wants to inject page-faults which it got to the guest. This function * checks whether in a nested guest, we need to inject them to L1 or L2. 
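 *
 * (Illustrative example, not part of the original comment: if L1 set
 * the #DB bit in its exception bitmap and L2 hits a hardware
 * breakpoint, the check below returns 1 with *exit_qual = DR6, and the
 * caller synthesizes an EXIT_REASON_EXCEPTION_NMI vmexit to L1 instead
 * of injecting the exception into L2.)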
*/ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned int nr = vcpu->arch.exception.nr; if (nr == PF_VECTOR) { if (vcpu->arch.exception.nested_apf) { *exit_qual = vcpu->arch.apf.nested_apf_token; return 1; } /* * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception. * The fix is to add the ancillary datum (CR2 or DR6) to structs * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 * can be written only when inject_pending_event runs. This should be * conditional on a new capability---if the capability is disabled, * kvm_multiple_exception would write the ancillary information to * CR2 or DR6, for backwards ABI-compatibility. */ if (nested_vmx_is_page_fault_vmexit(vmcs12, vcpu->arch.exception.error_code)) { *exit_qual = vcpu->arch.cr2; return 1; } } else { if (vmcs12->exception_bitmap & (1u << nr)) { if (nr == DB_VECTOR) *exit_qual = vcpu->arch.dr6; else *exit_qual = 0; return 1; } } return 0; } static void vmx_queue_exception(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned nr = vcpu->arch.exception.nr; bool has_error_code = vcpu->arch.exception.has_error_code; u32 error_code = vcpu->arch.exception.error_code; u32 intr_info = nr | INTR_INFO_VALID_MASK; if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; } if (vmx->rmode.vm86_active) { int inc_eip = 0; if (kvm_exception_is_soft(nr)) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } if (kvm_exception_is_soft(nr)) { vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); intr_info |= INTR_TYPE_SOFT_EXCEPTION; } else intr_info |= INTR_TYPE_HARD_EXCEPTION; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); } static bool vmx_rdtscp_supported(void) { return cpu_has_vmx_rdtscp(); } static bool vmx_invpcid_supported(void) { return cpu_has_vmx_invpcid() && enable_ept; } /* * Swap MSR entry in host/guest MSR entry array. */ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) { struct shared_msr_entry tmp; tmp = vmx->guest_msrs[to]; vmx->guest_msrs[to] = vmx->guest_msrs[from]; vmx->guest_msrs[from] = tmp; } static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu) { unsigned long *msr_bitmap; if (is_guest_mode(vcpu)) msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap; else if (cpu_has_secondary_exec_ctrls() && (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) { if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv; else msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv; } else { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode_x2apic; else msr_bitmap = vmx_msr_bitmap_legacy_x2apic; } } else { if (is_long_mode(vcpu)) msr_bitmap = vmx_msr_bitmap_longmode; else msr_bitmap = vmx_msr_bitmap_legacy; } vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); } /* * Set up the vmcs to automatically save and restore system * msrs. Don't touch the 64-bit msrs if the guest is in legacy * mode, as fiddling with msrs is very expensive. 
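 *
 * (Illustrative note, not part of the original comment: setup_msrs()
 * below compacts the entries that actually need switching to the front
 * of guest_msrs[] via move_msr_up() and records the count in
 * save_nmsrs, so the save/restore loops only touch indices
 * 0..save_nmsrs-1 rather than scanning the whole array.)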
*/ static void setup_msrs(struct vcpu_vmx *vmx) { int save_nmsrs, index; save_nmsrs = 0; #ifdef CONFIG_X86_64 if (is_long_mode(&vmx->vcpu)) { index = __find_msr_index(vmx, MSR_SYSCALL_MASK); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_LSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_CSTAR); if (index >= 0) move_msr_up(vmx, index, save_nmsrs++); index = __find_msr_index(vmx, MSR_TSC_AUX); if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) move_msr_up(vmx, index, save_nmsrs++); /* * MSR_STAR is only needed on long-mode guests, and only * if efer.sce is enabled. */ index = __find_msr_index(vmx, MSR_STAR); if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) move_msr_up(vmx, index, save_nmsrs++); } #endif index = __find_msr_index(vmx, MSR_EFER); if (index >= 0 && update_transition_efer(vmx, index)) move_msr_up(vmx, index, save_nmsrs++); vmx->save_nmsrs = save_nmsrs; if (cpu_has_vmx_msr_bitmap()) vmx_set_msr_bitmap(&vmx->vcpu); } /* * reads and returns guest's timestamp counter "register" * guest_tsc = ((host_tsc * tsc multiplier) >> 48) + tsc_offset * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 */ static u64 guest_read_tsc(struct kvm_vcpu *vcpu) { u64 host_tsc, tsc_offset; host_tsc = rdtsc(); tsc_offset = vmcs_read64(TSC_OFFSET); return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; } /* * writes 'offset' into guest's timestamp counter offset register */ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { if (is_guest_mode(vcpu)) { /* * We're here if L1 chose not to trap WRMSR to TSC. According * to the spec, this should set L1's TSC; the offset that L1 * set for L2 remains unchanged, and still needs to be added * to the newly set TSC to get L2's TSC. */ struct vmcs12 *vmcs12; /* recalculate vmcs02.TSC_OFFSET: */ vmcs12 = get_vmcs12(vcpu); vmcs_write64(TSC_OFFSET, offset + (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? vmcs12->tsc_offset : 0)); } else { trace_kvm_write_tsc_offset(vcpu->vcpu_id, vmcs_read64(TSC_OFFSET), offset); vmcs_write64(TSC_OFFSET, offset); } } /* * nested_vmx_allowed() checks whether a guest should be allowed to use VMX * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for * all guests if the "nested" module option is off, and can also be disabled * for a single guest by disabling its VMX cpuid bit. */ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) { return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX); } /* * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be * returned for the various VMX controls MSRs when nested VMX is enabled. * The same values should also be used to verify that vmcs12 control fields are * valid during nested entry from L1 to L2. * Each of these control msrs has a low and high 32-bit half: A low bit is on * if the corresponding bit in the (32-bit) control field *must* be on, and a * bit in the high half is on if the corresponding bit in the control field * may be on. See also vmx_control_verify(). */ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) { /* * Note that as a general rule, the high half of the MSRs (bits in * the control fields which may be 1) should be initialized by the * intersection of the underlying hardware's MSR (i.e., features which * can be supported) and the list of features we want to expose - * because they are known to be properly supported in our code. 
* Also, usually, the low half of the MSRs (bits which must be 1) can * be set to 0, meaning that L1 may turn off any of these bits. The * reason is that if one of these bits is necessary, it will appear * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control * fields of vmcs01 and vmcs02, will turn these bits off - and * nested_vmx_exit_reflected() will not pass related exits to L1. * These rules have exceptions below. */ /* pin-based controls */ rdmsr(MSR_IA32_VMX_PINBASED_CTLS, vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high); vmx->nested.nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; vmx->nested.nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | PIN_BASED_VMX_PREEMPTION_TIMER; if (kvm_vcpu_apicv_active(&vmx->vcpu)) vmx->nested.nested_vmx_pinbased_ctls_high |= PIN_BASED_POSTED_INTR; /* exit controls */ rdmsr(MSR_IA32_VMX_EXIT_CTLS, vmx->nested.nested_vmx_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high); vmx->nested.nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_exit_ctls_high &= #ifdef CONFIG_X86_64 VM_EXIT_HOST_ADDR_SPACE_SIZE | #endif VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; if (kvm_mpx_supported()) vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; /* We support free control of debug control saving. */ vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; /* entry controls */ rdmsr(MSR_IA32_VMX_ENTRY_CTLS, vmx->nested.nested_vmx_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high); vmx->nested.nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_entry_ctls_high &= #ifdef CONFIG_X86_64 VM_ENTRY_IA32E_MODE | #endif VM_ENTRY_LOAD_IA32_PAT; vmx->nested.nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); if (kvm_mpx_supported()) vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; /* We support free control of debug control loading. */ vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; /* cpu-based controls */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx->nested.nested_vmx_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high); vmx->nested.nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; vmx->nested.nested_vmx_procbased_ctls_high &= CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; /* * We can allow some features even when not supported by the * hardware. For example, L1 can specify an MSR bitmap - and we * can use it to avoid exits to L1 - even when L0 runs L2 * without MSR bitmaps. 
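 *
 * (Illustrative note, not part of the original comment: conceptually,
 * when L0 itself runs without hardware MSR bitmaps, every L2 MSR access
 * exits to L0 anyway, and L0 consults vmcs12's bitmap in software to
 * decide whether the exit must be reflected to L1 -- so advertising
 * CPU_BASED_USE_MSR_BITMAPS here costs nothing.)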
*/ vmx->nested.nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | CPU_BASED_USE_MSR_BITMAPS; /* We support free control of CR3 access interception. */ vmx->nested.nested_vmx_procbased_ctls_low &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); /* * secondary cpu-based controls. Do not include those that * depend on CPUID bits, they are added later by vmx_cpuid_update. */ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high); vmx->nested.nested_vmx_secondary_ctls_low = 0; vmx->nested.nested_vmx_secondary_ctls_high &= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_DESC | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_WBINVD_EXITING; if (enable_ept) { /* nested EPT: emulate EPT also to L1 */ vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT; if (cpu_has_vmx_ept_execute_only()) vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXECUTE_ONLY_BIT; vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept; vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | VMX_EPT_1GB_PAGE_BIT; if (enable_ept_ad_bits) { vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_PML; vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT; } } else vmx->nested.nested_vmx_ept_caps = 0; if (cpu_has_vmx_vmfunc()) { vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_VMFUNC; /* * Advertise EPTP switching unconditionally * since we emulate it */ vmx->nested.nested_vmx_vmfunc_controls = VMX_VMFUNC_EPTP_SWITCHING; } /* * Old versions of KVM use the single-context version without * checking for support, so declare that it is supported even * though it is treated as global context. The alternative is * not failing the single-context invvpid, and it is worse. */ if (enable_vpid) { vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_VPID; vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | VMX_VPID_EXTENT_SUPPORTED_MASK; } else vmx->nested.nested_vmx_vpid_caps = 0; if (enable_unrestricted_guest) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_UNRESTRICTED_GUEST; /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, vmx->nested.nested_vmx_misc_low, vmx->nested.nested_vmx_misc_high); vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA; vmx->nested.nested_vmx_misc_low |= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | VMX_MISC_ACTIVITY_HLT; vmx->nested.nested_vmx_misc_high = 0; /* * This MSR reports some information about VMX support. We * should return information about the VMX we emulate for the * guest, and the VMCS structure we give it - not about the * VMX support of the underlying hardware. */ vmx->nested.nested_vmx_basic = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); if (cpu_has_vmx_basic_inout()) vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT; /* * These MSRs specify bits which the guest must keep fixed on * while L1 is in VMXON mode (in L1's root mode, or running an L2). * We picked the standard core2 setting. 
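 *
 * (Illustrative example, not part of the original comment: with
 * cr0_fixed0 = PE|PG|NE, an L1 attempt to clear CR0.PG while in VMX
 * operation fails the fixed_bits_valid() test defined further down,
 * matching what real hardware would do.)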
*/ #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) #define VMXON_CR4_ALWAYSON X86_CR4_VMXE vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON; vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON; /* These MSRs specify bits which the guest must keep fixed off. */ rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1); rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1); /* highest index: VMX_PREEMPTION_TIMER_VALUE */ vmx->nested.nested_vmx_vmcs_enum = 0x2e; } /* * if fixed0[i] == 1: val[i] must be 1 * if fixed1[i] == 0: val[i] must be 0 */ static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1) { return ((val & fixed1) | fixed0) == val; } static inline bool vmx_control_verify(u32 control, u32 low, u32 high) { return fixed_bits_valid(control, low, high); } static inline u64 vmx_control_msr(u32 low, u32 high) { return low | ((u64)high << 32); } static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) { superset &= mask; subset &= mask; return (superset | subset) == superset; } static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) { const u64 feature_and_reserved = /* feature (except bit 48; see below) */ BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | /* reserved */ BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); u64 vmx_basic = vmx->nested.nested_vmx_basic; if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) return -EINVAL; /* * KVM does not emulate a version of VMX that constrains physical * addresses of VMX structures (e.g. VMCS) to 32-bits. */ if (data & BIT_ULL(48)) return -EINVAL; if (vmx_basic_vmcs_revision_id(vmx_basic) != vmx_basic_vmcs_revision_id(data)) return -EINVAL; if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data)) return -EINVAL; vmx->nested.nested_vmx_basic = data; return 0; } static int vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) { u64 supported; u32 *lowp, *highp; switch (msr_index) { case MSR_IA32_VMX_TRUE_PINBASED_CTLS: lowp = &vmx->nested.nested_vmx_pinbased_ctls_low; highp = &vmx->nested.nested_vmx_pinbased_ctls_high; break; case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: lowp = &vmx->nested.nested_vmx_procbased_ctls_low; highp = &vmx->nested.nested_vmx_procbased_ctls_high; break; case MSR_IA32_VMX_TRUE_EXIT_CTLS: lowp = &vmx->nested.nested_vmx_exit_ctls_low; highp = &vmx->nested.nested_vmx_exit_ctls_high; break; case MSR_IA32_VMX_TRUE_ENTRY_CTLS: lowp = &vmx->nested.nested_vmx_entry_ctls_low; highp = &vmx->nested.nested_vmx_entry_ctls_high; break; case MSR_IA32_VMX_PROCBASED_CTLS2: lowp = &vmx->nested.nested_vmx_secondary_ctls_low; highp = &vmx->nested.nested_vmx_secondary_ctls_high; break; default: BUG(); } supported = vmx_control_msr(*lowp, *highp); /* Check must-be-1 bits are still 1. */ if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) return -EINVAL; /* Check must-be-0 bits are still 0. 
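 * (The high word holds the allowed-1 bits, so userspace may drop
 *  features but never advertise new ones: for a supported high
 *  word of 0x0081, data of 0x0001 passes and 0x0101 fails.)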
*/ if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) return -EINVAL; *lowp = data; *highp = data >> 32; return 0; } static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) { const u64 feature_and_reserved_bits = /* feature */ BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) | BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | /* reserved */ GENMASK_ULL(13, 9) | BIT_ULL(31); u64 vmx_misc; vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low, vmx->nested.nested_vmx_misc_high); if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) return -EINVAL; if ((vmx->nested.nested_vmx_pinbased_ctls_high & PIN_BASED_VMX_PREEMPTION_TIMER) && vmx_misc_preemption_timer_rate(data) != vmx_misc_preemption_timer_rate(vmx_misc)) return -EINVAL; if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc)) return -EINVAL; if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc)) return -EINVAL; if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc)) return -EINVAL; vmx->nested.nested_vmx_misc_low = data; vmx->nested.nested_vmx_misc_high = data >> 32; return 0; } static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) { u64 vmx_ept_vpid_cap; vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps, vmx->nested.nested_vmx_vpid_caps); /* Every bit is either reserved or a feature bit. */ if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) return -EINVAL; vmx->nested.nested_vmx_ept_caps = data; vmx->nested.nested_vmx_vpid_caps = data >> 32; return 0; } static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) { u64 *msr; switch (msr_index) { case MSR_IA32_VMX_CR0_FIXED0: msr = &vmx->nested.nested_vmx_cr0_fixed0; break; case MSR_IA32_VMX_CR4_FIXED0: msr = &vmx->nested.nested_vmx_cr4_fixed0; break; default: BUG(); } /* * 1 bits (which indicates bits which "must-be-1" during VMX operation) * must be 1 in the restored value. */ if (!is_bitwise_subset(data, *msr, -1ULL)) return -EINVAL; *msr = data; return 0; } /* * Called when userspace is restoring VMX MSRs. * * Returns 0 on success, non-0 otherwise. */ static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) { struct vcpu_vmx *vmx = to_vmx(vcpu); switch (msr_index) { case MSR_IA32_VMX_BASIC: return vmx_restore_vmx_basic(vmx, data); case MSR_IA32_VMX_PINBASED_CTLS: case MSR_IA32_VMX_PROCBASED_CTLS: case MSR_IA32_VMX_EXIT_CTLS: case MSR_IA32_VMX_ENTRY_CTLS: /* * The "non-true" VMX capability MSRs are generated from the * "true" MSRs, so we do not support restoring them directly. * * If userspace wants to emulate VMX_BASIC[55]=0, userspace * should restore the "true" MSRs with the must-be-1 bits * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND * DEFAULT SETTINGS". */ return -EINVAL; case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: case MSR_IA32_VMX_TRUE_EXIT_CTLS: case MSR_IA32_VMX_TRUE_ENTRY_CTLS: case MSR_IA32_VMX_PROCBASED_CTLS2: return vmx_restore_control_msr(vmx, msr_index, data); case MSR_IA32_VMX_MISC: return vmx_restore_vmx_misc(vmx, data); case MSR_IA32_VMX_CR0_FIXED0: case MSR_IA32_VMX_CR4_FIXED0: return vmx_restore_fixed0_msr(vmx, msr_index, data); case MSR_IA32_VMX_CR0_FIXED1: case MSR_IA32_VMX_CR4_FIXED1: /* * These MSRs are generated based on the vCPU's CPUID, so we * do not support restoring them directly. 
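 *
 * (For instance, X86_CR4_FSGSBASE is only made allowed-1 in
 * nested_vmx_cr4_fixed1 when guest CPUID advertises FSGSBASE,
 * so the value follows from KVM_SET_CPUID2, not from a direct
 * MSR write.)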
*/ return -EINVAL; case MSR_IA32_VMX_EPT_VPID_CAP: return vmx_restore_vmx_ept_vpid_cap(vmx, data); case MSR_IA32_VMX_VMCS_ENUM: vmx->nested.nested_vmx_vmcs_enum = data; return 0; default: /* * The rest of the VMX capability MSRs do not support restore. */ return -EINVAL; } } /* Returns 0 on success, non-0 otherwise. */ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { struct vcpu_vmx *vmx = to_vmx(vcpu); switch (msr_index) { case MSR_IA32_VMX_BASIC: *pdata = vmx->nested.nested_vmx_basic; break; case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high); if (msr_index == MSR_IA32_VMX_PINBASED_CTLS) *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: case MSR_IA32_VMX_PROCBASED_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high); if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS) *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_TRUE_EXIT_CTLS: case MSR_IA32_VMX_EXIT_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high); if (msr_index == MSR_IA32_VMX_EXIT_CTLS) *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_TRUE_ENTRY_CTLS: case MSR_IA32_VMX_ENTRY_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high); if (msr_index == MSR_IA32_VMX_ENTRY_CTLS) *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_MISC: *pdata = vmx_control_msr( vmx->nested.nested_vmx_misc_low, vmx->nested.nested_vmx_misc_high); break; case MSR_IA32_VMX_CR0_FIXED0: *pdata = vmx->nested.nested_vmx_cr0_fixed0; break; case MSR_IA32_VMX_CR0_FIXED1: *pdata = vmx->nested.nested_vmx_cr0_fixed1; break; case MSR_IA32_VMX_CR4_FIXED0: *pdata = vmx->nested.nested_vmx_cr4_fixed0; break; case MSR_IA32_VMX_CR4_FIXED1: *pdata = vmx->nested.nested_vmx_cr4_fixed1; break; case MSR_IA32_VMX_VMCS_ENUM: *pdata = vmx->nested.nested_vmx_vmcs_enum; break; case MSR_IA32_VMX_PROCBASED_CTLS2: *pdata = vmx_control_msr( vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high); break; case MSR_IA32_VMX_EPT_VPID_CAP: *pdata = vmx->nested.nested_vmx_ept_caps | ((u64)vmx->nested.nested_vmx_vpid_caps << 32); break; case MSR_IA32_VMX_VMFUNC: *pdata = vmx->nested.nested_vmx_vmfunc_controls; break; default: return 1; } return 0; } static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, uint64_t val) { uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; return !(val & ~valid_bits); } /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. 
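 *
 * (A non-zero return here becomes a #GP for a guest RDMSR via the
 * common emulation path; host-initiated reads through KVM_GET_MSRS
 * simply see the failure.)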
*/ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct shared_msr_entry *msr; switch (msr_info->index) { #ifdef CONFIG_X86_64 case MSR_FS_BASE: msr_info->data = vmcs_readl(GUEST_FS_BASE); break; case MSR_GS_BASE: msr_info->data = vmcs_readl(GUEST_GS_BASE); break; case MSR_KERNEL_GS_BASE: vmx_load_host_state(to_vmx(vcpu)); msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base; break; #endif case MSR_EFER: return kvm_get_msr_common(vcpu, msr_info); case MSR_IA32_TSC: msr_info->data = guest_read_tsc(vcpu); break; case MSR_IA32_SYSENTER_CS: msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); break; case MSR_IA32_SYSENTER_EIP: msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP); break; case MSR_IA32_SYSENTER_ESP: msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); break; case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported() || (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) return 1; msr_info->data = vmcs_read64(GUEST_BNDCFGS); break; case MSR_IA32_MCG_EXT_CTL: if (!msr_info->host_initiated && !(to_vmx(vcpu)->msr_ia32_feature_control & FEATURE_CONTROL_LMCE)) return 1; msr_info->data = vcpu->arch.mcg_ext_ctl; break; case MSR_IA32_FEATURE_CONTROL: msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control; break; case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: if (!nested_vmx_allowed(vcpu)) return 1; return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data); case MSR_IA32_XSS: if (!vmx_xsaves_supported()) return 1; msr_info->data = vcpu->arch.ia32_xss; break; case MSR_TSC_AUX: if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) return 1; /* Otherwise falls through */ default: msr = find_msr_entry(to_vmx(vcpu), msr_info->index); if (msr) { msr_info->data = msr->data; break; } return kvm_get_msr_common(vcpu, msr_info); } return 0; } static void vmx_leave_nested(struct kvm_vcpu *vcpu); /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. 
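 *
 * (Note the asymmetry with vmx_get_msr(): the VMX capability MSRs
 * in the MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC range may be
 * read by the guest but written only host-initiated, since they
 * are architecturally read-only.)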
*/ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr; int ret = 0; u32 msr_index = msr_info->index; u64 data = msr_info->data; switch (msr_index) { case MSR_EFER: ret = kvm_set_msr_common(vcpu, msr_info); break; #ifdef CONFIG_X86_64 case MSR_FS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_FS_BASE, data); break; case MSR_GS_BASE: vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_GS_BASE, data); break; case MSR_KERNEL_GS_BASE: vmx_load_host_state(vmx); vmx->msr_guest_kernel_gs_base = data; break; #endif case MSR_IA32_SYSENTER_CS: vmcs_write32(GUEST_SYSENTER_CS, data); break; case MSR_IA32_SYSENTER_EIP: vmcs_writel(GUEST_SYSENTER_EIP, data); break; case MSR_IA32_SYSENTER_ESP: vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_BNDCFGS: if (!kvm_mpx_supported() || (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_MPX))) return 1; if (is_noncanonical_address(data & PAGE_MASK, vcpu) || (data & MSR_IA32_BNDCFGS_RSVD)) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; case MSR_IA32_TSC: kvm_write_tsc(vcpu, msr_info); break; case MSR_IA32_CR_PAT: if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) return 1; vmcs_write64(GUEST_IA32_PAT, data); vcpu->arch.pat = data; break; } ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_TSC_ADJUST: ret = kvm_set_msr_common(vcpu, msr_info); break; case MSR_IA32_MCG_EXT_CTL: if ((!msr_info->host_initiated && !(to_vmx(vcpu)->msr_ia32_feature_control & FEATURE_CONTROL_LMCE)) || (data & ~MCG_EXT_CTL_LMCE_EN)) return 1; vcpu->arch.mcg_ext_ctl = data; break; case MSR_IA32_FEATURE_CONTROL: if (!vmx_feature_control_msr_valid(vcpu, data) || (to_vmx(vcpu)->msr_ia32_feature_control & FEATURE_CONTROL_LOCKED && !msr_info->host_initiated)) return 1; vmx->msr_ia32_feature_control = data; if (msr_info->host_initiated && data == 0) vmx_leave_nested(vcpu); break; case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: if (!msr_info->host_initiated) return 1; /* they are read-only */ if (!nested_vmx_allowed(vcpu)) return 1; return vmx_set_vmx_msr(vcpu, msr_index, data); case MSR_IA32_XSS: if (!vmx_xsaves_supported()) return 1; /* * The only supported bit as of Skylake is bit 8, but * it is not supported on KVM. 
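 *
 * (Bit 8 is the Processor Trace state component, XSS.PT; KVM does
 * not virtualize it, so the only accepted value is 0.)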
*/ if (data != 0) return 1; vcpu->arch.ia32_xss = data; if (vcpu->arch.ia32_xss != host_xss) add_atomic_switch_msr(vmx, MSR_IA32_XSS, vcpu->arch.ia32_xss, host_xss); else clear_atomic_switch_msr(vmx, MSR_IA32_XSS); break; case MSR_TSC_AUX: if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) return 1; /* Check reserved bit, higher 32 bits should be zero */ if ((data >> 32) != 0) return 1; /* Otherwise falls through */ default: msr = find_msr_entry(vmx, msr_index); if (msr) { u64 old_msr_data = msr->data; msr->data = data; if (msr - vmx->guest_msrs < vmx->save_nmsrs) { preempt_disable(); ret = kvm_set_shared_msr(msr->index, msr->data, msr->mask); preempt_enable(); if (ret) msr->data = old_msr_data; } break; } ret = kvm_set_msr_common(vcpu, msr_info); } return ret; } static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); switch (reg) { case VCPU_REGS_RSP: vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); break; case VCPU_REGS_RIP: vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); break; case VCPU_EXREG_PDPTR: if (enable_ept) ept_save_pdptrs(vcpu); break; default: break; } } static __init int cpu_has_kvm_support(void) { return cpu_has_vmx(); } static __init int vmx_disabled_by_bios(void) { u64 msr; rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); if (msr & FEATURE_CONTROL_LOCKED) { /* launched w/ TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && tboot_enabled()) return 1; /* launched w/o TXT and VMX only enabled w/ TXT */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) && !tboot_enabled()) { printk(KERN_WARNING "kvm: disable TXT in the BIOS or " "activate TXT before enabling KVM\n"); return 1; } /* launched w/o TXT and VMX disabled */ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) && !tboot_enabled()) return 1; } return 0; } static void kvm_cpu_vmxon(u64 addr) { cr4_set_bits(X86_CR4_VMXE); intel_pt_handle_vmx(1); asm volatile (ASM_VMX_VMXON_RAX : : "a"(&addr), "m"(addr) : "memory", "cc"); } static int hardware_enable(void) { int cpu = raw_smp_processor_id(); u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); u64 old, test_bits; if (cr4_read_shadow() & X86_CR4_VMXE) return -EBUSY; INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); /* * Now we can enable the vmclear operation in kdump * since the loaded_vmcss_on_cpu list on this cpu * has been initialized. * * Though the cpu is not in VMX operation now, there * is no problem to enable the vmclear operation * for the loaded_vmcss_on_cpu list is empty! */ crash_enable_local_vmclear(cpu); rdmsrl(MSR_IA32_FEATURE_CONTROL, old); test_bits = FEATURE_CONTROL_LOCKED; test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; if (tboot_enabled()) test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; if ((old & test_bits) != test_bits) { /* enable and lock */ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); } kvm_cpu_vmxon(phys_addr); ept_sync_global(); return 0; } static void vmclear_local_loaded_vmcss(void) { int cpu = raw_smp_processor_id(); struct loaded_vmcs *v, *n; list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu), loaded_vmcss_on_cpu_link) __loaded_vmcs_clear(v); } /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() * tricks. 
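 *
 * (I.e. if VMXOFF faults because the CPU already left VMX operation
 * during an emergency reboot, the fault is fixed up and ignored
 * rather than taking the machine down.)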
*/ static void kvm_cpu_vmxoff(void) { asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); intel_pt_handle_vmx(0); cr4_clear_bits(X86_CR4_VMXE); } static void hardware_disable(void) { vmclear_local_loaded_vmcss(); kvm_cpu_vmxoff(); } static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result) { u32 vmx_msr_low, vmx_msr_high; u32 ctl = ctl_min | ctl_opt; rdmsr(msr, vmx_msr_low, vmx_msr_high); ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ /* Ensure minimum (required) set of control bits are supported. */ if (ctl_min & ~ctl) return -EIO; *result = ctl; return 0; } static __init bool allow_1_setting(u32 msr, u32 ctl) { u32 vmx_msr_low, vmx_msr_high; rdmsr(msr, vmx_msr_low, vmx_msr_high); return vmx_msr_high & ctl; } static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) { u32 vmx_msr_low, vmx_msr_high; u32 min, opt, min2, opt2; u32 _pin_based_exec_control = 0; u32 _cpu_based_exec_control = 0; u32 _cpu_based_2nd_exec_control = 0; u32 _vmexit_control = 0; u32 _vmentry_control = 0; min = CPU_BASED_HLT_EXITING | #ifdef CONFIG_X86_64 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | #endif CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_INVLPG_EXITING | CPU_BASED_RDPMC_EXITING; if (!kvm_mwait_in_guest()) min |= CPU_BASED_MWAIT_EXITING | CPU_BASED_MONITOR_EXITING; opt = CPU_BASED_TPR_SHADOW | CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, &_cpu_based_exec_control) < 0) return -EIO; #ifdef CONFIG_X86_64 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & ~CPU_BASED_CR8_STORE_EXITING; #endif if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { min2 = 0; opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_WBINVD_EXITING | SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_PAUSE_LOOP_EXITING | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_SHADOW_VMCS | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_RDSEED | SECONDARY_EXEC_RDRAND | SECONDARY_EXEC_ENABLE_PML | SECONDARY_EXEC_TSC_SCALING | SECONDARY_EXEC_ENABLE_VMFUNC; if (adjust_vmx_controls(min2, opt2, MSR_IA32_VMX_PROCBASED_CTLS2, &_cpu_based_2nd_exec_control) < 0) return -EIO; } #ifndef CONFIG_X86_64 if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; #endif if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) _cpu_based_2nd_exec_control &= ~( SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { /* CR3 accesses and invlpg don't need to cause VM Exits when EPT enabled */ _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | CPU_BASED_INVLPG_EXITING); rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, vmx_capability.ept, vmx_capability.vpid); } min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT; #ifdef CONFIG_X86_64 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; #endif opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT | VM_EXIT_CLEAR_BNDCFGS; if 
(adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, &_vmexit_control) < 0) return -EIO; min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, &_pin_based_exec_control) < 0) return -EIO; if (cpu_has_broken_vmx_preemption_timer()) _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)) _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR; min = VM_ENTRY_LOAD_DEBUG_CONTROLS; opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS; if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, &_vmentry_control) < 0) return -EIO; rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) return -EIO; #ifdef CONFIG_X86_64 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ if (vmx_msr_high & (1u<<16)) return -EIO; #endif /* Require Write-Back (WB) memory type for VMCS accesses. */ if (((vmx_msr_high >> 18) & 15) != 6) return -EIO; vmcs_conf->size = vmx_msr_high & 0x1fff; vmcs_conf->order = get_order(vmcs_conf->size); vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; vmcs_conf->revision_id = vmx_msr_low; vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; vmcs_conf->vmexit_ctrl = _vmexit_control; vmcs_conf->vmentry_ctrl = _vmentry_control; cpu_has_load_ia32_efer = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_EFER) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_EFER); cpu_has_load_perf_global_ctrl = allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL); /* * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL * but due to errata below it can't be used. Workaround is to use * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL. * * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32] * * AAK155 (model 26) * AAP115 (model 30) * AAT100 (model 37) * BC86,AAY89,BD102 (model 44) * BA97 (model 46) * */ if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) { switch (boot_cpu_data.x86_model) { case 26: case 30: case 37: case 44: case 46: cpu_has_load_perf_global_ctrl = false; printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " "does not work properly. 
Using workaround\n"); break; default: break; } } if (boot_cpu_has(X86_FEATURE_XSAVES)) rdmsrl(MSR_IA32_XSS, host_xss); return 0; } static struct vmcs *alloc_vmcs_cpu(int cpu) { int node = cpu_to_node(cpu); struct page *pages; struct vmcs *vmcs; pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); if (!pages) return NULL; vmcs = page_address(pages); memset(vmcs, 0, vmcs_config.size); vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ return vmcs; } static struct vmcs *alloc_vmcs(void) { return alloc_vmcs_cpu(raw_smp_processor_id()); } static void free_vmcs(struct vmcs *vmcs) { free_pages((unsigned long)vmcs, vmcs_config.order); } /* * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded */ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs) { if (!loaded_vmcs->vmcs) return; loaded_vmcs_clear(loaded_vmcs); free_vmcs(loaded_vmcs->vmcs); loaded_vmcs->vmcs = NULL; WARN_ON(loaded_vmcs->shadow_vmcs != NULL); } static void free_kvm_area(void) { int cpu; for_each_possible_cpu(cpu) { free_vmcs(per_cpu(vmxarea, cpu)); per_cpu(vmxarea, cpu) = NULL; } } enum vmcs_field_type { VMCS_FIELD_TYPE_U16 = 0, VMCS_FIELD_TYPE_U64 = 1, VMCS_FIELD_TYPE_U32 = 2, VMCS_FIELD_TYPE_NATURAL_WIDTH = 3 }; static inline int vmcs_field_type(unsigned long field) { if (0x1 & field) /* the *_HIGH fields are all 32 bit */ return VMCS_FIELD_TYPE_U32; return (field >> 13) & 0x3 ; } static inline int vmcs_field_readonly(unsigned long field) { return (((field >> 10) & 0x3) == 1); } static void init_vmcs_shadow_fields(void) { int i, j; /* No checks for read only fields yet */ for (i = j = 0; i < max_shadow_read_write_fields; i++) { switch (shadow_read_write_fields[i]) { case GUEST_BNDCFGS: if (!kvm_mpx_supported()) continue; break; default: break; } if (j < i) shadow_read_write_fields[j] = shadow_read_write_fields[i]; j++; } max_shadow_read_write_fields = j; /* shadowed fields guest access without vmexit */ for (i = 0; i < max_shadow_read_write_fields; i++) { unsigned long field = shadow_read_write_fields[i]; clear_bit(field, vmx_vmwrite_bitmap); clear_bit(field, vmx_vmread_bitmap); if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) { clear_bit(field + 1, vmx_vmwrite_bitmap); clear_bit(field + 1, vmx_vmread_bitmap); } } for (i = 0; i < max_shadow_read_only_fields; i++) { unsigned long field = shadow_read_only_fields[i]; clear_bit(field, vmx_vmread_bitmap); if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) clear_bit(field + 1, vmx_vmread_bitmap); } } static __init int alloc_kvm_area(void) { int cpu; for_each_possible_cpu(cpu) { struct vmcs *vmcs; vmcs = alloc_vmcs_cpu(cpu); if (!vmcs) { free_kvm_area(); return -ENOMEM; } per_cpu(vmxarea, cpu) = vmcs; } return 0; } static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, struct kvm_segment *save) { if (!emulate_invalid_guest_state) { /* * CS and SS RPL should be equal during guest entry according * to VMX spec, but in reality it is not always so. Since vcpu * is in the middle of the transition from real mode to * protected mode it is safe to assume that RPL 0 is a good * default value. */ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) save->selector &= ~SEGMENT_RPL_MASK; save->dpl = save->selector & SEGMENT_RPL_MASK; save->s = 1; } vmx_set_segment(vcpu, save, seg); } static void enter_pmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Update real mode segment cache. It may be not up-to-date if sement * register was written while vcpu was in a guest mode. 
*/ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 0; vmx_segment_cache_clear(vmx); vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); flags = vmcs_readl(GUEST_RFLAGS); flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); update_exception_bitmap(vcpu); fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); } static void fix_rmode_seg(int seg, struct kvm_segment *save) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; struct kvm_segment var = *save; var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; if (!emulate_invalid_guest_state) { var.selector = var.base >> 4; var.base = var.base & 0xffff0; var.limit = 0xffff; var.g = 0; var.db = 0; var.present = 1; var.s = 1; var.l = 0; var.unusable = 0; var.type = 0x3; var.avl = 0; if (save->base & 0xf) printk_once(KERN_WARNING "kvm: segment base is not " "paragraph aligned when entering " "protected mode (seg=%d)", seg); } vmcs_write16(sf->selector, var.selector); vmcs_writel(sf->base, var.base); vmcs_write32(sf->limit, var.limit); vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); } static void enter_rmode(struct kvm_vcpu *vcpu) { unsigned long flags; struct vcpu_vmx *vmx = to_vmx(vcpu); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); vmx->rmode.vm86_active = 1; /* * Very old userspace does not call KVM_SET_TSS_ADDR before entering * vcpu. Warn the user that an update is overdue. 
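 *
 * (A typical VMM does something like
 *
 *	ioctl(vm_fd, KVM_SET_TSS_ADDR, addr);
 *
 * once at VM creation, where addr is an unused 3-page window of
 * guest physical memory; init_rmode_tss() writes the real-mode
 * TSS there. vm_fd and addr are of course up to userspace.)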
*/ if (!vcpu->kvm->arch.tss_addr) printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " "called before entering vcpu\n"); vmx_segment_cache_clear(vmx); vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); flags = vmcs_readl(GUEST_RFLAGS); vmx->rmode.save_rflags = flags; flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); update_exception_bitmap(vcpu); fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); kvm_mmu_reset_context(vcpu); } static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); if (!msr) return; /* * Force kernel_gs_base reloading before EFER changes, as control * of this msr depends on is_long_mode(). */ vmx_load_host_state(to_vmx(vcpu)); vcpu->arch.efer = efer; if (efer & EFER_LMA) { vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer; } else { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); msr->data = efer & ~EFER_LME; } setup_msrs(vmx); } #ifdef CONFIG_X86_64 static void enter_lmode(struct kvm_vcpu *vcpu) { u32 guest_tr_ar; vmx_segment_cache_clear(to_vmx(vcpu)); guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) { pr_debug_ratelimited("%s: tss fixup for long mode. 
\n", __func__); vmcs_write32(GUEST_TR_AR_BYTES, (guest_tr_ar & ~VMX_AR_TYPE_MASK) | VMX_AR_TYPE_BUSY_64_TSS); } vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); } static void exit_lmode(struct kvm_vcpu *vcpu) { vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); } #endif static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid) { if (enable_ept) { if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa)); } else { vpid_sync_context(vpid); } } static void vmx_flush_tlb(struct kvm_vcpu *vcpu) { __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); } static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) { if (enable_ept) vmx_flush_tlb(vcpu); } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; vcpu->arch.cr0 &= ~cr0_guest_owned_bits; vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; } static void vmx_decache_cr3(struct kvm_vcpu *vcpu) { if (enable_ept && is_paging(vcpu)) vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); } static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) { ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; vcpu->arch.cr4 &= ~cr4_guest_owned_bits; vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; } static void ept_load_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty)) return; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); } } static void ept_save_pdptrs(struct kvm_vcpu *vcpu) { struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); } __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); } static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) { u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0; u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1; struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_UNRESTRICTED_GUEST && nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) fixed0 &= ~(X86_CR0_PE | X86_CR0_PG); return fixed_bits_valid(val, fixed0, fixed1); } static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val) { u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0; u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1; return fixed_bits_valid(val, fixed0, fixed1); } static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val) { u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed0; u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed1; return fixed_bits_valid(val, fixed0, fixed1); } /* No difference in the restrictions on guest and host CR4 in VMX operation. 
*/ #define nested_guest_cr4_valid nested_cr4_valid #define nested_host_cr4_valid nested_cr4_valid static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, struct kvm_vcpu *vcpu) { if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) vmx_decache_cr3(vcpu); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | (CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } else if (!is_paging(vcpu)) { /* From nonpaging to paging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING)); vcpu->arch.cr0 = cr0; vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); } if (!(cr0 & X86_CR0_WP)) *hw_cr0 &= ~X86_CR0_WP; } static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long hw_cr0; hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK); if (enable_unrestricted_guest) hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; else { hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) enter_pmode(vcpu); if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) enter_rmode(vcpu); } #ifdef CONFIG_X86_64 if (vcpu->arch.efer & EFER_LME) { if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) enter_lmode(vcpu); if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) exit_lmode(vcpu); } #endif if (enable_ept) ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); vmcs_writel(CR0_READ_SHADOW, cr0); vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; /* depends on vcpu->arch.cr0 to be set to a new value */ vmx->emulation_required = emulation_required(vcpu); } static int get_ept_level(struct kvm_vcpu *vcpu) { if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) return 5; return 4; } static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa) { u64 eptp = VMX_EPTP_MT_WB; eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4; if (enable_ept_ad_bits && (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu))) eptp |= VMX_EPTP_AD_ENABLE_BIT; eptp |= (root_hpa & PAGE_MASK); return eptp; } static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { unsigned long guest_cr3; u64 eptp; guest_cr3 = cr3; if (enable_ept) { eptp = construct_eptp(vcpu, cr3); vmcs_write64(EPT_POINTER, eptp); if (is_paging(vcpu) || is_guest_mode(vcpu)) guest_cr3 = kvm_read_cr3(vcpu); else guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; ept_load_pdptrs(vcpu); } vmx_flush_tlb(vcpu); vmcs_writel(GUEST_CR3, guest_cr3); } static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { /* * Pass through host's Machine Check Enable value to hw_cr4, which * is in force while we are in guest mode. Do not let guests control * this bit, even if host CR4.MCE == 0. */ unsigned long hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE) | (to_vmx(vcpu)->rmode.vm86_active ? KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); if (cr4 & X86_CR4_VMXE) { /* * To use VMXON (and later other VMX instructions), a guest * must first be able to turn on cr4.VMXE (see handle_vmon()). * So basically the check on whether to allow nested VMX * is here. 
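 *
 * (So a guest on a nested=0 host, or one whose CPUID lacks VMX,
 * gets a #GP when it tries to set CR4.VMXE: we return 1 and the
 * caller injects the fault.)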
*/ if (!nested_vmx_allowed(vcpu)) return 1; } if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) return 1; vcpu->arch.cr4 = cr4; if (enable_ept) { if (!is_paging(vcpu)) { hw_cr4 &= ~X86_CR4_PAE; hw_cr4 |= X86_CR4_PSE; } else if (!(cr4 & X86_CR4_PAE)) { hw_cr4 &= ~X86_CR4_PAE; } } if (!enable_unrestricted_guest && !is_paging(vcpu)) /* * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in * hardware. To emulate this behavior, SMEP/SMAP/PKU needs * to be manually disabled when guest switches to non-paging * mode. * * If !enable_unrestricted_guest, the CPU is always running * with CR0.PG=1 and CR4 needs to be modified. * If enable_unrestricted_guest, the CPU automatically * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0. */ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); return 0; } static void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 ar; if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { *var = vmx->rmode.segs[seg]; if (seg == VCPU_SREG_TR || var->selector == vmx_read_guest_seg_selector(vmx, seg)) return; var->base = vmx_read_guest_seg_base(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); return; } var->base = vmx_read_guest_seg_base(vmx, seg); var->limit = vmx_read_guest_seg_limit(vmx, seg); var->selector = vmx_read_guest_seg_selector(vmx, seg); ar = vmx_read_guest_seg_ar(vmx, seg); var->unusable = (ar >> 16) & 1; var->type = ar & 15; var->s = (ar >> 4) & 1; var->dpl = (ar >> 5) & 3; /* * Some userspaces do not preserve unusable property. Since usable * segment has to be present according to VMX spec we can use present * property to amend userspace bug by making unusable segment always * nonpresent. vmx_segment_access_rights() already marks nonpresent * segment as unusable. */ var->present = !var->unusable; var->avl = (ar >> 12) & 1; var->l = (ar >> 13) & 1; var->db = (ar >> 14) & 1; var->g = (ar >> 15) & 1; } static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment s; if (to_vmx(vcpu)->rmode.vm86_active) { vmx_get_segment(vcpu, &s, seg); return s.base; } return vmx_read_guest_seg_base(to_vmx(vcpu), seg); } static int vmx_get_cpl(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (unlikely(vmx->rmode.vm86_active)) return 0; else { int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); return VMX_AR_DPL(ar); } } static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; if (var->unusable || !var->present) ar = 1 << 16; else { ar = var->type & 15; ar |= (var->s & 1) << 4; ar |= (var->dpl & 3) << 5; ar |= (var->present & 1) << 7; ar |= (var->avl & 1) << 12; ar |= (var->l & 1) << 13; ar |= (var->db & 1) << 14; ar |= (var->g & 1) << 15; } return ar; } static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { struct vcpu_vmx *vmx = to_vmx(vcpu); const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; vmx_segment_cache_clear(vmx); if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { vmx->rmode.segs[seg] = *var; if (seg == VCPU_SREG_TR) vmcs_write16(sf->selector, var->selector); else if (var->s) fix_rmode_seg(seg, &vmx->rmode.segs[seg]); goto out; } vmcs_writel(sf->base, var->base); vmcs_write32(sf->limit, var->limit); vmcs_write16(sf->selector, var->selector); /* * Fix the "Accessed" bit in AR field of segment registers for older * qemu binaries. 
* IA32 arch specifies that at the time of processor reset the * "Accessed" bit in the AR field of segment registers is 1. And qemu * is setting it to 0 in the userland code. This causes invalid guest * state vmexit when "unrestricted guest" mode is turned on. * Fix for this setup issue in cpu_reset is being pushed in the qemu * tree. Newer qemu binaries with that qemu fix would not need this * kvm hack. */ if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) var->type |= 0x1; /* Accessed */ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); out: vmx->emulation_required = emulation_required(vcpu); } static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) { u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); *db = (ar >> 14) & 1; *l = (ar >> 13) & 1; } static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_IDTR_LIMIT); dt->address = vmcs_readl(GUEST_IDTR_BASE); } static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_IDTR_LIMIT, dt->size); vmcs_writel(GUEST_IDTR_BASE, dt->address); } static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { dt->size = vmcs_read32(GUEST_GDTR_LIMIT); dt->address = vmcs_readl(GUEST_GDTR_BASE); } static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) { vmcs_write32(GUEST_GDTR_LIMIT, dt->size); vmcs_writel(GUEST_GDTR_BASE, dt->address); } static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; u32 ar; vmx_get_segment(vcpu, &var, seg); var.dpl = 0x3; if (seg == VCPU_SREG_CS) var.type = 0x3; ar = vmx_segment_access_rights(&var); if (var.base != (var.selector << 4)) return false; if (var.limit != 0xffff) return false; if (ar != 0xf3) return false; return true; } static bool code_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment cs; unsigned int cs_rpl; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); cs_rpl = cs.selector & SEGMENT_RPL_MASK; if (cs.unusable) return false; if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK)) return false; if (!cs.s) return false; if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) { if (cs.dpl > cs_rpl) return false; } else { if (cs.dpl != cs_rpl) return false; } if (!cs.present) return false; /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ return true; } static bool stack_segment_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ss; unsigned int ss_rpl; vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); ss_rpl = ss.selector & SEGMENT_RPL_MASK; if (ss.unusable) return true; if (ss.type != 3 && ss.type != 7) return false; if (!ss.s) return false; if (ss.dpl != ss_rpl) /* DPL != RPL */ return false; if (!ss.present) return false; return true; } static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) { struct kvm_segment var; unsigned int rpl; vmx_get_segment(vcpu, &var, seg); rpl = var.selector & SEGMENT_RPL_MASK; if (var.unusable) return true; if (!var.s) return false; if (!var.present) return false; if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) { if (var.dpl < rpl) /* DPL < RPL */ return false; } /* TODO: Add other members to kvm_segment_field to allow checking for other access * rights flags */ return true; } static bool tr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment tr; vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); if (tr.unusable) return false; if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ return false; if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode 
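 * (type 3 is a busy 16-bit TSS, legal only outside IA-32e mode;
 *  type 11 is the busy 32-bit, or in IA-32e mode 64-bit, TSS)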
*/ return false; if (!tr.present) return false; return true; } static bool ldtr_valid(struct kvm_vcpu *vcpu) { struct kvm_segment ldtr; vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); if (ldtr.unusable) return true; if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ return false; if (ldtr.type != 2) return false; if (!ldtr.present) return false; return true; } static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) { struct kvm_segment cs, ss; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); return ((cs.selector & SEGMENT_RPL_MASK) == (ss.selector & SEGMENT_RPL_MASK)); } /* * Check if guest state is valid. Returns true if valid, false if * not. * We assume that registers are always usable */ static bool guest_state_valid(struct kvm_vcpu *vcpu) { if (enable_unrestricted_guest) return true; /* real mode guest state checks */ if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) { if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) return false; } else { /* protected mode guest state checks */ if (!cs_ss_rpl_check(vcpu)) return false; if (!code_segment_valid(vcpu)) return false; if (!stack_segment_valid(vcpu)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_DS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_ES)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_FS)) return false; if (!data_segment_valid(vcpu, VCPU_SREG_GS)) return false; if (!tr_valid(vcpu)) return false; if (!ldtr_valid(vcpu)) return false; } /* TODO: * - Add checks on RIP * - Add checks on RFLAGS */ return true; } static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) { return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu)); } static int init_rmode_tss(struct kvm *kvm) { gfn_t fn; u16 data = 0; int idx, r; idx = srcu_read_lock(&kvm->srcu); fn = kvm->arch.tss_addr >> PAGE_SHIFT; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; r = kvm_write_guest_page(kvm, fn++, &data, TSS_IOPB_BASE_OFFSET, sizeof(u16)); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); if (r < 0) goto out; r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); if (r < 0) goto out; data = ~0; r = kvm_write_guest_page(kvm, fn, &data, RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, sizeof(u8)); out: srcu_read_unlock(&kvm->srcu, idx); return r; } static int init_rmode_identity_map(struct kvm *kvm) { int i, idx, r = 0; kvm_pfn_t identity_map_pfn; u32 tmp; if (!enable_ept) return 0; /* Protect kvm->arch.ept_identity_pagetable_done. 
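 * (It also serializes building of the map itself: the loop below
 *  writes 1024 4 MB PSE entries, entry i mapping the region at
 *  i << 22 onto itself with present/rw/user/accessed/dirty set.)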
*/ mutex_lock(&kvm->slots_lock); if (likely(kvm->arch.ept_identity_pagetable_done)) goto out2; identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; r = alloc_identity_pagetable(kvm); if (r < 0) goto out2; idx = srcu_read_lock(&kvm->srcu); r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); if (r < 0) goto out; /* Set up identity-mapping pagetable for EPT in real mode */ for (i = 0; i < PT32_ENT_PER_PAGE; i++) { tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); r = kvm_write_guest_page(kvm, identity_map_pfn, &tmp, i * sizeof(tmp), sizeof(tmp)); if (r < 0) goto out; } kvm->arch.ept_identity_pagetable_done = true; out: srcu_read_unlock(&kvm->srcu, idx); out2: mutex_unlock(&kvm->slots_lock); return r; } static void seg_setup(int seg) { const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; unsigned int ar; vmcs_write16(sf->selector, 0); vmcs_writel(sf->base, 0); vmcs_write32(sf->limit, 0xffff); ar = 0x93; if (seg == VCPU_SREG_CS) ar |= 0x08; /* code segment */ vmcs_write32(sf->ar_bytes, ar); } static int alloc_apic_access_page(struct kvm *kvm) { struct page *page; int r = 0; mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page_done) goto out; r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); if (r) goto out; page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); if (is_error_page(page)) { r = -EFAULT; goto out; } /* * Do not pin the page in memory, so that memory hot-unplug * is able to migrate it. */ put_page(page); kvm->arch.apic_access_page_done = true; out: mutex_unlock(&kvm->slots_lock); return r; } static int alloc_identity_pagetable(struct kvm *kvm) { /* Called with kvm->slots_lock held. */ int r = 0; BUG_ON(kvm->arch.ept_identity_pagetable_done); r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, kvm->arch.ept_identity_map_addr, PAGE_SIZE); return r; } static int allocate_vpid(void) { int vpid; if (!enable_vpid) return 0; spin_lock(&vmx_vpid_lock); vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); if (vpid < VMX_NR_VPIDS) __set_bit(vpid, vmx_vpid_bitmap); else vpid = 0; spin_unlock(&vmx_vpid_lock); return vpid; } static void free_vpid(int vpid) { if (!enable_vpid || vpid == 0) return; spin_lock(&vmx_vpid_lock); __clear_bit(vpid, vmx_vpid_bitmap); spin_unlock(&vmx_vpid_lock); } #define MSR_TYPE_R 1 #define MSR_TYPE_W 2 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) return; /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { if (type & MSR_TYPE_R) /* read-low */ __clear_bit(msr, msr_bitmap + 0x000 / f); if (type & MSR_TYPE_W) /* write-low */ __clear_bit(msr, msr_bitmap + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R) /* read-high */ __clear_bit(msr, msr_bitmap + 0x400 / f); if (type & MSR_TYPE_W) /* write-high */ __clear_bit(msr, msr_bitmap + 0xc00 / f); } } /* * If a msr is allowed by L0, we should check whether it is allowed by L1. * The corresponding bit will be cleared unless both of L0 and L1 allow it. 
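 *
 * (I.e. pass-through for L2 requires pass-through in both bitmaps:
 * the merged bitmap starts out intercepting everything, and a bit
 * is cleared here only when L1's bitmap has it clear as well.)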
*/ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, unsigned long *msr_bitmap_nested, u32 msr, int type) { int f = sizeof(unsigned long); if (!cpu_has_vmx_msr_bitmap()) { WARN_ON(1); return; } /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. */ if (msr <= 0x1fff) { if (type & MSR_TYPE_R && !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) /* read-low */ __clear_bit(msr, msr_bitmap_nested + 0x000 / f); if (type & MSR_TYPE_W && !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) /* write-low */ __clear_bit(msr, msr_bitmap_nested + 0x800 / f); } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { msr &= 0x1fff; if (type & MSR_TYPE_R && !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) /* read-high */ __clear_bit(msr, msr_bitmap_nested + 0x400 / f); if (type & MSR_TYPE_W && !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) /* write-high */ __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); } } static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) { if (!longmode_only) __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr, MSR_TYPE_R | MSR_TYPE_W); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr, MSR_TYPE_R | MSR_TYPE_W); } static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active) { if (apicv_active) { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv, msr, type); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv, msr, type); } else { __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic, msr, type); __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic, msr, type); } } static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu) { return enable_apicv; } static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); gfn_t gfn; /* * Don't need to mark the APIC access page dirty; it is never * written to by the CPU during APIC virtualization. */ if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; kvm_vcpu_mark_page_dirty(vcpu, gfn); } if (nested_cpu_has_posted_intr(vmcs12)) { gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; kvm_vcpu_mark_page_dirty(vcpu, gfn); } } static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int max_irr; void *vapic_page; u16 status; if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) return; vmx->nested.pi_pending = false; if (!pi_test_and_clear_on(vmx->nested.pi_desc)) return; max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); if (max_irr != 256) { vapic_page = kmap(vmx->nested.virtual_apic_page); __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page); kunmap(vmx->nested.virtual_apic_page); status = vmcs_read16(GUEST_INTR_STATUS); if ((u8)max_irr > ((u8)status & 0xff)) { status &= ~0xff; status |= (u8)max_irr; vmcs_write16(GUEST_INTR_STATUS, status); } } nested_mark_vmcs12_pages_dirty(vcpu); } static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, bool nested) { #ifdef CONFIG_SMP int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR; if (vcpu->mode == IN_GUEST_MODE) { struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Currently, we don't support urgent interrupt, * all interrupts are recognized as non-urgent * interrupt, so we cannot post interrupts when * 'SN' is set. 
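 *
 * ('SN' is the suppress-notification bit in the posted-interrupt
 * descriptor; vmx_vcpu_pi_put() sets it when the vCPU is
 * preempted.)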
* * If the vcpu is in guest mode, it means it is * running instead of being scheduled out and * waiting in the run queue, and that's the only * case when 'SN' is set currently, warning if * 'SN' is set. */ WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); return true; } #endif return false; } static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { /* the PIR and ON have been set by L1. */ kvm_vcpu_trigger_posted_interrupt(vcpu, true); /* * If a posted intr is not recognized by hardware, * we will accomplish it in the next vmentry. */ vmx->nested.pi_pending = true; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } return -1; } /* * Send interrupt to vcpu via posted interrupt way. * 1. If target vcpu is running(non-root mode), send posted interrupt * notification to vcpu and hardware will sync PIR to vIRR atomically. * 2. If target vcpu isn't running(root mode), kick it to pick up the * interrupt from PIR in next vmentry. */ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { struct vcpu_vmx *vmx = to_vmx(vcpu); int r; r = vmx_deliver_nested_posted_interrupt(vcpu, vector); if (!r) return; if (pi_test_and_set_pir(vector, &vmx->pi_desc)) return; /* If a previous notification has sent the IPI, nothing to do. */ if (pi_test_and_set_on(&vmx->pi_desc)) return; if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false)) kvm_vcpu_kick(vcpu); } /* * Set up the vmcs's constant host-state fields, i.e., host-state fields that * will not change in the lifetime of the guest. * Note that host-state that does change is set elsewhere. E.g., host-state * that is set differently for each CPU is set in vmx_vcpu_load(), not here. */ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) { u32 low32, high32; unsigned long tmpl; struct desc_ptr dt; unsigned long cr0, cr3, cr4; cr0 = read_cr0(); WARN_ON(cr0 & X86_CR0_TS); vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */ /* * Save the most likely value for this task's CR3 in the VMCS. * We can't use __get_current_cr3_fast() because we're not atomic. */ cr3 = __read_cr3(); vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ vmx->host_state.vmcs_host_cr3 = cr3; /* Save the most likely value for this task's CR4 in the VMCS. */ cr4 = cr4_read_shadow(); vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ vmx->host_state.vmcs_host_cr4 = cr4; vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ #ifdef CONFIG_X86_64 /* * Load null selectors, so we can avoid reloading them in * __vmx_load_host_state(), in case userspace uses the null selectors * too (the expected case). 
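 *
 * (VM exit then leaves %ds/%es null, which is what a 64-bit
 * userspace normally runs with anyway, so the host-state restore
 * path usually has nothing to reload.)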
*/ vmcs_write16(HOST_DS_SELECTOR, 0); vmcs_write16(HOST_ES_SELECTOR, 0); #else vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ #endif vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ native_store_idt(&dt); vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ vmx->host_idt_base = dt.address; vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32); vmcs_write32(HOST_IA32_SYSENTER_CS, low32); rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl); vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { rdmsr(MSR_IA32_CR_PAT, low32, high32); vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); } } static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) { vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; if (enable_ept) vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; if (is_guest_mode(&vmx->vcpu)) vmx->vcpu.arch.cr4_guest_owned_bits &= ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); } static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) { u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl; if (!kvm_vcpu_apicv_active(&vmx->vcpu)) pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; /* Enable the preemption timer dynamically */ pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; return pin_based_exec_ctrl; } static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); if (cpu_has_secondary_exec_ctrls()) { if (kvm_vcpu_apicv_active(vcpu)) vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); else vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); } if (cpu_has_vmx_msr_bitmap()) vmx_set_msr_bitmap(vcpu); } static u32 vmx_exec_control(struct vcpu_vmx *vmx) { u32 exec_control = vmcs_config.cpu_based_exec_ctrl; if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) exec_control &= ~CPU_BASED_MOV_DR_EXITING; if (!cpu_need_tpr_shadow(&vmx->vcpu)) { exec_control &= ~CPU_BASED_TPR_SHADOW; #ifdef CONFIG_X86_64 exec_control |= CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING; #endif } if (!enable_ept) exec_control |= CPU_BASED_CR3_STORE_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_INVLPG_EXITING; return exec_control; } static bool vmx_rdrand_supported(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_RDRAND; } static bool vmx_rdseed_supported(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_RDSEED; } static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) { struct kvm_vcpu *vcpu = &vmx->vcpu; u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; if (!cpu_need_virtualize_apic_accesses(vcpu)) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; if (vmx->vpid == 0) exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; if (!enable_ept) { exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; enable_unrestricted_guest = 0; /* Enable INVPCID for non-ept guests may cause performance regression. 
*/ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; } if (!enable_unrestricted_guest) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; if (!ple_gap) exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; if (!kvm_vcpu_apicv_active(vcpu)) exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD (handle_vmptrld). We can NOT enable shadow_vmcs here because we don't have yet a current VMCS12 */ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; if (!enable_pml) exec_control &= ~SECONDARY_EXEC_ENABLE_PML; if (vmx_xsaves_supported()) { /* Exposing XSAVES only when XSAVE is exposed */ bool xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && guest_cpuid_has(vcpu, X86_FEATURE_XSAVES); if (!xsaves_enabled) exec_control &= ~SECONDARY_EXEC_XSAVES; if (nested) { if (xsaves_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_XSAVES; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_XSAVES; } } if (vmx_rdtscp_supported()) { bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP); if (!rdtscp_enabled) exec_control &= ~SECONDARY_EXEC_RDTSCP; if (nested) { if (rdtscp_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_RDTSCP; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_RDTSCP; } } if (vmx_invpcid_supported()) { /* Exposing INVPCID only when PCID is exposed */ bool invpcid_enabled = guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) && guest_cpuid_has(vcpu, X86_FEATURE_PCID); if (!invpcid_enabled) { exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID; guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID); } if (nested) { if (invpcid_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_INVPCID; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_ENABLE_INVPCID; } } if (vmx_rdrand_supported()) { bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND); if (rdrand_enabled) exec_control &= ~SECONDARY_EXEC_RDRAND; if (nested) { if (rdrand_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_RDRAND; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_RDRAND; } } if (vmx_rdseed_supported()) { bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED); if (rdseed_enabled) exec_control &= ~SECONDARY_EXEC_RDSEED; if (nested) { if (rdseed_enabled) vmx->nested.nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_RDSEED; else vmx->nested.nested_vmx_secondary_ctls_high &= ~SECONDARY_EXEC_RDSEED; } } vmx->secondary_exec_control = exec_control; } static void ept_set_mmio_spte_mask(void) { /* * EPT Misconfigurations can be generated if the value of bits 2:0 * of an EPT paging-structure entry is 110b (write/execute). */ kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK, VMX_EPT_MISCONFIG_WX_VALUE); } #define VMX_XSS_EXIT_BITMAP 0 /* * Sets up the vmcs for emulated real mode. 
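 *
 * Before that, an added illustrative sketch (hypothetical; the example_*
 * name is not in the original file): ept_set_mmio_spte_mask() above
 * relies on the reserved write+execute (110b) permission encoding to
 * make MMIO accesses fault with an EPT misconfiguration.  A predicate
 * for that encoding could look like this:
 */

static inline bool example_ept_entry_is_wx_misconfig(u64 entry)
{
	/* bits 2:0 are the R/W/X permission bits of an EPT entry */
	u64 rwx = entry & VMX_EPT_RWX_MASK;

	/* write+execute without read is the reserved 110b encoding */
	return rwx == VMX_EPT_MISCONFIG_WX_VALUE;
}

/*
 * (End of added sketch; the vmcs setup code resumes below.)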
*/ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) { #ifdef CONFIG_X86_64 unsigned long a; #endif int i; /* I/O */ vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a)); vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b)); if (enable_shadow_vmcs) { vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); } if (cpu_has_vmx_msr_bitmap()) vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ /* Control */ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); vmx->hv_deadline_tsc = -1; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); if (cpu_has_secondary_exec_ctrls()) { vmx_compute_secondary_exec_control(vmx); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, vmx->secondary_exec_control); } if (kvm_vcpu_apicv_active(&vmx->vcpu)) { vmcs_write64(EOI_EXIT_BITMAP0, 0); vmcs_write64(EOI_EXIT_BITMAP1, 0); vmcs_write64(EOI_EXIT_BITMAP2, 0); vmcs_write64(EOI_EXIT_BITMAP3, 0); vmcs_write16(GUEST_INTR_STATUS, 0); vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); } if (ple_gap) { vmcs_write32(PLE_GAP, ple_gap); vmx->ple_window = ple_window; vmx->ple_window_dirty = true; } vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ vmx_set_constant_host_state(vmx); #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ rdmsrl(MSR_GS_BASE, a); vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ #else vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ #endif if (cpu_has_vmx_vmfunc()) vmcs_write64(VM_FUNCTION_CONTROL, 0); vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { u32 index = vmx_msr_index[i]; u32 data_low, data_high; int j = vmx->nmsrs; if (rdmsr_safe(index, &data_low, &data_high) < 0) continue; if (wrmsr_safe(index, data_low, data_high) < 0) continue; vmx->guest_msrs[j].index = i; vmx->guest_msrs[j].data = 0; vmx->guest_msrs[j].mask = -1ull; ++vmx->nmsrs; } vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); /* 22.2.1, 20.8.1 */ vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS); set_cr4_guest_host_mask(vmx); if (vmx_xsaves_supported()) vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); if (enable_pml) { ASSERT(vmx->pml_pg); vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } return 0; } static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct msr_data apic_base_msr; u64 cr0; vmx->rmode.vm86_active = 0; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(vcpu, 0); if (!init_event) { apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_reset_bsp(vcpu)) apic_base_msr.data |= MSR_IA32_APICBASE_BSP; apic_base_msr.host_initiated = true; kvm_set_apic_base(vcpu, 
&apic_base_msr); } vmx_segment_cache_clear(vmx); seg_setup(VCPU_SREG_CS); vmcs_write16(GUEST_CS_SELECTOR, 0xf000); vmcs_writel(GUEST_CS_BASE, 0xffff0000ul); seg_setup(VCPU_SREG_DS); seg_setup(VCPU_SREG_ES); seg_setup(VCPU_SREG_FS); seg_setup(VCPU_SREG_GS); seg_setup(VCPU_SREG_SS); vmcs_write16(GUEST_TR_SELECTOR, 0); vmcs_writel(GUEST_TR_BASE, 0); vmcs_write32(GUEST_TR_LIMIT, 0xffff); vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); vmcs_write16(GUEST_LDTR_SELECTOR, 0); vmcs_writel(GUEST_LDTR_BASE, 0); vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); if (!init_event) { vmcs_write32(GUEST_SYSENTER_CS, 0); vmcs_writel(GUEST_SYSENTER_ESP, 0); vmcs_writel(GUEST_SYSENTER_EIP, 0); vmcs_write64(GUEST_IA32_DEBUGCTL, 0); } vmcs_writel(GUEST_RFLAGS, 0x02); kvm_rip_write(vcpu, 0xfff0); vmcs_writel(GUEST_GDTR_BASE, 0); vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); vmcs_writel(GUEST_IDTR_BASE, 0); vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0); setup_msrs(vmx); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ if (cpu_has_vmx_tpr_shadow() && !init_event) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); if (cpu_need_tpr_shadow(vcpu)) vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, __pa(vcpu->arch.apic->regs)); vmcs_write32(TPR_THRESHOLD, 0); } kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); if (kvm_vcpu_apicv_active(vcpu)) memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); if (vmx->vpid != 0) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; vmx->vcpu.arch.cr0 = cr0; vmx_set_cr0(vcpu, cr0); /* enter rmode */ vmx_set_cr4(vcpu, 0); vmx_set_efer(vcpu, 0); update_exception_bitmap(vcpu); vpid_sync_context(vmx->vpid); } /* * In nested virtualization, check if L1 asked to exit on external interrupts. * For most existing hypervisors, this will always return true. 
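 *
 * An added illustrative sketch (hypothetical; the example_* name is not
 * in the original file): the check below is one instance of a generic
 * pattern, namely consulting a pin-based control in the cached vmcs12
 * that L1 made current with VMPTRLD:
 */

static inline bool example_l1_pin_based_ctl_set(struct kvm_vcpu *vcpu,
						u32 pin_ctl)
{
	/* the cached vmcs12 holds L1's pin-based VM-execution controls */
	return (get_vmcs12(vcpu)->pin_based_vm_exec_control & pin_ctl) != 0;
}

/*
 * e.g. example_l1_pin_based_ctl_set(vcpu, PIN_BASED_EXT_INTR_MASK)
 * mirrors nested_exit_on_intr() below.  (End of added sketch.)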
*/ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_EXT_INTR_MASK; } /* * In nested virtualization, check if L1 has set * VM_EXIT_ACK_INTR_ON_EXIT */ static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_ACK_INTR_ON_EXIT; } static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) { return get_vmcs12(vcpu)->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING; } static void enable_irq_window(struct kvm_vcpu *vcpu) { vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_VIRTUAL_INTR_PENDING); } static void enable_nmi_window(struct kvm_vcpu *vcpu) { if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { enable_irq_window(vcpu); return; } vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_VIRTUAL_NMI_PENDING); } static void vmx_inject_irq(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); uint32_t intr; int irq = vcpu->arch.interrupt.nr; trace_kvm_inj_virq(irq); ++vcpu->stat.irq_injections; if (vmx->rmode.vm86_active) { int inc_eip = 0; if (vcpu->arch.interrupt.soft) inc_eip = vcpu->arch.event_exit_inst_len; if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } intr = irq | INTR_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { intr |= INTR_TYPE_SOFT_INTR; vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmx->vcpu.arch.event_exit_inst_len); } else intr |= INTR_TYPE_EXT_INTR; vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); } static void vmx_inject_nmi(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); ++vcpu->stat.nmi_injections; vmx->loaded_vmcs->nmi_known_unmasked = false; if (vmx->rmode.vm86_active) { if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); } static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); bool masked; if (vmx->loaded_vmcs->nmi_known_unmasked) return false; masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; vmx->loaded_vmcs->nmi_known_unmasked = !masked; return masked; } static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) { struct vcpu_vmx *vmx = to_vmx(vcpu); vmx->loaded_vmcs->nmi_known_unmasked = !masked; if (masked) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); else vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); } static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) { if (to_vmx(vcpu)->nested.nested_run_pending) return 0; return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | GUEST_INTR_STATE_NMI)); } static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) { return (!to_vmx(vcpu)->nested.nested_run_pending && vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); } static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) { int ret; ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, PAGE_SIZE * 3); if (ret) return ret; kvm->arch.tss_addr = addr; return init_rmode_tss(kvm); } static bool rmode_exception(struct kvm_vcpu *vcpu, int vec) { switch (vec) { case BP_VECTOR: /* * Update instruction length as we may reinject the exception * from user space while in guest debugging mode. 
*/ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return false; /* fall through */ case DB_VECTOR: if (vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) return false; /* fall through */ case DE_VECTOR: case OF_VECTOR: case BR_VECTOR: case UD_VECTOR: case DF_VECTOR: case SS_VECTOR: case GP_VECTOR: case MF_VECTOR: return true; break; } return false; } static int handle_rmode_exception(struct kvm_vcpu *vcpu, int vec, u32 err_code) { /* * Instruction with address size override prefix opcode 0x67 * Cause the #SS fault with 0 error code in VM86 mode. */ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; return kvm_vcpu_halt(vcpu); } return 1; } return 0; } /* * Forward all other exceptions that are valid in real mode. * FIXME: Breaks guest debugging in real mode, needs to be fixed with * the required debugging infrastructure rework. */ kvm_queue_exception(vcpu, vec); return 1; } /* * Trigger machine check on the host. We assume all the MSRs are already set up * by the CPU and that we still run on the same CPU as the MCE occurred on. * We pass a fake environment to the machine check handler because we want * the guest to be always treated like user space, no matter what context * it used internally. */ static void kvm_machine_check(void) { #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) struct pt_regs regs = { .cs = 3, /* Fake ring 3 no matter what the guest ran on */ .flags = X86_EFLAGS_IF, }; do_machine_check(&regs, 0); #endif } static int handle_machine_check(struct kvm_vcpu *vcpu) { /* already handled by vcpu_run */ return 1; } static int handle_exception(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_run *kvm_run = vcpu->run; u32 intr_info, ex_no, error_code; unsigned long cr2, rip, dr6; u32 vect_info; enum emulation_result er; vect_info = vmx->idt_vectoring_info; intr_info = vmx->exit_intr_info; if (is_machine_check(intr_info)) return handle_machine_check(vcpu); if (is_nmi(intr_info)) return 1; /* already handled by vmx_vcpu_run() */ if (is_invalid_opcode(intr_info)) { if (is_guest_mode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); return 1; } error_code = 0; if (intr_info & INTR_INFO_DELIVER_CODE_MASK) error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); /* * The #PF with PFEC.RSVD = 1 indicates the guest is accessing * MMIO, it is better to report an internal error. * See the comments in vmx_handle_exit. 
*/ if ((vect_info & VECTORING_INFO_VALID_MASK) && !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; vcpu->run->internal.ndata = 3; vcpu->run->internal.data[0] = vect_info; vcpu->run->internal.data[1] = intr_info; vcpu->run->internal.data[2] = error_code; return 0; } if (is_page_fault(intr_info)) { cr2 = vmcs_readl(EXIT_QUALIFICATION); /* EPT won't cause page fault directly */ WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept); return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0, true); } ex_no = intr_info & INTR_INFO_VECTOR_MASK; if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) return handle_rmode_exception(vcpu, ex_no, error_code); switch (ex_no) { case AC_VECTOR: kvm_queue_exception_e(vcpu, AC_VECTOR, error_code); return 1; case DB_VECTOR: dr6 = vmcs_readl(EXIT_QUALIFICATION); if (!(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; if (!(dr6 & ~DR6_RESERVED)) /* icebp */ skip_emulated_instruction(vcpu); kvm_queue_exception(vcpu, DB_VECTOR); return 1; } kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); /* fall through */ case BP_VECTOR: /* * Update instruction length as we may reinject #BP from * user space while in guest debugging mode. Reading it for * #DB as well causes no harm, it is not used in that case. */ vmx->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_run->exit_reason = KVM_EXIT_DEBUG; rip = kvm_rip_read(vcpu); kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; kvm_run->debug.arch.exception = ex_no; break; default: kvm_run->exit_reason = KVM_EXIT_EXCEPTION; kvm_run->ex.exception = ex_no; kvm_run->ex.error_code = error_code; break; } return 0; } static int handle_external_interrupt(struct kvm_vcpu *vcpu) { ++vcpu->stat.irq_exits; return 1; } static int handle_triple_fault(struct kvm_vcpu *vcpu) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; vcpu->mmio_needed = 0; return 0; } static int handle_io(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int size, in, string, ret; unsigned port; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); string = (exit_qualification & 16) != 0; in = (exit_qualification & 8) != 0; ++vcpu->stat.io_exits; if (string || in) return emulate_instruction(vcpu, 0) == EMULATE_DONE; port = exit_qualification >> 16; size = (exit_qualification & 7) + 1; ret = kvm_skip_emulated_instruction(vcpu); /* * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered * KVM_EXIT_DEBUG here. */ return kvm_fast_pio_out(vcpu, size, port) && ret; } static void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) { /* * Patch in the VMCALL instruction: */ hypercall[0] = 0x0f; hypercall[1] = 0x01; hypercall[2] = 0xc1; } /* called to set cr0 as appropriate for a mov-to-cr0 exit. */ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned long orig_val = val; /* * We get here when L2 changed cr0 in a way that did not change * any of L1's shadowed bits (see nested_vmx_exit_handled_cr), * but did change L0 shadowed bits. So we first calculate the * effective cr0 value that L1 would like to write into the * hardware. It consists of the L2-owned bits from the new * value combined with the L1-owned bits from L1's guest_cr0. 
*/ val = (val & ~vmcs12->cr0_guest_host_mask) | (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); if (!nested_guest_cr0_valid(vcpu, val)) return 1; if (kvm_set_cr0(vcpu, val)) return 1; vmcs_writel(CR0_READ_SHADOW, orig_val); return 0; } else { if (to_vmx(vcpu)->nested.vmxon && !nested_host_cr0_valid(vcpu, val)) return 1; return kvm_set_cr0(vcpu, val); } } static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) { if (is_guest_mode(vcpu)) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned long orig_val = val; /* analogously to handle_set_cr0 */ val = (val & ~vmcs12->cr4_guest_host_mask) | (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); if (kvm_set_cr4(vcpu, val)) return 1; vmcs_writel(CR4_READ_SHADOW, orig_val); return 0; } else return kvm_set_cr4(vcpu, val); } static int handle_cr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification, val; int cr; int reg; int err; int ret; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); cr = exit_qualification & 15; reg = (exit_qualification >> 8) & 15; switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ val = kvm_register_readl(vcpu, reg); trace_kvm_cr_write(cr, val); switch (cr) { case 0: err = handle_set_cr0(vcpu, val); return kvm_complete_insn_gp(vcpu, err); case 3: err = kvm_set_cr3(vcpu, val); return kvm_complete_insn_gp(vcpu, err); case 4: err = handle_set_cr4(vcpu, val); return kvm_complete_insn_gp(vcpu, err); case 8: { u8 cr8_prev = kvm_get_cr8(vcpu); u8 cr8 = (u8)val; err = kvm_set_cr8(vcpu, cr8); ret = kvm_complete_insn_gp(vcpu, err); if (lapic_in_kernel(vcpu)) return ret; if (cr8_prev <= cr8) return ret; /* * TODO: we might be squashing a * KVM_GUESTDBG_SINGLESTEP-triggered * KVM_EXIT_DEBUG here. */ vcpu->run->exit_reason = KVM_EXIT_SET_TPR; return 0; } } break; case 2: /* clts */ WARN_ONCE(1, "Guest should always own CR0.TS"); vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); return kvm_skip_emulated_instruction(vcpu); case 1: /*mov from cr*/ switch (cr) { case 3: val = kvm_read_cr3(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); return kvm_skip_emulated_instruction(vcpu); case 8: val = kvm_get_cr8(vcpu); kvm_register_write(vcpu, reg, val); trace_kvm_cr_read(cr, val); return kvm_skip_emulated_instruction(vcpu); } break; case 3: /* lmsw */ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); kvm_lmsw(vcpu, val); return kvm_skip_emulated_instruction(vcpu); default: break; } vcpu->run->exit_reason = 0; vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n", (int)(exit_qualification >> 4) & 3, cr); return 0; } static int handle_dr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; int dr, dr7, reg; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); dr = exit_qualification & DEBUG_REG_ACCESS_NUM; /* First, if DR does not exist, trigger UD */ if (!kvm_require_dr(vcpu, dr)) return 1; /* Do not handle if the CPL > 0, will trigger GP on re-entry */ if (!kvm_require_cpl(vcpu, 0)) return 1; dr7 = vmcs_readl(GUEST_DR7); if (dr7 & DR7_GD) { /* * As the vm-exit takes precedence over the debug trap, we * need to emulate the latter, either for the host or the * guest debugging itself. 
*/ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; vcpu->run->debug.arch.dr7 = dr7; vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); vcpu->run->debug.arch.exception = DB_VECTOR; vcpu->run->exit_reason = KVM_EXIT_DEBUG; return 0; } else { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= DR6_BD | DR6_RTM; kvm_queue_exception(vcpu, DB_VECTOR); return 1; } } if (vcpu->guest_debug == 0) { vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); /* * No more DR vmexits; force a reload of the debug registers * and reenter on this instruction. The next vmexit will * retrieve the full state of the debug registers. */ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; return 1; } reg = DEBUG_REG_ACCESS_REG(exit_qualification); if (exit_qualification & TYPE_MOV_FROM_DR) { unsigned long val; if (kvm_get_dr(vcpu, dr, &val)) return 1; kvm_register_write(vcpu, reg, val); } else if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) return 1; return kvm_skip_emulated_instruction(vcpu); } static u64 vmx_get_dr6(struct kvm_vcpu *vcpu) { return vcpu->arch.dr6; } static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) { } static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) { get_debugreg(vcpu->arch.db[0], 0); get_debugreg(vcpu->arch.db[1], 1); get_debugreg(vcpu->arch.db[2], 2); get_debugreg(vcpu->arch.db[3], 3); get_debugreg(vcpu->arch.dr6, 6); vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING); } static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) { vmcs_writel(GUEST_DR7, val); } static int handle_cpuid(struct kvm_vcpu *vcpu) { return kvm_emulate_cpuid(vcpu); } static int handle_rdmsr(struct kvm_vcpu *vcpu) { u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; struct msr_data msr_info; msr_info.index = ecx; msr_info.host_initiated = false; if (vmx_get_msr(vcpu, &msr_info)) { trace_kvm_msr_read_ex(ecx); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_read(ecx, msr_info.data); /* FIXME: handling of bits 32:63 of rax, rdx */ vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; return kvm_skip_emulated_instruction(vcpu); } static int handle_wrmsr(struct kvm_vcpu *vcpu) { struct msr_data msr; u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); msr.data = data; msr.index = ecx; msr.host_initiated = false; if (kvm_set_msr(vcpu, &msr) != 0) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(vcpu, 0); return 1; } trace_kvm_msr_write(ecx, data); return kvm_skip_emulated_instruction(vcpu); } static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) { kvm_apic_update_ppr(vcpu); return 1; } static int handle_interrupt_window(struct kvm_vcpu *vcpu) { vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_VIRTUAL_INTR_PENDING); kvm_make_request(KVM_REQ_EVENT, vcpu); ++vcpu->stat.irq_window_exits; return 1; } static int handle_halt(struct kvm_vcpu *vcpu) { return kvm_emulate_halt(vcpu); } static int handle_vmcall(struct kvm_vcpu *vcpu) { return kvm_emulate_hypercall(vcpu); } static int handle_invd(struct kvm_vcpu *vcpu) { return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_invlpg(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); kvm_mmu_invlpg(vcpu, exit_qualification); return kvm_skip_emulated_instruction(vcpu); } 
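
/*
 * Added illustrative sketch (hypothetical example_* helpers, not part
 * of the original file): handle_rdmsr() and handle_wrmsr() above split
 * a 64-bit MSR value across the low 32 bits of RAX and RDX.  The
 * packing arithmetic they rely on is:
 */

static inline u64 example_edx_eax_to_u64(u32 eax, u32 edx)
{
	/* WRMSR consumes EDX as the high half and EAX as the low half */
	return ((u64)edx << 32) | eax;
}

static inline void example_u64_to_edx_eax(u64 data, u32 *eax, u32 *edx)
{
	/* RDMSR returns the low half in EAX and the high half in EDX */
	*eax = (u32)(data & 0xffffffffu);
	*edx = (u32)(data >> 32);
}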
static int handle_rdpmc(struct kvm_vcpu *vcpu) { int err; err = kvm_rdpmc(vcpu); return kvm_complete_insn_gp(vcpu, err); } static int handle_wbinvd(struct kvm_vcpu *vcpu) { return kvm_emulate_wbinvd(vcpu); } static int handle_xsetbv(struct kvm_vcpu *vcpu) { u64 new_bv = kvm_read_edx_eax(vcpu); u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); if (kvm_set_xcr(vcpu, index, new_bv) == 0) return kvm_skip_emulated_instruction(vcpu); return 1; } static int handle_xsaves(struct kvm_vcpu *vcpu) { kvm_skip_emulated_instruction(vcpu); WARN(1, "this should never happen\n"); return 1; } static int handle_xrstors(struct kvm_vcpu *vcpu) { kvm_skip_emulated_instruction(vcpu); WARN(1, "this should never happen\n"); return 1; } static int handle_apic_access(struct kvm_vcpu *vcpu) { if (likely(fasteoi)) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int access_type, offset; access_type = exit_qualification & APIC_ACCESS_TYPE; offset = exit_qualification & APIC_ACCESS_OFFSET; /* * Sane guest uses MOV to write EOI, with written value * not cared. So make a short-circuit here by avoiding * heavy instruction emulation. */ if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) && (offset == APIC_EOI)) { kvm_lapic_set_eoi(vcpu); return kvm_skip_emulated_instruction(vcpu); } } return emulate_instruction(vcpu, 0) == EMULATE_DONE; } static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int vector = exit_qualification & 0xff; /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ kvm_apic_set_eoi_accelerated(vcpu, vector); return 1; } static int handle_apic_write(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 offset = exit_qualification & 0xfff; /* APIC-write VM exit is trap-like and thus no need to adjust IP */ kvm_apic_write_nodecode(vcpu, offset); return 1; } static int handle_task_switch(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qualification; bool has_error_code = false; u32 error_code = 0; u16 tss_selector; int reason, type, idt_v, idt_index; idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); reason = (u32)exit_qualification >> 30; if (reason == TASK_SWITCH_GATE && idt_v) { switch (type) { case INTR_TYPE_NMI_INTR: vcpu->arch.nmi_injected = false; vmx_set_nmi_mask(vcpu, true); break; case INTR_TYPE_EXT_INTR: case INTR_TYPE_SOFT_INTR: kvm_clear_interrupt_queue(vcpu); break; case INTR_TYPE_HARD_EXCEPTION: if (vmx->idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { has_error_code = true; error_code = vmcs_read32(IDT_VECTORING_ERROR_CODE); } /* fall through */ case INTR_TYPE_SOFT_EXCEPTION: kvm_clear_exception_queue(vcpu); break; default: break; } } tss_selector = exit_qualification; if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && type != INTR_TYPE_EXT_INTR && type != INTR_TYPE_NMI_INTR)) skip_emulated_instruction(vcpu); if (kvm_task_switch(vcpu, tss_selector, type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason, has_error_code, error_code) == EMULATE_FAIL) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; return 0; } /* * TODO: What about debug traps on tss switch? * Are we supposed to inject them and update dr6? 
*/ return 1; } static int handle_ept_violation(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; gpa_t gpa; u64 error_code; exit_qualification = vmcs_readl(EXIT_QUALIFICATION); /* * EPT violation happened while executing iret from NMI, * "blocked by NMI" bit has to be set before next VM entry. * There are errata that may cause this bit to not be set: * AAK134, BY25. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); trace_kvm_page_fault(gpa, exit_qualification); /* Is it a read fault? */ error_code = (exit_qualification & EPT_VIOLATION_ACC_READ) ? PFERR_USER_MASK : 0; /* Is it a write fault? */ error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE) ? PFERR_WRITE_MASK : 0; /* Is it a fetch fault? */ error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR) ? PFERR_FETCH_MASK : 0; /* ept page table entry is present? */ error_code |= (exit_qualification & (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE | EPT_VIOLATION_EXECUTABLE)) ? PFERR_PRESENT_MASK : 0; error_code |= (exit_qualification & 0x100) != 0 ? PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; vcpu->arch.exit_qualification = exit_qualification; return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); } static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { int ret; gpa_t gpa; /* * A nested guest cannot optimize MMIO vmexits, because we have an * nGPA here instead of the required GPA. */ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); if (!is_guest_mode(vcpu) && !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { trace_kvm_fast_mmio(gpa); return kvm_skip_emulated_instruction(vcpu); } ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); if (ret >= 0) return ret; /* It is the real ept misconfig */ WARN_ON(1); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; return 0; } static int handle_nmi_window(struct kvm_vcpu *vcpu) { vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_VIRTUAL_NMI_PENDING); ++vcpu->stat.nmi_window_exits; kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); enum emulation_result err = EMULATE_DONE; int ret = 1; u32 cpu_exec_ctrl; bool intr_window_requested; unsigned count = 130; cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; while (vmx->emulation_required && count-- != 0) { if (intr_window_requested && vmx_interrupt_allowed(vcpu)) return handle_interrupt_window(&vmx->vcpu); if (kvm_test_request(KVM_REQ_EVENT, vcpu)) return 1; err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); if (err == EMULATE_USER_EXIT) { ++vcpu->stat.mmio_exits; ret = 0; goto out; } if (err != EMULATE_DONE) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; return 0; } if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; ret = kvm_vcpu_halt(vcpu); goto out; } if (signal_pending(current)) goto out; if (need_resched()) schedule(); } out: return ret; } static int __grow_ple_window(int val) { if (ple_window_grow < 1) return ple_window; val = min(val, ple_window_actual_max); if (ple_window_grow < ple_window) val *= ple_window_grow; else val += ple_window_grow; return val; } static int 
__shrink_ple_window(int val, int modifier, int minimum) { if (modifier < 1) return ple_window; if (modifier < ple_window) val /= modifier; else val -= modifier; return max(val, minimum); } static void grow_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __grow_ple_window(old); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); } static void shrink_ple_window(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int old = vmx->ple_window; vmx->ple_window = __shrink_ple_window(old, ple_window_shrink, ple_window); if (vmx->ple_window != old) vmx->ple_window_dirty = true; trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); } /* * ple_window_actual_max is computed to be one grow_ple_window() below * ple_window_max. (See __grow_ple_window for the reason.) * This prevents overflows, because ple_window_max is int. * ple_window_max effectively rounded down to a multiple of ple_window_grow in * this process. * ple_window_max is also prevented from setting vmx->ple_window < ple_window. */ static void update_ple_window_actual_max(void) { ple_window_actual_max = __shrink_ple_window(max(ple_window_max, ple_window), ple_window_grow, INT_MIN); } /* * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR. */ static void wakeup_handler(void) { struct kvm_vcpu *vcpu; int cpu = smp_processor_id(); spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), blocked_vcpu_list) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); if (pi_test_on(pi_desc) == 1) kvm_vcpu_kick(vcpu); } spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); } void vmx_enable_tdp(void) { kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK, enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull, enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK, cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK, VMX_EPT_RWX_MASK, 0ull); ept_set_mmio_spte_mask(); kvm_enable_tdp(); } static __init int hardware_setup(void) { int r = -ENOMEM, i, msr; rdmsrl_safe(MSR_EFER, &host_efer); for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) kvm_define_shared_msr(i, vmx_msr_index[i]); for (i = 0; i < VMX_BITMAP_NR; i++) { vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL); if (!vmx_bitmap[i]) goto out; } vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); /* * Allow direct access to the PC debug port (it is often used for I/O * delays, but the vmexits simply slow things down). 
*/ memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); clear_bit(0x80, vmx_io_bitmap_a); memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); if (setup_vmcs_config(&vmcs_config) < 0) { r = -EIO; goto out; } if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) enable_vpid = 0; if (!cpu_has_vmx_shadow_vmcs()) enable_shadow_vmcs = 0; if (enable_shadow_vmcs) init_vmcs_shadow_fields(); if (!cpu_has_vmx_ept() || !cpu_has_vmx_ept_4levels() || !cpu_has_vmx_ept_mt_wb()) { enable_ept = 0; enable_unrestricted_guest = 0; enable_ept_ad_bits = 0; } if (!cpu_has_vmx_ept_ad_bits() || !enable_ept) enable_ept_ad_bits = 0; if (!cpu_has_vmx_unrestricted_guest()) enable_unrestricted_guest = 0; if (!cpu_has_vmx_flexpriority()) flexpriority_enabled = 0; /* * set_apic_access_page_addr() is used to reload apic access * page upon invalidation. No need to do anything if not * using the APIC_ACCESS_ADDR VMCS field. */ if (!flexpriority_enabled) kvm_x86_ops->set_apic_access_page_addr = NULL; if (!cpu_has_vmx_tpr_shadow()) kvm_x86_ops->update_cr8_intercept = NULL; if (enable_ept && !cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); if (!cpu_has_vmx_ple()) ple_gap = 0; if (!cpu_has_vmx_apicv()) { enable_apicv = 0; kvm_x86_ops->sync_pir_to_irr = NULL; } if (cpu_has_vmx_tsc_scaling()) { kvm_has_tsc_control = true; kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX; kvm_tsc_scaling_ratio_frac_bits = 48; } vmx_disable_intercept_for_msr(MSR_FS_BASE, false); vmx_disable_intercept_for_msr(MSR_GS_BASE, false); vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); memcpy(vmx_msr_bitmap_legacy_x2apic_apicv, vmx_msr_bitmap_legacy, PAGE_SIZE); memcpy(vmx_msr_bitmap_longmode_x2apic_apicv, vmx_msr_bitmap_longmode, PAGE_SIZE); memcpy(vmx_msr_bitmap_legacy_x2apic, vmx_msr_bitmap_legacy, PAGE_SIZE); memcpy(vmx_msr_bitmap_longmode_x2apic, vmx_msr_bitmap_longmode, PAGE_SIZE); set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ for (msr = 0x800; msr <= 0x8ff; msr++) { if (msr == 0x839 /* TMCCT */) continue; vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true); } /* * TPR reads and writes can be virtualized even if virtual interrupt * delivery is not in use. */ vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true); vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false); /* EOI */ vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true); /* SELF-IPI */ vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true); if (enable_ept) vmx_enable_tdp(); else kvm_disable_tdp(); update_ple_window_actual_max(); /* * Only enable PML when hardware supports PML feature, and both EPT * and EPT A/D bit features are enabled -- PML depends on them to work. 
*/ if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml()) enable_pml = 0; if (!enable_pml) { kvm_x86_ops->slot_enable_log_dirty = NULL; kvm_x86_ops->slot_disable_log_dirty = NULL; kvm_x86_ops->flush_log_dirty = NULL; kvm_x86_ops->enable_log_dirty_pt_masked = NULL; } if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { u64 vmx_msr; rdmsrl(MSR_IA32_VMX_MISC, vmx_msr); cpu_preemption_timer_multi = vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK; } else { kvm_x86_ops->set_hv_timer = NULL; kvm_x86_ops->cancel_hv_timer = NULL; } kvm_set_posted_intr_wakeup_handler(wakeup_handler); kvm_mce_cap_supported |= MCG_LMCE_P; return alloc_kvm_area(); out: for (i = 0; i < VMX_BITMAP_NR; i++) free_page((unsigned long)vmx_bitmap[i]); return r; } static __exit void hardware_unsetup(void) { int i; for (i = 0; i < VMX_BITMAP_NR; i++) free_page((unsigned long)vmx_bitmap[i]); free_kvm_area(); } /* * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE * exiting, so only get here on cpu with PAUSE-Loop-Exiting. */ static int handle_pause(struct kvm_vcpu *vcpu) { if (ple_gap) grow_ple_window(vcpu); /* * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting" * VM-execution control is ignored if CPL > 0. OTOH, KVM * never set PAUSE_EXITING and just set PLE if supported, * so the vcpu must be CPL=0 if it gets a PAUSE exit. */ kvm_vcpu_on_spin(vcpu, true); return kvm_skip_emulated_instruction(vcpu); } static int handle_nop(struct kvm_vcpu *vcpu) { return kvm_skip_emulated_instruction(vcpu); } static int handle_mwait(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); return handle_nop(vcpu); } static int handle_invalid_op(struct kvm_vcpu *vcpu) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } static int handle_monitor_trap(struct kvm_vcpu *vcpu) { return 1; } static int handle_monitor(struct kvm_vcpu *vcpu) { printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); return handle_nop(vcpu); } /* * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12. * We could reuse a single VMCS for all the L2 guests, but we also want the * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this * allows keeping them loaded on the processor, and in the future will allow * optimizations where prepare_vmcs02 doesn't need to set all the fields on * every entry if they never change. * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first. * * The following functions allocate and free a vmcs02 in this pool. */ /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) { struct vmcs02_list *item; list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) if (item->vmptr == vmx->nested.current_vmptr) { list_move(&item->list, &vmx->nested.vmcs02_pool); return &item->vmcs02; } if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { /* Recycle the least recently used VMCS. 
 */
		item = list_last_entry(&vmx->nested.vmcs02_pool,
				       struct vmcs02_list, list);
		item->vmptr = vmx->nested.current_vmptr;
		list_move(&item->list, &vmx->nested.vmcs02_pool);
		return &item->vmcs02;
	}

	/* Create a new VMCS */
	item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
	if (!item)
		return NULL;
	item->vmcs02.vmcs = alloc_vmcs();
	item->vmcs02.shadow_vmcs = NULL;
	if (!item->vmcs02.vmcs) {
		kfree(item);
		return NULL;
	}
	loaded_vmcs_init(&item->vmcs02);
	item->vmptr = vmx->nested.current_vmptr;
	list_add(&(item->list), &(vmx->nested.vmcs02_pool));
	vmx->nested.vmcs02_num++;
	return &item->vmcs02;
}

/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	struct vmcs02_list *item;
	list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
		if (item->vmptr == vmptr) {
			free_loaded_vmcs(&item->vmcs02);
			list_del(&item->list);
			kfree(item);
			vmx->nested.vmcs02_num--;
			return;
		}
}

/*
 * Free all VMCSs saved for this vcpu, except the one pointed by
 * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
 * must be &vmx->vmcs01.
 */
static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
{
	struct vmcs02_list *item, *n;

	WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
	list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
		/*
		 * Something will leak if the above WARN triggers.  Better than
		 * a use-after-free.
		 */
		if (vmx->loaded_vmcs == &item->vmcs02)
			continue;

		free_loaded_vmcs(&item->vmcs02);
		list_del(&item->list);
		kfree(item);
		vmx->nested.vmcs02_num--;
	}
}

/*
 * The following 3 functions, nested_vmx_succeed()/failInvalid()/failValid(),
 * set the success or error code of an emulated VMX instruction, as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions".
 */
static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
}

static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
}

static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
					u32 vm_instruction_error)
{
	if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
		/*
		 * failValid writes the error number to the current VMCS, which
		 * can't be done if there isn't a current VMCS.
		 */
		nested_vmx_failInvalid(vcpu);
		return;
	}
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed.
	 */
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: should not simply reset the guest here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}

/*
 * Decode the memory-address operand of a vmx instruction, as recorded on an
 * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and throws
 * #UD or #GP.
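 *
 * An added illustrative sketch of the effective-address arithmetic the
 * decoder below performs (hypothetical; the example_* name is not in
 * the original file):
 */

static inline gva_t example_vmx_operand_ea(unsigned long seg_base,
					   unsigned long displacement,
					   unsigned long base,
					   unsigned long index, int scaling,
					   bool addr_size_32)
{
	/* offset = displacement + base + index * 2^scaling */
	gva_t ea = seg_base + displacement + base + (index << scaling);

	/* a 32-bit address size truncates the resulting address */
	if (addr_size_32)
		ea &= 0xffffffff;
	return ea;
}

/*
 * (End of added sketch.)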
*/ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, u32 vmx_instruction_info, bool wr, gva_t *ret) { gva_t off; bool exn; struct kvm_segment s; /* * According to Vol. 3B, "Information for VM Exits Due to Instruction * Execution", on an exit, vmx_instruction_info holds most of the * addressing components of the operand. Only the displacement part * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). * For how an actual address is calculated from all these components, * refer to Vol. 1, "Operand Addressing". */ int scaling = vmx_instruction_info & 3; int addr_size = (vmx_instruction_info >> 7) & 7; bool is_reg = vmx_instruction_info & (1u << 10); int seg_reg = (vmx_instruction_info >> 15) & 7; int index_reg = (vmx_instruction_info >> 18) & 0xf; bool index_is_valid = !(vmx_instruction_info & (1u << 22)); int base_reg = (vmx_instruction_info >> 23) & 0xf; bool base_is_valid = !(vmx_instruction_info & (1u << 27)); if (is_reg) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } /* Addr = segment_base + offset */ /* offset = base + [index * scale] + displacement */ off = exit_qualification; /* holds the displacement */ if (base_is_valid) off += kvm_register_read(vcpu, base_reg); if (index_is_valid) off += kvm_register_read(vcpu, index_reg)<<scaling; vmx_get_segment(vcpu, &s, seg_reg); *ret = s.base + off; if (addr_size == 1) /* 32 bit */ *ret &= 0xffffffff; /* Checks for #GP/#SS exceptions. */ exn = false; if (is_long_mode(vcpu)) { /* Long mode: #GP(0)/#SS(0) if the memory address is in a * non-canonical form. This is the only check on the memory * destination for long mode! */ exn = is_noncanonical_address(*ret, vcpu); } else if (is_protmode(vcpu)) { /* Protected mode: apply checks for segment validity in the * following order: * - segment type check (#GP(0) may be thrown) * - usability check (#GP(0)/#SS(0)) * - limit check (#GP(0)/#SS(0)) */ if (wr) /* #GP(0) if the destination operand is located in a * read-only data segment or any code segment. */ exn = ((s.type & 0xa) == 0 || (s.type & 8)); else /* #GP(0) if the source operand is located in an * execute-only code segment */ exn = ((s.type & 0xa) == 8); if (exn) { kvm_queue_exception_e(vcpu, GP_VECTOR, 0); return 1; } /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. */ exn = (s.unusable != 0); /* Protected mode: #GP(0)/#SS(0) if the memory * operand is outside the segment limit. */ exn = exn || (off + sizeof(u64) > s.limit); } if (exn) { kvm_queue_exception_e(vcpu, seg_reg == VCPU_SREG_SS ? 
SS_VECTOR : GP_VECTOR, 0);
		return 1;
	}
	return 0;
}

static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
{
	gva_t gva;
	struct x86_exception e;

	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
		return 1;

	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
				sizeof(*vmpointer), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	return 0;
}

static int enter_vmx_operation(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs *shadow_vmcs;

	if (cpu_has_vmx_msr_bitmap()) {
		vmx->nested.msr_bitmap =
				(unsigned long *)__get_free_page(GFP_KERNEL);
		if (!vmx->nested.msr_bitmap)
			goto out_msr_bitmap;
	}

	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
	if (!vmx->nested.cached_vmcs12)
		goto out_cached_vmcs12;

	if (enable_shadow_vmcs) {
		shadow_vmcs = alloc_vmcs();
		if (!shadow_vmcs)
			goto out_shadow_vmcs;
		/* mark vmcs as shadow */
		shadow_vmcs->revision_id |= (1u << 31);
		/* init shadow vmcs */
		vmcs_clear(shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = shadow_vmcs;
	}

	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
	vmx->nested.vmcs02_num = 0;

	hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;

	vmx->nested.vmxon = true;
	return 0;

out_shadow_vmcs:
	kfree(vmx->nested.cached_vmcs12);

out_cached_vmcs12:
	free_page((unsigned long)vmx->nested.msr_bitmap);

out_msr_bitmap:
	return -ENOMEM;
}

/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not save or even
 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
 * do not currently need to store anything in that guest-allocated memory
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
 * argument is different from the VMXON pointer (which the spec says they do).
 */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
	int ret;
	gpa_t vmptr;
	struct page *page;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

	/*
	 * The Intel VMX Instruction Reference lists a bunch of bits that are
	 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
	 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
	 * Otherwise, we should fail with #UD.  But most faulting conditions
	 * have already been checked by hardware, prior to the VM-exit for
	 * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
	 * that bit set to 1 in non-root mode.
*/ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (vmx->nested.vmxon) { nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); return kvm_skip_emulated_instruction(vcpu); } if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { kvm_inject_gp(vcpu, 0); return 1; } if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; /* * SDM 3: 24.11.5 * The first 4 bytes of VMXON region contain the supported * VMCS revision identifier * * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; * which replaces physical address width with 32 */ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } page = kvm_vcpu_gpa_to_page(vcpu, vmptr); if (is_error_page(page)) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } if (*(u32 *)kmap(page) != VMCS12_REVISION) { kunmap(page); kvm_release_page_clean(page); nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } kunmap(page); kvm_release_page_clean(page); vmx->nested.vmxon_ptr = vmptr; ret = enter_vmx_operation(vcpu); if (ret) return ret; nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } /* * Intel's VMX Instruction Reference specifies a common set of prerequisites * for running VMX instructions (except VMXON, whose prerequisites are * slightly different). It also specifies what exception to inject otherwise. * Note that many of these exceptions have priority over VM exits, so they * don't have to be checked again here. */ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) { if (!to_vmx(vcpu)->nested.vmxon) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } return 1; } static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) { vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); vmcs_write64(VMCS_LINK_POINTER, -1ull); } static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) { if (vmx->nested.current_vmptr == -1ull) return; if (enable_shadow_vmcs) { /* copy to memory all shadowed fields in case they were modified */ copy_shadow_to_vmcs12(vmx); vmx->nested.sync_shadow_vmcs = false; vmx_disable_shadow_vmcs(vmx); } vmx->nested.posted_intr_nv = -1; /* Flush VMCS12 to guest memory */ kvm_vcpu_write_guest_page(&vmx->vcpu, vmx->nested.current_vmptr >> PAGE_SHIFT, vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); vmx->nested.current_vmptr = -1ull; } /* * Free whatever needs to be freed from vmx->nested when L1 goes down, or * just stops using VMX. 
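 *
 * The teardown below repeatedly uses one idiom: drop the reference on a
 * pinned page and clear the cached pointer so it cannot be used after
 * the release.  An added illustrative sketch of that idiom
 * (hypothetical; the example_* name is not in the original file):
 */

static inline void example_release_pinned_page(struct page **pagep)
{
	if (*pagep) {
		/* drop the reference taken when the page was mapped in */
		kvm_release_page_dirty(*pagep);
		/* clear the stale pointer to avoid a use-after-free */
		*pagep = NULL;
	}
}

/*
 * (End of added sketch.)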
*/ static void free_nested(struct vcpu_vmx *vmx) { if (!vmx->nested.vmxon) return; vmx->nested.vmxon = false; free_vpid(vmx->nested.vpid02); vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; if (vmx->nested.msr_bitmap) { free_page((unsigned long)vmx->nested.msr_bitmap); vmx->nested.msr_bitmap = NULL; } if (enable_shadow_vmcs) { vmx_disable_shadow_vmcs(vmx); vmcs_clear(vmx->vmcs01.shadow_vmcs); free_vmcs(vmx->vmcs01.shadow_vmcs); vmx->vmcs01.shadow_vmcs = NULL; } kfree(vmx->nested.cached_vmcs12); /* Unpin physical memory we referred to in current vmcs02 */ if (vmx->nested.apic_access_page) { kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { kvm_release_page_dirty(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } if (vmx->nested.pi_desc_page) { kunmap(vmx->nested.pi_desc_page); kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; vmx->nested.pi_desc = NULL; } nested_free_all_saved_vmcss(vmx); } /* Emulate the VMXOFF instruction */ static int handle_vmoff(struct kvm_vcpu *vcpu) { if (!nested_vmx_check_permission(vcpu)) return 1; free_nested(to_vmx(vcpu)); nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } /* Emulate the VMCLEAR instruction */ static int handle_vmclear(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 zero = 0; gpa_t vmptr; if (!nested_vmx_check_permission(vcpu)) return 1; if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); return kvm_skip_emulated_instruction(vcpu); } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); return kvm_skip_emulated_instruction(vcpu); } if (vmptr == vmx->nested.current_vmptr) nested_release_vmcs12(vmx); kvm_vcpu_write_guest(vcpu, vmptr + offsetof(struct vmcs12, launch_state), &zero, sizeof(zero)); nested_free_vmcs02(vmx, vmptr); nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); /* Emulate the VMLAUNCH instruction */ static int handle_vmlaunch(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, true); } /* Emulate the VMRESUME instruction */ static int handle_vmresume(struct kvm_vcpu *vcpu) { return nested_vmx_run(vcpu, false); } /* * Read a vmcs12 field. Since these can have varying lengths and we return * one type, we chose the biggest type (u64) and zero-extend the return value * to that size. Note that the caller, handle_vmread, might need to use only * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of * 64-bit fields are to be returned). 
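 *
 * An added illustrative sketch of the zero-extension this implies
 * (hypothetical; the example_* name is not in the original file): a 16-
 * or 32-bit field read through a narrow pointer widens into the u64
 * return value with the high bits cleared:
 */

static inline u64 example_zero_extend_field(const void *field, int bytes)
{
	switch (bytes) {
	case 2:
		return *(const u16 *)field;	/* high 48 bits become 0 */
	case 4:
		return *(const u32 *)field;	/* high 32 bits become 0 */
	default:
		return *(const u64 *)field;
	}
}

/*
 * (End of added sketch.)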
*/ static inline int vmcs12_read_any(struct kvm_vcpu *vcpu, unsigned long field, u64 *ret) { short offset = vmcs_field_to_offset(field); char *p; if (offset < 0) return offset; p = ((char *)(get_vmcs12(vcpu))) + offset; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_NATURAL_WIDTH: *ret = *((natural_width *)p); return 0; case VMCS_FIELD_TYPE_U16: *ret = *((u16 *)p); return 0; case VMCS_FIELD_TYPE_U32: *ret = *((u32 *)p); return 0; case VMCS_FIELD_TYPE_U64: *ret = *((u64 *)p); return 0; default: WARN_ON(1); return -ENOENT; } } static inline int vmcs12_write_any(struct kvm_vcpu *vcpu, unsigned long field, u64 field_value){ short offset = vmcs_field_to_offset(field); char *p = ((char *) get_vmcs12(vcpu)) + offset; if (offset < 0) return offset; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: *(u16 *)p = field_value; return 0; case VMCS_FIELD_TYPE_U32: *(u32 *)p = field_value; return 0; case VMCS_FIELD_TYPE_U64: *(u64 *)p = field_value; return 0; case VMCS_FIELD_TYPE_NATURAL_WIDTH: *(natural_width *)p = field_value; return 0; default: WARN_ON(1); return -ENOENT; } } static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) { int i; unsigned long field; u64 field_value; struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; const unsigned long *fields = shadow_read_write_fields; const int num_fields = max_shadow_read_write_fields; preempt_disable(); vmcs_load(shadow_vmcs); for (i = 0; i < num_fields; i++) { field = fields[i]; switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: field_value = vmcs_read16(field); break; case VMCS_FIELD_TYPE_U32: field_value = vmcs_read32(field); break; case VMCS_FIELD_TYPE_U64: field_value = vmcs_read64(field); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: field_value = vmcs_readl(field); break; default: WARN_ON(1); continue; } vmcs12_write_any(&vmx->vcpu, field, field_value); } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); preempt_enable(); } static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) { const unsigned long *fields[] = { shadow_read_write_fields, shadow_read_only_fields }; const int max_fields[] = { max_shadow_read_write_fields, max_shadow_read_only_fields }; int i, q; unsigned long field; u64 field_value = 0; struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; vmcs_load(shadow_vmcs); for (q = 0; q < ARRAY_SIZE(fields); q++) { for (i = 0; i < max_fields[q]; i++) { field = fields[q][i]; vmcs12_read_any(&vmx->vcpu, field, &field_value); switch (vmcs_field_type(field)) { case VMCS_FIELD_TYPE_U16: vmcs_write16(field, (u16)field_value); break; case VMCS_FIELD_TYPE_U32: vmcs_write32(field, (u32)field_value); break; case VMCS_FIELD_TYPE_U64: vmcs_write64(field, (u64)field_value); break; case VMCS_FIELD_TYPE_NATURAL_WIDTH: vmcs_writel(field, (long)field_value); break; default: WARN_ON(1); break; } } } vmcs_clear(shadow_vmcs); vmcs_load(vmx->loaded_vmcs->vmcs); } /* * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was * used before) all generate the same failure when it is missing. 
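 *
 * Concretely, nested_vmx_check_vmcs12() below reports VMfailInvalid
 * whenever current_vmptr is still -1ull, i.e. when no VMPTRLD has
 * succeeded since VMXON (or since the last VMCLEAR of the current VMCS).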
*/ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmx->nested.current_vmptr == -1ull) { nested_vmx_failInvalid(vcpu); return 0; } return 1; } static int handle_vmread(struct kvm_vcpu *vcpu) { unsigned long field; u64 field_value; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t gva = 0; if (!nested_vmx_check_permission(vcpu)) return 1; if (!nested_vmx_check_vmcs12(vcpu)) return kvm_skip_emulated_instruction(vcpu); /* Decode instruction info and find the field to read */ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); /* Read the field, zero-extended to a u64 field_value */ if (vmcs12_read_any(vcpu, field, &field_value) < 0) { nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); return kvm_skip_emulated_instruction(vcpu); } /* * Now copy part of this value to register or memory, as requested. * Note that the number of bits actually copied is 32 or 64 depending * on the guest's mode (32 or 64 bit), not on the given field's length. */ if (vmx_instruction_info & (1u << 10)) { kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), field_value); } else { if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &gva)) return 1; /* _system ok, as hardware has verified cpl=0 */ kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL); } nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } static int handle_vmwrite(struct kvm_vcpu *vcpu) { unsigned long field; gva_t gva; unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); /* The value to write might be 32 or 64 bits, depending on L1's long * mode, and eventually we need to write that into a field of several * possible lengths. The code below first zero-extends the value to 64 * bit (field_value), and then copies only the appropriate number of * bits into the vmcs12 field. */ u64 field_value = 0; struct x86_exception e; if (!nested_vmx_check_permission(vcpu)) return 1; if (!nested_vmx_check_vmcs12(vcpu)) return kvm_skip_emulated_instruction(vcpu); if (vmx_instruction_info & (1u << 10)) field_value = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 3) & 0xf)); else { if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &field_value, (is_64_bit_mode(vcpu) ? 
8 : 4), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } } field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); if (vmcs_field_readonly(field)) { nested_vmx_failValid(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); return kvm_skip_emulated_instruction(vcpu); } if (vmcs12_write_any(vcpu, field, field_value) < 0) { nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); return kvm_skip_emulated_instruction(vcpu); } nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) { vmx->nested.current_vmptr = vmptr; if (enable_shadow_vmcs) { vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS); vmcs_write64(VMCS_LINK_POINTER, __pa(vmx->vmcs01.shadow_vmcs)); vmx->nested.sync_shadow_vmcs = true; } } /* Emulate the VMPTRLD instruction */ static int handle_vmptrld(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t vmptr; if (!nested_vmx_check_permission(vcpu)) return 1; if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); return kvm_skip_emulated_instruction(vcpu); } if (vmptr == vmx->nested.vmxon_ptr) { nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); return kvm_skip_emulated_instruction(vcpu); } if (vmx->nested.current_vmptr != vmptr) { struct vmcs12 *new_vmcs12; struct page *page; page = kvm_vcpu_gpa_to_page(vcpu, vmptr); if (is_error_page(page)) { nested_vmx_failInvalid(vcpu); return kvm_skip_emulated_instruction(vcpu); } new_vmcs12 = kmap(page); if (new_vmcs12->revision_id != VMCS12_REVISION) { kunmap(page); kvm_release_page_clean(page); nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); return kvm_skip_emulated_instruction(vcpu); } nested_release_vmcs12(vmx); /* * Load VMCS12 from guest memory since it is not already * cached. 
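			 *
			 * Sketch of the cache lifecycle as implemented here:
			 * VMPTRLD copies the guest page into cached_vmcs12,
			 * VMREAD and VMWRITE (vmcs12_read_any and
			 * vmcs12_write_any) touch only the cached copy, and
			 * nested_release_vmcs12() writes it back to guest
			 * memory.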
*/ memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); kunmap(page); kvm_release_page_clean(page); set_current_vmptr(vmx, vmptr); } nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } /* Emulate the VMPTRST instruction */ static int handle_vmptrst(struct kvm_vcpu *vcpu) { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); gva_t vmcs_gva; struct x86_exception e; if (!nested_vmx_check_permission(vcpu)) return 1; if (get_vmx_mem_address(vcpu, exit_qualification, vmx_instruction_info, true, &vmcs_gva)) return 1; /* ok to use *_system, as hardware has verified cpl=0 */ if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, (void *)&to_vmx(vcpu)->nested.current_vmptr, sizeof(u64), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } /* Emulate the INVEPT instruction */ static int handle_invept(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 vmx_instruction_info, types; unsigned long type; gva_t gva; struct x86_exception e; struct { u64 eptp, gpa; } operand; if (!(vmx->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) || !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (!nested_vmx_check_permission(vcpu)) return 1; vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; if (type >= 32 || !(types & (1 << type))) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } switch (type) { case VMX_EPT_EXTENT_GLOBAL: /* * TODO: track mappings and invalidate * single context requests appropriately */ case VMX_EPT_EXTENT_CONTEXT: kvm_mmu_sync_roots(vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); nested_vmx_succeed(vcpu); break; default: BUG_ON(1); break; } return kvm_skip_emulated_instruction(vcpu); } static int handle_invvpid(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 vmx_instruction_info; unsigned long type, types; gva_t gva; struct x86_exception e; struct { u64 vpid; u64 gla; } operand; if (!(vmx->nested.nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_VPID) || !(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } if (!nested_vmx_check_permission(vcpu)) return 1; vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); types = (vmx->nested.nested_vmx_vpid_caps & VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; if (type >= 32 || !(types & (1 << type))) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } /* according to the intel vmx instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) */ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), vmx_instruction_info, false, &gva)) return 1; if 
(kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, sizeof(operand), &e)) { kvm_inject_page_fault(vcpu, &e); return 1; } if (operand.vpid >> 16) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: if (is_noncanonical_address(operand.gla, vcpu)) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } /* fall through */ case VMX_VPID_EXTENT_SINGLE_CONTEXT: case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: if (!operand.vpid) { nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); return kvm_skip_emulated_instruction(vcpu); } break; case VMX_VPID_EXTENT_ALL_CONTEXT: break; default: WARN_ON_ONCE(1); return kvm_skip_emulated_instruction(vcpu); } __vmx_flush_tlb(vcpu, vmx->nested.vpid02); nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } static int handle_pml_full(struct kvm_vcpu *vcpu) { unsigned long exit_qualification; trace_kvm_pml_full(vcpu->vcpu_id); exit_qualification = vmcs_readl(EXIT_QUALIFICATION); /* * PML buffer FULL happened while executing iret from NMI, * "blocked by NMI" bit has to be set before next VM entry. */ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && (exit_qualification & INTR_INFO_UNBLOCK_NMI)) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); /* * PML buffer already flushed at beginning of VMEXIT. Nothing to do * here.., and there's no userspace involvement needed for PML. */ return 1; } static int handle_preemption_timer(struct kvm_vcpu *vcpu) { kvm_lapic_expired_hv_timer(vcpu); return 1; } static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) { struct vcpu_vmx *vmx = to_vmx(vcpu); int maxphyaddr = cpuid_maxphyaddr(vcpu); /* Check for memory type validity */ switch (address & VMX_EPTP_MT_MASK) { case VMX_EPTP_MT_UC: if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_UC_BIT)) return false; break; case VMX_EPTP_MT_WB: if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPTP_WB_BIT)) return false; break; default: return false; } /* only 4 levels page-walk length are valid */ if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4) return false; /* Reserved bits should not be set */ if (address >> maxphyaddr || ((address >> 7) & 0x1f)) return false; /* AD, if set, should be supported */ if (address & VMX_EPTP_AD_ENABLE_BIT) { if (!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_AD_BIT)) return false; } return true; } static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { u32 index = vcpu->arch.regs[VCPU_REGS_RCX]; u64 address; bool accessed_dirty; struct kvm_mmu *mmu = vcpu->arch.walk_mmu; if (!nested_cpu_has_eptp_switching(vmcs12) || !nested_cpu_has_ept(vmcs12)) return 1; if (index >= VMFUNC_EPTP_ENTRIES) return 1; if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, &address, index * 8, 8)) return 1; accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT); /* * If the (L2) guest does a vmfunc to the currently * active ept pointer, we don't have to do anything else */ if (vmcs12->ept_pointer != address) { if (!valid_ept_address(vcpu, address)) return 1; kvm_mmu_unload(vcpu); mmu->ept_ad = accessed_dirty; mmu->base_role.ad_disabled = !accessed_dirty; vmcs12->ept_pointer = address; /* * TODO: Check what's the correct approach in case * mmu reload fails. 
Currently, we just let the next * reload potentially fail */ kvm_mmu_reload(vcpu); } return 0; } static int handle_vmfunc(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12; u32 function = vcpu->arch.regs[VCPU_REGS_RAX]; /* * VMFUNC is only supported for nested guests, but we always enable the * secondary control for simplicity; for non-nested mode, fake that we * didn't by injecting #UD. */ if (!is_guest_mode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } vmcs12 = get_vmcs12(vcpu); if ((vmcs12->vm_function_control & (1 << function)) == 0) goto fail; switch (function) { case 0: if (nested_vmx_eptp_switching(vcpu, vmcs12)) goto fail; break; default: goto fail; } return kvm_skip_emulated_instruction(vcpu); fail: nested_vmx_vmexit(vcpu, vmx->exit_reason, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; } /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs * to be done to userspace and return 0. */ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_EXCEPTION_NMI] = handle_exception, [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, [EXIT_REASON_IO_INSTRUCTION] = handle_io, [EXIT_REASON_CR_ACCESS] = handle_cr, [EXIT_REASON_DR_ACCESS] = handle_dr, [EXIT_REASON_CPUID] = handle_cpuid, [EXIT_REASON_MSR_READ] = handle_rdmsr, [EXIT_REASON_MSR_WRITE] = handle_wrmsr, [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, [EXIT_REASON_HLT] = handle_halt, [EXIT_REASON_INVD] = handle_invd, [EXIT_REASON_INVLPG] = handle_invlpg, [EXIT_REASON_RDPMC] = handle_rdpmc, [EXIT_REASON_VMCALL] = handle_vmcall, [EXIT_REASON_VMCLEAR] = handle_vmclear, [EXIT_REASON_VMLAUNCH] = handle_vmlaunch, [EXIT_REASON_VMPTRLD] = handle_vmptrld, [EXIT_REASON_VMPTRST] = handle_vmptrst, [EXIT_REASON_VMREAD] = handle_vmread, [EXIT_REASON_VMRESUME] = handle_vmresume, [EXIT_REASON_VMWRITE] = handle_vmwrite, [EXIT_REASON_VMOFF] = handle_vmoff, [EXIT_REASON_VMON] = handle_vmon, [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, [EXIT_REASON_APIC_ACCESS] = handle_apic_access, [EXIT_REASON_APIC_WRITE] = handle_apic_write, [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced, [EXIT_REASON_WBINVD] = handle_wbinvd, [EXIT_REASON_XSETBV] = handle_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, [EXIT_REASON_INVEPT] = handle_invept, [EXIT_REASON_INVVPID] = handle_invvpid, [EXIT_REASON_RDRAND] = handle_invalid_op, [EXIT_REASON_RDSEED] = handle_invalid_op, [EXIT_REASON_XSAVES] = handle_xsaves, [EXIT_REASON_XRSTORS] = handle_xrstors, [EXIT_REASON_PML_FULL] = handle_pml_full, [EXIT_REASON_VMFUNC] = handle_vmfunc, [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, }; static const int kvm_vmx_max_exit_handlers = ARRAY_SIZE(kvm_vmx_exit_handlers); static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { unsigned long exit_qualification; gpa_t bitmap, last_bitmap; unsigned int port; int size; u8 b; if 
(!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;

	last_bitmap = (gpa_t)-1;
	b = -1;

	while (size > 0) {
		if (port < 0x8000)
			bitmap = vmcs12->io_bitmap_a;
		else if (port < 0x10000)
			bitmap = vmcs12->io_bitmap_b;
		else
			return true;
		bitmap += (port & 0x7fff) / 8;

		if (last_bitmap != bitmap)
			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
				return true;
		if (b & (1 << (port & 7)))
			return true;

		port++;
		size--;
		last_bitmap = bitmap;
	}

	return false;
}

/*
 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
 * rather than handle it ourselves in L0. I.e., check whether L1 asked,
 * via its MSR bitmap, to intercept the current event (a read or write of
 * a specific MSR); L1 may do so even when L0 itself doesn't use MSR bitmaps.
 */
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12, u32 exit_reason)
{
	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
	gpa_t bitmap;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return true;

	/*
	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
	 * for the four combinations of read/write and low/high MSR numbers.
	 * First we need to figure out which of the four to use:
	 */
	bitmap = vmcs12->msr_bitmap;
	if (exit_reason == EXIT_REASON_MSR_WRITE)
		bitmap += 2048;
	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
		bitmap += 1024;
	}

	/* Then read the msr_index'th bit from this bitmap: */
	if (msr_index < 1024*8) {
		unsigned char b;
		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
			return true;
		return 1 & (b >> (msr_index & 7));
	} else
		return true; /* let L1 handle the wrong parameter */
}

/*
 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
 * intercept (via guest_host_mask etc.) the current event.
 */
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	int cr = exit_qualification & 15;
	int reg;
	unsigned long val;

	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		reg = (exit_qualification >> 8) & 15;
		val = kvm_register_readl(vcpu, reg);
		switch (cr) {
		case 0:
			if (vmcs12->cr0_guest_host_mask &
			    (val ^ vmcs12->cr0_read_shadow))
				return true;
			break;
		case 3:
			if ((vmcs12->cr3_target_count >= 1 &&
					vmcs12->cr3_target_value0 == val) ||
				(vmcs12->cr3_target_count >= 2 &&
					vmcs12->cr3_target_value1 == val) ||
				(vmcs12->cr3_target_count >= 3 &&
					vmcs12->cr3_target_value2 == val) ||
				(vmcs12->cr3_target_count >= 4 &&
					vmcs12->cr3_target_value3 == val))
				return false;
			if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
				return true;
			break;
		case 4:
			if (vmcs12->cr4_guest_host_mask &
			    (vmcs12->cr4_read_shadow ^ val))
				return true;
			break;
		case 8:
			if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
				return true;
			break;
		}
		break;
	case 2: /* clts */
		if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
		    (vmcs12->cr0_read_shadow & X86_CR0_TS))
			return true;
		break;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR3_STORE_EXITING)
				return true;
			break;
		case 8:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR8_STORE_EXITING)
				return true;
			break;
		}
		break;
	case 3: /* lmsw */
		/*
		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
		 * cr0. Other attempted changes are ignored, with no exit.
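		 *
		 * Worked example (hypothetical values): with
		 * cr0_guest_host_mask = 0x9, cr0_read_shadow = 0x0 and an
		 * lmsw source of 0x1, the 0xe-masked test below is false (no
		 * L1-owned bit in 1..3 changes), but the bit-0 clause fires
		 * (owned, shadow 0, new value 1), so we exit to L1.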
*/ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; if (vmcs12->cr0_guest_host_mask & 0xe & (val ^ vmcs12->cr0_read_shadow)) return true; if ((vmcs12->cr0_guest_host_mask & 0x1) && !(vmcs12->cr0_read_shadow & 0x1) && (val & 0x1)) return true; break; } return false; } /* * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we * should handle it ourselves in L0 (and then continue L2). Only call this * when in is_guest_mode (L2). */ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) { u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, vmcs_readl(EXIT_QUALIFICATION), vmx->idt_vectoring_info, intr_info, vmcs_read32(VM_EXIT_INTR_ERROR_CODE), KVM_ISA_VMX); /* * The host physical addresses of some pages of guest memory * are loaded into VMCS02 (e.g. L1's Virtual APIC Page). The CPU * may write to these pages via their host physical address while * L2 is running, bypassing any address-translation-based dirty * tracking (e.g. EPT write protection). * * Mark them dirty on every exit from L2 to prevent them from * getting out of sync with dirty tracking. */ nested_mark_vmcs12_pages_dirty(vcpu); if (vmx->nested.nested_run_pending) return false; if (unlikely(vmx->fail)) { pr_info_ratelimited("%s failed vm entry %x\n", __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); return true; } switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: if (is_nmi(intr_info)) return false; else if (is_page_fault(intr_info)) return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; else if (is_no_device(intr_info) && !(vmcs12->guest_cr0 & X86_CR0_TS)) return false; else if (is_debug(intr_info) && vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) return false; else if (is_breakpoint(intr_info) && vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) return false; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); case EXIT_REASON_EXTERNAL_INTERRUPT: return false; case EXIT_REASON_TRIPLE_FAULT: return true; case EXIT_REASON_PENDING_INTERRUPT: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); case EXIT_REASON_NMI_WINDOW: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); case EXIT_REASON_TASK_SWITCH: return true; case EXIT_REASON_CPUID: return true; case EXIT_REASON_HLT: return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); case EXIT_REASON_INVD: return true; case EXIT_REASON_INVLPG: return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); case EXIT_REASON_RDPMC: return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); case EXIT_REASON_RDRAND: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND); case EXIT_REASON_RDSEED: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED); case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: /* * VMX instructions trap unconditionally. This allows L1 to * emulate them for its L2 guest, i.e., allows 3-level nesting! 
 */
		return true;
	case EXIT_REASON_CR_ACCESS:
		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
	case EXIT_REASON_DR_ACCESS:
		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
	case EXIT_REASON_IO_INSTRUCTION:
		return nested_vmx_exit_handled_io(vcpu, vmcs12);
	case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
	case EXIT_REASON_INVALID_STATE:
		return true;
	case EXIT_REASON_MWAIT_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
	case EXIT_REASON_MONITOR_TRAP_FLAG:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
	case EXIT_REASON_MONITOR_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
	case EXIT_REASON_PAUSE_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
			nested_cpu_has2(vmcs12,
				SECONDARY_EXEC_PAUSE_LOOP_EXITING);
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return false;
	case EXIT_REASON_TPR_BELOW_THRESHOLD:
		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
	case EXIT_REASON_APIC_ACCESS:
		return nested_cpu_has2(vmcs12,
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
	case EXIT_REASON_APIC_WRITE:
	case EXIT_REASON_EOI_INDUCED:
		/* apic_write and eoi_induced should exit unconditionally. */
		return true;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault().
		 */
		return false;
	case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never directly uses L1's EPT, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table are L0's fault.
		 */
		return false;
	case EXIT_REASON_INVPCID:
		return
			nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
			nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_WBINVD:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
	case EXIT_REASON_XSETBV:
		return true;
	case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
		/*
		 * This should never happen, since it is not possible to
		 * set XSS to a non-zero value in either L1 or L2.
		 * If it were, XSS would have to be checked against
		 * the XSS exit bitmap in vmcs12.
		 */
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
	case EXIT_REASON_PREEMPTION_TIMER:
		return false;
	case EXIT_REASON_PML_FULL:
		/* We emulate PML support to L1. */
		return false;
	case EXIT_REASON_VMFUNC:
		/* VM functions are emulated through L2->L0 vmexits. */
		return false;
	default:
		return true;
	}
}

static int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	/*
	 * At this point, the exit interruption info in exit_intr_info
	 * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT
	 * we need to query the in-kernel LAPIC.
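	 *
	 * Hence the WARN_ON below: callers must not reflect
	 * EXTERNAL_INTERRUPT exits through this path.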
 */
	WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);

	if ((exit_intr_info &
	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	}

	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}

static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	*info1 = vmcs_readl(EXIT_QUALIFICATION);
	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}

static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
	if (vmx->pml_pg) {
		__free_page(vmx->pml_pg);
		vmx->pml_pg = NULL;
	}
}

static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 *pml_buf;
	u16 pml_idx;

	pml_idx = vmcs_read16(GUEST_PML_INDEX);

	/* Do nothing if PML buffer is empty */
	if (pml_idx == (PML_ENTITY_NUM - 1))
		return;

	/* PML index always points to next available PML buffer entity */
	if (pml_idx >= PML_ENTITY_NUM)
		pml_idx = 0;
	else
		pml_idx++;

	pml_buf = page_address(vmx->pml_pg);
	for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
		u64 gpa;

		gpa = pml_buf[pml_idx];
		WARN_ON(gpa & (PAGE_SIZE - 1));
		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	}

	/* reset PML index */
	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}

/*
 * Flush all vcpus' PML buffers and propagate the logged GPAs into
 * dirty_bitmap. Called before reporting dirty_bitmap to userspace.
 */
static void kvm_flush_pml_buffers(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;
	/*
	 * We only need to kick each vcpu out of guest mode here, as the PML
	 * buffer is flushed at the beginning of every VMEXIT and only vcpus
	 * running in guest mode can have unflushed GPAs in their PML buffers.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}

static void vmx_dump_sel(char *name, uint32_t sel)
{
	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
	       name, vmcs_read16(sel),
	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
}

static void vmx_dump_dtsel(char *name, uint32_t limit)
{
	pr_err("%s limit=0x%08x, base=0x%016lx\n",
	       name, vmcs_read32(limit),
	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
}

static void dump_vmcs(void)
{
	u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
	u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
	u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
	u32 secondary_exec_control = 0;
	unsigned long cr4 = vmcs_readl(GUEST_CR4);
	u64 efer = vmcs_read64(GUEST_IA32_EFER);
	int i, n;

	if (cpu_has_secondary_exec_ctrls())
		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);

	pr_err("*** Guest State ***\n");
	pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
	       vmcs_readl(CR0_GUEST_HOST_MASK));
	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       cr4, vmcs_readl(CR4_READ_SHADOW),
	       vmcs_readl(CR4_GUEST_HOST_MASK));
	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
	    (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA)) {
		pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
		pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
	}
	pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP)); pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n", vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7)); pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", vmcs_readl(GUEST_SYSENTER_ESP), vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP)); vmx_dump_sel("CS: ", GUEST_CS_SELECTOR); vmx_dump_sel("DS: ", GUEST_DS_SELECTOR); vmx_dump_sel("SS: ", GUEST_SS_SELECTOR); vmx_dump_sel("ES: ", GUEST_ES_SELECTOR); vmx_dump_sel("FS: ", GUEST_FS_SELECTOR); vmx_dump_sel("GS: ", GUEST_GS_SELECTOR); vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT); vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR); vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT); vmx_dump_sel("TR: ", GUEST_TR_SELECTOR); if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) || (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER))) pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", efer, vmcs_read64(GUEST_IA32_PAT)); pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n", vmcs_read64(GUEST_IA32_DEBUGCTL), vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS)); if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) pr_err("PerfGlobCtl = 0x%016llx\n", vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL)); if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS)); pr_err("Interruptibility = %08x ActivityState = %08x\n", vmcs_read32(GUEST_INTERRUPTIBILITY_INFO), vmcs_read32(GUEST_ACTIVITY_STATE)); if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) pr_err("InterruptStatus = %04x\n", vmcs_read16(GUEST_INTR_STATUS)); pr_err("*** Host State ***\n"); pr_err("RIP = 0x%016lx RSP = 0x%016lx\n", vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP)); pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n", vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR), vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR), vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR), vmcs_read16(HOST_TR_SELECTOR)); pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n", vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE), vmcs_readl(HOST_TR_BASE)); pr_err("GDTBase=%016lx IDTBase=%016lx\n", vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE)); pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n", vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3), vmcs_readl(HOST_CR4)); pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n", vmcs_readl(HOST_IA32_SYSENTER_ESP), vmcs_read32(HOST_IA32_SYSENTER_CS), vmcs_readl(HOST_IA32_SYSENTER_EIP)); if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER)) pr_err("EFER = 0x%016llx PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_EFER), vmcs_read64(HOST_IA32_PAT)); if (vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) pr_err("PerfGlobCtl = 0x%016llx\n", vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL)); pr_err("*** Control State ***\n"); pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n", pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control); pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl); pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n", vmcs_read32(EXCEPTION_BITMAP), vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK), vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH)); pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n", vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE), vmcs_read32(VM_ENTRY_INSTRUCTION_LEN)); pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n", vmcs_read32(VM_EXIT_INTR_INFO), vmcs_read32(VM_EXIT_INTR_ERROR_CODE), vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); pr_err(" 
reason=%08x qualification=%016lx\n",
	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
		pr_err("TSC Multiplier = 0x%016llx\n",
		       vmcs_read64(TSC_MULTIPLIER));
	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
		pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
	n = vmcs_read32(CR3_TARGET_COUNT);
	for (i = 0; i + 1 < n; i += 4)
		pr_err("CR3 target%u=%016lx target%u=%016lx\n",
		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
		       i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
	if (i < n)
		pr_err("CR3 target%u=%016lx\n",
		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
		pr_err("PLE Gap=%08x Window=%08x\n",
		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
		pr_err("Virtual processor ID = 0x%04x\n",
		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
}

/*
 * The guest has exited. See if we can fix it or if we need userspace
 * assistance.
 */
static int vmx_handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason = vmx->exit_reason;
	u32 vectoring_info = vmx->idt_vectoring_info;

	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);

	/*
	 * Flush the PML buffer of logged GPAs so that dirty_bitmap is up to
	 * date. A side benefit: before querying dirty_bitmap,
	 * kvm_vm_ioctl_get_dirty_log only needs to kick all vcpus out of
	 * guest mode, because once a vcpu is back in root mode its PML
	 * buffer must already have been flushed.
	 */
	if (enable_pml)
		vmx_flush_pml_buffer(vcpu);

	/* If guest state is invalid, start emulating */
	if (vmx->emulation_required)
		return handle_invalid_guest_state(vcpu);

	if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
		return nested_vmx_reflect_vmexit(vcpu, exit_reason);

	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
		dump_vmcs();
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= exit_reason;
		return 0;
	}

	if (unlikely(vmx->fail)) {
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= vmcs_read32(VM_INSTRUCTION_ERROR);
		return 0;
	}

	/*
	 * Note:
	 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a
	 * delivery event, since it indicates the guest is accessing MMIO.
	 * The VM exit would just trigger again after returning to the guest,
	 * causing an infinite loop.
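	 *
	 * Instead, such exits are surfaced to userspace below as
	 * KVM_EXIT_INTERNAL_ERROR with suberror
	 * KVM_INTERNAL_ERROR_DELIVERY_EV, carrying the vectoring info, exit
	 * reason and exit qualification (plus GUEST_PHYSICAL_ADDRESS for the
	 * EPT misconfig case) as payload.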
*/ if ((vectoring_info & VECTORING_INFO_VALID_MASK) && (exit_reason != EXIT_REASON_EXCEPTION_NMI && exit_reason != EXIT_REASON_EPT_VIOLATION && exit_reason != EXIT_REASON_PML_FULL && exit_reason != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; vcpu->run->internal.ndata = 3; vcpu->run->internal.data[0] = vectoring_info; vcpu->run->internal.data[1] = exit_reason; vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; if (exit_reason == EXIT_REASON_EPT_MISCONFIG) { vcpu->run->internal.ndata++; vcpu->run->internal.data[3] = vmcs_read64(GUEST_PHYSICAL_ADDRESS); } return 0; } if (exit_reason < kvm_vmx_max_exit_handlers && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); else { vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason); kvm_queue_exception(vcpu, UD_VECTOR); return 1; } } static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); if (is_guest_mode(vcpu) && nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return; if (irr == -1 || tpr < irr) { vmcs_write32(TPR_THRESHOLD, 0); return; } vmcs_write32(TPR_THRESHOLD, irr); } static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) { u32 sec_exec_control; /* Postpone execution until vmcs01 is the current VMCS. */ if (is_guest_mode(vcpu)) { to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true; return; } if (!cpu_has_vmx_virtualize_x2apic_mode()) return; if (!cpu_need_tpr_shadow(vcpu)) return; sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); if (set) { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; } else { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; vmx_flush_tlb_ept_only(vcpu); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); vmx_set_msr_bitmap(vcpu); } static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) { struct vcpu_vmx *vmx = to_vmx(vcpu); /* * Currently we do not handle the nested case where L2 has an * APIC access page of its own; that page is still pinned. * Hence, we skip the case where the VCPU is in guest mode _and_ * L1 prepared an APIC access page for L2. * * For the case where L1 and L2 share the same APIC access page * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear * in the vmcs12), this function will only update either the vmcs01 * or the vmcs02. If the former, the vmcs02 will be updated by * prepare_vmcs02. If the latter, the vmcs01 will be updated in * the next L2->L1 exit. 
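	 *
	 * The condition below implements exactly that: APIC_ACCESS_ADDR is
	 * only written when we are not in guest mode, or when L1 did not
	 * enable SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES in vmcs12.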
*/ if (!is_guest_mode(vcpu) || !nested_cpu_has2(get_vmcs12(&vmx->vcpu), SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { vmcs_write64(APIC_ACCESS_ADDR, hpa); vmx_flush_tlb_ept_only(vcpu); } } static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) { u16 status; u8 old; if (max_isr == -1) max_isr = 0; status = vmcs_read16(GUEST_INTR_STATUS); old = status >> 8; if (max_isr != old) { status &= 0xff; status |= max_isr << 8; vmcs_write16(GUEST_INTR_STATUS, status); } } static void vmx_set_rvi(int vector) { u16 status; u8 old; if (vector == -1) vector = 0; status = vmcs_read16(GUEST_INTR_STATUS); old = (u8)status & 0xff; if ((u8)vector != old) { status &= ~0xff; status |= (u8)vector; vmcs_write16(GUEST_INTR_STATUS, status); } } static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) { if (!is_guest_mode(vcpu)) { vmx_set_rvi(max_irr); return; } if (max_irr == -1) return; /* * In guest mode. If a vmexit is needed, vmx_check_nested_events * handles it. */ if (nested_exit_on_intr(vcpu)) return; /* * Else, fall back to pre-APICv interrupt injection since L2 * is run without virtual interrupt delivery. */ if (!kvm_event_needs_reinjection(vcpu) && vmx_interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, max_irr, false); vmx_inject_irq(vcpu); } } static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int max_irr; WARN_ON(!vcpu->arch.apicv_active); if (pi_test_on(&vmx->pi_desc)) { pi_clear_on(&vmx->pi_desc); /* * IOMMU can write to PIR.ON, so the barrier matters even on UP. * But on x86 this is just a compiler barrier anyway. */ smp_mb__after_atomic(); max_irr = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); } else { max_irr = kvm_lapic_find_highest_irr(vcpu); } vmx_hwapic_irr_update(vcpu, max_irr); return max_irr; } static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) { if (!kvm_vcpu_apicv_active(vcpu)) return; vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]); vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]); vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]); vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]); } static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); pi_clear_on(&vmx->pi_desc); memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); } static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) { u32 exit_intr_info = 0; u16 basic_exit_reason = (u16)vmx->exit_reason; if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI)) return; if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); vmx->exit_intr_info = exit_intr_info; /* if exit due to PF check for async PF */ if (is_page_fault(exit_intr_info)) vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); /* Handle machine checks before interrupts are enabled */ if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY || is_machine_check(exit_intr_info)) kvm_machine_check(); /* We need to handle NMIs before interrupts are enabled */ if (is_nmi(exit_intr_info)) { kvm_before_handle_nmi(&vmx->vcpu); asm("int $2"); kvm_after_handle_nmi(&vmx->vcpu); } } static void vmx_handle_external_intr(struct kvm_vcpu *vcpu) { u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); register void *__sp asm(_ASM_SP); if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) { unsigned int vector; unsigned long entry; gate_desc *desc; struct vcpu_vmx *vmx 
= to_vmx(vcpu); #ifdef CONFIG_X86_64 unsigned long tmp; #endif vector = exit_intr_info & INTR_INFO_VECTOR_MASK; desc = (gate_desc *)vmx->host_idt_base + vector; entry = gate_offset(desc); asm volatile( #ifdef CONFIG_X86_64 "mov %%" _ASM_SP ", %[sp]\n\t" "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t" "push $%c[ss]\n\t" "push %[sp]\n\t" #endif "pushf\n\t" __ASM_SIZE(push) " $%c[cs]\n\t" "call *%[entry]\n\t" : #ifdef CONFIG_X86_64 [sp]"=&r"(tmp), #endif "+r"(__sp) : [entry]"r"(entry), [ss]"i"(__KERNEL_DS), [cs]"i"(__KERNEL_CS) ); } } STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); static bool vmx_has_high_real_mode_segbase(void) { return enable_unrestricted_guest || emulate_invalid_guest_state; } static bool vmx_mpx_supported(void) { return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) && (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS); } static bool vmx_xsaves_supported(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_XSAVES; } static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) { u32 exit_intr_info; bool unblock_nmi; u8 vector; bool idtv_info_valid; idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; if (vmx->loaded_vmcs->nmi_known_unmasked) return; /* * Can't use vmx->exit_intr_info since we're not sure what * the exit reason is. */ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; vector = exit_intr_info & INTR_INFO_VECTOR_MASK; /* * SDM 3: 27.7.1.2 (September 2008) * Re-set bit "block by NMI" before VM entry if vmexit caused by * a guest IRET fault. * SDM 3: 23.2.2 (September 2008) * Bit 12 is undefined in any of the following cases: * If the VM exit sets the valid bit in the IDT-vectoring * information field. * If the VM exit is due to a double fault. */ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && vector != DF_VECTOR && !idtv_info_valid) vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); else vmx->loaded_vmcs->nmi_known_unmasked = !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI); } static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, u32 idt_vectoring_info, int instr_len_field, int error_code_field) { u8 vector; int type; bool idtv_info_valid; idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); if (!idtv_info_valid) return; kvm_make_request(KVM_REQ_EVENT, vcpu); vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; switch (type) { case INTR_TYPE_NMI_INTR: vcpu->arch.nmi_injected = true; /* * SDM 3: 27.7.1.2 (September 2008) * Clear bit "block by NMI" before VM entry if a NMI * delivery faulted. 
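		 *
		 * Illustrative flow: if an NMI being injected takes a fault
		 * during delivery, the IDT-vectoring info reports
		 * INTR_TYPE_NMI_INTR; we record nmi_injected above and clear
		 * the NMI mask here so the NMI is re-injected on the next
		 * entry.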
*/ vmx_set_nmi_mask(vcpu, false); break; case INTR_TYPE_SOFT_EXCEPTION: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(error_code_field); kvm_requeue_exception_e(vcpu, vector, err); } else kvm_requeue_exception(vcpu, vector); break; case INTR_TYPE_SOFT_INTR: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_EXT_INTR: kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); break; default: break; } } static void vmx_complete_interrupts(struct vcpu_vmx *vmx) { __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_ERROR_CODE); } static void vmx_cancel_injection(struct kvm_vcpu *vcpu) { __vmx_complete_interrupts(vcpu, vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE); vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) { int i, nr_msrs; struct perf_guest_switch_msr *msrs; msrs = perf_guest_get_msrs(&nr_msrs); if (!msrs) return; for (i = 0; i < nr_msrs; i++) if (msrs[i].host == msrs[i].guest) clear_atomic_switch_msr(vmx, msrs[i].msr); else add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, msrs[i].host); } static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 tscl; u32 delta_tsc; if (vmx->hv_deadline_tsc == -1) return; tscl = rdtsc(); if (vmx->hv_deadline_tsc > tscl) /* sure to be 32 bit only because checked on set_hv_timer */ delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> cpu_preemption_timer_multi); else delta_tsc = 0; vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); } static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long debugctlmsr, cr3, cr4; /* Don't enter VMX if guest state is invalid, let the exit handler start emulation until we arrive back to a valid state */ if (vmx->emulation_required) return; if (vmx->ple_window_dirty) { vmx->ple_window_dirty = false; vmcs_write32(PLE_WINDOW, vmx->ple_window); } if (vmx->nested.sync_shadow_vmcs) { copy_vmcs12_to_shadow(vmx); vmx->nested.sync_shadow_vmcs = false; } if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); cr3 = __get_current_cr3_fast(); if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) { vmcs_writel(HOST_CR3, cr3); vmx->host_state.vmcs_host_cr3 = cr3; } cr4 = cr4_read_shadow(); if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { vmcs_writel(HOST_CR4, cr4); vmx->host_state.vmcs_host_cr4 = cr4; } /* When single-stepping over STI and MOV SS, we must clear the * corresponding interruptibility bits in the guest state. Otherwise * vmentry fails as it then expects bit 14 (BS) in pending debug * exceptions being set, but that's not correct for the guest debugging * case. 
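	 * Hence the KVM_GUESTDBG_SINGLESTEP check below, which drops the
	 * interrupt shadow via vmx_set_interrupt_shadow(vcpu, 0) before
	 * entering the guest.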
 */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vmx_set_interrupt_shadow(vcpu, 0);

	if (static_cpu_has(X86_FEATURE_PKU) &&
	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
	    vcpu->arch.pkru != vmx->host_pkru)
		__write_pkru(vcpu->arch.pkru);

	atomic_switch_perf_msrs(vmx);
	debugctlmsr = get_debugctlmsr();

	vmx_arm_hv_timer(vcpu);

	vmx->__launched = vmx->loaded_vmcs->launched;
	asm(
		/* Store host registers */
		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
		"push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
		"push %%" _ASM_CX " \n\t"
		"cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
		"je 1f \n\t"
		"mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
		__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
		"1: \n\t"
		/* Reload cr2 if changed */
		"mov %c[cr2](%0), %%" _ASM_AX " \n\t"
		"mov %%cr2, %%" _ASM_DX " \n\t"
		"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
		"je 2f \n\t"
		"mov %%" _ASM_AX", %%cr2 \n\t"
		"2: \n\t"
		/* Check if vmlaunch or vmresume is needed */
		"cmpl $0, %c[launched](%0) \n\t"
		/* Load guest registers. Don't clobber flags. */
		"mov %c[rax](%0), %%" _ASM_AX " \n\t"
		"mov %c[rbx](%0), %%" _ASM_BX " \n\t"
		"mov %c[rdx](%0), %%" _ASM_DX " \n\t"
		"mov %c[rsi](%0), %%" _ASM_SI " \n\t"
		"mov %c[rdi](%0), %%" _ASM_DI " \n\t"
		"mov %c[rbp](%0), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%0), %%r8 \n\t"
		"mov %c[r9](%0), %%r9 \n\t"
		"mov %c[r10](%0), %%r10 \n\t"
		"mov %c[r11](%0), %%r11 \n\t"
		"mov %c[r12](%0), %%r12 \n\t"
		"mov %c[r13](%0), %%r13 \n\t"
		"mov %c[r14](%0), %%r14 \n\t"
		"mov %c[r15](%0), %%r15 \n\t"
#endif
		"mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */

		/* Enter guest mode */
		"jne 1f \n\t"
		__ex(ASM_VMX_VMLAUNCH) "\n\t"
		"jmp 2f \n\t"
		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
		"2: "
		/* Save guest registers, load host registers, keep flags */
		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
		"pop %0 \n\t"
		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
		"mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
		"mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
		"mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
		"mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8, %c[r8](%0) \n\t"
		"mov %%r9, %c[r9](%0) \n\t"
		"mov %%r10, %c[r10](%0) \n\t"
		"mov %%r11, %c[r11](%0) \n\t"
		"mov %%r12, %c[r12](%0) \n\t"
		"mov %%r13, %c[r13](%0) \n\t"
		"mov %%r14, %c[r14](%0) \n\t"
		"mov %%r15, %c[r15](%0) \n\t"
#endif
		"mov %%cr2, %%" _ASM_AX " \n\t"
		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"

		"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
		"setbe %c[fail](%0) \n\t"
		".pushsection .rodata \n\t"
		".global vmx_return \n\t"
		"vmx_return: " _ASM_PTR " 2b \n\t"
		".popsection"
	      : : "c"(vmx), "d"((unsigned long)HOST_RSP),
		[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
		[host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
		[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
		[rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
		[rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
		[rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
		[rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
		[r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
		[r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
		[r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
		[r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
		[r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
		[r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
		[wordsize]"i"(sizeof(ulong))
	      : "cc", "memory"
#ifdef CONFIG_X86_64
		, "rax", "rbx", "rdi", "rsi"
		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#else
		, "eax", "ebx", "edi", "esi"
#endif
	      );

	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
	if (debugctlmsr)
		update_debugctlmsr(debugctlmsr);

#ifndef CONFIG_X86_64
	/*
	 * The sysexit path does not restore ds/es, so we must set them to
	 * a reasonable value ourselves.
	 *
	 * We can't defer this to vmx_load_host_state() since that function
	 * may be executed in interrupt context, which saves and restores
	 * segments around it, nullifying its effect.
	 */
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);
#endif

	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR3));
	vcpu->arch.regs_dirty = 0;

	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

	vmx->loaded_vmcs->launched = 1;

	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);

	/*
	 * The eager FPU is enabled when PKEYs are supported and CR4 has been
	 * switched back on the host, so it is safe to read the guest PKRU
	 * from the current XSAVE area.
	 */
	if (static_cpu_has(X86_FEATURE_PKU) &&
	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
		vcpu->arch.pkru = __read_pkru();
		if (vcpu->arch.pkru != vmx->host_pkru)
			__write_pkru(vmx->host_pkru);
	}

	/*
	 * The KVM_REQ_EVENT optimization bit is only on for one entry, and if
	 * we did not inject a still-pending event to L1 now because of
	 * nested_run_pending, we need to re-enable this bit.
	 */
	if (vmx->nested.nested_run_pending)
		kvm_make_request(KVM_REQ_EVENT, vcpu);

	vmx->nested.nested_run_pending = 0;

	vmx_complete_atomic_exit(vmx);
	vmx_recover_nmi_blocking(vmx);
	vmx_complete_interrupts(vmx);
}
STACK_FRAME_NON_STANDARD(vmx_vcpu_run);

static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_put(vcpu);
	vmx_vcpu_load(vcpu, cpu);
	vcpu->cpu = cpu;
	put_cpu();
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
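 * vmx_switch_vmcs() above does that work: it swaps loaded_vmcs and replays
 * vmx_vcpu_put()/vmx_vcpu_load() so the per-cpu VMCS state follows along.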
 */
static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int r;

	r = vcpu_load(vcpu);
	BUG_ON(r);
	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
	free_nested(vmx);
	vcpu_put(vcpu);
}

static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (enable_pml)
		vmx_destroy_pml_buffer(vmx);
	free_vpid(vmx->vpid);
	leave_guest_mode(vcpu);
	vmx_free_vcpu_nested(vcpu);
	free_loaded_vmcs(vmx->loaded_vmcs);
	kfree(vmx->guest_msrs);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vmx);
}

static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
	int err;
	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	int cpu;

	if (!vmx)
		return ERR_PTR(-ENOMEM);

	vmx->vpid = allocate_vpid();

	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = -ENOMEM;

	/*
	 * If PML is turned on, failure to enable PML just results in failure
	 * of creating the vcpu, so we can simplify the PML logic (by avoiding
	 * having to deal with cases such as enabling PML on only some vcpus
	 * of the guest, etc.).
	 */
	if (enable_pml) {
		vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!vmx->pml_pg)
			goto uninit_vcpu;
	}

	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
		     > PAGE_SIZE);

	if (!vmx->guest_msrs)
		goto free_pml;

	vmx->loaded_vmcs = &vmx->vmcs01;
	vmx->loaded_vmcs->vmcs = alloc_vmcs();
	vmx->loaded_vmcs->shadow_vmcs = NULL;
	if (!vmx->loaded_vmcs->vmcs)
		goto free_msrs;
	loaded_vmcs_init(vmx->loaded_vmcs);

	cpu = get_cpu();
	vmx_vcpu_load(&vmx->vcpu, cpu);
	vmx->vcpu.cpu = cpu;
	err = vmx_vcpu_setup(vmx);
	vmx_vcpu_put(&vmx->vcpu);
	put_cpu();
	if (err)
		goto free_vmcs;
	if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
		err = alloc_apic_access_page(kvm);
		if (err)
			goto free_vmcs;
	}

	if (enable_ept) {
		if (!kvm->arch.ept_identity_map_addr)
			kvm->arch.ept_identity_map_addr =
				VMX_EPT_IDENTITY_PAGETABLE_ADDR;
		err = init_rmode_identity_map(kvm);
		if (err)
			goto free_vmcs;
	}

	if (nested) {
		nested_vmx_setup_ctls_msrs(vmx);
		vmx->nested.vpid02 = allocate_vpid();
	}

	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;

	vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;

	return &vmx->vcpu;

free_vmcs:
	free_vpid(vmx->nested.vpid02);
	free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
	kfree(vmx->guest_msrs);
free_pml:
	vmx_destroy_pml_buffer(vmx);
uninit_vcpu:
	kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
	free_vpid(vmx->vpid);
	kmem_cache_free(kvm_vcpu_cache, vmx);
	return ERR_PTR(err);
}

static void __init vmx_check_processor_compat(void *rtn)
{
	struct vmcs_config vmcs_conf;

	*(int *)rtn = 0;
	if (setup_vmcs_config(&vmcs_conf) < 0)
		*(int *)rtn = -EIO;
	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
				smp_processor_id());
		*(int *)rtn = -EIO;
	}
}

static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	u8 cache;
	u64 ipat = 0;

	/* For VT-d and EPT combination
	 * 1. MMIO: always map as UC
	 * 2. EPT with VT-d:
	 *   a. VT-d without snooping control feature: can't guarantee the
	 *	result, try to trust guest.
	 *   b. VT-d with snooping control feature: snooping control feature of
	 *	VT-d engine can guarantee the cache correctness. Just set it
	 *	to WB to keep consistent with host. So the same as item 3.
	 * 3.
EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ if (is_mmio) { cache = MTRR_TYPE_UNCACHABLE; goto exit; } if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { ipat = VMX_EPT_IPAT_BIT; cache = MTRR_TYPE_WRBACK; goto exit; } if (kvm_read_cr0(vcpu) & X86_CR0_CD) { ipat = VMX_EPT_IPAT_BIT; if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) cache = MTRR_TYPE_WRBACK; else cache = MTRR_TYPE_UNCACHABLE; goto exit; } cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn); exit: return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat; } static int vmx_get_lpage_level(void) { if (enable_ept && !cpu_has_vmx_ept_1g_page()) return PT_DIRECTORY_LEVEL; else /* For shadow and EPT supported 1GB page */ return PT_PDPE_LEVEL; } static void vmcs_set_secondary_exec_control(u32 new_ctl) { /* * These bits in the secondary execution controls field * are dynamic, the others are mostly based on the hypervisor * architecture and the guest's CPUID. Do not touch the * dynamic bits. */ u32 mask = SECONDARY_EXEC_SHADOW_VMCS | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, (new_ctl & ~mask) | (cur_ctl & mask)); } /* * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits * (indicating "allowed-1") if they are supported in the guest's CPUID. */ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_cpuid_entry2 *entry; vmx->nested.nested_vmx_cr0_fixed1 = 0xffffffff; vmx->nested.nested_vmx_cr4_fixed1 = X86_CR4_PCE; #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \ if (entry && (entry->_reg & (_cpuid_mask))) \ vmx->nested.nested_vmx_cr4_fixed1 |= (_cr4_mask); \ } while (0) entry = kvm_find_cpuid_entry(vcpu, 0x1, 0); cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME)); cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME)); cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC)); cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE)); cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE)); cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE)); cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE)); cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE)); cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR)); cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM)); cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX)); cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX)); cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID)); cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE)); entry = kvm_find_cpuid_entry(vcpu, 0x7, 0); cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE)); cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP)); cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP)); cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU)); /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */ cr4_fixed1_update(bit(11), ecx, bit(2)); #undef cr4_fixed1_update } static void vmx_cpuid_update(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (cpu_has_secondary_exec_ctrls()) { vmx_compute_secondary_exec_control(vmx); vmcs_set_secondary_exec_control(vmx->secondary_exec_control); } if (nested_vmx_allowed(vcpu)) to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; else to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 
~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; if (nested_vmx_allowed(vcpu)) nested_vmx_cr_fixed1_bits_update(vcpu); } static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { if (func == 1 && nested) entry->ecx |= bit(X86_FEATURE_VMX); } static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exit_reason; unsigned long exit_qualification = vcpu->arch.exit_qualification; if (vmx->nested.pml_full) { exit_reason = EXIT_REASON_PML_FULL; vmx->nested.pml_full = false; exit_qualification &= INTR_INFO_UNBLOCK_NMI; } else if (fault->error_code & PFERR_RSVD_MASK) exit_reason = EXIT_REASON_EPT_MISCONFIG; else exit_reason = EXIT_REASON_EPT_VIOLATION; nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification); vmcs12->guest_physical_address = fault->address; } static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu) { return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT; } /* Callbacks for nested_ept_init_mmu_context: */ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) { /* return the page table to be shadowed - in our case, EPT12 */ return get_vmcs12(vcpu)->ept_pointer; } static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu))) return 1; kvm_mmu_unload(vcpu); kvm_init_shadow_ept_mmu(vcpu, to_vmx(vcpu)->nested.nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT, nested_ept_ad_enabled(vcpu)); vcpu->arch.mmu.set_cr3 = vmx_set_cr3; vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; return 0; } static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) { vcpu->arch.walk_mmu = &vcpu->arch.mmu; } static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, u16 error_code) { bool inequality, bit; bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; inequality = (error_code & vmcs12->page_fault_error_code_mask) != vmcs12->page_fault_error_code_match; return inequality ^ bit; } static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); WARN_ON(!is_guest_mode(vcpu)); if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) { vmcs12->vm_exit_intr_error_code = fault->error_code; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK, fault->address); } else { kvm_inject_page_fault(vcpu, fault); } } static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12); static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct page *page; u64 hpa; if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { /* * Translate L1 physical address to host physical * address for vmcs02. Keep the page pinned, so this * physical address remains valid. We keep a reference * to it so we can release it later. 
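 * (That release happens either immediately below, when a stale page is
 * still pinned from a previous nested entry, or in nested_vmx_vmexit(),
 * which unpins everything vmcs02 referred to.)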
*/ if (vmx->nested.apic_access_page) { /* shouldn't happen */ kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); /* * If translation failed, no matter: This feature asks * to exit when accessing the given address, and if it * can never be accessed, this feature won't do * anything anyway. */ if (!is_error_page(page)) { vmx->nested.apic_access_page = page; hpa = page_to_phys(vmx->nested.apic_access_page); vmcs_write64(APIC_ACCESS_ADDR, hpa); } else { vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); } } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) && cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); kvm_vcpu_reload_apic_access_page(vcpu); } if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { if (vmx->nested.virtual_apic_page) { /* shouldn't happen */ kvm_release_page_dirty(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr); /* * If translation failed, VM entry will fail because * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull. * Failing the vm entry is _not_ what the processor * does but it's basically the only possibility we * have. We could still enter the guest if CR8 load * exits are enabled, CR8 store exits are enabled, and * virtualize APIC access is disabled; in this case * the processor would never use the TPR shadow and we * could simply clear the bit from the execution * control. But such a configuration is useless, so * let's keep the code simple. */ if (!is_error_page(page)) { vmx->nested.virtual_apic_page = page; hpa = page_to_phys(vmx->nested.virtual_apic_page); vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa); } } if (nested_cpu_has_posted_intr(vmcs12)) { if (vmx->nested.pi_desc_page) { /* shouldn't happen */ kunmap(vmx->nested.pi_desc_page); kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; } page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr); if (is_error_page(page)) return; vmx->nested.pi_desc_page = page; vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page); vmx->nested.pi_desc = (struct pi_desc *)((void *)vmx->nested.pi_desc + (unsigned long)(vmcs12->posted_intr_desc_addr & (PAGE_SIZE - 1))); vmcs_write64(POSTED_INTR_DESC_ADDR, page_to_phys(vmx->nested.pi_desc_page) + (unsigned long)(vmcs12->posted_intr_desc_addr & (PAGE_SIZE - 1))); } if (cpu_has_vmx_msr_bitmap() && nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) && nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) ; else vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_USE_MSR_BITMAPS); } static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) { u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; struct vcpu_vmx *vmx = to_vmx(vcpu); if (vcpu->arch.virtual_tsc_khz == 0) return; /* Make sure short timeouts reliably trigger an immediate vmexit. * hrtimer_start does not guarantee this. 
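 * As a rough illustration of the conversion performed below: a guest
 * timer value v corresponds to about
 *   (v << VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE) * 1000000 / virtual_tsc_khz
 * nanoseconds of host time.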
*/ if (preemption_timeout <= 1) { vmx_preemption_timer_fn(&vmx->nested.preemption_timer); return; } preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; preemption_timeout *= 1000000; do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); hrtimer_start(&vmx->nested.preemption_timer, ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); } static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) return 0; if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) || !page_address_valid(vcpu, vmcs12->io_bitmap_b)) return -EINVAL; return 0; } static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) return 0; if (!page_address_valid(vcpu, vmcs12->msr_bitmap)) return -EINVAL; return 0; } static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return 0; if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)) return -EINVAL; return 0; } /* * Merge L0's and L1's MSR bitmap, return false to indicate that * we do not use the hardware. */ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { int msr; struct page *page; unsigned long *msr_bitmap_l1; unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap; /* This shortcut is ok because we support only x2APIC MSRs so far. */ if (!nested_cpu_has_virt_x2apic_mode(vmcs12)) return false; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); if (is_error_page(page)) return false; msr_bitmap_l1 = (unsigned long *)kmap(page); memset(msr_bitmap_l0, 0xff, PAGE_SIZE); if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { if (nested_cpu_has_apic_reg_virt(vmcs12)) for (msr = 0x800; msr <= 0x8ff; msr++) nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, msr, MSR_TYPE_R); nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, APIC_BASE_MSR + (APIC_TASKPRI >> 4), MSR_TYPE_R | MSR_TYPE_W); if (nested_cpu_has_vid(vmcs12)) { nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, APIC_BASE_MSR + (APIC_EOI >> 4), MSR_TYPE_W); nested_vmx_disable_intercept_for_msr( msr_bitmap_l1, msr_bitmap_l0, APIC_BASE_MSR + (APIC_SELF_IPI >> 4), MSR_TYPE_W); } } kunmap(page); kvm_release_page_clean(page); return true; } static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && !nested_cpu_has_apic_reg_virt(vmcs12) && !nested_cpu_has_vid(vmcs12) && !nested_cpu_has_posted_intr(vmcs12)) return 0; /* * If virtualize x2apic mode is enabled, * virtualize apic access must be disabled. */ if (nested_cpu_has_virt_x2apic_mode(vmcs12) && nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) return -EINVAL; /* * If virtual interrupt delivery is enabled, * we must exit on external interrupts. */ if (nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)) return -EINVAL; /* * bits 15:8 should be zero in posted_intr_nv, * the descriptor address has been already checked * in nested_get_vmcs12_pages. */ if (nested_cpu_has_posted_intr(vmcs12) && (!nested_cpu_has_vid(vmcs12) || !nested_exit_intr_ack_set(vcpu) || vmcs12->posted_intr_nv & 0xff00)) return -EINVAL; /* tpr shadow is needed by all apicv features. 
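 * (Per the SDM's VM-execution control consistency checks: virtualize
 * x2APIC mode, APIC-register virtualization and virtual-interrupt
 * delivery all require "use TPR shadow" to be 1, and posted interrupts
 * in turn require virtual-interrupt delivery.)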
*/ if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) return -EINVAL; return 0; } static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, unsigned long count_field, unsigned long addr_field) { int maxphyaddr; u64 count, addr; if (vmcs12_read_any(vcpu, count_field, &count) || vmcs12_read_any(vcpu, addr_field, &addr)) { WARN_ON(1); return -EINVAL; } if (count == 0) return 0; maxphyaddr = cpuid_maxphyaddr(vcpu); if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) { pr_debug_ratelimited( "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)", addr_field, maxphyaddr, count, addr); return -EINVAL; } return 0; } static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { if (vmcs12->vm_exit_msr_load_count == 0 && vmcs12->vm_exit_msr_store_count == 0 && vmcs12->vm_entry_msr_load_count == 0) return 0; /* Fast path */ if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT, VM_EXIT_MSR_LOAD_ADDR) || nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT, VM_EXIT_MSR_STORE_ADDR) || nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT, VM_ENTRY_MSR_LOAD_ADDR)) return -EINVAL; return 0; } static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { u64 address = vmcs12->pml_address; int maxphyaddr = cpuid_maxphyaddr(vcpu); if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) { if (!nested_cpu_has_ept(vmcs12) || !IS_ALIGNED(address, 4096) || address >> maxphyaddr) return -EINVAL; } return 0; } static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e) { /* x2APIC MSR accesses are not allowed */ if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8) return -EINVAL; if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */ e->index == MSR_IA32_UCODE_REV) return -EINVAL; if (e->reserved != 0) return -EINVAL; return 0; } static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e) { if (e->index == MSR_FS_BASE || e->index == MSR_GS_BASE || e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */ nested_vmx_msr_check_common(vcpu, e)) return -EINVAL; return 0; } static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu, struct vmx_msr_entry *e) { if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */ nested_vmx_msr_check_common(vcpu, e)) return -EINVAL; return 0; } /* * Load guest's/host's msr at nested entry/exit. * return 0 for success, entry index for failure. 
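 * The failure index is 1-based (i + 1 below), so a failure on the very
 * first entry is distinguishable from success; this also matches the
 * exit-qualification format for a VM-entry failure while loading MSRs.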
*/ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) { u32 i; struct vmx_msr_entry e; struct msr_data msr; msr.host_initiated = false; for (i = 0; i < count; i++) { if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), &e, sizeof(e))) { pr_debug_ratelimited( "%s cannot read MSR entry (%u, 0x%08llx)\n", __func__, i, gpa + i * sizeof(e)); goto fail; } if (nested_vmx_load_msr_check(vcpu, &e)) { pr_debug_ratelimited( "%s check failed (%u, 0x%x, 0x%x)\n", __func__, i, e.index, e.reserved); goto fail; } msr.index = e.index; msr.data = e.value; if (kvm_set_msr(vcpu, &msr)) { pr_debug_ratelimited( "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", __func__, i, e.index, e.value); goto fail; } } return 0; fail: return i + 1; } static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) { u32 i; struct vmx_msr_entry e; for (i = 0; i < count; i++) { struct msr_data msr_info; if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), &e, 2 * sizeof(u32))) { pr_debug_ratelimited( "%s cannot read MSR entry (%u, 0x%08llx)\n", __func__, i, gpa + i * sizeof(e)); return -EINVAL; } if (nested_vmx_store_msr_check(vcpu, &e)) { pr_debug_ratelimited( "%s check failed (%u, 0x%x, 0x%x)\n", __func__, i, e.index, e.reserved); return -EINVAL; } msr_info.host_initiated = false; msr_info.index = e.index; if (kvm_get_msr(vcpu, &msr_info)) { pr_debug_ratelimited( "%s cannot read MSR (%u, 0x%x)\n", __func__, i, e.index); return -EINVAL; } if (kvm_vcpu_write_guest(vcpu, gpa + i * sizeof(e) + offsetof(struct vmx_msr_entry, value), &msr_info.data, sizeof(msr_info.data))) { pr_debug_ratelimited( "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", __func__, i, e.index, msr_info.data); return -EINVAL; } } return 0; } static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val) { unsigned long invalid_mask; invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu); return (val & invalid_mask) == 0; } /* * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are * emulating VM entry into a guest with EPT enabled. * Returns 0 on success, 1 on failure. Invalid state exit qualification code * is assigned to entry_failure_code on failure. */ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, u32 *entry_failure_code) { if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) { if (!nested_cr3_valid(vcpu, cr3)) { *entry_failure_code = ENTRY_FAIL_DEFAULT; return 1; } /* * If PAE paging and EPT are both on, CR3 is not used by the CPU and * must not be dereferenced. */ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) && !nested_ept) { if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) { *entry_failure_code = ENTRY_FAIL_PDPTE; return 1; } } vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); } kvm_mmu_reset_context(vcpu); return 0; } /* * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 * guest in a way that will both be appropriate to L1's requests, and our * needs. In addition to modifying the active vmcs (which is vmcs02), this * function also has additional necessary side-effects, like setting various * vcpu->arch fields. * Returns 0 on success, 1 on failure. Invalid state exit qualification code * is assigned to entry_failure_code on failure. 
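 * Broadly, guest-state fields are copied straight from vmcs12, while
 * execution-control fields are merged with L0's own requirements
 * (vmcs_config and the vmcs01 settings), as done in the exec_control
 * handling below.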
*/ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, bool from_vmentry, u32 *entry_failure_code) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control, vmcs12_exec_ctrl; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); if (from_vmentry && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); } else { kvm_set_dr(vcpu, 7, vcpu->arch.dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); } if (from_vmentry) { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, vmcs12->vm_entry_intr_info_field); vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, vmcs12->vm_entry_exception_error_code); vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmcs12->vm_entry_instruction_len); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, vmcs12->guest_interruptibility_info); vmx->loaded_vmcs->nmi_known_unmasked = !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); } else { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); vmx_set_rflags(vcpu, vmcs12->guest_rflags); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, vmcs12->guest_pending_dbg_exceptions); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); if (nested_cpu_has_xsaves(vmcs12)) vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); vmcs_write64(VMCS_LINK_POINTER, -1ull); exec_control = vmcs12->pin_based_vm_exec_control; /* Preemption timer setting is only taken 
from vmcs01. */ exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; exec_control |= vmcs_config.pin_based_exec_ctrl; if (vmx->hv_deadline_tsc == -1) exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; /* Posted interrupts setting is only taken from vmcs12. */ if (nested_cpu_has_posted_intr(vmcs12)) { vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; vmx->nested.pi_pending = false; vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); } else { exec_control &= ~PIN_BASED_POSTED_INTR; } vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); vmx->nested.preemption_timer_expired = false; if (nested_cpu_has_preemption_timer(vmcs12)) vmx_start_preemption_timer(vcpu); /* * Whether page-faults are trapped is determined by a combination of * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. * If enable_ept, L0 doesn't care about page faults and we should * set all of these to L1's desires. However, if !enable_ept, L0 does * care about (at least some) page faults, and because it is not easy * (if at all possible?) to merge L0 and L1's desires, we simply ask * to exit on each and every L2 page fault. This is done by setting * MASK=MATCH=0 and (see below) EB.PF=1. * Note that below we don't need special code to set EB.PF beyond the * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when * !enable_ept, EB.PF is 1, so the "or" will always be 1. */ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, enable_ept ? vmcs12->page_fault_error_code_mask : 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, enable_ept ? vmcs12->page_fault_error_code_match : 0); if (cpu_has_secondary_exec_ctrls()) { exec_control = vmx->secondary_exec_control; /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_ENABLE_VMFUNC); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & ~SECONDARY_EXEC_ENABLE_PML; exec_control |= vmcs12_exec_ctrl; } /* All VMFUNCs are currently emulated through L0 vmexits. */ if (exec_control & SECONDARY_EXEC_ENABLE_VMFUNC) vmcs_write64(VM_FUNCTION_CONTROL, 0); if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); vmcs_write16(GUEST_INTR_STATUS, vmcs12->guest_intr_status); } /* * Write an illegal value to APIC_ACCESS_ADDR. Later, * nested_get_vmcs12_pages will either fix it up or * remove the VM execution control. */ if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) vmcs_write64(APIC_ACCESS_ADDR, -1ull); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } /* * Set host-state according to L0's settings (vmcs12 is irrelevant here) * Some constant fields are set here by vmx_set_constant_host_state(). * Other fields are different per CPU, and will be set later when * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. */ vmx_set_constant_host_state(vmx); /* * Set the MSR load/store lists to match L0's settings. 
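 * L1's vm_entry/vm_exit MSR lists are never programmed into vmcs02;
 * they are emulated in software by nested_vmx_load_msr() and
 * nested_vmx_store_msr(), so the autoload lists below only need to
 * carry L0's own MSRs.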
*/ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); /* * HOST_RSP is normally set correctly in vmx_vcpu_run() just before * entry, but only if the current (host) sp changed from the value * we wrote last (vmx->host_rsp). This cache is no longer relevant * if we switch vmcs, and rather than hold a separate cache per vmcs, * here we just force the write to happen on entry. */ vmx->host_rsp = 0; exec_control = vmx_exec_control(vmx); /* L0's desires */ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; exec_control &= ~CPU_BASED_TPR_SHADOW; exec_control |= vmcs12->cpu_based_vm_exec_control; /* * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if * nested_get_vmcs12_pages can't fix it up, the illegal value * will result in a VM entry failure. */ if (exec_control & CPU_BASED_TPR_SHADOW) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); } else { #ifdef CONFIG_X86_64 exec_control |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING; #endif } /* * Merging of IO bitmap not currently supported. * Rather, exit every time. */ exec_control &= ~CPU_BASED_USE_IO_BITMAPS; exec_control |= CPU_BASED_UNCOND_IO_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the * bitwise-or of what L1 wants to trap for L2, and what we want to * trap. Note that CR0.TS also needs updating - we do this later. */ update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); /* L2->L1 exit controls are emulated - the hardware exit is to L0 so * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits are further modified by vmx_set_efer() below. */ vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are * emulated by vmx_set_efer(), below. */ vm_entry_controls_init(vmx, (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & ~VM_ENTRY_IA32E_MODE) | (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); if (from_vmentry && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); vcpu->arch.pat = vmcs12->guest_ia32_pat; } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); } set_cr4_guest_host_mask(vmx); if (from_vmentry && vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset + vmcs12->tsc_offset); else vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); if (enable_vpid) { /* * There is no direct mapping between vpid02 and vpid12, the * vpid02 is per-vCPU for L0 and reused while the value of * vpid12 is changed w/ one invvpid during nested vmentry. * The vpid12 is allocated by L1 for L2, so it will not * influence global bitmap(for vpid01 and vpid02 allocation) * even if spawn a lot of nested vCPUs. 
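 * (In other words, vpid02 is a single host-side tag per vCPU; when L1
 * changes vpid12 we just invalidate the old mappings with one INVVPID,
 * as done below, rather than allocating a fresh host vpid.)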
		 */
		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
			if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
				vmx->nested.last_vpid = vmcs12->virtual_processor_id;
				__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
			}
		} else {
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
			vmx_flush_tlb(vcpu);
		}
	}

	if (enable_pml) {
		/*
		 * Conceptually we want to copy the PML address and index from
		 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
		 * since we always flush the log on each vmexit, this happens
		 * to be equivalent to simply resetting the fields in vmcs02.
		 */
		ASSERT(vmx->pml_pg);
		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
	}

	if (nested_cpu_has_ept(vmcs12)) {
		if (nested_ept_init_mmu_context(vcpu)) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return 1;
		}
	} else if (nested_cpu_has2(vmcs12,
				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		vmx_flush_tlb_ept_only(vcpu);
	}

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
	 * bits that we consider mandatory.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we may
	 * have more bits than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	if (from_vmentry &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		vcpu->arch.efer = vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);

	/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/* Shadow page tables on either EPT or shadow page tables.
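	 * nested_vmx_load_cr3() below installs vmcs12->guest_cr3 either
	 * directly (with nested EPT the hardware walks L2's own page
	 * tables) or as the root of the shadow MMU otherwise.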
*/ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), entry_failure_code)) return 1; if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; /* * L1 may access the L2's PDPTR, so save them to construct vmcs12 */ if (enable_ept) { vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); } kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); return 0; } static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_vmx_check_pml_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, vmx->nested.nested_vmx_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high) || (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && !vmx_control_verify(vmcs12->secondary_vm_exec_control, vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high)) || !vmx_control_verify(vmcs12->pin_based_vm_exec_control, vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high) || !vmx_control_verify(vmcs12->vm_exit_controls, vmx->nested.nested_vmx_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high) || !vmx_control_verify(vmcs12->vm_entry_controls, vmx->nested.nested_vmx_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_cpu_has_vmfunc(vmcs12)) { if (vmcs12->vm_function_control & ~vmx->nested.nested_vmx_vmfunc_controls) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (nested_cpu_has_eptp_switching(vmcs12)) { if (!nested_cpu_has_ept(vmcs12) || !page_address_valid(vcpu, vmcs12->eptp_list_address)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; } } if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) || !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || !nested_cr3_valid(vcpu, vmcs12->host_cr3)) return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; return 0; } static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 *exit_qual) { bool ia32e; *exit_qual = ENTRY_FAIL_DEFAULT; if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) || !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) return 1; if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS) && vmcs12->vmcs_link_pointer != -1ull) { *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR; return 1; } /* * If the load IA32_EFER VM-entry control is 1, the following checks * are performed on the field for the IA32_EFER MSR: * - Bits reserved in the IA32_EFER MSR must be 0. 
* - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of * the IA-32e mode guest VM-exit control. It must also be identical * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to * CR0.PG) is 1. */ if (to_vmx(vcpu)->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) || ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) || ((vmcs12->guest_cr0 & X86_CR0_PG) && ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) return 1; } /* * If the load IA32_EFER VM-exit control is 1, bits reserved in the * IA32_EFER MSR must be 0 in the field for that register. In addition, * the values of the LMA and LME bits in the field must each be that of * the host address-space size VM-exit control. */ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { ia32e = (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) return 1; } return 0; } static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); struct loaded_vmcs *vmcs02; u32 msr_entry_idx; u32 exit_qual; vmcs02 = nested_get_current_vmcs02(vmx); if (!vmcs02) return -ENOMEM; enter_guest_mode(vcpu); if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); vmx_switch_vmcs(vcpu, vmcs02); vmx_segment_cache_clear(vmx); if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { leave_guest_mode(vcpu); vmx_switch_vmcs(vcpu, &vmx->vmcs01); nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, exit_qual); return 1; } nested_get_vmcs12_pages(vcpu, vmcs12); msr_entry_idx = nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr, vmcs12->vm_entry_msr_load_count); if (msr_entry_idx) { leave_guest_mode(vcpu); vmx_switch_vmcs(vcpu, &vmx->vmcs01); nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx); return 1; } /* * Note no nested_vmx_succeed or nested_vmx_fail here. At this point * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet * returned as far as L1 is concerned. It will only return (and set * the success flag) when L2 exits (see nested_vmx_vmexit()). */ return 0; } /* * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 * for running an L2 nested guest. */ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); u32 exit_qual; int ret; if (!nested_vmx_check_permission(vcpu)) return 1; if (!nested_vmx_check_vmcs12(vcpu)) goto out; vmcs12 = get_vmcs12(vcpu); if (enable_shadow_vmcs) copy_shadow_to_vmcs12(vmx); /* * The nested entry process starts with enforcing various prerequisites * on vmcs12 as required by the Intel SDM, and act appropriately when * they fail: As the SDM explains, some conditions should cause the * instruction to fail, while others will cause the instruction to seem * to succeed, but return an EXIT_REASON_INVALID_STATE. * To speed up the normal (success) code path, we should avoid checking * for misconfigurations which will anyway be caught by the processor * when using the merged vmcs02. 
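 * The flow below: check_vmentry_prereqs() covers control-field checks,
 * which fail the instruction with VMfailValid; once the instruction has
 * been skipped, check_vmentry_postreqs() and the hardware itself cover
 * guest-state problems, which instead surface to L1 as an
 * EXIT_REASON_INVALID_STATE nested exit.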
 */
	if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
		nested_vmx_failValid(vcpu,
				     VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
		goto out;
	}

	if (vmcs12->launch_state == launch) {
		nested_vmx_failValid(vcpu,
			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
		goto out;
	}

	ret = check_vmentry_prereqs(vcpu, vmcs12);
	if (ret) {
		nested_vmx_failValid(vcpu, ret);
		goto out;
	}

	/*
	 * After this point, the trap flag no longer triggers a singlestep trap
	 * on the vm entry instructions; don't call kvm_skip_emulated_instruction.
	 * This is not 100% correct; for performance reasons, we delegate most
	 * of the checks on host state to the processor. If those fail,
	 * the singlestep trap is missed.
	 */
	skip_emulated_instruction(vcpu);
	ret = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual);
	if (ret) {
		nested_vmx_entry_failure(vcpu, vmcs12,
					 EXIT_REASON_INVALID_STATE, exit_qual);
		return 1;
	}

	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */
	ret = enter_vmx_non_root_mode(vcpu, true);
	if (ret)
		return ret;

	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
		return kvm_vcpu_halt(vcpu);

	vmx->nested.nested_run_pending = 1;

	return 1;

out:
	return kvm_skip_emulated_instruction(vcpu);
}

/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *    available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *    didn't trap the bit, because if L1 did, so would L0).
 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *    been modified by L2, and L1 knows it. So just leave the old value of
 *    the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *    isn't relevant, because if L0 traps this bit it can set it to anything.
 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *    changed these bits, and therefore they need to be updated, but L0
 *    didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *    put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
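 * The three cases correspond one-to-one to the three OR terms in
 * vmcs12_guest_cr0() below (and analogously in vmcs12_guest_cr4()).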
*/ static inline unsigned long vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | vcpu->arch.cr0_guest_owned_bits)); } static inline unsigned long vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { return /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | vcpu->arch.cr4_guest_owned_bits)); } static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { u32 idt_vectoring; unsigned int nr; if (vcpu->arch.exception.injected) { nr = vcpu->arch.exception.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (kvm_exception_is_soft(nr)) { vmcs12->vm_exit_instruction_len = vcpu->arch.event_exit_inst_len; idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; } else idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; if (vcpu->arch.exception.has_error_code) { idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; vmcs12->idt_vectoring_error_code = vcpu->arch.exception.error_code; } vmcs12->idt_vectoring_info_field = idt_vectoring; } else if (vcpu->arch.nmi_injected) { vmcs12->idt_vectoring_info_field = INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; } else if (vcpu->arch.interrupt.pending) { nr = vcpu->arch.interrupt.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; if (vcpu->arch.interrupt.soft) { idt_vectoring |= INTR_TYPE_SOFT_INTR; vmcs12->vm_entry_instruction_len = vcpu->arch.event_exit_inst_len; } else idt_vectoring |= INTR_TYPE_EXT_INTR; vmcs12->idt_vectoring_info_field = idt_vectoring; } } static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) { struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qual; if (kvm_event_needs_reinjection(vcpu)) return -EBUSY; if (vcpu->arch.exception.pending && nested_vmx_check_exception(vcpu, &exit_qual)) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_inject_exception_vmexit(vcpu, exit_qual); vcpu->arch.exception.pending = false; return 0; } if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && vmx->nested.preemption_timer_expired) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); return 0; } if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, NMI_VECTOR | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK, 0); /* * The NMI-triggered VM exit counts as injection: * clear this one and block further NMIs. */ vcpu->arch.nmi_pending = 0; vmx_set_nmi_mask(vcpu, true); return 0; } if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && nested_exit_on_intr(vcpu)) { if (vmx->nested.nested_run_pending) return -EBUSY; nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); return 0; } vmx_complete_nested_posted_interrupt(vcpu); return 0; } static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) { ktime_t remaining = hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); u64 value; if (ktime_to_ns(remaining) <= 0) return 0; value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; do_div(value, 1000000); return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; } /* * Update the guest state fields of vmcs12 to reflect changes that * occurred while L2 was running. 
(The "IA-32e mode guest" bit of the * VM-entry controls is also updated, since this is really a guest * state bit.) */ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); vmcs12->guest_interruptibility_info = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); vmcs12->guest_pending_dbg_exceptions = vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; else vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; if (nested_cpu_has_preemption_timer(vmcs12)) { if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) vmcs12->vmx_preemption_timer_value = vmx_get_preemption_timer_value(vcpu); hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); } /* * In some cases (usually, nested EPT), L2 is allowed to change its * own CR3 without exiting. If it has changed it, we must keep it. * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. * * Additionally, restore L2's PDPTR to vmcs12. 
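 * (With EPT, a PAE guest's PDPTEs are architectural guest state held in
 * the VMCS rather than re-read from memory on use, so they have to be
 * copied back into vmcs12 for L1 to see.)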
 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
		vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
		vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
		vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
		vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
	}

	vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);

	if (nested_cpu_has_vid(vmcs12))
		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
		vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	}

	/* TODO: These cannot have changed unless we have MSR bitmaps and
	 * the relevant bit asks not to trap the change */
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
		vmcs12->guest_ia32_efer = vcpu->arch.efer;
	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
	if (kvm_mpx_supported())
		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
}

/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have been changed by the L2 guest or the exit - i.e., the guest-state
 * and exit-information fields only. Other fields are modified by L1 with
 * VMWRITE, which already writes to vmcs12 directly.
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update guest state fields: */
	sync_vmcs12(vcpu, vmcs12);

	/* update exit information fields: */

	vmcs12->vm_exit_reason = exit_reason;
	vmcs12->exit_qualification = exit_qualification;

	vmcs12->vm_exit_intr_info = exit_intr_info;

	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		vmcs12->launch_state = 1;

		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}

/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
* This function should be called when the active VMCS is L1's (vmcs01). */ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; u32 entry_failure_code; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) vcpu->arch.efer |= (EFER_LMA | EFER_LME); else vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); vmx_set_efer(vcpu, vcpu->arch.efer); kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); /* * Note that calling vmx_set_cr0 is important, even if cr0 hasn't * actually changed, because vmx_set_cr0 refers to efer set above. * * CR0_GUEST_HOST_MASK is already set in the original vmcs01 * (KVM doesn't change it); */ vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; vmx_set_cr0(vcpu, vmcs12->host_cr0); /* Same as above - no reason to call set_cr4_guest_host_mask(). */ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); kvm_set_cr4(vcpu, vmcs12->host_cr4); nested_ept_uninit_mmu_context(vcpu); /* * Only PDPTE load can fail as the value of cr3 was checked on entry and * couldn't have changed. */ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; if (enable_vpid) { /* * Trivially support vpid by letting L2s share their parent * L1's vpid. TODO: move to a more elaborate solution, giving * each L2 its own vpid and exposing the vpid feature to L1. */ vmx_flush_tlb(vcpu); } /* Restore posted intr vector. */ if (nested_cpu_has_posted_intr(vmcs12)) vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. 
*/ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, 0); if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); vcpu->arch.pat = vmcs12->host_ia32_pat; } if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, vmcs12->host_ia32_perf_global_ctrl); /* Set L1 segment info according to Intel SDM 27.5.2 Loading Host Segment and Descriptor-Table Registers */ seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .selector = vmcs12->host_cs_selector, .type = 11, .present = 1, .s = 1, .g = 1 }; if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) seg.l = 1; else seg.db = 1; vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); seg = (struct kvm_segment) { .base = 0, .limit = 0xFFFFFFFF, .type = 3, .present = 1, .s = 1, .db = 1, .g = 1 }; seg.selector = vmcs12->host_ds_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); seg.selector = vmcs12->host_es_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); seg.selector = vmcs12->host_ss_selector; vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); seg.selector = vmcs12->host_fs_selector; seg.base = vmcs12->host_fs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); seg.selector = vmcs12->host_gs_selector; seg.base = vmcs12->host_gs_base; vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); seg = (struct kvm_segment) { .base = vmcs12->host_tr_base, .limit = 0x67, .selector = vmcs12->host_tr_selector, .type = 11, .present = 1 }; vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); kvm_set_dr(vcpu, 7, 0x400); vmcs_write64(GUEST_IA32_DEBUGCTL, 0); if (cpu_has_vmx_msr_bitmap()) vmx_set_msr_bitmap(vcpu); if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, vmcs12->vm_exit_msr_load_count)) nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); } /* * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 * and modify vmcs12 to make it see what it would expect to see there if * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) */ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 vm_inst_error = 0; /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); leave_guest_mode(vcpu); prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, exit_qualification); if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, vmcs12->vm_exit_msr_store_count)) nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); if (unlikely(vmx->fail)) vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR); vmx_switch_vmcs(vcpu, &vmx->vmcs01); /* * TODO: SDM says that with acknowledge interrupt on exit, bit 31 of * the VM-exit interrupt information (valid interrupt) is always set to * 1 on EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't need * kvm_cpu_has_interrupt(). See the commit message for details. 
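 * For now the code below still checks kvm_cpu_has_interrupt() and, when
 * an interrupt is pending, fetches its vector with
 * kvm_cpu_get_interrupt() to build a valid external-interrupt
 * vm_exit_intr_info for L1.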
*/ if (nested_exit_intr_ack_set(vcpu) && exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && kvm_cpu_has_interrupt(vcpu)) { int irq = kvm_cpu_get_interrupt(vcpu); WARN_ON(irq < 0); vmcs12->vm_exit_intr_info = irq | INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; } trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, vmcs12->exit_qualification, vmcs12->idt_vectoring_info_field, vmcs12->vm_exit_intr_info, vmcs12->vm_exit_intr_error_code, KVM_ISA_VMX); vm_entry_controls_reset_shadow(vmx); vm_exit_controls_reset_shadow(vmx); vmx_segment_cache_clear(vmx); /* if no vmcs02 cache requested, remove the one we used */ if (VMCS02_POOL_SIZE == 0) nested_free_vmcs02(vmx, vmx->nested.current_vmptr); load_vmcs12_host_state(vcpu, vmcs12); /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); if (vmx->hv_deadline_tsc == -1) vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_VMX_PREEMPTION_TIMER); else vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_VMX_PREEMPTION_TIMER); if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); if (vmx->nested.change_vmcs01_virtual_x2apic_mode) { vmx->nested.change_vmcs01_virtual_x2apic_mode = false; vmx_set_virtual_x2apic_mode(vcpu, vcpu->arch.apic_base & X2APIC_ENABLE); } else if (!nested_cpu_has_ept(vmcs12) && nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { vmx_flush_tlb_ept_only(vcpu); } /* This is needed for same reason as it was needed in prepare_vmcs02 */ vmx->host_rsp = 0; /* Unpin physical memory we referred to in vmcs02 */ if (vmx->nested.apic_access_page) { kvm_release_page_dirty(vmx->nested.apic_access_page); vmx->nested.apic_access_page = NULL; } if (vmx->nested.virtual_apic_page) { kvm_release_page_dirty(vmx->nested.virtual_apic_page); vmx->nested.virtual_apic_page = NULL; } if (vmx->nested.pi_desc_page) { kunmap(vmx->nested.pi_desc_page); kvm_release_page_dirty(vmx->nested.pi_desc_page); vmx->nested.pi_desc_page = NULL; vmx->nested.pi_desc = NULL; } /* * We are now running in L2, mmu_notifier will force to reload the * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1. */ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); /* * Exiting from L2 to L1, we're now back to L1 which thinks it just * finished a VMLAUNCH or VMRESUME instruction, so we need to set the * success or failure flag accordingly. */ if (unlikely(vmx->fail)) { vmx->fail = 0; nested_vmx_failValid(vcpu, vm_inst_error); } else nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) vmx->nested.sync_shadow_vmcs = true; /* in case we halted in L2 */ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } /* * Forcibly leave nested mode in order to be able to reset the VCPU later on. */ static void vmx_leave_nested(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu)) { to_vmx(vcpu)->nested.nested_run_pending = 0; nested_vmx_vmexit(vcpu, -1, 0, 0); } free_nested(to_vmx(vcpu)); } /* * L1's failure to enter L2 is a subset of a normal exit, as explained in * 23.7 "VM-entry failures during or after loading guest state" (this also * lists the acceptable exit-reason and exit-qualification parameters). * It should only be called before L2 actually succeeded to run, and when * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss). 
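 * The exit reason passed in is tagged below with
 * VMX_EXIT_REASONS_FAILED_VMENTRY (bit 31), which is how L1 tells a
 * failed entry apart from a normal exit.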
*/ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification) { load_vmcs12_host_state(vcpu, vmcs12); vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY; vmcs12->exit_qualification = qualification; nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) to_vmx(vcpu)->nested.sync_shadow_vmcs = true; } static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) { return X86EMUL_CONTINUE; } #ifdef CONFIG_X86_64 /* (a << shift) / divisor, return 1 if overflow otherwise 0 */ static inline int u64_shl_div_u64(u64 a, unsigned int shift, u64 divisor, u64 *result) { u64 low = a << shift, high = a >> (64 - shift); /* To avoid the overflow on divq */ if (high >= divisor) return 1; /* Low hold the result, high hold rem which is discarded */ asm("divq %2\n\t" : "=a" (low), "=d" (high) : "rm" (divisor), "0" (low), "1" (high)); *result = low; return 0; } static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 tscl = rdtsc(); u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; /* Convert to host delta tsc if tsc scaling is enabled */ if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && u64_shl_div_u64(delta_tsc, kvm_tsc_scaling_ratio_frac_bits, vcpu->arch.tsc_scaling_ratio, &delta_tsc)) return -ERANGE; /* * If the delta tsc can't fit in the 32 bit after the multi shift, * we can't use the preemption timer. * It's possible that it fits on later vmentries, but checking * on every vmentry is costly so we just use an hrtimer. */ if (delta_tsc >> (cpu_preemption_timer_multi + 32)) return -ERANGE; vmx->hv_deadline_tsc = tscl + delta_tsc; vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_VMX_PREEMPTION_TIMER); return delta_tsc == 0; } static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); vmx->hv_deadline_tsc = -1; vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, PIN_BASED_VMX_PREEMPTION_TIMER); } #endif static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu) { if (ple_gap) shrink_ple_window(vcpu); } static void vmx_slot_enable_log_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_slot_leaf_clear_dirty(kvm, slot); kvm_mmu_slot_largepage_remove_write_access(kvm, slot); } static void vmx_slot_disable_log_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_mmu_slot_set_dirty(kvm, slot); } static void vmx_flush_log_dirty(struct kvm *kvm) { kvm_flush_pml_buffers(kvm); } static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); gpa_t gpa; struct page *page = NULL; u64 *pml_address; if (is_guest_mode(vcpu)) { WARN_ON_ONCE(vmx->nested.pml_full); /* * Check if PML is enabled for the nested guest. * Whether eptp bit 6 is set is already checked * as part of A/D emulation. 
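 *
 * Note that vmcs12->guest_pml_index counts down toward zero as
 * entries are logged; an index of PML_ENTITY_NUM or above means
 * L1's PML buffer is already full, so a PML-full exit is reflected
 * to L1 instead of logging the GPA.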
*/ vmcs12 = get_vmcs12(vcpu); if (!nested_cpu_has_pml(vmcs12)) return 0; if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) { vmx->nested.pml_full = true; return 1; } gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address); if (is_error_page(page)) return 0; pml_address = kmap(page); pml_address[vmcs12->guest_pml_index--] = gpa; kunmap(page); kvm_release_page_clean(page); } return 0; } static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t offset, unsigned long mask) { kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); } /* * This routine does the following things for vCPU which is going * to be blocked if VT-d PI is enabled. * - Store the vCPU to the wakeup list, so when interrupts happen * we can find the right vCPU to wake up. * - Change the Posted-interrupt descriptor as below: * 'NDST' <-- vcpu->pre_pcpu * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR * - If 'ON' is set during this process, which means at least one * interrupt is posted for this vCPU, we cannot block it, in * this case, return 1, otherwise, return 0. * */ static int pi_pre_block(struct kvm_vcpu *vcpu) { unsigned long flags; unsigned int dest; struct pi_desc old, new; struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || !kvm_vcpu_apicv_active(vcpu)) return 0; vcpu->pre_pcpu = vcpu->cpu; spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); list_add_tail(&vcpu->blocked_vcpu_list, &per_cpu(blocked_vcpu_on_cpu, vcpu->pre_pcpu)); spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); do { old.control = new.control = pi_desc->control; /* * We should not block the vCPU if * an interrupt is posted for it. */ if (pi_test_on(pi_desc) == 1) { spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); list_del(&vcpu->blocked_vcpu_list); spin_unlock_irqrestore( &per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); vcpu->pre_pcpu = -1; return 1; } WARN((pi_desc->sn == 1), "Warning: SN field of posted-interrupts " "is set before blocking\n"); /* * Since vCPU can be preempted during this process, * vcpu->cpu could be different with pre_pcpu, we * need to set pre_pcpu as the destination of wakeup * notification event, then we can find the right vCPU * to wakeup in wakeup handler if interrupts happen * when the vCPU is in blocked state. 
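 *
 * Note on the 'NDST' encoding below: in xAPIC mode the 8-bit
 * destination APIC ID lives in bits 15:8 of the field (hence
 * "(dest << 8) & 0xFF00"), while x2APIC mode stores the full
 * 32-bit ID directly.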
*/ dest = cpu_physical_id(vcpu->pre_pcpu); if (x2apic_enabled()) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; /* set 'NV' to 'wakeup vector' */ new.nv = POSTED_INTR_WAKEUP_VECTOR; } while (cmpxchg(&pi_desc->control, old.control, new.control) != old.control); return 0; } static int vmx_pre_block(struct kvm_vcpu *vcpu) { if (pi_pre_block(vcpu)) return 1; if (kvm_lapic_hv_timer_in_use(vcpu)) kvm_lapic_switch_to_sw_timer(vcpu); return 0; } static void pi_post_block(struct kvm_vcpu *vcpu) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); struct pi_desc old, new; unsigned int dest; unsigned long flags; if (!kvm_arch_has_assigned_device(vcpu->kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || !kvm_vcpu_apicv_active(vcpu)) return; do { old.control = new.control = pi_desc->control; dest = cpu_physical_id(vcpu->cpu); if (x2apic_enabled()) new.ndst = dest; else new.ndst = (dest << 8) & 0xFF00; /* Allow posting non-urgent interrupts */ new.sn = 0; /* set 'NV' to 'notification vector' */ new.nv = POSTED_INTR_VECTOR; } while (cmpxchg(&pi_desc->control, old.control, new.control) != old.control); if(vcpu->pre_pcpu != -1) { spin_lock_irqsave( &per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); list_del(&vcpu->blocked_vcpu_list); spin_unlock_irqrestore( &per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu), flags); vcpu->pre_pcpu = -1; } } static void vmx_post_block(struct kvm_vcpu *vcpu) { if (kvm_x86_ops->set_hv_timer) kvm_lapic_switch_to_hv_timer(vcpu); pi_post_block(vcpu); } /* * vmx_update_pi_irte - set IRTE for Posted-Interrupts * * @kvm: kvm * @host_irq: host irq of the interrupt * @guest_irq: gsi of the interrupt * @set: set or unset PI * returns 0 on success, < 0 on failure */ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set) { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; struct kvm_lapic_irq irq; struct kvm_vcpu *vcpu; struct vcpu_data vcpu_info; int idx, ret = 0; if (!kvm_arch_has_assigned_device(kvm) || !irq_remapping_cap(IRQ_POSTING_CAP) || !kvm_vcpu_apicv_active(kvm->vcpus[0])) return 0; idx = srcu_read_lock(&kvm->irq_srcu); irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); if (guest_irq >= irq_rt->nr_rt_entries || hlist_empty(&irq_rt->map[guest_irq])) { pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", guest_irq, irq_rt->nr_rt_entries); goto out; } hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { if (e->type != KVM_IRQ_ROUTING_MSI) continue; /* * VT-d PI cannot support posting multicast/broadcast * interrupts to a vCPU, we still use interrupt remapping * for these kind of interrupts. * * For lowest-priority interrupts, we only support * those with single CPU as the destination, e.g. user * configures the interrupts via /proc/irq or uses * irqbalance to make the interrupts single-CPU. * * We will support full lowest-priority interrupt later. */ kvm_set_msi_irq(kvm, e, &irq); if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) { /* * Make sure the IRTE is in remapped mode if * we don't handle it in posted mode. 
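 *
 * Passing a NULL vcpu_info to irq_set_vcpu_affinity() asks the
 * IOMMU driver to drop any posted-interrupt configuration and fall
 * back to ordinary interrupt remapping for this IRTE.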
*/ ret = irq_set_vcpu_affinity(host_irq, NULL); if (ret < 0) { printk(KERN_INFO "failed to back to remapped mode, irq: %u\n", host_irq); goto out; } continue; } vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); if (set) ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); else { /* suppress notification event before unposting */ pi_set_sn(vcpu_to_pi_desc(vcpu)); ret = irq_set_vcpu_affinity(host_irq, NULL); pi_clear_sn(vcpu_to_pi_desc(vcpu)); } if (ret < 0) { printk(KERN_INFO "%s: failed to update PI IRTE\n", __func__); goto out; } } ret = 0; out: srcu_read_unlock(&kvm->irq_srcu, idx); return ret; } static void vmx_setup_mce(struct kvm_vcpu *vcpu) { if (vcpu->arch.mcg_cap & MCG_LMCE_P) to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= FEATURE_CONTROL_LMCE; else to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= ~FEATURE_CONTROL_LMCE; } static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, .hardware_setup = hardware_setup, .hardware_unsetup = hardware_unsetup, .check_processor_compatibility = vmx_check_processor_compat, .hardware_enable = hardware_enable, .hardware_disable = hardware_disable, .cpu_has_accelerated_tpr = report_flexpriority, .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase, .vcpu_create = vmx_create_vcpu, .vcpu_free = vmx_free_vcpu, .vcpu_reset = vmx_vcpu_reset, .prepare_guest_switch = vmx_save_host_state, .vcpu_load = vmx_vcpu_load, .vcpu_put = vmx_vcpu_put, .update_bp_intercept = update_exception_bitmap, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, .get_segment = vmx_get_segment, .set_segment = vmx_set_segment, .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, .decache_cr3 = vmx_decache_cr3, .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, .set_cr0 = vmx_set_cr0, .set_cr3 = vmx_set_cr3, .set_cr4 = vmx_set_cr4, .set_efer = vmx_set_efer, .get_idt = vmx_get_idt, .set_idt = vmx_set_idt, .get_gdt = vmx_get_gdt, .set_gdt = vmx_set_gdt, .get_dr6 = vmx_get_dr6, .set_dr6 = vmx_set_dr6, .set_dr7 = vmx_set_dr7, .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, .cache_reg = vmx_cache_reg, .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, .tlb_flush = vmx_flush_tlb, .run = vmx_vcpu_run, .handle_exit = vmx_handle_exit, .skip_emulated_instruction = skip_emulated_instruction, .set_interrupt_shadow = vmx_set_interrupt_shadow, .get_interrupt_shadow = vmx_get_interrupt_shadow, .patch_hypercall = vmx_patch_hypercall, .set_irq = vmx_inject_irq, .set_nmi = vmx_inject_nmi, .queue_exception = vmx_queue_exception, .cancel_injection = vmx_cancel_injection, .interrupt_allowed = vmx_interrupt_allowed, .nmi_allowed = vmx_nmi_allowed, .get_nmi_mask = vmx_get_nmi_mask, .set_nmi_mask = vmx_set_nmi_mask, .enable_nmi_window = enable_nmi_window, .enable_irq_window = enable_irq_window, .update_cr8_intercept = update_cr8_intercept, .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode, .set_apic_access_page_addr = vmx_set_apic_access_page_addr, .get_enable_apicv = vmx_get_enable_apicv, .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl, .load_eoi_exitmap = vmx_load_eoi_exitmap, .apicv_post_state_restore = vmx_apicv_post_state_restore, .hwapic_irr_update = vmx_hwapic_irr_update, .hwapic_isr_update = vmx_hwapic_isr_update, 
.sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, .set_tss_addr = vmx_set_tss_addr, .get_tdp_level = get_ept_level, .get_mt_mask = vmx_get_mt_mask, .get_exit_info = vmx_get_exit_info, .get_lpage_level = vmx_get_lpage_level, .cpuid_update = vmx_cpuid_update, .rdtscp_supported = vmx_rdtscp_supported, .invpcid_supported = vmx_invpcid_supported, .set_supported_cpuid = vmx_set_supported_cpuid, .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, .write_tsc_offset = vmx_write_tsc_offset, .set_tdp_cr3 = vmx_set_cr3, .check_intercept = vmx_check_intercept, .handle_external_intr = vmx_handle_external_intr, .mpx_supported = vmx_mpx_supported, .xsaves_supported = vmx_xsaves_supported, .check_nested_events = vmx_check_nested_events, .sched_in = vmx_sched_in, .slot_enable_log_dirty = vmx_slot_enable_log_dirty, .slot_disable_log_dirty = vmx_slot_disable_log_dirty, .flush_log_dirty = vmx_flush_log_dirty, .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked, .write_log_dirty = vmx_write_pml_buffer, .pre_block = vmx_pre_block, .post_block = vmx_post_block, .pmu_ops = &intel_pmu_ops, .update_pi_irte = vmx_update_pi_irte, #ifdef CONFIG_X86_64 .set_hv_timer = vmx_set_hv_timer, .cancel_hv_timer = vmx_cancel_hv_timer, #endif .setup_mce = vmx_setup_mce, }; static int __init vmx_init(void) { int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx), THIS_MODULE); if (r) return r; #ifdef CONFIG_KEXEC_CORE rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); #endif return 0; } static void __exit vmx_exit(void) { #ifdef CONFIG_KEXEC_CORE RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); synchronize_rcu(); #endif kvm_exit(); } module_init(vmx_init) module_exit(vmx_exit)
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* kdc/kdc_util.c - Utility functions for the KDC implementation */ /* * Copyright 1990,1991,2007,2008,2009 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ /* * Copyright (c) 2006-2008, Novell, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The copyright holder's name is not used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "k5-int.h" #include "kdc_util.h" #include "extern.h" #include <stdio.h> #include <ctype.h> #include <syslog.h> #include <kadm5/admin.h> #include "adm_proto.h" #include "net-server.h" #include <limits.h> #ifdef KRBCONF_VAGUE_ERRORS const int vague_errors = 1; #else const int vague_errors = 0; #endif static krb5_error_code kdc_rd_ap_req(kdc_realm_t *kdc_active_realm, krb5_ap_req *apreq, krb5_auth_context auth_context, krb5_db_entry **server, krb5_keyblock **tgskey); static krb5_error_code find_server_key(krb5_context, krb5_db_entry *, krb5_enctype, krb5_kvno, krb5_keyblock **, krb5_kvno *); /* * concatenate first two authdata arrays, returning an allocated replacement. 
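 * A hypothetical call might look like this (variable names are
 * illustrative only):
 *
 *     krb5_authdata **merged;
 *     ret = concat_authorization_data(context, tkt_authdata,
 *                                     extra_authdata, &merged);
 *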
* The replacement should be freed with krb5_free_authdata(). */ krb5_error_code concat_authorization_data(krb5_context context, krb5_authdata **first, krb5_authdata **second, krb5_authdata ***output) { int i, j; krb5_authdata **ptr, **retdata; /* count up the entries */ i = 0; if (first) for (ptr = first; *ptr; ptr++) i++; if (second) for (ptr = second; *ptr; ptr++) i++; retdata = (krb5_authdata **)malloc((i+1)*sizeof(*retdata)); if (!retdata) return ENOMEM; retdata[i] = 0; /* null-terminated array */ for (i = 0, j = 0, ptr = first; j < 2 ; ptr = second, j++) while (ptr && *ptr) { /* now walk & copy */ retdata[i] = (krb5_authdata *)malloc(sizeof(*retdata[i])); if (!retdata[i]) { krb5_free_authdata(context, retdata); return ENOMEM; } *retdata[i] = **ptr; if (!(retdata[i]->contents = (krb5_octet *)malloc(retdata[i]->length))) { free(retdata[i]); retdata[i] = 0; krb5_free_authdata(context, retdata); return ENOMEM; } memcpy(retdata[i]->contents, (*ptr)->contents, retdata[i]->length); ptr++; i++; } *output = retdata; return 0; } krb5_boolean is_local_principal(kdc_realm_t *kdc_active_realm, krb5_const_principal princ1) { return krb5_realm_compare(kdc_context, princ1, tgs_server); } /* * Returns TRUE if the kerberos principal is the name of a Kerberos ticket * service. */ krb5_boolean krb5_is_tgs_principal(krb5_const_principal principal) { if (krb5_princ_size(kdc_context, principal) != 2) return FALSE; if (data_eq_string(*krb5_princ_component(kdc_context, principal, 0), KRB5_TGS_NAME)) return TRUE; else return FALSE; } /* Returns TRUE if principal is the name of a cross-realm TGS. */ krb5_boolean is_cross_tgs_principal(krb5_const_principal principal) { if (!krb5_is_tgs_principal(principal)) return FALSE; if (!data_eq(*krb5_princ_component(kdc_context, principal, 1), *krb5_princ_realm(kdc_context, principal))) return TRUE; else return FALSE; } /* * given authentication data (provides seed for checksum), verify checksum * for source data. */ static krb5_error_code comp_cksum(krb5_context kcontext, krb5_data *source, krb5_ticket *ticket, krb5_checksum *his_cksum) { krb5_error_code retval; krb5_boolean valid; if (!krb5_c_valid_cksumtype(his_cksum->checksum_type)) return KRB5KDC_ERR_SUMTYPE_NOSUPP; /* must be collision proof */ if (!krb5_c_is_coll_proof_cksum(his_cksum->checksum_type)) return KRB5KRB_AP_ERR_INAPP_CKSUM; /* verify checksum */ if ((retval = krb5_c_verify_checksum(kcontext, ticket->enc_part2->session, KRB5_KEYUSAGE_TGS_REQ_AUTH_CKSUM, source, his_cksum, &valid))) return(retval); if (!valid) return(KRB5KRB_AP_ERR_BAD_INTEGRITY); return(0); } /* If a header ticket is decrypted, *ticket_out is filled in even on error. 
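 * The caller therefore owns *ticket_out and should free it with
 * krb5_free_ticket() whether or not the call succeeds.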
*/ krb5_error_code kdc_process_tgs_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, const krb5_fulladdr *from, krb5_data *pkt, krb5_ticket **ticket_out, krb5_db_entry **krbtgt_ptr, krb5_keyblock **tgskey, krb5_keyblock **subkey, krb5_pa_data **pa_tgs_req) { krb5_pa_data * tmppa; krb5_ap_req * apreq; krb5_error_code retval; krb5_authdata **authdata = NULL; krb5_data scratch1; krb5_data * scratch = NULL; krb5_boolean foreign_server = FALSE; krb5_auth_context auth_context = NULL; krb5_authenticator * authenticator = NULL; krb5_checksum * his_cksum = NULL; krb5_db_entry * krbtgt = NULL; krb5_ticket * ticket; *ticket_out = NULL; *krbtgt_ptr = NULL; *tgskey = NULL; tmppa = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_AP_REQ); if (!tmppa) return KRB5KDC_ERR_PADATA_TYPE_NOSUPP; scratch1.length = tmppa->length; scratch1.data = (char *)tmppa->contents; if ((retval = decode_krb5_ap_req(&scratch1, &apreq))) return retval; ticket = apreq->ticket; if (isflagset(apreq->ap_options, AP_OPTS_USE_SESSION_KEY) || isflagset(apreq->ap_options, AP_OPTS_MUTUAL_REQUIRED)) { krb5_klog_syslog(LOG_INFO, _("TGS_REQ: SESSION KEY or MUTUAL")); retval = KRB5KDC_ERR_POLICY; goto cleanup; } /* If the "server" principal in the ticket is not something in the local realm, then we must refuse to service the request if the client claims to be from the local realm. If we don't do this, then some other realm's nasty KDC can claim to be authenticating a client from our realm, and we'll give out tickets concurring with it! we set a flag here for checking below. */ foreign_server = !is_local_principal(kdc_active_realm, apreq->ticket->server); if ((retval = krb5_auth_con_init(kdc_context, &auth_context))) goto cleanup; /* Don't use a replay cache. */ if ((retval = krb5_auth_con_setflags(kdc_context, auth_context, 0))) goto cleanup; if ((retval = krb5_auth_con_setaddrs(kdc_context, auth_context, NULL, from->address)) ) goto cleanup_auth_context; retval = kdc_rd_ap_req(kdc_active_realm, apreq, auth_context, &krbtgt, tgskey); if (retval) goto cleanup_auth_context; /* "invalid flag" tickets can must be used to validate */ if (isflagset(ticket->enc_part2->flags, TKT_FLG_INVALID) && !isflagset(request->kdc_options, KDC_OPT_VALIDATE)) { retval = KRB5KRB_AP_ERR_TKT_INVALID; goto cleanup_auth_context; } if ((retval = krb5_auth_con_getrecvsubkey(kdc_context, auth_context, subkey))) goto cleanup_auth_context; if ((retval = krb5_auth_con_getauthenticator(kdc_context, auth_context, &authenticator))) goto cleanup_auth_context; retval = krb5_find_authdata(kdc_context, ticket->enc_part2->authorization_data, authenticator->authorization_data, KRB5_AUTHDATA_FX_ARMOR, &authdata); if (retval != 0) goto cleanup_authenticator; if (authdata&& authdata[0]) { k5_setmsg(kdc_context, KRB5KDC_ERR_POLICY, "ticket valid only as FAST armor"); retval = KRB5KDC_ERR_POLICY; krb5_free_authdata(kdc_context, authdata); goto cleanup_authenticator; } krb5_free_authdata(kdc_context, authdata); /* Check for a checksum */ if (!(his_cksum = authenticator->checksum)) { retval = KRB5KRB_AP_ERR_INAPP_CKSUM; goto cleanup_authenticator; } /* make sure the client is of proper lineage (see above) */ if (foreign_server && !krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_FOR_USER)) { if (is_local_principal(kdc_active_realm, ticket->enc_part2->client)) { /* someone in a foreign realm claiming to be local */ krb5_klog_syslog(LOG_INFO, _("PROCESS_TGS: failed lineage check")); retval = KRB5KDC_ERR_POLICY; goto cleanup_authenticator; } } /* * Check 
application checksum vs. tgs request * * We try checksumming the req-body two different ways: first we * try reaching into the raw asn.1 stream (if available), and * checksum that directly; if that fails, then we try encoding * using our local asn.1 library. */ if (pkt && (fetch_asn1_field((unsigned char *) pkt->data, 1, 4, &scratch1) >= 0)) { if (comp_cksum(kdc_context, &scratch1, ticket, his_cksum)) { if (!(retval = encode_krb5_kdc_req_body(request, &scratch))) retval = comp_cksum(kdc_context, scratch, ticket, his_cksum); krb5_free_data(kdc_context, scratch); if (retval) goto cleanup_authenticator; } } *pa_tgs_req = tmppa; *krbtgt_ptr = krbtgt; krbtgt = NULL; cleanup_authenticator: krb5_free_authenticator(kdc_context, authenticator); cleanup_auth_context: krb5_auth_con_free(kdc_context, auth_context); cleanup: if (retval != 0) { krb5_free_keyblock(kdc_context, *tgskey); *tgskey = NULL; } if (apreq->ticket->enc_part2 != NULL) { /* Steal the decrypted ticket pointer, even on error. */ *ticket_out = apreq->ticket; apreq->ticket = NULL; } krb5_free_ap_req(kdc_context, apreq); krb5_db_free_principal(kdc_context, krbtgt); return retval; } /* * This is a KDC wrapper around krb5_rd_req_decoded_anyflag(). * * We can't depend on KDB-as-keytab for handling the AP-REQ here for * optimization reasons: we want to minimize the number of KDB lookups. We'll * need the KDB entry for the TGS principal, and the TGS key used to decrypt * the TGT, elsewhere in the TGS code. * * This function also implements key rollover support for kvno 0 cross-realm * TGTs issued by AD. */ static krb5_error_code kdc_rd_ap_req(kdc_realm_t *kdc_active_realm, krb5_ap_req *apreq, krb5_auth_context auth_context, krb5_db_entry **server, krb5_keyblock **tgskey) { krb5_error_code retval; krb5_enctype search_enctype = apreq->ticket->enc_part.enctype; krb5_boolean match_enctype = 1; krb5_kvno kvno; size_t tries = 3; /* * When we issue tickets we use the first key in the principals' highest * kvno keyset. For non-cross-realm krbtgt principals we want to only * allow the use of the first key of the principal's keyset that matches * the given kvno. */ if (krb5_is_tgs_principal(apreq->ticket->server) && !is_cross_tgs_principal(apreq->ticket->server)) { search_enctype = -1; match_enctype = 0; } retval = kdc_get_server_key(kdc_context, apreq->ticket, KRB5_KDB_FLAG_ALIAS_OK, match_enctype, server, NULL, NULL); if (retval) return retval; *tgskey = NULL; kvno = apreq->ticket->enc_part.kvno; do { krb5_free_keyblock(kdc_context, *tgskey); retval = find_server_key(kdc_context, *server, search_enctype, kvno, tgskey, &kvno); if (retval) continue; /* Make the TGS key available to krb5_rd_req_decoded_anyflag() */ retval = krb5_auth_con_setuseruserkey(kdc_context, auth_context, *tgskey); if (retval) return retval; retval = krb5_rd_req_decoded_anyflag(kdc_context, &auth_context, apreq, apreq->ticket->server, kdc_active_realm->realm_keytab, NULL, NULL); /* If the ticket was decrypted, don't try any more keys. */ if (apreq->ticket->enc_part2 != NULL) break; } while (retval && apreq->ticket->enc_part.kvno == 0 && kvno-- > 1 && --tries > 0); return retval; } /* * The KDC should take the keytab associated with the realm and pass * that to the krb5_rd_req_decoded_anyflag(), but we still need to use * the service (TGS, here) key elsewhere. This approach is faster than * the KDB keytab approach too. * * This is also used by do_tgs_req() for u2u auth. 
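 * (u2u is user-to-user authentication, where the KDC must also look
 * up the TGS key in order to decrypt the additional ticket supplied
 * with the request.)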
*/ krb5_error_code kdc_get_server_key(krb5_context context, krb5_ticket *ticket, unsigned int flags, krb5_boolean match_enctype, krb5_db_entry **server_ptr, krb5_keyblock **key, krb5_kvno *kvno) { krb5_error_code retval; krb5_db_entry * server = NULL; krb5_enctype search_enctype = -1; krb5_kvno search_kvno = -1; if (match_enctype) search_enctype = ticket->enc_part.enctype; if (ticket->enc_part.kvno) search_kvno = ticket->enc_part.kvno; *server_ptr = NULL; retval = krb5_db_get_principal(context, ticket->server, flags, &server); if (retval == KRB5_KDB_NOENTRY) { char *sname; if (!krb5_unparse_name(context, ticket->server, &sname)) { limit_string(sname); krb5_klog_syslog(LOG_ERR, _("TGS_REQ: UNKNOWN SERVER: server='%s'"), sname); free(sname); } return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; } else if (retval) return retval; if (server->attributes & KRB5_KDB_DISALLOW_SVR || server->attributes & KRB5_KDB_DISALLOW_ALL_TIX) { retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto errout; } if (key) { retval = find_server_key(context, server, search_enctype, search_kvno, key, kvno); if (retval) goto errout; } *server_ptr = server; server = NULL; return 0; errout: krb5_db_free_principal(context, server); return retval; } /* * A utility function to get the right key from a KDB entry. Used in handling * of kvno 0 TGTs, for example. */ static krb5_error_code find_server_key(krb5_context context, krb5_db_entry *server, krb5_enctype enctype, krb5_kvno kvno, krb5_keyblock **key_out, krb5_kvno *kvno_out) { krb5_error_code retval; krb5_key_data * server_key; krb5_keyblock * key; *key_out = NULL; retval = krb5_dbe_find_enctype(context, server, enctype, -1, kvno ? (krb5_int32)kvno : -1, &server_key); if (retval) return retval; if (!server_key) return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; if ((key = (krb5_keyblock *)malloc(sizeof *key)) == NULL) return ENOMEM; retval = krb5_dbe_decrypt_key_data(context, NULL, server_key, key, NULL); if (retval) goto errout; if (enctype != -1) { krb5_boolean similar; retval = krb5_c_enctype_compare(context, enctype, key->enctype, &similar); if (retval) goto errout; if (!similar) { retval = KRB5_KDB_NO_PERMITTED_KEY; goto errout; } key->enctype = enctype; } *key_out = key; key = NULL; if (kvno_out) *kvno_out = server_key->key_data_kvno; errout: krb5_free_keyblock(context, key); return retval; } /* * If candidate is the local TGT for realm, set *alias_out to candidate and * *storage_out to NULL. Otherwise, load the local TGT into *storage_out and * set *alias_out to *storage_out. * * In the future we might generalize this to a small per-request principal * cache. For now, it saves a load operation in the common case where the AS * server or TGS header ticket server is the local TGT. 
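 *
 * A typical call is shaped like this (sketch; variable names are
 * illustrative):
 *
 *     ret = get_local_tgt(context, &request->server->realm, server,
 *                         &local_tgt, &local_tgt_storage);
 *     ...
 *     krb5_db_free_principal(context, local_tgt_storage);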
*/ krb5_error_code get_local_tgt(krb5_context context, const krb5_data *realm, krb5_db_entry *candidate, krb5_db_entry **alias_out, krb5_db_entry **storage_out) { krb5_error_code ret; krb5_principal princ; krb5_db_entry *tgt; *alias_out = NULL; *storage_out = NULL; ret = krb5_build_principal_ext(context, &princ, realm->length, realm->data, KRB5_TGS_NAME_SIZE, KRB5_TGS_NAME, realm->length, realm->data, 0); if (ret) return ret; if (!krb5_principal_compare(context, candidate->princ, princ)) { ret = krb5_db_get_principal(context, princ, 0, &tgt); if (!ret) *storage_out = *alias_out = tgt; } else { *alias_out = candidate; } krb5_free_principal(context, princ); return ret; } /* This probably wants to be updated if you support last_req stuff */ static krb5_last_req_entry nolrentry = { KV5M_LAST_REQ_ENTRY, KRB5_LRQ_NONE, 0 }; static krb5_last_req_entry *nolrarray[] = { &nolrentry, 0 }; krb5_error_code fetch_last_req_info(krb5_db_entry *dbentry, krb5_last_req_entry ***lrentry) { *lrentry = nolrarray; return 0; } /* XXX! This is a temporary place-holder */ krb5_error_code check_hot_list(krb5_ticket *ticket) { return 0; } /* Convert an API error code to a protocol error code. */ int errcode_to_protocol(krb5_error_code code) { int protcode; protcode = code - ERROR_TABLE_BASE_krb5; return (protcode >= 0 && protcode <= 128) ? protcode : KRB_ERR_GENERIC; } /* Return -1 if the AS or TGS request is disallowed due to KDC policy on * anonymous tickets. */ int check_anon(kdc_realm_t *kdc_active_realm, krb5_principal client, krb5_principal server) { /* If restrict_anon is set, reject requests from anonymous to principals * other than the local TGT. */ if (kdc_active_realm->realm_restrict_anon && krb5_principal_compare_any_realm(kdc_context, client, krb5_anonymous_principal()) && !krb5_principal_compare(kdc_context, server, tgs_server)) return -1; return 0; } /* * Routines that validate a AS request; checks a lot of things. :-) * * Returns a Kerberos protocol error number, which is _not_ the same * as a com_err error number! */ #define AS_INVALID_OPTIONS (KDC_OPT_FORWARDED | KDC_OPT_PROXY | \ KDC_OPT_VALIDATE | KDC_OPT_RENEW | \ KDC_OPT_ENC_TKT_IN_SKEY | KDC_OPT_CNAME_IN_ADDL_TKT) int validate_as_request(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, krb5_db_entry client, krb5_db_entry server, krb5_timestamp kdc_time, const char **status, krb5_pa_data ***e_data) { krb5_error_code ret; /* * If an option is set that is only allowed in TGS requests, complain. */ if (request->kdc_options & AS_INVALID_OPTIONS) { *status = "INVALID AS OPTIONS"; return KDC_ERR_BADOPTION; } /* The client must not be expired */ if (client.expiration && ts_after(kdc_time, client.expiration)) { *status = "CLIENT EXPIRED"; if (vague_errors) return(KRB_ERR_GENERIC); else return(KDC_ERR_NAME_EXP); } /* The client's password must not be expired, unless the server is a KRB5_KDC_PWCHANGE_SERVICE. */ if (client.pw_expiration && ts_after(kdc_time, client.pw_expiration) && !isflagset(server.attributes, KRB5_KDB_PWCHANGE_SERVICE)) { *status = "CLIENT KEY EXPIRED"; if (vague_errors) return(KRB_ERR_GENERIC); else return(KDC_ERR_KEY_EXP); } /* The server must not be expired */ if (server.expiration && ts_after(kdc_time, server.expiration)) { *status = "SERVICE EXPIRED"; return(KDC_ERR_SERVICE_EXP); } /* * If the client requires password changing, then only allow the * pwchange service. 
*/ if (isflagset(client.attributes, KRB5_KDB_REQUIRES_PWCHANGE) && !isflagset(server.attributes, KRB5_KDB_PWCHANGE_SERVICE)) { *status = "REQUIRED PWCHANGE"; return(KDC_ERR_KEY_EXP); } /* Client and server must allow postdating tickets */ if ((isflagset(request->kdc_options, KDC_OPT_ALLOW_POSTDATE) || isflagset(request->kdc_options, KDC_OPT_POSTDATED)) && (isflagset(client.attributes, KRB5_KDB_DISALLOW_POSTDATED) || isflagset(server.attributes, KRB5_KDB_DISALLOW_POSTDATED))) { *status = "POSTDATE NOT ALLOWED"; return(KDC_ERR_CANNOT_POSTDATE); } /* Check to see if client is locked out */ if (isflagset(client.attributes, KRB5_KDB_DISALLOW_ALL_TIX)) { *status = "CLIENT LOCKED OUT"; return(KDC_ERR_CLIENT_REVOKED); } /* Check to see if server is locked out */ if (isflagset(server.attributes, KRB5_KDB_DISALLOW_ALL_TIX)) { *status = "SERVICE LOCKED OUT"; return(KDC_ERR_S_PRINCIPAL_UNKNOWN); } /* Check to see if server is allowed to be a service */ if (isflagset(server.attributes, KRB5_KDB_DISALLOW_SVR)) { *status = "SERVICE NOT ALLOWED"; return(KDC_ERR_MUST_USE_USER2USER); } if (check_anon(kdc_active_realm, client.princ, request->server) != 0) { *status = "ANONYMOUS NOT ALLOWED"; return(KDC_ERR_POLICY); } /* Perform KDB module policy checks. */ ret = krb5_db_check_policy_as(kdc_context, request, &client, &server, kdc_time, status, e_data); if (ret && ret != KRB5_PLUGIN_OP_NOTSUPP) return errcode_to_protocol(ret); return 0; } /* * Compute ticket flags based on the request, the client and server DB entry * (which may prohibit forwardable or proxiable tickets), and the header * ticket. client may be NULL for a TGS request (although it may be set, such * as for an S4U2Self request). header_enc may be NULL for an AS request. */ krb5_flags get_ticket_flags(krb5_flags reqflags, krb5_db_entry *client, krb5_db_entry *server, krb5_enc_tkt_part *header_enc) { krb5_flags flags; /* Indicate support for encrypted padata (RFC 6806), and set flags based on * request options and the header ticket. */ flags = OPTS2FLAGS(reqflags) | TKT_FLG_ENC_PA_REP; if (reqflags & KDC_OPT_POSTDATED) flags |= TKT_FLG_INVALID; if (header_enc != NULL) flags |= COPY_TKT_FLAGS(header_enc->flags); if (header_enc == NULL) flags |= TKT_FLG_INITIAL; /* For TGS requests, indicate if the service is marked ok-as-delegate. */ if (header_enc != NULL && (server->attributes & KRB5_KDB_OK_AS_DELEGATE)) flags |= TKT_FLG_OK_AS_DELEGATE; /* Unset PROXIABLE if it is disallowed. */ if (client != NULL && (client->attributes & KRB5_KDB_DISALLOW_PROXIABLE)) flags &= ~TKT_FLG_PROXIABLE; if (server->attributes & KRB5_KDB_DISALLOW_PROXIABLE) flags &= ~TKT_FLG_PROXIABLE; if (header_enc != NULL && !(header_enc->flags & TKT_FLG_PROXIABLE)) flags &= ~TKT_FLG_PROXIABLE; /* Unset FORWARDABLE if it is disallowed. */ if (client != NULL && (client->attributes & KRB5_KDB_DISALLOW_FORWARDABLE)) flags &= ~TKT_FLG_FORWARDABLE; if (server->attributes & KRB5_KDB_DISALLOW_FORWARDABLE) flags &= ~TKT_FLG_FORWARDABLE; if (header_enc != NULL && !(header_enc->flags & TKT_FLG_FORWARDABLE)) flags &= ~TKT_FLG_FORWARDABLE; /* We don't currently handle issuing anonymous tickets based on * non-anonymous ones. */ if (header_enc != NULL && !(header_enc->flags & TKT_FLG_ANONYMOUS)) flags &= ~TKT_FLG_ANONYMOUS; return flags; } /* Return KRB5KDC_ERR_POLICY if indicators does not contain the required auth * indicators for server, ENOMEM on allocation error, 0 otherwise. 
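 *
 * The require_auth string attribute holds a space-separated list,
 * and the check passes if the ticket carries any one of the listed
 * indicators; e.g. a value of "otp pkinit" accepts a ticket bearing
 * either indicator.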
*/ krb5_error_code check_indicators(krb5_context context, krb5_db_entry *server, krb5_data *const *indicators) { krb5_error_code ret; char *str = NULL, *copy = NULL, *save, *ind; ret = krb5_dbe_get_string(context, server, KRB5_KDB_SK_REQUIRE_AUTH, &str); if (ret || str == NULL) goto cleanup; copy = strdup(str); if (copy == NULL) { ret = ENOMEM; goto cleanup; } /* Look for any of the space-separated strings in indicators. */ ind = strtok_r(copy, " ", &save); while (ind != NULL) { if (authind_contains(indicators, ind)) goto cleanup; ind = strtok_r(NULL, " ", &save); } ret = KRB5KDC_ERR_POLICY; k5_setmsg(context, ret, _("Required auth indicators not present in ticket: %s"), str); cleanup: krb5_dbe_free_string(context, str); free(copy); return ret; } #define ASN1_ID_CLASS (0xc0) #define ASN1_ID_TYPE (0x20) #define ASN1_ID_TAG (0x1f) #define ASN1_CLASS_UNIV (0) #define ASN1_CLASS_APP (1) #define ASN1_CLASS_CTX (2) #define ASN1_CLASS_PRIV (3) #define asn1_id_constructed(x) (x & ASN1_ID_TYPE) #define asn1_id_primitive(x) (!asn1_id_constructed(x)) #define asn1_id_class(x) ((x & ASN1_ID_CLASS) >> 6) #define asn1_id_tag(x) (x & ASN1_ID_TAG) /* * asn1length - return encoded length of value. * * passed a pointer into the asn.1 stream, which is updated * to point right after the length bits. * * returns -1 on failure. */ static int asn1length(unsigned char **astream) { int length; /* resulting length */ int sublen; /* sublengths */ int blen; /* bytes of length */ unsigned char *p; /* substring searching */ if (**astream & 0x80) { blen = **astream & 0x7f; if (blen > 3) { return(-1); } for (++*astream, length = 0; blen; ++*astream, blen--) { length = (length << 8) | **astream; } if (length == 0) { /* indefinite length, figure out by hand */ p = *astream; p++; while (1) { /* compute value length. */ if ((sublen = asn1length(&p)) < 0) { return(-1); } p += sublen; /* check for termination */ if ((!*p++) && (!*p)) { p++; break; } } length = p - *astream; } } else { length = **astream; ++*astream; } return(length); } /* * fetch_asn1_field - return raw asn.1 stream of subfield. * * this routine is passed a context-dependent tag number and "level" and returns * the size and length of the corresponding level subfield. * * levels and are numbered starting from 1. * * returns 0 on success, -1 otherwise. */ int fetch_asn1_field(unsigned char *astream, unsigned int level, unsigned int field, krb5_data *data) { unsigned char *estream; /* end of stream */ int classes; /* # classes seen so far this level */ unsigned int levels = 0; /* levels seen so far */ int lastlevel = 1000; /* last level seen */ int length; /* various lengths */ int tag; /* tag number */ unsigned char savelen; /* saved length of our field */ classes = -1; /* we assume that the first identifier/length will tell us how long the entire stream is. */ astream++; estream = astream; if ((length = asn1length(&astream)) < 0) { return(-1); } estream += length; /* search down the stream, checking identifiers. we process identifiers until we hit the "level" we want, and then process that level for our subfield, always making sure we don't go off the end of the stream. */ while (astream < estream) { if (!asn1_id_constructed(*astream)) { return(-1); } if (asn1_id_class(*astream) == ASN1_CLASS_CTX) { if ((tag = (int)asn1_id_tag(*astream)) <= lastlevel) { levels++; classes = -1; } lastlevel = tag; if (levels == level) { /* in our context-dependent class, is this the one we're looking for ? 
*/ if (tag == (int)field) { /* return length and data */ astream++; savelen = *astream; if ((length = asn1length(&astream)) < 0) { return(-1); } data->length = length; /* if the field length is indefinite, we will have to subtract two (terminating octets) from the length returned since we don't want to pass any info from the "wrapper" back. asn1length will always return the *total* length of the field, not just what's contained in it */ if ((savelen & 0xff) == 0x80) { data->length -=2 ; } data->data = (char *)astream; return(0); } else if (tag <= classes) { /* we've seen this class before, something must be wrong */ return(-1); } else { classes = tag; } } } /* if we're not on our level yet, process this value. otherwise skip over it */ astream++; if ((length = asn1length(&astream)) < 0) { return(-1); } if (levels == level) { astream += length; } } return(-1); } /* Return true if we believe server can support enctype as a session key. */ static krb5_boolean dbentry_supports_enctype(kdc_realm_t *kdc_active_realm, krb5_db_entry *server, krb5_enctype enctype) { krb5_error_code retval; krb5_key_data *datap; char *etypes_str = NULL; krb5_enctype default_enctypes[1] = { 0 }; krb5_enctype *etypes = NULL; krb5_boolean in_list; /* Look up the supported session key enctypes list in the KDB. */ retval = krb5_dbe_get_string(kdc_context, server, KRB5_KDB_SK_SESSION_ENCTYPES, &etypes_str); if (retval == 0 && etypes_str != NULL && *etypes_str != '\0') { /* Pass a fake profile key for tracing of unrecognized tokens. */ retval = krb5int_parse_enctype_list(kdc_context, "KDB-session_etypes", etypes_str, default_enctypes, &etypes); if (retval == 0 && etypes != NULL && etypes[0]) { in_list = k5_etypes_contains(etypes, enctype); free(etypes_str); free(etypes); return in_list; } /* Fall through on error or empty list */ } free(etypes_str); free(etypes); /* If configured to, assume every server without a session_enctypes * attribute supports DES_CBC_CRC. */ if (kdc_active_realm->realm_assume_des_crc_sess && enctype == ENCTYPE_DES_CBC_CRC) return TRUE; /* Due to an ancient interop problem, assume nothing supports des-cbc-md5 * unless there's a session_enctypes explicitly saying that it does. */ if (enctype == ENCTYPE_DES_CBC_MD5) return FALSE; /* Assume the server supports any enctype it has a long-term key for. */ return !krb5_dbe_find_enctype(kdc_context, server, enctype, -1, 0, &datap); } /* * This function returns the keytype which should be selected for the * session key. It is based on the ordered list which the user * requested, and what the KDC and the application server can support. */ krb5_enctype select_session_keytype(kdc_realm_t *kdc_active_realm, krb5_db_entry *server, int nktypes, krb5_enctype *ktype) { int i; for (i = 0; i < nktypes; i++) { if (!krb5_c_valid_enctype(ktype[i])) continue; if (!krb5_is_permitted_enctype(kdc_context, ktype[i])) continue; if (dbentry_supports_enctype(kdc_active_realm, server, ktype[i])) return ktype[i]; } return 0; } /* * Limit strings to a "reasonable" length to prevent crowding out of * other useful information in the log entry */ #define NAME_LENGTH_LIMIT 128 void limit_string(char *name) { int i; if (!name) return; if (strlen(name) < NAME_LENGTH_LIMIT) return; i = NAME_LENGTH_LIMIT-4; name[i++] = '.'; name[i++] = '.'; name[i++] = '.'; name[i] = '\0'; return; } /* * L10_2 = log10(2**x), rounded up; log10(2) ~= 0.301. 
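 * For example, L10_2(32) = (32*301 + 999)/1000 = 10, so a 32-bit
 * value needs at most 10 decimal digits, and D_LEN() adds two more
 * bytes for a possible minus sign and the terminating NUL.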
*/ #define L10_2(x) ((int)(((x * 301) + 999) / 1000)) /* * Max length of sprintf("%ld") for an int of type T; includes leading * minus sign and terminating NUL. */ #define D_LEN(t) (L10_2(sizeof(t) * CHAR_BIT) + 2) void ktypes2str(char *s, size_t len, int nktypes, krb5_enctype *ktype) { int i; char stmp[D_LEN(krb5_enctype) + 1]; char *p; if (nktypes < 0 || len < (sizeof(" etypes {...}") + D_LEN(int))) { *s = '\0'; return; } snprintf(s, len, "%d etypes {", nktypes); for (i = 0; i < nktypes; i++) { snprintf(stmp, sizeof(stmp), "%s%ld", i ? " " : "", (long)ktype[i]); if (strlen(s) + strlen(stmp) + sizeof("}") > len) break; strlcat(s, stmp, len); } if (i < nktypes) { /* * We broke out of the loop. Try to truncate the list. */ p = s + strlen(s); while (p - s + sizeof("...}") > len) { while (p > s && *p != ' ' && *p != '{') *p-- = '\0'; if (p > s && *p == ' ') { *p-- = '\0'; continue; } } strlcat(s, "...", len); } strlcat(s, "}", len); return; } void rep_etypes2str(char *s, size_t len, krb5_kdc_rep *rep) { char stmp[sizeof("ses=") + D_LEN(krb5_enctype)]; if (len < (3 * D_LEN(krb5_enctype) + sizeof("etypes {rep= tkt= ses=}"))) { *s = '\0'; return; } snprintf(s, len, "etypes {rep=%ld", (long)rep->enc_part.enctype); if (rep->ticket != NULL) { snprintf(stmp, sizeof(stmp), " tkt=%ld", (long)rep->ticket->enc_part.enctype); strlcat(s, stmp, len); } if (rep->ticket != NULL && rep->ticket->enc_part2 != NULL && rep->ticket->enc_part2->session != NULL) { snprintf(stmp, sizeof(stmp), " ses=%ld", (long)rep->ticket->enc_part2->session->enctype); strlcat(s, stmp, len); } strlcat(s, "}", len); return; } static krb5_error_code verify_for_user_checksum(krb5_context context, krb5_keyblock *key, krb5_pa_for_user *req) { krb5_error_code code; int i; krb5_int32 name_type; char *p; krb5_data data; krb5_boolean valid = FALSE; if (!krb5_c_is_keyed_cksum(req->cksum.checksum_type)) { return KRB5KRB_AP_ERR_INAPP_CKSUM; } /* * Checksum is over name type and string components of * client principal name and auth_package. 
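 * Concretely, the buffer assembled below is the 4-byte little-endian
 * name type, followed by each principal name component in order,
 * then the realm, then the auth_package string, with no separators.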
*/ data.length = 4; for (i = 0; i < krb5_princ_size(context, req->user); i++) { data.length += krb5_princ_component(context, req->user, i)->length; } data.length += krb5_princ_realm(context, req->user)->length; data.length += req->auth_package.length; p = data.data = malloc(data.length); if (data.data == NULL) { return ENOMEM; } name_type = krb5_princ_type(context, req->user); p[0] = (name_type >> 0 ) & 0xFF; p[1] = (name_type >> 8 ) & 0xFF; p[2] = (name_type >> 16) & 0xFF; p[3] = (name_type >> 24) & 0xFF; p += 4; for (i = 0; i < krb5_princ_size(context, req->user); i++) { if (krb5_princ_component(context, req->user, i)->length > 0) { memcpy(p, krb5_princ_component(context, req->user, i)->data, krb5_princ_component(context, req->user, i)->length); } p += krb5_princ_component(context, req->user, i)->length; } if (krb5_princ_realm(context, req->user)->length > 0) { memcpy(p, krb5_princ_realm(context, req->user)->data, krb5_princ_realm(context, req->user)->length); } p += krb5_princ_realm(context, req->user)->length; if (req->auth_package.length > 0) memcpy(p, req->auth_package.data, req->auth_package.length); p += req->auth_package.length; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_APP_DATA_CKSUM, &data, &req->cksum, &valid); if (code == 0 && valid == FALSE) code = KRB5KRB_AP_ERR_MODIFIED; free(data.data); return code; } /* * Legacy protocol transition (Windows 2003 and above) */ static krb5_error_code kdc_process_for_user(kdc_realm_t *kdc_active_realm, krb5_pa_data *pa_data, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user **s4u_x509_user, const char **status) { krb5_error_code code; krb5_pa_for_user *for_user; krb5_data req_data; req_data.length = pa_data->length; req_data.data = (char *)pa_data->contents; code = decode_krb5_pa_for_user(&req_data, &for_user); if (code) { *status = "DECODE_PA_FOR_USER"; return code; } code = verify_for_user_checksum(kdc_context, tgs_session, for_user); if (code) { *status = "INVALID_S4U2SELF_CHECKSUM"; krb5_free_pa_for_user(kdc_context, for_user); return code; } *s4u_x509_user = calloc(1, sizeof(krb5_pa_s4u_x509_user)); if (*s4u_x509_user == NULL) { krb5_free_pa_for_user(kdc_context, for_user); return ENOMEM; } (*s4u_x509_user)->user_id.user = for_user->user; for_user->user = NULL; krb5_free_pa_for_user(kdc_context, for_user); return 0; } static krb5_error_code verify_s4u_x509_user_checksum(krb5_context context, krb5_keyblock *key, krb5_data *req_data, krb5_int32 kdc_req_nonce, krb5_pa_s4u_x509_user *req) { krb5_error_code code; krb5_data scratch; krb5_boolean valid = FALSE; if (enctype_requires_etype_info_2(key->enctype) && !krb5_c_is_keyed_cksum(req->cksum.checksum_type)) return KRB5KRB_AP_ERR_INAPP_CKSUM; if (req->user_id.nonce != kdc_req_nonce) return KRB5KRB_AP_ERR_MODIFIED; /* * Verify checksum over the encoded userid. If that fails, * re-encode, and verify that. This is similar to the * behaviour in kdc_process_tgs_req(). */ if (fetch_asn1_field((unsigned char *)req_data->data, 1, 0, &scratch) < 0) return ASN1_PARSE_ERROR; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, &scratch, &req->cksum, &valid); if (code != 0) return code; if (valid == FALSE) { krb5_data *data; code = encode_krb5_s4u_userid(&req->user_id, &data); if (code != 0) return code; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, data, &req->cksum, &valid); krb5_free_data(context, data); if (code != 0) return code; } return valid ? 
0 : KRB5KRB_AP_ERR_MODIFIED; } /* * New protocol transition request (Windows 2008 and above) */ static krb5_error_code kdc_process_s4u_x509_user(krb5_context context, krb5_kdc_req *request, krb5_pa_data *pa_data, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user **s4u_x509_user, const char **status) { krb5_error_code code; krb5_data req_data; req_data.length = pa_data->length; req_data.data = (char *)pa_data->contents; code = decode_krb5_pa_s4u_x509_user(&req_data, s4u_x509_user); if (code) { *status = "DECODE_PA_S4U_X509_USER"; return code; } code = verify_s4u_x509_user_checksum(context, tgs_subkey ? tgs_subkey : tgs_session, &req_data, request->nonce, *s4u_x509_user); if (code) { *status = "INVALID_S4U2SELF_CHECKSUM"; krb5_free_pa_s4u_x509_user(context, *s4u_x509_user); *s4u_x509_user = NULL; return code; } if (krb5_princ_size(context, (*s4u_x509_user)->user_id.user) == 0 || (*s4u_x509_user)->user_id.subject_cert.length != 0) { *status = "INVALID_S4U2SELF_REQUEST"; krb5_free_pa_s4u_x509_user(context, *s4u_x509_user); *s4u_x509_user = NULL; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; } return 0; } krb5_error_code kdc_make_s4u2self_rep(krb5_context context, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user *req_s4u_user, krb5_kdc_rep *reply, krb5_enc_kdc_rep_part *reply_encpart) { krb5_error_code code; krb5_data *der_user_id = NULL, *der_s4u_x509_user = NULL; krb5_pa_s4u_x509_user rep_s4u_user; krb5_pa_data *pa; krb5_enctype enctype; krb5_keyusage usage; memset(&rep_s4u_user, 0, sizeof(rep_s4u_user)); rep_s4u_user.user_id.nonce = req_s4u_user->user_id.nonce; rep_s4u_user.user_id.user = req_s4u_user->user_id.user; rep_s4u_user.user_id.options = req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE; code = encode_krb5_s4u_userid(&rep_s4u_user.user_id, &der_user_id); if (code != 0) goto cleanup; if (req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY; else usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST; code = krb5_c_make_checksum(context, req_s4u_user->cksum.checksum_type, tgs_subkey != NULL ? tgs_subkey : tgs_session, usage, der_user_id, &rep_s4u_user.cksum); if (code != 0) goto cleanup; code = encode_krb5_pa_s4u_x509_user(&rep_s4u_user, &der_s4u_x509_user); if (code != 0) goto cleanup; /* Add a padata element, stealing memory from der_s4u_x509_user. */ code = alloc_pa_data(KRB5_PADATA_S4U_X509_USER, 0, &pa); if (code != 0) goto cleanup; pa->length = der_s4u_x509_user->length; pa->contents = (uint8_t *)der_s4u_x509_user->data; der_s4u_x509_user->data = NULL; /* add_pa_data_element() claims pa on success or failure. */ code = add_pa_data_element(&reply->padata, pa); if (code != 0) goto cleanup; if (tgs_subkey != NULL) enctype = tgs_subkey->enctype; else enctype = tgs_session->enctype; /* * Owing to a bug in Windows, unkeyed checksums were used for older * enctypes, including rc4-hmac. A forthcoming workaround for this * includes the checksum bytes in the encrypted padata. 
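 *
 * The workaround element built below simply concatenates the request
 * checksum and the reply checksum inside the encrypted part of the
 * reply, whose encryption under the reply key protects them even
 * though the checksums themselves may be unkeyed.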
*/ if ((req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) && enctype_requires_etype_info_2(enctype) == FALSE) { code = alloc_pa_data(KRB5_PADATA_S4U_X509_USER, req_s4u_user->cksum.length + rep_s4u_user.cksum.length, &pa); if (code != 0) goto cleanup; memcpy(pa->contents, req_s4u_user->cksum.contents, req_s4u_user->cksum.length); memcpy(&pa->contents[req_s4u_user->cksum.length], rep_s4u_user.cksum.contents, rep_s4u_user.cksum.length); /* add_pa_data_element() claims pa on success or failure. */ code = add_pa_data_element(&reply_encpart->enc_padata, pa); if (code != 0) goto cleanup; } cleanup: if (rep_s4u_user.cksum.contents != NULL) krb5_free_checksum_contents(context, &rep_s4u_user.cksum); krb5_free_data(context, der_user_id); krb5_free_data(context, der_s4u_x509_user); return code; } /* * Protocol transition (S4U2Self) */ krb5_error_code kdc_process_s4u2self_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, krb5_const_principal client_princ, krb5_const_principal header_srv_princ, krb5_boolean issuing_referral, const krb5_db_entry *server, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_timestamp kdc_time, krb5_pa_s4u_x509_user **s4u_x509_user, krb5_db_entry **princ_ptr, const char **status) { krb5_error_code code; krb5_boolean is_local_tgt; krb5_pa_data *pa_data; int flags; krb5_db_entry *princ; *princ_ptr = NULL; pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_S4U_X509_USER); if (pa_data != NULL) { code = kdc_process_s4u_x509_user(kdc_context, request, pa_data, tgs_subkey, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else { pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_FOR_USER); if (pa_data != NULL) { code = kdc_process_for_user(kdc_active_realm, pa_data, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else return 0; } /* * We need to compare the client name in the TGT with the requested * server name. Supporting server name aliases without assuming a * global name service makes this difficult to do. * * The comparison below handles the following cases (note that the * term "principal name" below excludes the realm). * * (1) The requested service is a host-based service with two name * components, in which case we assume the principal name to * contain sufficient qualifying information. The realm is * ignored for the purpose of comparison. * * (2) The requested service name is an enterprise principal name: * the service principal name is compared with the unparsed * form of the client name (including its realm). * * (3) The requested service is some other name type: an exact * match is required. * * An alternative would be to look up the server once again with * FLAG_CANONICALIZE | FLAG_CLIENT_REFERRALS_ONLY set, do an exact * match between the returned name and client_princ. However, this * assumes that the client set FLAG_CANONICALIZE when requesting * the TGT and that we have a global name service. 
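 *
 * For example, under (1) HTTP/www.example.com@REALM-A compares equal
 * to HTTP/www.example.com@REALM-B, on the assumption that the
 * hostname component is globally unambiguous.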
*/ flags = 0; switch (krb5_princ_type(kdc_context, request->server)) { case KRB5_NT_SRV_HST: /* (1) */ if (krb5_princ_size(kdc_context, request->server) == 2) flags |= KRB5_PRINCIPAL_COMPARE_IGNORE_REALM; break; case KRB5_NT_ENTERPRISE_PRINCIPAL: /* (2) */ flags |= KRB5_PRINCIPAL_COMPARE_ENTERPRISE; break; default: /* (3) */ break; } if (!krb5_principal_compare_flags(kdc_context, request->server, client_princ, flags)) { *status = "INVALID_S4U2SELF_REQUEST"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; /* match Windows error code */ } /* * Protocol transition is mutually exclusive with renew/forward/etc * as well as user-to-user and constrained delegation. This check * is also made in validate_as_request(). * * We can assert from this check that the header ticket was a TGT, as * that is validated previously in validate_tgs_request(). */ if (request->kdc_options & AS_INVALID_OPTIONS) { *status = "INVALID AS OPTIONS"; return KRB5KDC_ERR_BADOPTION; } /* * Valid S4U2Self requests can occur in the following combinations: * * (1) local TGT, local user, local server * (2) cross TGT, local user, issuing referral * (3) cross TGT, non-local user, issuing referral * (4) cross TGT, non-local user, local server * * The first case is for a single-realm S4U2Self scenario; the second, * third, and fourth cases are for the initial, intermediate (if any), and * final cross-realm requests in a multi-realm scenario. */ is_local_tgt = !is_cross_tgs_principal(header_srv_princ); if (is_local_tgt && issuing_referral) { /* The requesting server appears to no longer exist, and we found * a referral instead. Treat this as a server lookup failure. */ *status = "LOOKING_UP_SERVER"; return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; } /* * Do not attempt to lookup principals in foreign realms. */ if (is_local_principal(kdc_active_realm, (*s4u_x509_user)->user_id.user)) { krb5_db_entry no_server; krb5_pa_data **e_data = NULL; if (!is_local_tgt && !issuing_referral) { /* A local server should not need a cross-realm TGT to impersonate * a local principal. */ *status = "NOT_CROSS_REALM_REQUEST"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; /* match Windows error */ } code = krb5_db_get_principal(kdc_context, (*s4u_x509_user)->user_id.user, KRB5_KDB_FLAG_INCLUDE_PAC, &princ); if (code == KRB5_KDB_NOENTRY) { *status = "UNKNOWN_S4U2SELF_PRINCIPAL"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; } else if (code) { *status = "LOOKING_UP_S4U2SELF_PRINCIPAL"; return code; /* caller can free for_user */ } memset(&no_server, 0, sizeof(no_server)); /* Ignore password expiration and needchange attributes (as Windows * does), since S4U2Self is not password authentication. */ princ->pw_expiration = 0; clear(princ->attributes, KRB5_KDB_REQUIRES_PWCHANGE); code = validate_as_request(kdc_active_realm, request, *princ, no_server, kdc_time, status, &e_data); if (code) { krb5_db_free_principal(kdc_context, princ); krb5_free_pa_data(kdc_context, e_data); return code; } *princ_ptr = princ; } else if (is_local_tgt) { /* * The server is asking to impersonate a principal from another realm, * using a local TGT. It should instead ask that principal's realm and * follow referrals back to us. 
*/ *status = "S4U2SELF_CLIENT_NOT_OURS"; return KRB5KDC_ERR_POLICY; /* match Windows error */ } return 0; } static krb5_error_code check_allowed_to_delegate_to(krb5_context context, krb5_const_principal client, const krb5_db_entry *server, krb5_const_principal proxy) { /* Can't get a TGT (otherwise it would be unconstrained delegation) */ if (krb5_is_tgs_principal(proxy)) return KRB5KDC_ERR_POLICY; /* Must be in same realm */ if (!krb5_realm_compare(context, server->princ, proxy)) return KRB5KDC_ERR_POLICY; return krb5_db_check_allowed_to_delegate(context, client, server, proxy); } krb5_error_code kdc_process_s4u2proxy_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, const krb5_enc_tkt_part *t2enc, const krb5_db_entry *server, krb5_const_principal server_princ, krb5_const_principal proxy_princ, const char **status) { krb5_error_code errcode; /* * Constrained delegation is mutually exclusive with renew/forward/etc. * We can assert from this check that the header ticket was a TGT, as * that is validated previously in validate_tgs_request(). */ if (request->kdc_options & (NON_TGT_OPTION | KDC_OPT_ENC_TKT_IN_SKEY)) { *status = "INVALID_S4U2PROXY_OPTIONS"; return KRB5KDC_ERR_BADOPTION; } /* Ensure that evidence ticket server matches TGT client */ if (!krb5_principal_compare(kdc_context, server->princ, /* after canon */ server_princ)) { *status = "EVIDENCE_TICKET_MISMATCH"; return KRB5KDC_ERR_SERVER_NOMATCH; } if (!isflagset(t2enc->flags, TKT_FLG_FORWARDABLE)) { *status = "EVIDENCE_TKT_NOT_FORWARDABLE"; return KRB5_TKT_NOT_FORWARDABLE; } /* Backend policy check */ errcode = check_allowed_to_delegate_to(kdc_context, t2enc->client, server, proxy_princ); if (errcode) { *status = "NOT_ALLOWED_TO_DELEGATE"; return errcode; } return 0; } krb5_error_code kdc_check_transited_list(kdc_realm_t *kdc_active_realm, const krb5_data *trans, const krb5_data *realm1, const krb5_data *realm2) { krb5_error_code code; /* Check against the KDB module. Treat this answer as authoritative if the * method is supported and doesn't explicitly pass control. */ code = krb5_db_check_transited_realms(kdc_context, trans, realm1, realm2); if (code != KRB5_PLUGIN_OP_NOTSUPP && code != KRB5_PLUGIN_NO_HANDLE) return code; /* Check using krb5.conf [capaths] or hierarchical relationships. */ return krb5_check_transited_list(kdc_context, trans, realm1, realm2); } krb5_error_code validate_transit_path(krb5_context context, krb5_const_principal client, krb5_db_entry *server, krb5_db_entry *header_srv) { /* Incoming */ if (isflagset(server->attributes, KRB5_KDB_XREALM_NON_TRANSITIVE)) { return KRB5KDC_ERR_PATH_NOT_ACCEPTED; } /* Outgoing */ if (isflagset(header_srv->attributes, KRB5_KDB_XREALM_NON_TRANSITIVE) && (!krb5_principal_compare(context, server->princ, header_srv->princ) || !krb5_realm_compare(context, client, header_srv->princ))) { return KRB5KDC_ERR_PATH_NOT_ACCEPTED; } return 0; } krb5_boolean enctype_requires_etype_info_2(krb5_enctype enctype) { switch(enctype) { case ENCTYPE_DES_CBC_CRC: case ENCTYPE_DES_CBC_MD4: case ENCTYPE_DES_CBC_MD5: case ENCTYPE_DES3_CBC_SHA1: case ENCTYPE_DES3_CBC_RAW: case ENCTYPE_ARCFOUR_HMAC: case ENCTYPE_ARCFOUR_HMAC_EXP : return 0; default: return krb5_c_valid_enctype(enctype); } } /* Allocate a pa-data entry with an uninitialized buffer of size len. 
*/ krb5_error_code alloc_pa_data(krb5_preauthtype pa_type, size_t len, krb5_pa_data **out) { krb5_pa_data *pa; uint8_t *buf = NULL; *out = NULL; if (len > 0) { buf = malloc(len); if (buf == NULL) return ENOMEM; } pa = malloc(sizeof(*pa)); if (pa == NULL) { free(buf); return ENOMEM; } pa->magic = KV5M_PA_DATA; pa->pa_type = pa_type; pa->length = len; pa->contents = buf; *out = pa; return 0; } /* Add pa to list, claiming its memory. Free pa on failure. */ krb5_error_code add_pa_data_element(krb5_pa_data ***list, krb5_pa_data *pa) { size_t count; krb5_pa_data **newlist; for (count = 0; *list != NULL && (*list)[count] != NULL; count++); newlist = realloc(*list, (count + 2) * sizeof(*newlist)); if (newlist == NULL) { free(pa->contents); free(pa); return ENOMEM; } newlist[count] = pa; newlist[count + 1] = NULL; *list = newlist; return 0; } void kdc_get_ticket_endtime(kdc_realm_t *kdc_active_realm, krb5_timestamp starttime, krb5_timestamp endtime, krb5_timestamp till, krb5_db_entry *client, krb5_db_entry *server, krb5_timestamp *out_endtime) { krb5_timestamp until; krb5_deltat life; if (till == 0) till = kdc_infinity; until = ts_min(till, endtime); /* Determine the requested lifetime, capped at the maximum valid time * interval. */ life = ts_delta(until, starttime); if (ts_after(until, starttime) && life < 0) life = INT32_MAX; if (client != NULL && client->max_life != 0) life = min(life, client->max_life); if (server->max_life != 0) life = min(life, server->max_life); if (kdc_active_realm->realm_maxlife != 0) life = min(life, kdc_active_realm->realm_maxlife); *out_endtime = ts_incr(starttime, life); } /* * Set tkt->renew_till to the requested renewable lifetime as modified by * policy. Set the TKT_FLG_RENEWABLE flag if we set a nonzero renew_till. * client and tgt may be NULL. */ void kdc_get_ticket_renewtime(kdc_realm_t *realm, krb5_kdc_req *request, krb5_enc_tkt_part *tgt, krb5_db_entry *client, krb5_db_entry *server, krb5_enc_tkt_part *tkt) { krb5_timestamp rtime, max_rlife; clear(tkt->flags, TKT_FLG_RENEWABLE); tkt->times.renew_till = 0; /* Don't issue renewable tickets if the client or server don't allow it, * or if this is a TGS request and the TGT isn't renewable. */ if (server->attributes & KRB5_KDB_DISALLOW_RENEWABLE) return; if (client != NULL && (client->attributes & KRB5_KDB_DISALLOW_RENEWABLE)) return; if (tgt != NULL && !(tgt->flags & TKT_FLG_RENEWABLE)) return; /* Determine the requested renewable time. */ if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE)) rtime = request->rtime ? request->rtime : kdc_infinity; else if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE_OK) && ts_after(request->till, tkt->times.endtime)) rtime = request->till; else return; /* Truncate it to the allowable renewable time. */ if (tgt != NULL) rtime = ts_min(rtime, tgt->times.renew_till); max_rlife = min(server->max_renewable_life, realm->realm_maxrlife); if (client != NULL) max_rlife = min(max_rlife, client->max_renewable_life); rtime = ts_min(rtime, ts_incr(tkt->times.starttime, max_rlife)); /* If the client only specified renewable-ok, don't issue a renewable * ticket unless the truncated renew time exceeds the ticket end time. 
*/ if (!isflagset(request->kdc_options, KDC_OPT_RENEWABLE) && !ts_after(rtime, tkt->times.endtime)) return; setflag(tkt->flags, TKT_FLG_RENEWABLE); tkt->times.renew_till = rtime; } /** * Handle protected negotiation of FAST using enc_padata * - If ENCPADATA_REQ_ENC_PA_REP is present, then: * - Return ENCPADATA_REQ_ENC_PA_REP with checksum of AS-REQ from client * - Include PADATA_FX_FAST in the enc_padata to indicate FAST * @pre @c out_enc_padata has space for at least two more padata * @param index in/out index into @c out_enc_padata for next item */ krb5_error_code kdc_handle_protected_negotiation(krb5_context context, krb5_data *req_pkt, krb5_kdc_req *request, const krb5_keyblock *reply_key, krb5_pa_data ***out_enc_padata) { krb5_error_code retval = 0; krb5_checksum checksum; krb5_data *der_cksum = NULL; krb5_pa_data *pa, *pa_in; memset(&checksum, 0, sizeof(checksum)); pa_in = krb5int_find_pa_data(context, request->padata, KRB5_ENCPADATA_REQ_ENC_PA_REP); if (pa_in == NULL) return 0; /* Compute and encode a checksum over the AS-REQ. */ retval = krb5_c_make_checksum(context, 0, reply_key, KRB5_KEYUSAGE_AS_REQ, req_pkt, &checksum); if (retval != 0) goto cleanup; retval = encode_krb5_checksum(&checksum, &der_cksum); if (retval != 0) goto cleanup; /* Add a pa-data element to the list, stealing memory from der_cksum. */ retval = alloc_pa_data(KRB5_ENCPADATA_REQ_ENC_PA_REP, 0, &pa); if (retval) goto cleanup; pa->length = der_cksum->length; pa->contents = (uint8_t *)der_cksum->data; der_cksum->data = NULL; /* add_pa_data_element() claims pa on success or failure. */ retval = add_pa_data_element(out_enc_padata, pa); if (retval) goto cleanup; /* Add a zero-length PA-FX-FAST element to the list. */ retval = alloc_pa_data(KRB5_PADATA_FX_FAST, 0, &pa); if (retval) goto cleanup; /* add_pa_data_element() claims pa on success or failure. */ retval = add_pa_data_element(out_enc_padata, pa); cleanup: krb5_free_checksum_contents(context, &checksum); krb5_free_data(context, der_cksum); return retval; } /* * Although the KDC doesn't call this function directly, * process_tcp_connection_read() in net-server.c does call it. */ krb5_error_code make_toolong_error (void *handle, krb5_data **out) { krb5_error errpkt; krb5_error_code retval; krb5_data *scratch; struct server_handle *h = handle; retval = krb5_us_timeofday(h->kdc_err_context, &errpkt.stime, &errpkt.susec); if (retval) return retval; errpkt.error = KRB_ERR_FIELD_TOOLONG; errpkt.server = h->kdc_realmlist[0]->realm_tgsprinc; errpkt.client = NULL; errpkt.cusec = 0; errpkt.ctime = 0; errpkt.text.length = 0; errpkt.text.data = 0; errpkt.e_data.length = 0; errpkt.e_data.data = 0; scratch = malloc(sizeof(*scratch)); if (scratch == NULL) return ENOMEM; retval = krb5_mk_error(h->kdc_err_context, &errpkt, scratch); if (retval) { free(scratch); return retval; } *out = scratch; return 0; } void reset_for_hangup(void *ctx) { int k; struct server_handle *h = ctx; for (k = 0; k < h->kdc_numrealms; k++) krb5_db_refresh_config(h->kdc_realmlist[k]->realm_context); }
./CrossVul/dataset_final_sorted/CWE-617/c/good_487_0
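/*
 * Editor's note -- illustrative sketch, not part of the dataset record above.
 * alloc_pa_data() and add_pa_data_element() in that record maintain a
 * heap-allocated, NULL-terminated array of krb5_pa_data pointers, growing it
 * one slot at a time with realloc() and freeing the new element themselves on
 * failure so callers never leak it. The minimal standalone program below
 * (hypothetical names; plain C strings stand in for krb5_pa_data) shows the
 * same claim-ownership-even-on-failure pattern in isolation.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append item to the NULL-terminated array *list, claiming its memory.
 * On failure, free item so the caller never has to. */
static int
append_string(char ***list, char *item)
{
    size_t count;
    char **newlist;

    if (item == NULL)
        return ENOMEM;          /* an upstream strdup() failed */
    for (count = 0; *list != NULL && (*list)[count] != NULL; count++);
    newlist = realloc(*list, (count + 2) * sizeof(*newlist));
    if (newlist == NULL) {
        free(item);
        return ENOMEM;
    }
    newlist[count] = item;
    newlist[count + 1] = NULL;
    *list = newlist;
    return 0;
}

int
main(void)
{
    char **list = NULL;
    size_t i;

    if (append_string(&list, strdup("first")) != 0 ||
        append_string(&list, strdup("second")) != 0)
        return 1;
    for (i = 0; list[i] != NULL; i++)
        printf("%s\n", list[i]);
    for (i = 0; list[i] != NULL; i++)
        free(list[i]);
    free(list);
    return 0;
}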
crossvul-cpp_data_bad_219_0
/* * Error resilience / concealment * * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Error resilience / concealment. */ #include <limits.h> #include "libavutil/internal.h" #include "avcodec.h" #include "error_resilience.h" #include "me_cmp.h" #include "mpegutils.h" #include "mpegvideo.h" #include "rectangle.h" #include "thread.h" #include "version.h" /** * @param stride the number of MVs to get to the next row * @param mv_step the number of MVs per row or column in a macroblock */ static void set_mv_strides(ERContext *s, ptrdiff_t *mv_step, ptrdiff_t *stride) { if (s->avctx->codec_id == AV_CODEC_ID_H264) { av_assert0(s->quarter_sample); *mv_step = 4; *stride = s->mb_width * 4; } else { *mv_step = 2; *stride = s->b8_stride; } } /** * Replace the current MB with a flat dc-only version. */ static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y) { int *linesize = s->cur_pic.f->linesize; int dc, dcu, dcv, y, i; for (i = 0; i < 4; i++) { dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride]; if (dc < 0) dc = 0; else if (dc > 2040) dc = 2040; for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8; } } dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride]; dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride]; if (dcu < 0) dcu = 0; else if (dcu > 2040) dcu = 2040; if (dcv < 0) dcv = 0; else if (dcv > 2040) dcv = 2040; if (dest_cr) for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) { dest_cb[x + y * linesize[1]] = dcu / 8; dest_cr[x + y * linesize[2]] = dcv / 8; } } } static void filter181(int16_t *data, int width, int height, ptrdiff_t stride) { int x, y; /* horizontal filter */ for (y = 1; y < height - 1; y++) { int prev_dc = data[0 + y * stride]; for (x = 1; x < width - 1; x++) { int dc; dc = -prev_dc + data[x + y * stride] * 8 - data[x + 1 + y * stride]; dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) * 10923 + 32768) >> 16; prev_dc = data[x + y * stride]; data[x + y * stride] = dc; } } /* vertical filter */ for (x = 1; x < width - 1; x++) { int prev_dc = data[x]; for (y = 1; y < height - 1; y++) { int dc; dc = -prev_dc + data[x + y * stride] * 8 - data[x + (y + 1) * stride]; dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) * 10923 + 32768) >> 16; prev_dc = data[x + y * stride]; data[x + y * stride] = dc; } } } /** * guess the dc of blocks which do not have an undamaged dc * @param w width in 8 pixel blocks * @param h height in 8 pixel blocks */ static void guess_dc(ERContext *s, int16_t *dc, int w, int h, ptrdiff_t stride, int is_luma) { int b_x, b_y; int16_t (*col )[4] = av_malloc_array(stride, h*sizeof( int16_t)*4); uint32_t (*dist)[4] = av_malloc_array(stride, 
h*sizeof(uint32_t)*4); if(!col || !dist) { av_log(s->avctx, AV_LOG_ERROR, "guess_dc() is out of memory\n"); goto fail; } for(b_y=0; b_y<h; b_y++){ int color= 1024; int distance= -1; for(b_x=0; b_x<w; b_x++){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_x; } col [b_x + b_y*stride][1]= color; dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999; } color= 1024; distance= -1; for(b_x=w-1; b_x>=0; b_x--){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_x; } col [b_x + b_y*stride][0]= color; dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999; } } for(b_x=0; b_x<w; b_x++){ int color= 1024; int distance= -1; for(b_y=0; b_y<h; b_y++){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_y; } col [b_x + b_y*stride][3]= color; dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999; } color= 1024; distance= -1; for(b_y=h-1; b_y>=0; b_y--){ int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride; int error_j= s->error_status_table[mb_index_j]; int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]); if(intra_j==0 || !(error_j&ER_DC_ERROR)){ color= dc[b_x + b_y*stride]; distance= b_y; } col [b_x + b_y*stride][2]= color; dist[b_x + b_y*stride][2]= distance >= 0 ? 
distance-b_y : 9999; } } for (b_y = 0; b_y < h; b_y++) { for (b_x = 0; b_x < w; b_x++) { int mb_index, error, j; int64_t guess, weight_sum; mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride; error = s->error_status_table[mb_index]; if (IS_INTER(s->cur_pic.mb_type[mb_index])) continue; // inter if (!(error & ER_DC_ERROR)) continue; // dc-ok weight_sum = 0; guess = 0; for (j = 0; j < 4; j++) { int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y*stride][j], 1); guess += weight*(int64_t)col[b_x + b_y*stride][j]; weight_sum += weight; } guess = (guess + weight_sum / 2) / weight_sum; dc[b_x + b_y * stride] = guess; } } fail: av_freep(&col); av_freep(&dist); } /** * simple horizontal deblocking filter used for error resilience * @param w width in 8 pixel blocks * @param h height in 8 pixel blocks */ static void h_block_filter(ERContext *s, uint8_t *dst, int w, int h, ptrdiff_t stride, int is_luma) { int b_x, b_y; ptrdiff_t mvx_stride, mvy_stride; const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; set_mv_strides(s, &mvx_stride, &mvy_stride); mvx_stride >>= is_luma; mvy_stride *= mvx_stride; for (b_y = 0; b_y < h; b_y++) { for (b_x = 0; b_x < w - 1; b_x++) { int y; int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]; int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]; int left_intra = IS_INTRA(s->cur_pic.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]); int right_intra = IS_INTRA(s->cur_pic.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]); int left_damage = left_status & ER_MB_ERROR; int right_damage = right_status & ER_MB_ERROR; int offset = b_x * 8 + b_y * stride * 8; int16_t *left_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x]; int16_t *right_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)]; if (!(left_damage || right_damage)) continue; // both undamaged if ((!left_intra) && (!right_intra) && FFABS(left_mv[0] - right_mv[0]) + FFABS(left_mv[1] + right_mv[1]) < 2) continue; for (y = 0; y < 8; y++) { int a, b, c, d; a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride]; b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride]; c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride]; d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1); d = FFMAX(d, 0); if (b < 0) d = -d; if (d == 0) continue; if (!(left_damage && right_damage)) d = d * 16 / 9; if (left_damage) { dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)]; dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)]; dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)]; dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)]; } if (right_damage) { dst[offset + 8 + y * stride] = cm[dst[offset + 8 + y * stride] - ((d * 7) >> 4)]; dst[offset + 9 + y * stride] = cm[dst[offset + 9 + y * stride] - ((d * 5) >> 4)]; dst[offset + 10+ y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)]; dst[offset + 11+ y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)]; } } } } } /** * simple vertical deblocking filter used for error resilience * @param w width in 8 pixel blocks * @param h height in 8 pixel blocks */ static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h, ptrdiff_t stride, int is_luma) { int b_x, b_y; ptrdiff_t mvx_stride, mvy_stride; const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; set_mv_strides(s, &mvx_stride, 
&mvy_stride); mvx_stride >>= is_luma; mvy_stride *= mvx_stride; for (b_y = 0; b_y < h - 1; b_y++) { for (b_x = 0; b_x < w; b_x++) { int x; int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]; int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]; int top_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]); int bottom_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]); int top_damage = top_status & ER_MB_ERROR; int bottom_damage = bottom_status & ER_MB_ERROR; int offset = b_x * 8 + b_y * stride * 8; int16_t *top_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x]; int16_t *bottom_mv = s->cur_pic.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x]; if (!(top_damage || bottom_damage)) continue; // both undamaged if ((!top_intra) && (!bottom_intra) && FFABS(top_mv[0] - bottom_mv[0]) + FFABS(top_mv[1] + bottom_mv[1]) < 2) continue; for (x = 0; x < 8; x++) { int a, b, c, d; a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride]; b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride]; c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride]; d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1); d = FFMAX(d, 0); if (b < 0) d = -d; if (d == 0) continue; if (!(top_damage && bottom_damage)) d = d * 16 / 9; if (top_damage) { dst[offset + x + 7 * stride] = cm[dst[offset + x + 7 * stride] + ((d * 7) >> 4)]; dst[offset + x + 6 * stride] = cm[dst[offset + x + 6 * stride] + ((d * 5) >> 4)]; dst[offset + x + 5 * stride] = cm[dst[offset + x + 5 * stride] + ((d * 3) >> 4)]; dst[offset + x + 4 * stride] = cm[dst[offset + x + 4 * stride] + ((d * 1) >> 4)]; } if (bottom_damage) { dst[offset + x + 8 * stride] = cm[dst[offset + x + 8 * stride] - ((d * 7) >> 4)]; dst[offset + x + 9 * stride] = cm[dst[offset + x + 9 * stride] - ((d * 5) >> 4)]; dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)]; dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)]; } } } } } #define MV_FROZEN 8 #define MV_CHANGED 4 #define MV_UNCHANGED 2 #define MV_LISTED 1 static av_always_inline void add_blocklist(int (*blocklist)[2], int *blocklist_length, uint8_t *fixed, int mb_x, int mb_y, int mb_xy) { if (fixed[mb_xy]) return; fixed[mb_xy] = MV_LISTED; blocklist[ *blocklist_length ][0] = mb_x; blocklist[(*blocklist_length)++][1] = mb_y; } static void guess_mv(ERContext *s) { int (*blocklist)[2], (*next_blocklist)[2]; uint8_t *fixed; const ptrdiff_t mb_stride = s->mb_stride; const int mb_width = s->mb_width; int mb_height = s->mb_height; int i, depth, num_avail; int mb_x, mb_y; ptrdiff_t mot_step, mot_stride; int blocklist_length, next_blocklist_length; if (s->last_pic.f && s->last_pic.f->data[0]) mb_height = FFMIN(mb_height, (s->last_pic.f->height+15)>>4); if (s->next_pic.f && s->next_pic.f->data[0]) mb_height = FFMIN(mb_height, (s->next_pic.f->height+15)>>4); blocklist = (int (*)[2])s->er_temp_buffer; next_blocklist = blocklist + s->mb_stride * s->mb_height; fixed = (uint8_t *)(next_blocklist + s->mb_stride * s->mb_height); set_mv_strides(s, &mot_step, &mot_stride); num_avail = 0; if (s->last_pic.motion_val[0]) ff_thread_await_progress(s->last_pic.tf, mb_height-1, 0); for (i = 0; i < mb_width * mb_height; i++) { const int mb_xy = s->mb_index2xy[i]; int f = 0; int error = s->error_status_table[mb_xy]; if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) f = MV_FROZEN; // intra // FIXME check 
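/* Editor's note (added commentary, not in the original file): the fixed[]
 * states are ordered -- MV_FROZEN(8) > MV_CHANGED(4) > MV_UNCHANGED(2) >
 * MV_LISTED(1) -- so the "fixed[...] > 1" tests further down mean "this
 * neighbour already has a decided motion vector" (unchanged, changed, or
 * frozen), while exactly MV_LISTED only means the block is queued for a
 * later pass. */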
if (!(error & ER_MV_ERROR)) f = MV_FROZEN; // inter with undamaged MV fixed[mb_xy] = f; if (f == MV_FROZEN) num_avail++; else if(s->last_pic.f->data[0] && s->last_pic.motion_val[0]){ const int mb_y= mb_xy / s->mb_stride; const int mb_x= mb_xy % s->mb_stride; const int mot_index= (mb_x + mb_y*mot_stride) * mot_step; s->cur_pic.motion_val[0][mot_index][0]= s->last_pic.motion_val[0][mot_index][0]; s->cur_pic.motion_val[0][mot_index][1]= s->last_pic.motion_val[0][mot_index][1]; s->cur_pic.ref_index[0][4*mb_xy] = s->last_pic.ref_index[0][4*mb_xy]; } } if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width / 2) { for (mb_y = 0; mb_y < mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * s->mb_stride; int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD; if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) continue; if (!(s->error_status_table[mb_xy] & ER_MV_ERROR)) continue; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); } } return; } blocklist_length = 0; for (mb_y = 0; mb_y < mb_height; mb_y++) { for (mb_x = 0; mb_x < mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * mb_stride; if (fixed[mb_xy] == MV_FROZEN) { if (mb_x) add_blocklist(blocklist, &blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1); if (mb_y) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride); if (mb_x+1 < mb_width) add_blocklist(blocklist, &blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1); if (mb_y+1 < mb_height) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride); } } } for (depth = 0; ; depth++) { int changed, pass, none_left; int blocklist_index; none_left = 1; changed = 1; for (pass = 0; (changed || pass < 2) && pass < 10; pass++) { int score_sum = 0; changed = 0; for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) { const int mb_x = blocklist[blocklist_index][0]; const int mb_y = blocklist[blocklist_index][1]; const int mb_xy = mb_x + mb_y * mb_stride; int mv_predictor[8][2]; int ref[8]; int pred_count; int j; int best_score; int best_pred; int mot_index; int prev_x, prev_y, prev_ref; if ((mb_x ^ mb_y ^ pass) & 1) continue; av_assert2(fixed[mb_xy] != MV_FROZEN); av_assert1(!IS_INTRA(s->cur_pic.mb_type[mb_xy])); av_assert1(s->last_pic.f && s->last_pic.f->data[0]); j = 0; if (mb_x > 0) j |= fixed[mb_xy - 1]; if (mb_x + 1 < mb_width) j |= fixed[mb_xy + 1]; if (mb_y > 0) j |= fixed[mb_xy - mb_stride]; if (mb_y + 1 < mb_height) j |= fixed[mb_xy + mb_stride]; av_assert2(j & MV_FROZEN); if (!(j & MV_CHANGED) && pass > 1) continue; none_left = 0; pred_count = 0; mot_index = (mb_x + mb_y * mot_stride) * mot_step; if (mb_x > 0 && fixed[mb_xy - 1] > 1) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index - mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index - mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy - 1)]; pred_count++; } if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index + mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index + mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy + 1)]; pred_count++; } if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0]; mv_predictor[pred_count][1] = 
s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)]; pred_count++; } if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride] > 1) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)]; pred_count++; } if (pred_count == 0) continue; if (pred_count > 1) { int sum_x = 0, sum_y = 0, sum_r = 0; int max_x, max_y, min_x, min_y, max_r, min_r; for (j = 0; j < pred_count; j++) { sum_x += mv_predictor[j][0]; sum_y += mv_predictor[j][1]; sum_r += ref[j]; if (j && ref[j] != ref[j - 1]) goto skip_mean_and_median; } /* mean */ mv_predictor[pred_count][0] = sum_x / j; mv_predictor[pred_count][1] = sum_y / j; ref[pred_count] = sum_r / j; /* median */ if (pred_count >= 3) { min_y = min_x = min_r = 99999; max_y = max_x = max_r = -99999; } else { min_x = min_y = max_x = max_y = min_r = max_r = 0; } for (j = 0; j < pred_count; j++) { max_x = FFMAX(max_x, mv_predictor[j][0]); max_y = FFMAX(max_y, mv_predictor[j][1]); max_r = FFMAX(max_r, ref[j]); min_x = FFMIN(min_x, mv_predictor[j][0]); min_y = FFMIN(min_y, mv_predictor[j][1]); min_r = FFMIN(min_r, ref[j]); } mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x; mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y; ref[pred_count + 1] = sum_r - max_r - min_r; if (pred_count == 4) { mv_predictor[pred_count + 1][0] /= 2; mv_predictor[pred_count + 1][1] /= 2; ref[pred_count + 1] /= 2; } pred_count += 2; } skip_mean_and_median: /* zero MV */ mv_predictor[pred_count][0] = mv_predictor[pred_count][1] = ref[pred_count] = 0; pred_count++; prev_x = s->cur_pic.motion_val[0][mot_index][0]; prev_y = s->cur_pic.motion_val[0][mot_index][1]; prev_ref = s->cur_pic.ref_index[0][4 * mb_xy]; /* last MV */ mv_predictor[pred_count][0] = prev_x; mv_predictor[pred_count][1] = prev_y; ref[pred_count] = prev_ref; pred_count++; best_pred = 0; best_score = 256 * 256 * 256 * 64; for (j = 0; j < pred_count; j++) { int *linesize = s->cur_pic.f->linesize; int score = 0; uint8_t *src = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; s->cur_pic.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0]; s->cur_pic.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1]; // predictor intra or otherwise not available if (ref[j] < 0) continue; s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); if (mb_x > 0 && fixed[mb_xy - 1] > 1) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k * linesize[0] - 1] - src[k * linesize[0]]); } if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k * linesize[0] + 15] - src[k * linesize[0] + 16]); } if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k - linesize[0]] - src[k]); } if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] > 1) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k + linesize[0] * 15] - src[k + linesize[0] * 16]); } if (score <= best_score) { // <= will favor the last MV best_score = score; best_pred = j; } } score_sum += best_score; s->mv[0][0][0] = mv_predictor[best_pred][0]; s->mv[0][0][1] = mv_predictor[best_pred][1]; for (i = 0; i < mot_step; i++) for (j = 0; j < mot_step; j++) { s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0]; 
s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1]; } s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) { fixed[mb_xy] = MV_CHANGED; changed++; } else fixed[mb_xy] = MV_UNCHANGED; } } if (none_left) return; next_blocklist_length = 0; for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) { const int mb_x = blocklist[blocklist_index][0]; const int mb_y = blocklist[blocklist_index][1]; const int mb_xy = mb_x + mb_y * mb_stride; if (fixed[mb_xy] & (MV_CHANGED|MV_UNCHANGED|MV_FROZEN)) { fixed[mb_xy] = MV_FROZEN; if (mb_x > 0) add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1); if (mb_y > 0) add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride); if (mb_x + 1 < mb_width) add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1); if (mb_y + 1 < mb_height) add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride); } } av_assert0(next_blocklist_length <= mb_height * mb_width); FFSWAP(int , blocklist_length, next_blocklist_length); FFSWAP(void*, blocklist, next_blocklist); } } static int is_intra_more_likely(ERContext *s) { int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y; if (!s->last_pic.f || !s->last_pic.f->data[0]) return 1; // no previous frame available -> use spatial prediction if (s->avctx->error_concealment & FF_EC_FAVOR_INTER) return 0; undamaged_count = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; const int error = s->error_status_table[mb_xy]; if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR))) undamaged_count++; } if (undamaged_count < 5) return 0; // almost all MBs damaged -> use temporal prediction // prevent dsp.sad() check, that requires access to the image if (CONFIG_XVMC && s->avctx->hwaccel && s->avctx->hwaccel->decode_mb && s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) return 1; skip_amount = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs is_intra_likely = 0; j = 0; for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int error; const int mb_xy = mb_x + mb_y * s->mb_stride; error = s->error_status_table[mb_xy]; if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR)) continue; // skip damaged j++; // skip a few to speed things up if ((j % skip_amount) != 0) continue; if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) { int *linesize = s->cur_pic.f->linesize; uint8_t *mb_ptr = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; uint8_t *last_mb_ptr = s->last_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; if (s->avctx->codec_id == AV_CODEC_ID_H264) { // FIXME } else { ff_thread_await_progress(s->last_pic.tf, mb_y, 0); } is_intra_likely += s->mecc.sad[0](NULL, last_mb_ptr, mb_ptr, linesize[0], 16); // FIXME need await_progress() here is_intra_likely -= s->mecc.sad[0](NULL, last_mb_ptr, last_mb_ptr + linesize[0] * 16, linesize[0], 16); } else { if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) is_intra_likely++; else is_intra_likely--; } } } // av_log(NULL, AV_LOG_ERROR, "is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type); return is_intra_likely > 0; } void ff_er_frame_start(ERContext *s) { if (!s->avctx->error_concealment) return; if (!s->mecc_inited) { ff_me_cmp_init(&s->mecc, s->avctx); s->mecc_inited = 1; } memset(s->error_status_table, ER_MB_ERROR | VP_START | 
ER_MB_END, s->mb_stride * s->mb_height * sizeof(uint8_t)); atomic_init(&s->error_count, 3 * s->mb_num); s->error_occurred = 0; } static int er_supported(ERContext *s) { if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice || !s->cur_pic.f || s->cur_pic.field_picture || s->avctx->profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO ) return 0; return 1; } /** * Add a slice. * @param endx x component of the last macroblock, can be -1 * for the last of the previous line * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is * assumed that no earlier end or error of the same type occurred */ void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status) { const int start_i = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1); const int end_i = av_clip(endx + endy * s->mb_width, 0, s->mb_num); const int start_xy = s->mb_index2xy[start_i]; const int end_xy = s->mb_index2xy[end_i]; int mask = -1; if (s->avctx->hwaccel && s->avctx->hwaccel->decode_slice) return; if (start_i > end_i || start_xy > end_xy) { av_log(s->avctx, AV_LOG_ERROR, "internal error, slice end before start\n"); return; } if (!s->avctx->error_concealment) return; mask &= ~VP_START; if (status & (ER_AC_ERROR | ER_AC_END)) { mask &= ~(ER_AC_ERROR | ER_AC_END); atomic_fetch_add(&s->error_count, start_i - end_i - 1); } if (status & (ER_DC_ERROR | ER_DC_END)) { mask &= ~(ER_DC_ERROR | ER_DC_END); atomic_fetch_add(&s->error_count, start_i - end_i - 1); } if (status & (ER_MV_ERROR | ER_MV_END)) { mask &= ~(ER_MV_ERROR | ER_MV_END); atomic_fetch_add(&s->error_count, start_i - end_i - 1); } if (status & ER_MB_ERROR) { s->error_occurred = 1; atomic_store(&s->error_count, INT_MAX); } if (mask == ~0x7F) { memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t)); } else { int i; for (i = start_xy; i < end_xy; i++) s->error_status_table[i] &= mask; } if (end_i == s->mb_num) atomic_store(&s->error_count, INT_MAX); else { s->error_status_table[end_xy] &= mask; s->error_status_table[end_xy] |= status; } s->error_status_table[start_xy] |= VP_START; if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) && er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) { int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]]; prev_status &= ~ VP_START; if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END)) { s->error_occurred = 1; atomic_store(&s->error_count, INT_MAX); } } } void ff_er_frame_end(ERContext *s) { int *linesize = NULL; int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error; int distance; int threshold_part[4] = { 100, 100, 100 }; int threshold = 50; int is_intra_likely; int size = s->b8_stride * 2 * s->mb_height; /* We do not support ER of field pictures yet, * though it should not crash if enabled. 
*/ if (!s->avctx->error_concealment || !atomic_load(&s->error_count) || s->avctx->lowres || !er_supported(s) || atomic_load(&s->error_count) == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom)) { return; } linesize = s->cur_pic.f->linesize; for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int status = s->error_status_table[mb_x + (s->mb_height - 1) * s->mb_stride]; if (status != 0x7F) break; } if ( mb_x == s->mb_width && s->avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && (FFALIGN(s->avctx->height, 16)&16) && atomic_load(&s->error_count) == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom + 1) ) { av_log(s->avctx, AV_LOG_DEBUG, "ignoring last missing slice\n"); return; } if (s->last_pic.f) { if (s->last_pic.f->width != s->cur_pic.f->width || s->last_pic.f->height != s->cur_pic.f->height || s->last_pic.f->format != s->cur_pic.f->format) { av_log(s->avctx, AV_LOG_WARNING, "Cannot use previous picture in error concealment\n"); memset(&s->last_pic, 0, sizeof(s->last_pic)); } } if (s->next_pic.f) { if (s->next_pic.f->width != s->cur_pic.f->width || s->next_pic.f->height != s->cur_pic.f->height || s->next_pic.f->format != s->cur_pic.f->format) { av_log(s->avctx, AV_LOG_WARNING, "Cannot use next picture in error concealment\n"); memset(&s->next_pic, 0, sizeof(s->next_pic)); } } if (!s->cur_pic.motion_val[0] || !s->cur_pic.ref_index[0]) { av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n"); for (i = 0; i < 2; i++) { s->ref_index_buf[i] = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t)); s->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t)); if (!s->ref_index_buf[i] || !s->motion_val_buf[i]) break; s->cur_pic.ref_index[i] = s->ref_index_buf[i]->data; s->cur_pic.motion_val[i] = (int16_t (*)[2])s->motion_val_buf[i]->data + 4; } if (i < 2) { for (i = 0; i < 2; i++) { av_buffer_unref(&s->ref_index_buf[i]); av_buffer_unref(&s->motion_val_buf[i]); s->cur_pic.ref_index[i] = NULL; s->cur_pic.motion_val[i] = NULL; } return; } } if (s->avctx->debug & FF_DEBUG_ER) { for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int status = s->error_status_table[mb_x + mb_y * s->mb_stride]; av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status); } av_log(s->avctx, AV_LOG_DEBUG, "\n"); } } #if 1 /* handle overlapping slices */ for (error_type = 1; error_type <= 3; error_type++) { int end_ok = 0; for (i = s->mb_num - 1; i >= 0; i--) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & (1 << error_type)) end_ok = 1; if (error & (8 << error_type)) end_ok = 1; if (!end_ok) s->error_status_table[mb_xy] |= 1 << error_type; if (error & VP_START) end_ok = 0; } } #endif #if 1 /* handle slices with partitions of different length */ if (s->partitioned_frame) { int end_ok = 0; for (i = s->mb_num - 1; i >= 0; i--) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & ER_AC_END) end_ok = 0; if ((error & ER_MV_END) || (error & ER_DC_END) || (error & ER_AC_ERROR)) end_ok = 1; if (!end_ok) s->error_status_table[mb_xy]|= ER_AC_ERROR; if (error & VP_START) end_ok = 0; } } #endif /* handle missing slices */ if (s->avctx->err_recognition & AV_EF_EXPLODE) { int end_ok = 1; // FIXME + 100 hack for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) { const int mb_xy = s->mb_index2xy[i]; int error1 = s->error_status_table[mb_xy]; int error2 = s->error_status_table[s->mb_index2xy[i + 1]]; if (error1 & VP_START) end_ok = 1; if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) 
&& error1 != (VP_START | ER_MB_ERROR | ER_MB_END) && ((error1 & ER_AC_END) || (error1 & ER_DC_END) || (error1 & ER_MV_END))) { // end & uninit end_ok = 0; } if (!end_ok) s->error_status_table[mb_xy] |= ER_MB_ERROR; } } #if 1 /* backward mark errors */ distance = 9999999; for (error_type = 1; error_type <= 3; error_type++) { for (i = s->mb_num - 1; i >= 0; i--) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (!s->mbskip_table || !s->mbskip_table[mb_xy]) // FIXME partition specific distance++; if (error & (1 << error_type)) distance = 0; if (s->partitioned_frame) { if (distance < threshold_part[error_type - 1]) s->error_status_table[mb_xy] |= 1 << error_type; } else { if (distance < threshold) s->error_status_table[mb_xy] |= 1 << error_type; } if (error & VP_START) distance = 9999999; } } #endif /* forward mark errors */ error = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int old_error = s->error_status_table[mb_xy]; if (old_error & VP_START) { error = old_error & ER_MB_ERROR; } else { error |= old_error & ER_MB_ERROR; s->error_status_table[mb_xy] |= error; } } #if 1 /* handle not partitioned case */ if (!s->partitioned_frame) { for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & ER_MB_ERROR) error |= ER_MB_ERROR; s->error_status_table[mb_xy] = error; } } #endif dc_error = ac_error = mv_error = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & ER_DC_ERROR) dc_error++; if (error & ER_AC_ERROR) ac_error++; if (error & ER_MV_ERROR) mv_error++; } av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors in %c frame\n", dc_error, ac_error, mv_error, av_get_picture_type_char(s->cur_pic.f->pict_type)); is_intra_likely = is_intra_more_likely(s); /* set unknown mb-type to most likely */ for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR))) continue; if (is_intra_likely) s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4; else s->cur_pic.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0; } // change inter to intra blocks if no reference frames are available if (!(s->last_pic.f && s->last_pic.f->data[0]) && !(s->next_pic.f && s->next_pic.f->data[0])) for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; if (!IS_INTRA(s->cur_pic.mb_type[mb_xy])) s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4; } /* handle inter blocks with damaged AC */ for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic.mb_type[mb_xy]; const int dir = !(s->last_pic.f && s->last_pic.f->data[0]); const int mv_dir = dir ? 
MV_DIR_BACKWARD : MV_DIR_FORWARD; int mv_type; int error = s->error_status_table[mb_xy]; if (IS_INTRA(mb_type)) continue; // intra if (error & ER_MV_ERROR) continue; // inter with damaged MV if (!(error & ER_AC_ERROR)) continue; // undamaged inter if (IS_8X8(mb_type)) { int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride; int j; mv_type = MV_TYPE_8X8; for (j = 0; j < 4; j++) { s->mv[0][j][0] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0]; s->mv[0][j][1] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1]; } } else { mv_type = MV_TYPE_16X16; s->mv[0][0][0] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0]; s->mv[0][0][1] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1]; } s->decode_mb(s->opaque, 0 /* FIXME H.264 partitioned slices need this set */, mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0); } } /* guess MVs */ if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_B) { for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int xy = mb_x * 2 + mb_y * 2 * s->b8_stride; const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic.mb_type[mb_xy]; int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; int error = s->error_status_table[mb_xy]; if (IS_INTRA(mb_type)) continue; if (!(error & ER_MV_ERROR)) continue; // inter with undamaged MV if (!(error & ER_AC_ERROR)) continue; // undamaged inter if (!(s->last_pic.f && s->last_pic.f->data[0])) mv_dir &= ~MV_DIR_FORWARD; if (!(s->next_pic.f && s->next_pic.f->data[0])) mv_dir &= ~MV_DIR_BACKWARD; if (s->pp_time) { int time_pp = s->pp_time; int time_pb = s->pb_time; av_assert0(s->avctx->codec_id != AV_CODEC_ID_H264); ff_thread_await_progress(s->next_pic.tf, mb_y, 0); s->mv[0][0][0] = s->next_pic.motion_val[0][xy][0] * time_pb / time_pp; s->mv[0][0][1] = s->next_pic.motion_val[0][xy][1] * time_pb / time_pp; s->mv[1][0][0] = s->next_pic.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp; s->mv[1][0][1] = s->next_pic.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp; } else { s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mv[1][0][0] = 0; s->mv[1][0][1] = 0; } s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); } } } else guess_mv(s); /* the filters below manipulate raw image, skip them */ if (CONFIG_XVMC && s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) goto ec_clean; /* fill DC for inter blocks */ for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int dc, dcu, dcv, y, n; int16_t *dc_ptr; uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic.mb_type[mb_xy]; // error = s->error_status_table[mb_xy]; if (IS_INTRA(mb_type) && s->partitioned_frame) continue; // if (error & ER_MV_ERROR) // continue; // inter data damaged FIXME is this good? 
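/* Editor's note (added commentary, not in the original file): the statements
 * below store, per 8x8 block, eight times the mean sample value --
 * (sum of 64 samples + 4) >> 3, giving a 0..2040 range -- into dc_val[];
 * put_dc() earlier in this file clamps to 2040 and divides by 8 again when
 * painting, so both ends of the scheme agree on that scaling. */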
dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1]; dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2]; dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride]; for (n = 0; n < 4; n++) { dc = 0; for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) dc += dest_y[x + (n & 1) * 8 + (y + (n >> 1) * 8) * linesize[0]]; } dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3; } if (!s->cur_pic.f->data[2]) continue; dcu = dcv = 0; for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) { dcu += dest_cb[x + y * linesize[1]]; dcv += dest_cr[x + y * linesize[2]]; } } s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3; s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3; } } #if 1 /* guess DC for damaged blocks */ guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1); guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0); guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0); #endif /* filter luma DC */ filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride); #if 1 /* render DC only intra */ for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic.mb_type[mb_xy]; int error = s->error_status_table[mb_xy]; if (IS_INTER(mb_type)) continue; if (!(error & ER_AC_ERROR)) continue; // undamaged dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1]; dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2]; if (!s->cur_pic.f->data[2]) dest_cb = dest_cr = NULL; put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y); } } #endif if (s->avctx->error_concealment & FF_EC_DEBLOCK) { /* filter horizontal block boundaries */ h_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2, s->mb_height * 2, linesize[0], 1); /* filter vertical block boundaries */ v_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2, s->mb_height * 2, linesize[0], 1); if (s->cur_pic.f->data[2]) { h_block_filter(s, s->cur_pic.f->data[1], s->mb_width, s->mb_height, linesize[1], 0); h_block_filter(s, s->cur_pic.f->data[2], s->mb_width, s->mb_height, linesize[2], 0); v_block_filter(s, s->cur_pic.f->data[1], s->mb_width, s->mb_height, linesize[1], 0); v_block_filter(s, s->cur_pic.f->data[2], s->mb_width, s->mb_height, linesize[2], 0); } } ec_clean: /* clean a few tables */ for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (s->mbskip_table && s->cur_pic.f->pict_type != AV_PICTURE_TYPE_B && (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) { s->mbskip_table[mb_xy] = 0; } if (s->mbintra_table) s->mbintra_table[mb_xy] = 1; } for (i = 0; i < 2; i++) { av_buffer_unref(&s->ref_index_buf[i]); av_buffer_unref(&s->motion_val_buf[i]); s->cur_pic.ref_index[i] = NULL; s->cur_pic.motion_val[i] = NULL; } memset(&s->cur_pic, 0, sizeof(ERPicture)); memset(&s->last_pic, 0, sizeof(ERPicture)); memset(&s->next_pic, 0, sizeof(ERPicture)); }
./CrossVul/dataset_final_sorted/CWE-617/c/bad_219_0
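/*
 * Editor's note -- illustrative sketch, not part of the dataset record above.
 * guess_dc() in that record fills a damaged block's DC from the nearest
 * undamaged DC found in each of the four directions, weighting every
 * candidate by the inverse of its distance:
 *
 *     guess = round(sum_j w_j * dc_j / sum_j w_j),  w_j = K / max(dist_j, 1)
 *
 * The standalone program below (hypothetical names, made-up inputs)
 * reproduces just that arithmetic for a single block.
 */
#include <stdint.h>
#include <stdio.h>

static int16_t
weighted_dc_guess(const int16_t dc[4], const uint32_t dist[4])
{
    const int64_t k = (int64_t)256 * 256 * 256 * 16;  /* same K as FFmpeg */
    int64_t guess = 0, weight_sum = 0;
    int j;

    for (j = 0; j < 4; j++) {
        int64_t weight = k / (dist[j] > 1 ? dist[j] : 1);
        guess      += weight * dc[j];
        weight_sum += weight;
    }
    /* Round to nearest, as (guess + weight_sum / 2) / weight_sum does. */
    return (int16_t)((guess + weight_sum / 2) / weight_sum);
}

int
main(void)
{
    /* Candidate DCs from left/right/top/bottom and their distances; 9999
     * plays the role of "no undamaged block found in this direction". */
    const int16_t  dc[4]   = { 96, 128, 100, 140 };
    const uint32_t dist[4] = {  1,   4,   2, 9999 };

    printf("guessed dc: %d\n", weighted_dc_guess(dc, dist));
    return 0;
}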
crossvul-cpp_data_good_389_0
/* * Copyright (c) 2008-2017 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <config.h> #include <ctype.h> #include <errno.h> #include <inttypes.h> #include <sys/types.h> #include <netinet/in.h> #include <netinet/icmp6.h> #include <stdlib.h> #include "bitmap.h" #include "bundle.h" #include "byte-order.h" #include "classifier.h" #include "learn.h" #include "multipath.h" #include "netdev.h" #include "nx-match.h" #include "id-pool.h" #include "openflow/netronome-ext.h" #include "openvswitch/dynamic-string.h" #include "openvswitch/meta-flow.h" #include "openvswitch/ofp-actions.h" #include "openvswitch/ofp-errors.h" #include "openvswitch/ofp-msgs.h" #include "openvswitch/ofp-print.h" #include "openvswitch/ofp-prop.h" #include "openvswitch/ofp-util.h" #include "openvswitch/ofpbuf.h" #include "openvswitch/type-props.h" #include "openvswitch/vlog.h" #include "openflow/intel-ext.h" #include "packets.h" #include "random.h" #include "tun-metadata.h" #include "unaligned.h" #include "util.h" #include "uuid.h" VLOG_DEFINE_THIS_MODULE(ofp_util); /* Rate limit for OpenFlow message parse errors. These always indicate a bug * in the peer and so there's not much point in showing a lot of them. */ static struct vlog_rate_limit bad_ofmsg_rl = VLOG_RATE_LIMIT_INIT(1, 5); static enum ofputil_table_vacancy ofputil_decode_table_vacancy( ovs_be32 config, enum ofp_version); static enum ofputil_table_eviction ofputil_decode_table_eviction( ovs_be32 config, enum ofp_version); static ovs_be32 ofputil_encode_table_config(enum ofputil_table_miss, enum ofputil_table_eviction, enum ofputil_table_vacancy, enum ofp_version); /* Given the wildcard bit count in the least-significant 6 of 'wcbits', returns * an IP netmask with a 1 in each bit that must match and a 0 in each bit that * is wildcarded. * * The bits in 'wcbits' are in the format used in enum ofp_flow_wildcards: 0 * is exact match, 1 ignores the LSB, 2 ignores the 2 least-significant bits, * ..., 32 and higher wildcard the entire field. This is the *opposite* of the * usual convention where e.g. /24 indicates that 8 bits (not 24 bits) are * wildcarded. */ ovs_be32 ofputil_wcbits_to_netmask(int wcbits) { wcbits &= 0x3f; return wcbits < 32 ? htonl(~((1u << wcbits) - 1)) : 0; } /* Given the IP netmask 'netmask', returns the number of bits of the IP address * that it wildcards, that is, the number of 0-bits in 'netmask', a number * between 0 and 32 inclusive. * * If 'netmask' is not a CIDR netmask (see ip_is_cidr()), the return value will * still be in the valid range but isn't otherwise meaningful. */ int ofputil_netmask_to_wcbits(ovs_be32 netmask) { return 32 - ip_count_cidr_bits(netmask); } /* Converts the OpenFlow 1.0 wildcards in 'ofpfw' (OFPFW10_*) into a * flow_wildcards in 'wc' for use in struct match. It is the caller's * responsibility to handle the special case where the flow match's dl_vlan is * set to OFP_VLAN_NONE. 
*/ void ofputil_wildcard_from_ofpfw10(uint32_t ofpfw, struct flow_wildcards *wc) { BUILD_ASSERT_DECL(FLOW_WC_SEQ == 36); /* Initialize most of wc. */ flow_wildcards_init_catchall(wc); if (!(ofpfw & OFPFW10_IN_PORT)) { wc->masks.in_port.ofp_port = u16_to_ofp(UINT16_MAX); } if (!(ofpfw & OFPFW10_NW_TOS)) { wc->masks.nw_tos |= IP_DSCP_MASK; } if (!(ofpfw & OFPFW10_NW_PROTO)) { wc->masks.nw_proto = UINT8_MAX; } wc->masks.nw_src = ofputil_wcbits_to_netmask(ofpfw >> OFPFW10_NW_SRC_SHIFT); wc->masks.nw_dst = ofputil_wcbits_to_netmask(ofpfw >> OFPFW10_NW_DST_SHIFT); if (!(ofpfw & OFPFW10_TP_SRC)) { wc->masks.tp_src = OVS_BE16_MAX; } if (!(ofpfw & OFPFW10_TP_DST)) { wc->masks.tp_dst = OVS_BE16_MAX; } if (!(ofpfw & OFPFW10_DL_SRC)) { WC_MASK_FIELD(wc, dl_src); } if (!(ofpfw & OFPFW10_DL_DST)) { WC_MASK_FIELD(wc, dl_dst); } if (!(ofpfw & OFPFW10_DL_TYPE)) { wc->masks.dl_type = OVS_BE16_MAX; } /* VLAN TCI mask. */ if (!(ofpfw & OFPFW10_DL_VLAN_PCP)) { wc->masks.vlan_tci |= htons(VLAN_PCP_MASK | VLAN_CFI); } if (!(ofpfw & OFPFW10_DL_VLAN)) { wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI); } } /* Converts the ofp10_match in 'ofmatch' into a struct match in 'match'. */ void ofputil_match_from_ofp10_match(const struct ofp10_match *ofmatch, struct match *match) { uint32_t ofpfw = ntohl(ofmatch->wildcards) & OFPFW10_ALL; /* Initialize match->wc. */ memset(&match->flow, 0, sizeof match->flow); ofputil_wildcard_from_ofpfw10(ofpfw, &match->wc); memset(&match->tun_md, 0, sizeof match->tun_md); /* Initialize most of match->flow. */ match->flow.nw_src = ofmatch->nw_src; match->flow.nw_dst = ofmatch->nw_dst; match->flow.in_port.ofp_port = u16_to_ofp(ntohs(ofmatch->in_port)); match->flow.dl_type = ofputil_dl_type_from_openflow(ofmatch->dl_type); match->flow.tp_src = ofmatch->tp_src; match->flow.tp_dst = ofmatch->tp_dst; match->flow.dl_src = ofmatch->dl_src; match->flow.dl_dst = ofmatch->dl_dst; match->flow.nw_tos = ofmatch->nw_tos & IP_DSCP_MASK; match->flow.nw_proto = ofmatch->nw_proto; /* Translate VLANs. */ if (!(ofpfw & OFPFW10_DL_VLAN) && ofmatch->dl_vlan == htons(OFP10_VLAN_NONE)) { /* Match only packets without 802.1Q header. * * When OFPFW10_DL_VLAN_PCP is wildcarded, this is obviously correct. * * If OFPFW10_DL_VLAN_PCP is matched, the flow match is contradictory, * because we can't have a specific PCP without an 802.1Q header. * However, older versions of OVS treated this as matching packets * withut an 802.1Q header, so we do here too. */ match->flow.vlan_tci = htons(0); match->wc.masks.vlan_tci = htons(0xffff); } else { ovs_be16 vid, pcp, tci; uint16_t hpcp; vid = ofmatch->dl_vlan & htons(VLAN_VID_MASK); hpcp = (ofmatch->dl_vlan_pcp << VLAN_PCP_SHIFT) & VLAN_PCP_MASK; pcp = htons(hpcp); tci = vid | pcp | htons(VLAN_CFI); match->flow.vlan_tci = tci & match->wc.masks.vlan_tci; } /* Clean up. */ match_zero_wildcarded_fields(match); } /* Convert 'match' into the OpenFlow 1.0 match structure 'ofmatch'. */ void ofputil_match_to_ofp10_match(const struct match *match, struct ofp10_match *ofmatch) { const struct flow_wildcards *wc = &match->wc; uint32_t ofpfw; /* Figure out most OpenFlow wildcards. 
*/ ofpfw = 0; if (!wc->masks.in_port.ofp_port) { ofpfw |= OFPFW10_IN_PORT; } if (!wc->masks.dl_type) { ofpfw |= OFPFW10_DL_TYPE; } if (!wc->masks.nw_proto) { ofpfw |= OFPFW10_NW_PROTO; } ofpfw |= (ofputil_netmask_to_wcbits(wc->masks.nw_src) << OFPFW10_NW_SRC_SHIFT); ofpfw |= (ofputil_netmask_to_wcbits(wc->masks.nw_dst) << OFPFW10_NW_DST_SHIFT); if (!(wc->masks.nw_tos & IP_DSCP_MASK)) { ofpfw |= OFPFW10_NW_TOS; } if (!wc->masks.tp_src) { ofpfw |= OFPFW10_TP_SRC; } if (!wc->masks.tp_dst) { ofpfw |= OFPFW10_TP_DST; } if (eth_addr_is_zero(wc->masks.dl_src)) { ofpfw |= OFPFW10_DL_SRC; } if (eth_addr_is_zero(wc->masks.dl_dst)) { ofpfw |= OFPFW10_DL_DST; } /* Translate VLANs. */ ofmatch->dl_vlan = htons(0); ofmatch->dl_vlan_pcp = 0; if (match->wc.masks.vlan_tci == htons(0)) { ofpfw |= OFPFW10_DL_VLAN | OFPFW10_DL_VLAN_PCP; } else if (match->wc.masks.vlan_tci & htons(VLAN_CFI) && !(match->flow.vlan_tci & htons(VLAN_CFI))) { ofmatch->dl_vlan = htons(OFP10_VLAN_NONE); } else { if (!(match->wc.masks.vlan_tci & htons(VLAN_VID_MASK))) { ofpfw |= OFPFW10_DL_VLAN; } else { ofmatch->dl_vlan = htons(vlan_tci_to_vid(match->flow.vlan_tci)); } if (!(match->wc.masks.vlan_tci & htons(VLAN_PCP_MASK))) { ofpfw |= OFPFW10_DL_VLAN_PCP; } else { ofmatch->dl_vlan_pcp = vlan_tci_to_pcp(match->flow.vlan_tci); } } /* Compose most of the match structure. */ ofmatch->wildcards = htonl(ofpfw); ofmatch->in_port = htons(ofp_to_u16(match->flow.in_port.ofp_port)); ofmatch->dl_src = match->flow.dl_src; ofmatch->dl_dst = match->flow.dl_dst; ofmatch->dl_type = ofputil_dl_type_to_openflow(match->flow.dl_type); ofmatch->nw_src = match->flow.nw_src; ofmatch->nw_dst = match->flow.nw_dst; ofmatch->nw_tos = match->flow.nw_tos & IP_DSCP_MASK; ofmatch->nw_proto = match->flow.nw_proto; ofmatch->tp_src = match->flow.tp_src; ofmatch->tp_dst = match->flow.tp_dst; memset(ofmatch->pad1, '\0', sizeof ofmatch->pad1); memset(ofmatch->pad2, '\0', sizeof ofmatch->pad2); } enum ofperr ofputil_pull_ofp11_match(struct ofpbuf *buf, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct match *match, uint16_t *padded_match_len) { struct ofp11_match_header *omh = buf->data; uint16_t match_len; if (buf->size < sizeof *omh) { return OFPERR_OFPBMC_BAD_LEN; } match_len = ntohs(omh->length); switch (ntohs(omh->type)) { case OFPMT_STANDARD: { struct ofp11_match *om; if (match_len != sizeof *om || buf->size < sizeof *om) { return OFPERR_OFPBMC_BAD_LEN; } om = ofpbuf_pull(buf, sizeof *om); if (padded_match_len) { *padded_match_len = match_len; } return ofputil_match_from_ofp11_match(om, match); } case OFPMT_OXM: if (padded_match_len) { *padded_match_len = ROUND_UP(match_len, 8); } return oxm_pull_match(buf, tun_table, vl_mff_map, match); default: return OFPERR_OFPBMC_BAD_TYPE; } } /* Converts the ofp11_match in 'ofmatch' into a struct match in 'match'. * Returns 0 if successful, otherwise an OFPERR_* value. 
*/ enum ofperr ofputil_match_from_ofp11_match(const struct ofp11_match *ofmatch, struct match *match) { uint16_t wc = ntohl(ofmatch->wildcards); bool ipv4, arp, rarp; match_init_catchall(match); if (!(wc & OFPFW11_IN_PORT)) { ofp_port_t ofp_port; enum ofperr error; error = ofputil_port_from_ofp11(ofmatch->in_port, &ofp_port); if (error) { return OFPERR_OFPBMC_BAD_VALUE; } match_set_in_port(match, ofp_port); } match_set_dl_src_masked(match, ofmatch->dl_src, eth_addr_invert(ofmatch->dl_src_mask)); match_set_dl_dst_masked(match, ofmatch->dl_dst, eth_addr_invert(ofmatch->dl_dst_mask)); if (!(wc & OFPFW11_DL_VLAN)) { if (ofmatch->dl_vlan == htons(OFPVID11_NONE)) { /* Match only packets without a VLAN tag. */ match->flow.vlan_tci = htons(0); match->wc.masks.vlan_tci = OVS_BE16_MAX; } else { if (ofmatch->dl_vlan == htons(OFPVID11_ANY)) { /* Match any packet with a VLAN tag regardless of VID. */ match->flow.vlan_tci = htons(VLAN_CFI); match->wc.masks.vlan_tci = htons(VLAN_CFI); } else if (ntohs(ofmatch->dl_vlan) < 4096) { /* Match only packets with the specified VLAN VID. */ match->flow.vlan_tci = htons(VLAN_CFI) | ofmatch->dl_vlan; match->wc.masks.vlan_tci = htons(VLAN_CFI | VLAN_VID_MASK); } else { /* Invalid VID. */ return OFPERR_OFPBMC_BAD_VALUE; } if (!(wc & OFPFW11_DL_VLAN_PCP)) { if (ofmatch->dl_vlan_pcp <= 7) { match->flow.vlan_tci |= htons(ofmatch->dl_vlan_pcp << VLAN_PCP_SHIFT); match->wc.masks.vlan_tci |= htons(VLAN_PCP_MASK); } else { /* Invalid PCP. */ return OFPERR_OFPBMC_BAD_VALUE; } } } } if (!(wc & OFPFW11_DL_TYPE)) { match_set_dl_type(match, ofputil_dl_type_from_openflow(ofmatch->dl_type)); } ipv4 = match->flow.dl_type == htons(ETH_TYPE_IP); arp = match->flow.dl_type == htons(ETH_TYPE_ARP); rarp = match->flow.dl_type == htons(ETH_TYPE_RARP); if (ipv4 && !(wc & OFPFW11_NW_TOS)) { if (ofmatch->nw_tos & ~IP_DSCP_MASK) { /* Invalid TOS. */ return OFPERR_OFPBMC_BAD_VALUE; } match_set_nw_dscp(match, ofmatch->nw_tos); } if (ipv4 || arp || rarp) { if (!(wc & OFPFW11_NW_PROTO)) { match_set_nw_proto(match, ofmatch->nw_proto); } match_set_nw_src_masked(match, ofmatch->nw_src, ~ofmatch->nw_src_mask); match_set_nw_dst_masked(match, ofmatch->nw_dst, ~ofmatch->nw_dst_mask); } #define OFPFW11_TP_ALL (OFPFW11_TP_SRC | OFPFW11_TP_DST) if (ipv4 && (wc & OFPFW11_TP_ALL) != OFPFW11_TP_ALL) { switch (match->flow.nw_proto) { case IPPROTO_ICMP: /* "A.2.3 Flow Match Structures" in OF1.1 says: * * The tp_src and tp_dst fields will be ignored unless the * network protocol specified is as TCP, UDP or SCTP. * * but I'm pretty sure we should support ICMP too, otherwise * that's a regression from OF1.0. */ if (!(wc & OFPFW11_TP_SRC)) { uint16_t icmp_type = ntohs(ofmatch->tp_src); if (icmp_type < 0x100) { match_set_icmp_type(match, icmp_type); } else { return OFPERR_OFPBMC_BAD_FIELD; } } if (!(wc & OFPFW11_TP_DST)) { uint16_t icmp_code = ntohs(ofmatch->tp_dst); if (icmp_code < 0x100) { match_set_icmp_code(match, icmp_code); } else { return OFPERR_OFPBMC_BAD_FIELD; } } break; case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_SCTP: if (!(wc & (OFPFW11_TP_SRC))) { match_set_tp_src(match, ofmatch->tp_src); } if (!(wc & (OFPFW11_TP_DST))) { match_set_tp_dst(match, ofmatch->tp_dst); } break; default: /* OF1.1 says explicitly to ignore this. 
*/ break; } } if (eth_type_mpls(match->flow.dl_type)) { if (!(wc & OFPFW11_MPLS_LABEL)) { match_set_mpls_label(match, 0, ofmatch->mpls_label); } if (!(wc & OFPFW11_MPLS_TC)) { match_set_mpls_tc(match, 0, ofmatch->mpls_tc); } } match_set_metadata_masked(match, ofmatch->metadata, ~ofmatch->metadata_mask); return 0; } /* Convert 'match' into the OpenFlow 1.1 match structure 'ofmatch'. */ void ofputil_match_to_ofp11_match(const struct match *match, struct ofp11_match *ofmatch) { uint32_t wc = 0; memset(ofmatch, 0, sizeof *ofmatch); ofmatch->omh.type = htons(OFPMT_STANDARD); ofmatch->omh.length = htons(OFPMT11_STANDARD_LENGTH); if (!match->wc.masks.in_port.ofp_port) { wc |= OFPFW11_IN_PORT; } else { ofmatch->in_port = ofputil_port_to_ofp11(match->flow.in_port.ofp_port); } ofmatch->dl_src = match->flow.dl_src; ofmatch->dl_src_mask = eth_addr_invert(match->wc.masks.dl_src); ofmatch->dl_dst = match->flow.dl_dst; ofmatch->dl_dst_mask = eth_addr_invert(match->wc.masks.dl_dst); if (match->wc.masks.vlan_tci == htons(0)) { wc |= OFPFW11_DL_VLAN | OFPFW11_DL_VLAN_PCP; } else if (match->wc.masks.vlan_tci & htons(VLAN_CFI) && !(match->flow.vlan_tci & htons(VLAN_CFI))) { ofmatch->dl_vlan = htons(OFPVID11_NONE); wc |= OFPFW11_DL_VLAN_PCP; } else { if (!(match->wc.masks.vlan_tci & htons(VLAN_VID_MASK))) { ofmatch->dl_vlan = htons(OFPVID11_ANY); } else { ofmatch->dl_vlan = htons(vlan_tci_to_vid(match->flow.vlan_tci)); } if (!(match->wc.masks.vlan_tci & htons(VLAN_PCP_MASK))) { wc |= OFPFW11_DL_VLAN_PCP; } else { ofmatch->dl_vlan_pcp = vlan_tci_to_pcp(match->flow.vlan_tci); } } if (!match->wc.masks.dl_type) { wc |= OFPFW11_DL_TYPE; } else { ofmatch->dl_type = ofputil_dl_type_to_openflow(match->flow.dl_type); } if (!(match->wc.masks.nw_tos & IP_DSCP_MASK)) { wc |= OFPFW11_NW_TOS; } else { ofmatch->nw_tos = match->flow.nw_tos & IP_DSCP_MASK; } if (!match->wc.masks.nw_proto) { wc |= OFPFW11_NW_PROTO; } else { ofmatch->nw_proto = match->flow.nw_proto; } ofmatch->nw_src = match->flow.nw_src; ofmatch->nw_src_mask = ~match->wc.masks.nw_src; ofmatch->nw_dst = match->flow.nw_dst; ofmatch->nw_dst_mask = ~match->wc.masks.nw_dst; if (!match->wc.masks.tp_src) { wc |= OFPFW11_TP_SRC; } else { ofmatch->tp_src = match->flow.tp_src; } if (!match->wc.masks.tp_dst) { wc |= OFPFW11_TP_DST; } else { ofmatch->tp_dst = match->flow.tp_dst; } if (!(match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK))) { wc |= OFPFW11_MPLS_LABEL; } else { ofmatch->mpls_label = htonl(mpls_lse_to_label( match->flow.mpls_lse[0])); } if (!(match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK))) { wc |= OFPFW11_MPLS_TC; } else { ofmatch->mpls_tc = mpls_lse_to_tc(match->flow.mpls_lse[0]); } ofmatch->metadata = match->flow.metadata; ofmatch->metadata_mask = ~match->wc.masks.metadata; ofmatch->wildcards = htonl(wc); } /* Returns the "typical" length of a match for 'protocol', for use in * estimating space to preallocate. 
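 *
 * Illustrative usage, a minimal sketch (assuming 'raw' and 'version' are
 * already known):
 *
 *     struct ofpbuf *msg = ofpraw_alloc(raw, version,
 *                                       ofputil_match_typical_len(protocol));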
 */
int
ofputil_match_typical_len(enum ofputil_protocol protocol)
{
    switch (protocol) {
    case OFPUTIL_P_OF10_STD:
    case OFPUTIL_P_OF10_STD_TID:
        return sizeof(struct ofp10_match);

    case OFPUTIL_P_OF10_NXM:
    case OFPUTIL_P_OF10_NXM_TID:
        return NXM_TYPICAL_LEN;

    case OFPUTIL_P_OF11_STD:
        return sizeof(struct ofp11_match);

    case OFPUTIL_P_OF12_OXM:
    case OFPUTIL_P_OF13_OXM:
    case OFPUTIL_P_OF14_OXM:
    case OFPUTIL_P_OF15_OXM:
    case OFPUTIL_P_OF16_OXM:
        return NXM_TYPICAL_LEN;

    default:
        OVS_NOT_REACHED();
    }
}

/* Appends to 'b' a struct ofp11_match_header followed by a match that
 * expresses 'match' properly for 'protocol', plus enough zero bytes to pad the
 * data appended out to a multiple of 8.  'protocol' must be one that is usable
 * in OpenFlow 1.1 or later.
 *
 * This function can cause 'b''s data to be reallocated.
 *
 * Returns the number of bytes appended to 'b', excluding the padding.  Never
 * returns zero. */
int
ofputil_put_ofp11_match(struct ofpbuf *b, const struct match *match,
                        enum ofputil_protocol protocol)
{
    switch (protocol) {
    case OFPUTIL_P_OF10_STD:
    case OFPUTIL_P_OF10_STD_TID:
    case OFPUTIL_P_OF10_NXM:
    case OFPUTIL_P_OF10_NXM_TID:
        OVS_NOT_REACHED();

    case OFPUTIL_P_OF11_STD: {
        struct ofp11_match *om;

        /* Make sure that no padding is needed. */
        BUILD_ASSERT_DECL(sizeof *om % 8 == 0);

        om = ofpbuf_put_uninit(b, sizeof *om);
        ofputil_match_to_ofp11_match(match, om);
        return sizeof *om;
    }

    case OFPUTIL_P_OF12_OXM:
    case OFPUTIL_P_OF13_OXM:
    case OFPUTIL_P_OF14_OXM:
    case OFPUTIL_P_OF15_OXM:
    case OFPUTIL_P_OF16_OXM:
        return oxm_put_match(b, match,
                             ofputil_protocol_to_ofp_version(protocol));
    }

    OVS_NOT_REACHED();
}

/* Given a 'dl_type' value in the format used in struct flow, returns the
 * corresponding 'dl_type' value for use in an ofp10_match or ofp11_match
 * structure. */
ovs_be16
ofputil_dl_type_to_openflow(ovs_be16 flow_dl_type)
{
    return (flow_dl_type == htons(FLOW_DL_TYPE_NONE)
            ? htons(OFP_DL_TYPE_NOT_ETH_TYPE)
            : flow_dl_type);
}

/* Given a 'dl_type' value in the format used in an ofp10_match or ofp11_match
 * structure, returns the corresponding 'dl_type' value for use in struct
 * flow. */
ovs_be16
ofputil_dl_type_from_openflow(ovs_be16 ofp_dl_type)
{
    return (ofp_dl_type == htons(OFP_DL_TYPE_NOT_ETH_TYPE)
            ? htons(FLOW_DL_TYPE_NONE)
            : ofp_dl_type);
}

/* Protocols. */

struct proto_abbrev {
    enum ofputil_protocol protocol;
    const char *name;
};

/* Most users really don't care about some of the differences between
 * protocols.  These abbreviations help with that. */
static const struct proto_abbrev proto_abbrevs[] = {
    { OFPUTIL_P_ANY,          "any" },
    { OFPUTIL_P_OF10_STD_ANY, "OpenFlow10" },
    { OFPUTIL_P_OF10_NXM_ANY, "NXM" },
    { OFPUTIL_P_ANY_OXM,      "OXM" },
};
#define N_PROTO_ABBREVS ARRAY_SIZE(proto_abbrevs)

enum ofputil_protocol ofputil_flow_dump_protocols[] = {
    OFPUTIL_P_OF16_OXM,
    OFPUTIL_P_OF15_OXM,
    OFPUTIL_P_OF14_OXM,
    OFPUTIL_P_OF13_OXM,
    OFPUTIL_P_OF12_OXM,
    OFPUTIL_P_OF11_STD,
    OFPUTIL_P_OF10_NXM,
    OFPUTIL_P_OF10_STD,
};
size_t ofputil_n_flow_dump_protocols = ARRAY_SIZE(ofputil_flow_dump_protocols);

/* Returns the set of ofputil_protocols that are supported with the given
 * OpenFlow 'version'.  'version' should normally be an 8-bit OpenFlow version
 * identifier (e.g. 0x01 for OpenFlow 1.0, 0x02 for OpenFlow 1.1).  Returns 0
 * if 'version' is not supported or outside the valid range.
*/ enum ofputil_protocol ofputil_protocols_from_ofp_version(enum ofp_version version) { switch (version) { case OFP10_VERSION: return OFPUTIL_P_OF10_STD_ANY | OFPUTIL_P_OF10_NXM_ANY; case OFP11_VERSION: return OFPUTIL_P_OF11_STD; case OFP12_VERSION: return OFPUTIL_P_OF12_OXM; case OFP13_VERSION: return OFPUTIL_P_OF13_OXM; case OFP14_VERSION: return OFPUTIL_P_OF14_OXM; case OFP15_VERSION: return OFPUTIL_P_OF15_OXM; case OFP16_VERSION: return OFPUTIL_P_OF16_OXM; default: return 0; } } /* Returns the ofputil_protocol that is initially in effect on an OpenFlow * connection that has negotiated the given 'version'. 'version' should * normally be an 8-bit OpenFlow version identifier (e.g. 0x01 for OpenFlow * 1.0, 0x02 for OpenFlow 1.1). Returns 0 if 'version' is not supported or * outside the valid range. */ enum ofputil_protocol ofputil_protocol_from_ofp_version(enum ofp_version version) { return rightmost_1bit(ofputil_protocols_from_ofp_version(version)); } /* Returns the OpenFlow protocol version number (e.g. OFP10_VERSION, * etc.) that corresponds to 'protocol'. */ enum ofp_version ofputil_protocol_to_ofp_version(enum ofputil_protocol protocol) { switch (protocol) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: return OFP10_VERSION; case OFPUTIL_P_OF11_STD: return OFP11_VERSION; case OFPUTIL_P_OF12_OXM: return OFP12_VERSION; case OFPUTIL_P_OF13_OXM: return OFP13_VERSION; case OFPUTIL_P_OF14_OXM: return OFP14_VERSION; case OFPUTIL_P_OF15_OXM: return OFP15_VERSION; case OFPUTIL_P_OF16_OXM: return OFP16_VERSION; } OVS_NOT_REACHED(); } /* Returns a bitmap of OpenFlow versions that are supported by at * least one of the 'protocols'. */ uint32_t ofputil_protocols_to_version_bitmap(enum ofputil_protocol protocols) { uint32_t bitmap = 0; for (; protocols; protocols = zero_rightmost_1bit(protocols)) { enum ofputil_protocol protocol = rightmost_1bit(protocols); bitmap |= 1u << ofputil_protocol_to_ofp_version(protocol); } return bitmap; } /* Returns the set of protocols that are supported on top of the * OpenFlow versions included in 'bitmap'. */ enum ofputil_protocol ofputil_protocols_from_version_bitmap(uint32_t bitmap) { enum ofputil_protocol protocols = 0; for (; bitmap; bitmap = zero_rightmost_1bit(bitmap)) { enum ofp_version version = rightmost_1bit_idx(bitmap); protocols |= ofputil_protocols_from_ofp_version(version); } return protocols; } /* Returns true if 'protocol' is a single OFPUTIL_P_* value, false * otherwise. */ bool ofputil_protocol_is_valid(enum ofputil_protocol protocol) { return protocol & OFPUTIL_P_ANY && is_pow2(protocol); } /* Returns the equivalent of 'protocol' with the Nicira flow_mod_table_id * extension turned on or off if 'enable' is true or false, respectively. * * This extension is only useful for protocols whose "standard" version does * not allow specific tables to be modified. In particular, this is true of * OpenFlow 1.0. In later versions of OpenFlow, a flow_mod request always * specifies a table ID and so there is no need for such an extension. When * 'protocol' is such a protocol that doesn't need a flow_mod_table_id * extension, this function just returns its 'protocol' argument unchanged * regardless of the value of 'enable'. */ enum ofputil_protocol ofputil_protocol_set_tid(enum ofputil_protocol protocol, bool enable) { switch (protocol) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: return enable ? 
OFPUTIL_P_OF10_STD_TID : OFPUTIL_P_OF10_STD; case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: return enable ? OFPUTIL_P_OF10_NXM_TID : OFPUTIL_P_OF10_NXM; case OFPUTIL_P_OF11_STD: return OFPUTIL_P_OF11_STD; case OFPUTIL_P_OF12_OXM: return OFPUTIL_P_OF12_OXM; case OFPUTIL_P_OF13_OXM: return OFPUTIL_P_OF13_OXM; case OFPUTIL_P_OF14_OXM: return OFPUTIL_P_OF14_OXM; case OFPUTIL_P_OF15_OXM: return OFPUTIL_P_OF15_OXM; case OFPUTIL_P_OF16_OXM: return OFPUTIL_P_OF16_OXM; default: OVS_NOT_REACHED(); } } /* Returns the "base" version of 'protocol'. That is, if 'protocol' includes * some extension to a standard protocol version, the return value is the * standard version of that protocol without any extension. If 'protocol' is a * standard protocol version, returns 'protocol' unchanged. */ enum ofputil_protocol ofputil_protocol_to_base(enum ofputil_protocol protocol) { return ofputil_protocol_set_tid(protocol, false); } /* Returns 'new_base' with any extensions taken from 'cur'. */ enum ofputil_protocol ofputil_protocol_set_base(enum ofputil_protocol cur, enum ofputil_protocol new_base) { bool tid = (cur & OFPUTIL_P_TID) != 0; switch (new_base) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: return ofputil_protocol_set_tid(OFPUTIL_P_OF10_STD, tid); case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: return ofputil_protocol_set_tid(OFPUTIL_P_OF10_NXM, tid); case OFPUTIL_P_OF11_STD: return ofputil_protocol_set_tid(OFPUTIL_P_OF11_STD, tid); case OFPUTIL_P_OF12_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF12_OXM, tid); case OFPUTIL_P_OF13_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF13_OXM, tid); case OFPUTIL_P_OF14_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF14_OXM, tid); case OFPUTIL_P_OF15_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF15_OXM, tid); case OFPUTIL_P_OF16_OXM: return ofputil_protocol_set_tid(OFPUTIL_P_OF16_OXM, tid); default: OVS_NOT_REACHED(); } } /* Returns a string form of 'protocol', if a simple form exists (that is, if * 'protocol' is either a single protocol or it is a combination of protocols * that have a single abbreviation). Otherwise, returns NULL. */ const char * ofputil_protocol_to_string(enum ofputil_protocol protocol) { const struct proto_abbrev *p; /* Use a "switch" statement for single-bit names so that we get a compiler * warning if we forget any. */ switch (protocol) { case OFPUTIL_P_OF10_NXM: return "NXM-table_id"; case OFPUTIL_P_OF10_NXM_TID: return "NXM+table_id"; case OFPUTIL_P_OF10_STD: return "OpenFlow10-table_id"; case OFPUTIL_P_OF10_STD_TID: return "OpenFlow10+table_id"; case OFPUTIL_P_OF11_STD: return "OpenFlow11"; case OFPUTIL_P_OF12_OXM: return "OXM-OpenFlow12"; case OFPUTIL_P_OF13_OXM: return "OXM-OpenFlow13"; case OFPUTIL_P_OF14_OXM: return "OXM-OpenFlow14"; case OFPUTIL_P_OF15_OXM: return "OXM-OpenFlow15"; case OFPUTIL_P_OF16_OXM: return "OXM-OpenFlow16"; } /* Check abbreviations. */ for (p = proto_abbrevs; p < &proto_abbrevs[N_PROTO_ABBREVS]; p++) { if (protocol == p->protocol) { return p->name; } } return NULL; } /* Returns a string that represents 'protocols'. The return value might be a * comma-separated list if 'protocols' doesn't have a simple name. The return * value is "none" if 'protocols' is 0. * * The caller must free the returned string (with free()). 
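 *
 * Illustrative usage, a minimal sketch:
 *
 *     char *s = ofputil_protocols_to_string(protocols);
 *     printf("supported flow formats: %s\n", s);
 *     free(s);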
*/ char * ofputil_protocols_to_string(enum ofputil_protocol protocols) { struct ds s; ovs_assert(!(protocols & ~OFPUTIL_P_ANY)); if (protocols == 0) { return xstrdup("none"); } ds_init(&s); while (protocols) { const struct proto_abbrev *p; int i; if (s.length) { ds_put_char(&s, ','); } for (p = proto_abbrevs; p < &proto_abbrevs[N_PROTO_ABBREVS]; p++) { if ((protocols & p->protocol) == p->protocol) { ds_put_cstr(&s, p->name); protocols &= ~p->protocol; goto match; } } for (i = 0; i < CHAR_BIT * sizeof(enum ofputil_protocol); i++) { enum ofputil_protocol bit = 1u << i; if (protocols & bit) { ds_put_cstr(&s, ofputil_protocol_to_string(bit)); protocols &= ~bit; goto match; } } OVS_NOT_REACHED(); match: ; } return ds_steal_cstr(&s); } static enum ofputil_protocol ofputil_protocol_from_string__(const char *s, size_t n) { const struct proto_abbrev *p; int i; for (i = 0; i < CHAR_BIT * sizeof(enum ofputil_protocol); i++) { enum ofputil_protocol bit = 1u << i; const char *name = ofputil_protocol_to_string(bit); if (name && n == strlen(name) && !strncasecmp(s, name, n)) { return bit; } } for (p = proto_abbrevs; p < &proto_abbrevs[N_PROTO_ABBREVS]; p++) { if (n == strlen(p->name) && !strncasecmp(s, p->name, n)) { return p->protocol; } } return 0; } /* Returns the nonempty set of protocols represented by 's', which can be a * single protocol name or abbreviation or a comma-separated list of them. * * Aborts the program with an error message if 's' is invalid. */ enum ofputil_protocol ofputil_protocols_from_string(const char *s) { const char *orig_s = s; enum ofputil_protocol protocols; protocols = 0; while (*s) { enum ofputil_protocol p; size_t n; n = strcspn(s, ","); if (n == 0) { s++; continue; } p = ofputil_protocol_from_string__(s, n); if (!p) { ovs_fatal(0, "%.*s: unknown flow protocol", (int) n, s); } protocols |= p; s += n; } if (!protocols) { ovs_fatal(0, "%s: no flow protocol specified", orig_s); } return protocols; } enum ofp_version ofputil_version_from_string(const char *s) { if (!strcasecmp(s, "OpenFlow10")) { return OFP10_VERSION; } if (!strcasecmp(s, "OpenFlow11")) { return OFP11_VERSION; } if (!strcasecmp(s, "OpenFlow12")) { return OFP12_VERSION; } if (!strcasecmp(s, "OpenFlow13")) { return OFP13_VERSION; } if (!strcasecmp(s, "OpenFlow14")) { return OFP14_VERSION; } if (!strcasecmp(s, "OpenFlow15")) { return OFP15_VERSION; } if (!strcasecmp(s, "OpenFlow16")) { return OFP16_VERSION; } return 0; } static bool is_delimiter(unsigned char c) { return isspace(c) || c == ','; } uint32_t ofputil_versions_from_string(const char *s) { size_t i = 0; uint32_t bitmap = 0; while (s[i]) { size_t j; int version; char *key; if (is_delimiter(s[i])) { i++; continue; } j = 0; while (s[i + j] && !is_delimiter(s[i + j])) { j++; } key = xmemdup0(s + i, j); version = ofputil_version_from_string(key); if (!version) { VLOG_FATAL("Unknown OpenFlow version: \"%s\"", key); } free(key); bitmap |= 1u << version; i += j; } return bitmap; } uint32_t ofputil_versions_from_strings(char ** const s, size_t count) { uint32_t bitmap = 0; while (count--) { int version = ofputil_version_from_string(s[count]); if (!version) { VLOG_WARN("Unknown OpenFlow version: \"%s\"", s[count]); } else { bitmap |= 1u << version; } } return bitmap; } const char * ofputil_version_to_string(enum ofp_version ofp_version) { switch (ofp_version) { case OFP10_VERSION: return "OpenFlow10"; case OFP11_VERSION: return "OpenFlow11"; case OFP12_VERSION: return "OpenFlow12"; case OFP13_VERSION: return "OpenFlow13"; case OFP14_VERSION: return 
"OpenFlow14"; case OFP15_VERSION: return "OpenFlow15"; case OFP16_VERSION: return "OpenFlow16"; default: OVS_NOT_REACHED(); } } bool ofputil_packet_in_format_is_valid(enum nx_packet_in_format packet_in_format) { switch (packet_in_format) { case NXPIF_STANDARD: case NXPIF_NXT_PACKET_IN: case NXPIF_NXT_PACKET_IN2: return true; } return false; } const char * ofputil_packet_in_format_to_string(enum nx_packet_in_format packet_in_format) { switch (packet_in_format) { case NXPIF_STANDARD: return "standard"; case NXPIF_NXT_PACKET_IN: return "nxt_packet_in"; case NXPIF_NXT_PACKET_IN2: return "nxt_packet_in2"; default: OVS_NOT_REACHED(); } } int ofputil_packet_in_format_from_string(const char *s) { return (!strcmp(s, "standard") || !strcmp(s, "openflow10") ? NXPIF_STANDARD : !strcmp(s, "nxt_packet_in") || !strcmp(s, "nxm") ? NXPIF_NXT_PACKET_IN : !strcmp(s, "nxt_packet_in2") ? NXPIF_NXT_PACKET_IN2 : -1); } void ofputil_format_version(struct ds *msg, enum ofp_version version) { ds_put_format(msg, "0x%02x", version); } void ofputil_format_version_name(struct ds *msg, enum ofp_version version) { ds_put_cstr(msg, ofputil_version_to_string(version)); } static void ofputil_format_version_bitmap__(struct ds *msg, uint32_t bitmap, void (*format_version)(struct ds *msg, enum ofp_version)) { while (bitmap) { format_version(msg, raw_ctz(bitmap)); bitmap = zero_rightmost_1bit(bitmap); if (bitmap) { ds_put_cstr(msg, ", "); } } } void ofputil_format_version_bitmap(struct ds *msg, uint32_t bitmap) { ofputil_format_version_bitmap__(msg, bitmap, ofputil_format_version); } void ofputil_format_version_bitmap_names(struct ds *msg, uint32_t bitmap) { ofputil_format_version_bitmap__(msg, bitmap, ofputil_format_version_name); } static bool ofputil_decode_hello_bitmap(const struct ofp_hello_elem_header *oheh, uint32_t *allowed_versionsp) { uint16_t bitmap_len = ntohs(oheh->length) - sizeof *oheh; const ovs_be32 *bitmap = ALIGNED_CAST(const ovs_be32 *, oheh + 1); uint32_t allowed_versions; if (!bitmap_len || bitmap_len % sizeof *bitmap) { return false; } /* Only use the first 32-bit element of the bitmap as that is all the * current implementation supports. Subsequent elements are ignored which * should have no effect on session negotiation until Open vSwitch supports * wire-protocol versions greater than 31. */ allowed_versions = ntohl(bitmap[0]); if (allowed_versions & 1) { /* There's no OpenFlow version 0. */ VLOG_WARN_RL(&bad_ofmsg_rl, "peer claims to support invalid OpenFlow " "version 0x00"); allowed_versions &= ~1u; } if (!allowed_versions) { VLOG_WARN_RL(&bad_ofmsg_rl, "peer does not support any OpenFlow " "version (between 0x01 and 0x1f)"); return false; } *allowed_versionsp = allowed_versions; return true; } static uint32_t version_bitmap_from_version(uint8_t ofp_version) { return ((ofp_version < 32 ? 1u << ofp_version : 0) - 1) << 1; } /* Decodes OpenFlow OFPT_HELLO message 'oh', storing into '*allowed_versions' * the set of OpenFlow versions for which 'oh' announces support. * * Because of how OpenFlow defines OFPT_HELLO messages, this function is always * successful, and thus '*allowed_versions' is always initialized. However, it * returns false if 'oh' contains some data that could not be fully understood, * true if 'oh' was completely parsed. 
*/ bool ofputil_decode_hello(const struct ofp_header *oh, uint32_t *allowed_versions) { struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpbuf_pull(&msg, sizeof *oh); *allowed_versions = version_bitmap_from_version(oh->version); bool ok = true; while (msg.size) { const struct ofp_hello_elem_header *oheh; unsigned int len; if (msg.size < sizeof *oheh) { return false; } oheh = msg.data; len = ntohs(oheh->length); if (len < sizeof *oheh || !ofpbuf_try_pull(&msg, ROUND_UP(len, 8))) { return false; } if (oheh->type != htons(OFPHET_VERSIONBITMAP) || !ofputil_decode_hello_bitmap(oheh, allowed_versions)) { ok = false; } } return ok; } /* Returns true if 'allowed_versions' needs to be accompanied by a version * bitmap to be correctly expressed in an OFPT_HELLO message. */ static bool should_send_version_bitmap(uint32_t allowed_versions) { return !is_pow2((allowed_versions >> 1) + 1); } /* Create an OFPT_HELLO message that expresses support for the OpenFlow * versions in the 'allowed_versions' bitmaps and returns the message. */ struct ofpbuf * ofputil_encode_hello(uint32_t allowed_versions) { enum ofp_version ofp_version; struct ofpbuf *msg; ofp_version = leftmost_1bit_idx(allowed_versions); msg = ofpraw_alloc(OFPRAW_OFPT_HELLO, ofp_version, 0); if (should_send_version_bitmap(allowed_versions)) { struct ofp_hello_elem_header *oheh; uint16_t map_len; map_len = sizeof allowed_versions; oheh = ofpbuf_put_zeros(msg, ROUND_UP(map_len + sizeof *oheh, 8)); oheh->type = htons(OFPHET_VERSIONBITMAP); oheh->length = htons(map_len + sizeof *oheh); *ALIGNED_CAST(ovs_be32 *, oheh + 1) = htonl(allowed_versions); ofpmsg_update_length(msg); } return msg; } /* Returns an OpenFlow message that, sent on an OpenFlow connection whose * protocol is 'current', at least partly transitions the protocol to 'want'. * Stores in '*next' the protocol that will be in effect on the OpenFlow * connection if the switch processes the returned message correctly. (If * '*next != want' then the caller will have to iterate.) * * If 'current == want', or if it is not possible to transition from 'current' * to 'want' (because, for example, 'current' and 'want' use different OpenFlow * protocol versions), returns NULL and stores 'current' in '*next'. */ struct ofpbuf * ofputil_encode_set_protocol(enum ofputil_protocol current, enum ofputil_protocol want, enum ofputil_protocol *next) { enum ofp_version cur_version, want_version; enum ofputil_protocol cur_base, want_base; bool cur_tid, want_tid; cur_version = ofputil_protocol_to_ofp_version(current); want_version = ofputil_protocol_to_ofp_version(want); if (cur_version != want_version) { *next = current; return NULL; } cur_base = ofputil_protocol_to_base(current); want_base = ofputil_protocol_to_base(want); if (cur_base != want_base) { *next = ofputil_protocol_set_base(current, want_base); switch (want_base) { case OFPUTIL_P_OF10_NXM: return ofputil_encode_nx_set_flow_format(NXFF_NXM); case OFPUTIL_P_OF10_STD: return ofputil_encode_nx_set_flow_format(NXFF_OPENFLOW10); case OFPUTIL_P_OF11_STD: case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: /* There is only one variant of each OpenFlow 1.1+ protocol, and we * verified above that we're not trying to change versions. 
*/ OVS_NOT_REACHED(); case OFPUTIL_P_OF10_STD_TID: case OFPUTIL_P_OF10_NXM_TID: OVS_NOT_REACHED(); } } cur_tid = (current & OFPUTIL_P_TID) != 0; want_tid = (want & OFPUTIL_P_TID) != 0; if (cur_tid != want_tid) { *next = ofputil_protocol_set_tid(current, want_tid); return ofputil_make_flow_mod_table_id(want_tid); } ovs_assert(current == want); *next = current; return NULL; } /* Returns an NXT_SET_FLOW_FORMAT message that can be used to set the flow * format to 'nxff'. */ struct ofpbuf * ofputil_encode_nx_set_flow_format(enum nx_flow_format nxff) { struct nx_set_flow_format *sff; struct ofpbuf *msg; ovs_assert(ofputil_nx_flow_format_is_valid(nxff)); msg = ofpraw_alloc(OFPRAW_NXT_SET_FLOW_FORMAT, OFP10_VERSION, 0); sff = ofpbuf_put_zeros(msg, sizeof *sff); sff->format = htonl(nxff); return msg; } /* Returns the base protocol if 'flow_format' is a valid NXFF_* value, false * otherwise. */ enum ofputil_protocol ofputil_nx_flow_format_to_protocol(enum nx_flow_format flow_format) { switch (flow_format) { case NXFF_OPENFLOW10: return OFPUTIL_P_OF10_STD; case NXFF_NXM: return OFPUTIL_P_OF10_NXM; default: return 0; } } /* Returns true if 'flow_format' is a valid NXFF_* value, false otherwise. */ bool ofputil_nx_flow_format_is_valid(enum nx_flow_format flow_format) { return ofputil_nx_flow_format_to_protocol(flow_format) != 0; } /* Returns a string version of 'flow_format', which must be a valid NXFF_* * value. */ const char * ofputil_nx_flow_format_to_string(enum nx_flow_format flow_format) { switch (flow_format) { case NXFF_OPENFLOW10: return "openflow10"; case NXFF_NXM: return "nxm"; default: OVS_NOT_REACHED(); } } struct ofpbuf * ofputil_make_set_packet_in_format(enum ofp_version ofp_version, enum nx_packet_in_format packet_in_format) { struct nx_set_packet_in_format *spif; struct ofpbuf *msg; msg = ofpraw_alloc(OFPRAW_NXT_SET_PACKET_IN_FORMAT, ofp_version, 0); spif = ofpbuf_put_zeros(msg, sizeof *spif); spif->format = htonl(packet_in_format); return msg; } /* Returns an OpenFlow message that can be used to turn the flow_mod_table_id * extension on or off (according to 'flow_mod_table_id'). 
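 *
 * Illustrative usage, a minimal sketch (the caller takes ownership of the
 * returned message):
 *
 *     struct ofpbuf *msg = ofputil_make_flow_mod_table_id(true);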
*/ struct ofpbuf * ofputil_make_flow_mod_table_id(bool flow_mod_table_id) { struct nx_flow_mod_table_id *nfmti; struct ofpbuf *msg; msg = ofpraw_alloc(OFPRAW_NXT_FLOW_MOD_TABLE_ID, OFP10_VERSION, 0); nfmti = ofpbuf_put_zeros(msg, sizeof *nfmti); nfmti->set = flow_mod_table_id; return msg; } struct ofputil_flow_mod_flag { uint16_t raw_flag; enum ofp_version min_version, max_version; enum ofputil_flow_mod_flags flag; }; static const struct ofputil_flow_mod_flag ofputil_flow_mod_flags[] = { { OFPFF_SEND_FLOW_REM, OFP10_VERSION, 0, OFPUTIL_FF_SEND_FLOW_REM }, { OFPFF_CHECK_OVERLAP, OFP10_VERSION, 0, OFPUTIL_FF_CHECK_OVERLAP }, { OFPFF10_EMERG, OFP10_VERSION, OFP10_VERSION, OFPUTIL_FF_EMERG }, { OFPFF12_RESET_COUNTS, OFP12_VERSION, 0, OFPUTIL_FF_RESET_COUNTS }, { OFPFF13_NO_PKT_COUNTS, OFP13_VERSION, 0, OFPUTIL_FF_NO_PKT_COUNTS }, { OFPFF13_NO_BYT_COUNTS, OFP13_VERSION, 0, OFPUTIL_FF_NO_BYT_COUNTS }, { 0, 0, 0, 0 }, }; static enum ofperr ofputil_decode_flow_mod_flags(ovs_be16 raw_flags_, enum ofp_flow_mod_command command, enum ofp_version version, enum ofputil_flow_mod_flags *flagsp) { uint16_t raw_flags = ntohs(raw_flags_); const struct ofputil_flow_mod_flag *f; *flagsp = 0; for (f = ofputil_flow_mod_flags; f->raw_flag; f++) { if (raw_flags & f->raw_flag && version >= f->min_version && (!f->max_version || version <= f->max_version)) { raw_flags &= ~f->raw_flag; *flagsp |= f->flag; } } /* In OF1.0 and OF1.1, "add" always resets counters, and other commands * never do. * * In OF1.2 and later, OFPFF12_RESET_COUNTS controls whether each command * resets counters. */ if ((version == OFP10_VERSION || version == OFP11_VERSION) && command == OFPFC_ADD) { *flagsp |= OFPUTIL_FF_RESET_COUNTS; } return raw_flags ? OFPERR_OFPFMFC_BAD_FLAGS : 0; } static ovs_be16 ofputil_encode_flow_mod_flags(enum ofputil_flow_mod_flags flags, enum ofp_version version) { const struct ofputil_flow_mod_flag *f; uint16_t raw_flags; raw_flags = 0; for (f = ofputil_flow_mod_flags; f->raw_flag; f++) { if (f->flag & flags && version >= f->min_version && (!f->max_version || version <= f->max_version)) { raw_flags |= f->raw_flag; } } return htons(raw_flags); } /* Converts an OFPT_FLOW_MOD or NXT_FLOW_MOD message 'oh' into an abstract * flow_mod in 'fm'. Returns 0 if successful, otherwise an OpenFlow error * code. * * Uses 'ofpacts' to store the abstract OFPACT_* version of 'oh''s actions. * The caller must initialize 'ofpacts' and retains ownership of it. * 'fm->ofpacts' will point into the 'ofpacts' buffer. * * Does not validate the flow_mod actions. The caller should do that, with * ofpacts_check(). */ enum ofperr ofputil_decode_flow_mod(struct ofputil_flow_mod *fm, const struct ofp_header *oh, enum ofputil_protocol protocol, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct ofpbuf *ofpacts, ofp_port_t max_port, uint8_t max_table) { ovs_be16 raw_flags; enum ofperr error; struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT11_FLOW_MOD) { /* Standard OpenFlow 1.1+ flow_mod. */ const struct ofp11_flow_mod *ofm; ofm = ofpbuf_pull(&b, sizeof *ofm); error = ofputil_pull_ofp11_match(&b, tun_table, vl_mff_map, &fm->match, NULL); if (error) { return error; } /* Translate the message. 
*/ fm->priority = ntohs(ofm->priority); if (ofm->command == OFPFC_ADD || (oh->version == OFP11_VERSION && (ofm->command == OFPFC_MODIFY || ofm->command == OFPFC_MODIFY_STRICT) && ofm->cookie_mask == htonll(0))) { /* In OpenFlow 1.1 only, a "modify" or "modify-strict" that does * not match on the cookie is treated as an "add" if there is no * match. */ fm->cookie = htonll(0); fm->cookie_mask = htonll(0); fm->new_cookie = ofm->cookie; } else { fm->cookie = ofm->cookie; fm->cookie_mask = ofm->cookie_mask; fm->new_cookie = OVS_BE64_MAX; } fm->modify_cookie = false; fm->command = ofm->command; /* Get table ID. * * OF1.1 entirely forbids table_id == OFPTT_ALL. * OF1.2+ allows table_id == OFPTT_ALL only for deletes. */ fm->table_id = ofm->table_id; if (fm->table_id == OFPTT_ALL && (oh->version == OFP11_VERSION || (ofm->command != OFPFC_DELETE && ofm->command != OFPFC_DELETE_STRICT))) { return OFPERR_OFPFMFC_BAD_TABLE_ID; } fm->idle_timeout = ntohs(ofm->idle_timeout); fm->hard_timeout = ntohs(ofm->hard_timeout); if (oh->version >= OFP14_VERSION && ofm->command == OFPFC_ADD) { fm->importance = ntohs(ofm->importance); } else { fm->importance = 0; } fm->buffer_id = ntohl(ofm->buffer_id); error = ofputil_port_from_ofp11(ofm->out_port, &fm->out_port); if (error) { return error; } fm->out_group = (ofm->command == OFPFC_DELETE || ofm->command == OFPFC_DELETE_STRICT ? ntohl(ofm->out_group) : OFPG_ANY); raw_flags = ofm->flags; } else { uint16_t command; if (raw == OFPRAW_OFPT10_FLOW_MOD) { /* Standard OpenFlow 1.0 flow_mod. */ const struct ofp10_flow_mod *ofm; /* Get the ofp10_flow_mod. */ ofm = ofpbuf_pull(&b, sizeof *ofm); /* Translate the rule. */ ofputil_match_from_ofp10_match(&ofm->match, &fm->match); ofputil_normalize_match(&fm->match); /* OpenFlow 1.0 says that exact-match rules have to have the * highest possible priority. */ fm->priority = (ofm->match.wildcards & htonl(OFPFW10_ALL) ? ntohs(ofm->priority) : UINT16_MAX); /* Translate the message. */ command = ntohs(ofm->command); fm->cookie = htonll(0); fm->cookie_mask = htonll(0); fm->new_cookie = ofm->cookie; fm->idle_timeout = ntohs(ofm->idle_timeout); fm->hard_timeout = ntohs(ofm->hard_timeout); fm->importance = 0; fm->buffer_id = ntohl(ofm->buffer_id); fm->out_port = u16_to_ofp(ntohs(ofm->out_port)); fm->out_group = OFPG_ANY; raw_flags = ofm->flags; } else if (raw == OFPRAW_NXT_FLOW_MOD) { /* Nicira extended flow_mod. */ const struct nx_flow_mod *nfm; /* Dissect the message. */ nfm = ofpbuf_pull(&b, sizeof *nfm); error = nx_pull_match(&b, ntohs(nfm->match_len), &fm->match, &fm->cookie, &fm->cookie_mask, tun_table, vl_mff_map); if (error) { return error; } /* Translate the message. */ command = ntohs(nfm->command); if ((command & 0xff) == OFPFC_ADD && fm->cookie_mask) { /* Flow additions may only set a new cookie, not match an * existing cookie. 
*/ return OFPERR_NXBRC_NXM_INVALID; } fm->priority = ntohs(nfm->priority); fm->new_cookie = nfm->cookie; fm->idle_timeout = ntohs(nfm->idle_timeout); fm->hard_timeout = ntohs(nfm->hard_timeout); fm->importance = 0; fm->buffer_id = ntohl(nfm->buffer_id); fm->out_port = u16_to_ofp(ntohs(nfm->out_port)); fm->out_group = OFPG_ANY; raw_flags = nfm->flags; } else { OVS_NOT_REACHED(); } fm->modify_cookie = fm->new_cookie != OVS_BE64_MAX; if (protocol & OFPUTIL_P_TID) { fm->command = command & 0xff; fm->table_id = command >> 8; } else { if (command > 0xff) { VLOG_WARN_RL(&bad_ofmsg_rl, "flow_mod has explicit table_id " "but flow_mod_table_id extension is not enabled"); } fm->command = command; fm->table_id = 0xff; } } if (fm->command > OFPFC_DELETE_STRICT) { return OFPERR_OFPFMFC_BAD_COMMAND; } fm->ofpacts_tlv_bitmap = 0; error = ofpacts_pull_openflow_instructions(&b, b.size, oh->version, vl_mff_map, &fm->ofpacts_tlv_bitmap, ofpacts); if (error) { return error; } fm->ofpacts = ofpacts->data; fm->ofpacts_len = ofpacts->size; error = ofputil_decode_flow_mod_flags(raw_flags, fm->command, oh->version, &fm->flags); if (error) { return error; } if (fm->flags & OFPUTIL_FF_EMERG) { /* We do not support the OpenFlow 1.0 emergency flow cache, which * is not required in OpenFlow 1.0.1 and removed from OpenFlow 1.1. * * OpenFlow 1.0 specifies the error code to use when idle_timeout * or hard_timeout is nonzero. Otherwise, there is no good error * code, so just state that the flow table is full. */ return (fm->hard_timeout || fm->idle_timeout ? OFPERR_OFPFMFC_BAD_EMERG_TIMEOUT : OFPERR_OFPFMFC_TABLE_FULL); } return ofpacts_check_consistency(fm->ofpacts, fm->ofpacts_len, &fm->match.flow, max_port, fm->table_id, max_table, protocol); } static enum ofperr ofputil_pull_bands(struct ofpbuf *msg, size_t len, uint16_t *n_bands, struct ofpbuf *bands) { const struct ofp13_meter_band_header *ombh; struct ofputil_meter_band *mb; uint16_t n = 0; ombh = ofpbuf_try_pull(msg, len); if (!ombh) { return OFPERR_OFPBRC_BAD_LEN; } while (len >= sizeof (struct ofp13_meter_band_drop)) { size_t ombh_len = ntohs(ombh->len); /* All supported band types have the same length. */ if (ombh_len != sizeof (struct ofp13_meter_band_drop)) { return OFPERR_OFPBRC_BAD_LEN; } mb = ofpbuf_put_uninit(bands, sizeof *mb); mb->type = ntohs(ombh->type); if (mb->type != OFPMBT13_DROP && mb->type != OFPMBT13_DSCP_REMARK) { return OFPERR_OFPMMFC_BAD_BAND; } mb->rate = ntohl(ombh->rate); mb->burst_size = ntohl(ombh->burst_size); mb->prec_level = (mb->type == OFPMBT13_DSCP_REMARK) ? ((struct ofp13_meter_band_dscp_remark *)ombh)->prec_level : 0; n++; len -= ombh_len; ombh = ALIGNED_CAST(struct ofp13_meter_band_header *, (char *) ombh + ombh_len); } if (len) { return OFPERR_OFPBRC_BAD_LEN; } *n_bands = n; return 0; } enum ofperr ofputil_decode_meter_mod(const struct ofp_header *oh, struct ofputil_meter_mod *mm, struct ofpbuf *bands) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); const struct ofp13_meter_mod *omm = ofpbuf_pull(&b, sizeof *omm); /* Translate the message. 
*/ mm->command = ntohs(omm->command); if (mm->command != OFPMC13_ADD && mm->command != OFPMC13_MODIFY && mm->command != OFPMC13_DELETE) { return OFPERR_OFPMMFC_BAD_COMMAND; } mm->meter.meter_id = ntohl(omm->meter_id); if (mm->command == OFPMC13_DELETE) { mm->meter.flags = 0; mm->meter.n_bands = 0; mm->meter.bands = NULL; } else { enum ofperr error; mm->meter.flags = ntohs(omm->flags); if (mm->meter.flags & OFPMF13_KBPS && mm->meter.flags & OFPMF13_PKTPS) { return OFPERR_OFPMMFC_BAD_FLAGS; } error = ofputil_pull_bands(&b, b.size, &mm->meter.n_bands, bands); if (error) { return error; } mm->meter.bands = bands->data; } return 0; } void ofputil_decode_meter_request(const struct ofp_header *oh, uint32_t *meter_id) { const struct ofp13_meter_multipart_request *omr = ofpmsg_body(oh); *meter_id = ntohl(omr->meter_id); } struct ofpbuf * ofputil_encode_meter_request(enum ofp_version ofp_version, enum ofputil_meter_request_type type, uint32_t meter_id) { struct ofpbuf *msg; enum ofpraw raw; switch (type) { case OFPUTIL_METER_CONFIG: raw = OFPRAW_OFPST13_METER_CONFIG_REQUEST; break; case OFPUTIL_METER_STATS: raw = OFPRAW_OFPST13_METER_REQUEST; break; default: case OFPUTIL_METER_FEATURES: raw = OFPRAW_OFPST13_METER_FEATURES_REQUEST; break; } msg = ofpraw_alloc(raw, ofp_version, 0); if (type != OFPUTIL_METER_FEATURES) { struct ofp13_meter_multipart_request *omr; omr = ofpbuf_put_zeros(msg, sizeof *omr); omr->meter_id = htonl(meter_id); } return msg; } static void ofputil_put_bands(uint16_t n_bands, const struct ofputil_meter_band *mb, struct ofpbuf *msg) { uint16_t n = 0; for (n = 0; n < n_bands; ++n) { /* Currently all band types have same size. */ struct ofp13_meter_band_dscp_remark *ombh; size_t ombh_len = sizeof *ombh; ombh = ofpbuf_put_zeros(msg, ombh_len); ombh->type = htons(mb->type); ombh->len = htons(ombh_len); ombh->rate = htonl(mb->rate); ombh->burst_size = htonl(mb->burst_size); ombh->prec_level = mb->prec_level; mb++; } } /* Encode a meter stat for 'mc' and append it to 'replies'. */ void ofputil_append_meter_config(struct ovs_list *replies, const struct ofputil_meter_config *mc) { struct ofpbuf *msg = ofpbuf_from_list(ovs_list_back(replies)); size_t start_ofs = msg->size; struct ofp13_meter_config *reply; ofpbuf_put_uninit(msg, sizeof *reply); ofputil_put_bands(mc->n_bands, mc->bands, msg); reply = ofpbuf_at_assert(msg, start_ofs, sizeof *reply); reply->flags = htons(mc->flags); reply->meter_id = htonl(mc->meter_id); reply->length = htons(msg->size - start_ofs); ofpmp_postappend(replies, start_ofs); } /* Encode a meter stat for 'ms' and append it to 'replies'. 
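 *
 * Illustrative usage, a minimal sketch (assuming a multipart reply list
 * begun with ofpmp_init() on the original request):
 *
 *     struct ovs_list replies;
 *     ofpmp_init(&replies, request);
 *     ofputil_append_meter_stats(&replies, &ms);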
*/ void ofputil_append_meter_stats(struct ovs_list *replies, const struct ofputil_meter_stats *ms) { struct ofp13_meter_stats *reply; uint16_t n = 0; uint16_t len; len = sizeof *reply + ms->n_bands * sizeof(struct ofp13_meter_band_stats); reply = ofpmp_append(replies, len); reply->meter_id = htonl(ms->meter_id); reply->len = htons(len); memset(reply->pad, 0, sizeof reply->pad); reply->flow_count = htonl(ms->flow_count); reply->packet_in_count = htonll(ms->packet_in_count); reply->byte_in_count = htonll(ms->byte_in_count); reply->duration_sec = htonl(ms->duration_sec); reply->duration_nsec = htonl(ms->duration_nsec); for (n = 0; n < ms->n_bands; ++n) { const struct ofputil_meter_band_stats *src = &ms->bands[n]; struct ofp13_meter_band_stats *dst = &reply->band_stats[n]; dst->packet_band_count = htonll(src->packet_count); dst->byte_band_count = htonll(src->byte_count); } } /* Converts an OFPMP_METER_CONFIG reply in 'msg' into an abstract * ofputil_meter_config in 'mc', with mc->bands pointing to bands decoded into * 'bands'. The caller must have initialized 'bands' and retains ownership of * it across the call. * * Multiple OFPST13_METER_CONFIG replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. 'bands' is cleared for each reply. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_meter_config(struct ofpbuf *msg, struct ofputil_meter_config *mc, struct ofpbuf *bands) { const struct ofp13_meter_config *omc; enum ofperr err; /* Pull OpenFlow headers for the first call. */ if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } omc = ofpbuf_try_pull(msg, sizeof *omc); if (!omc) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPMP_METER_CONFIG reply has %"PRIu32" leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } ofpbuf_clear(bands); err = ofputil_pull_bands(msg, ntohs(omc->length) - sizeof *omc, &mc->n_bands, bands); if (err) { return err; } mc->meter_id = ntohl(omc->meter_id); mc->flags = ntohs(omc->flags); mc->bands = bands->data; return 0; } static enum ofperr ofputil_pull_band_stats(struct ofpbuf *msg, size_t len, uint16_t *n_bands, struct ofpbuf *bands) { const struct ofp13_meter_band_stats *ombs; struct ofputil_meter_band_stats *mbs; uint16_t n, i; ombs = ofpbuf_try_pull(msg, len); if (!ombs) { return OFPERR_OFPBRC_BAD_LEN; } n = len / sizeof *ombs; if (len != n * sizeof *ombs) { return OFPERR_OFPBRC_BAD_LEN; } mbs = ofpbuf_put_uninit(bands, len); for (i = 0; i < n; ++i) { mbs[i].packet_count = ntohll(ombs[i].packet_band_count); mbs[i].byte_count = ntohll(ombs[i].byte_band_count); } *n_bands = n; return 0; } /* Converts an OFPMP_METER reply in 'msg' into an abstract * ofputil_meter_stats in 'ms', with ms->bands pointing to band stats * decoded into 'bands'. * * Multiple OFPMP_METER replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. 'bands' is cleared for each reply. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_meter_stats(struct ofpbuf *msg, struct ofputil_meter_stats *ms, struct ofpbuf *bands) { const struct ofp13_meter_stats *oms; enum ofperr err; /* Pull OpenFlow headers for the first call. 
*/ if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } oms = ofpbuf_try_pull(msg, sizeof *oms); if (!oms) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPMP_METER reply has %"PRIu32" leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } ofpbuf_clear(bands); err = ofputil_pull_band_stats(msg, ntohs(oms->len) - sizeof *oms, &ms->n_bands, bands); if (err) { return err; } ms->meter_id = ntohl(oms->meter_id); ms->flow_count = ntohl(oms->flow_count); ms->packet_in_count = ntohll(oms->packet_in_count); ms->byte_in_count = ntohll(oms->byte_in_count); ms->duration_sec = ntohl(oms->duration_sec); ms->duration_nsec = ntohl(oms->duration_nsec); ms->bands = bands->data; return 0; } void ofputil_decode_meter_features(const struct ofp_header *oh, struct ofputil_meter_features *mf) { const struct ofp13_meter_features *omf = ofpmsg_body(oh); mf->max_meters = ntohl(omf->max_meter); mf->band_types = ntohl(omf->band_types); mf->capabilities = ntohl(omf->capabilities); mf->max_bands = omf->max_bands; mf->max_color = omf->max_color; } struct ofpbuf * ofputil_encode_meter_features_reply(const struct ofputil_meter_features *mf, const struct ofp_header *request) { struct ofpbuf *reply; struct ofp13_meter_features *omf; reply = ofpraw_alloc_stats_reply(request, 0); omf = ofpbuf_put_zeros(reply, sizeof *omf); omf->max_meter = htonl(mf->max_meters); omf->band_types = htonl(mf->band_types); omf->capabilities = htonl(mf->capabilities); omf->max_bands = mf->max_bands; omf->max_color = mf->max_color; return reply; } struct ofpbuf * ofputil_encode_meter_mod(enum ofp_version ofp_version, const struct ofputil_meter_mod *mm) { struct ofpbuf *msg; struct ofp13_meter_mod *omm; msg = ofpraw_alloc(OFPRAW_OFPT13_METER_MOD, ofp_version, NXM_TYPICAL_LEN + mm->meter.n_bands * 16); omm = ofpbuf_put_zeros(msg, sizeof *omm); omm->command = htons(mm->command); if (mm->command != OFPMC13_DELETE) { omm->flags = htons(mm->meter.flags); } omm->meter_id = htonl(mm->meter.meter_id); ofputil_put_bands(mm->meter.n_bands, mm->meter.bands, msg); ofpmsg_update_length(msg); return msg; } static ovs_be16 ofputil_tid_command(const struct ofputil_flow_mod *fm, enum ofputil_protocol protocol) { return htons(protocol & OFPUTIL_P_TID ? (fm->command & 0xff) | (fm->table_id << 8) : fm->command); } /* Converts 'fm' into an OFPT_FLOW_MOD or NXT_FLOW_MOD message according to * 'protocol' and returns the message. 
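 *
 * Illustrative usage, a minimal sketch (error handling and transmission
 * omitted):
 *
 *     struct ofpbuf *msg = ofputil_encode_flow_mod(&fm, protocol);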
*/ struct ofpbuf * ofputil_encode_flow_mod(const struct ofputil_flow_mod *fm, enum ofputil_protocol protocol) { enum ofp_version version = ofputil_protocol_to_ofp_version(protocol); ovs_be16 raw_flags = ofputil_encode_flow_mod_flags(fm->flags, version); struct ofpbuf *msg; switch (protocol) { case OFPUTIL_P_OF11_STD: case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: { struct ofp11_flow_mod *ofm; int tailroom; tailroom = ofputil_match_typical_len(protocol) + fm->ofpacts_len; msg = ofpraw_alloc(OFPRAW_OFPT11_FLOW_MOD, version, tailroom); ofm = ofpbuf_put_zeros(msg, sizeof *ofm); if ((protocol == OFPUTIL_P_OF11_STD && (fm->command == OFPFC_MODIFY || fm->command == OFPFC_MODIFY_STRICT) && fm->cookie_mask == htonll(0)) || fm->command == OFPFC_ADD) { ofm->cookie = fm->new_cookie; } else { ofm->cookie = fm->cookie & fm->cookie_mask; } ofm->cookie_mask = fm->cookie_mask; if (fm->table_id != OFPTT_ALL || (protocol != OFPUTIL_P_OF11_STD && (fm->command == OFPFC_DELETE || fm->command == OFPFC_DELETE_STRICT))) { ofm->table_id = fm->table_id; } else { ofm->table_id = 0; } ofm->command = fm->command; ofm->idle_timeout = htons(fm->idle_timeout); ofm->hard_timeout = htons(fm->hard_timeout); ofm->priority = htons(fm->priority); ofm->buffer_id = htonl(fm->buffer_id); ofm->out_port = ofputil_port_to_ofp11(fm->out_port); ofm->out_group = htonl(fm->out_group); ofm->flags = raw_flags; if (version >= OFP14_VERSION && fm->command == OFPFC_ADD) { ofm->importance = htons(fm->importance); } else { ofm->importance = 0; } ofputil_put_ofp11_match(msg, &fm->match, protocol); ofpacts_put_openflow_instructions(fm->ofpacts, fm->ofpacts_len, msg, version); break; } case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: { struct ofp10_flow_mod *ofm; msg = ofpraw_alloc(OFPRAW_OFPT10_FLOW_MOD, OFP10_VERSION, fm->ofpacts_len); ofm = ofpbuf_put_zeros(msg, sizeof *ofm); ofputil_match_to_ofp10_match(&fm->match, &ofm->match); ofm->cookie = fm->new_cookie; ofm->command = ofputil_tid_command(fm, protocol); ofm->idle_timeout = htons(fm->idle_timeout); ofm->hard_timeout = htons(fm->hard_timeout); ofm->priority = htons(fm->priority); ofm->buffer_id = htonl(fm->buffer_id); ofm->out_port = htons(ofp_to_u16(fm->out_port)); ofm->flags = raw_flags; ofpacts_put_openflow_actions(fm->ofpacts, fm->ofpacts_len, msg, version); break; } case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: { struct nx_flow_mod *nfm; int match_len; msg = ofpraw_alloc(OFPRAW_NXT_FLOW_MOD, OFP10_VERSION, NXM_TYPICAL_LEN + fm->ofpacts_len); nfm = ofpbuf_put_zeros(msg, sizeof *nfm); nfm->command = ofputil_tid_command(fm, protocol); nfm->cookie = fm->new_cookie; match_len = nx_put_match(msg, &fm->match, fm->cookie, fm->cookie_mask); nfm = msg->msg; nfm->idle_timeout = htons(fm->idle_timeout); nfm->hard_timeout = htons(fm->hard_timeout); nfm->priority = htons(fm->priority); nfm->buffer_id = htonl(fm->buffer_id); nfm->out_port = htons(ofp_to_u16(fm->out_port)); nfm->flags = raw_flags; nfm->match_len = htons(match_len); ofpacts_put_openflow_actions(fm->ofpacts, fm->ofpacts_len, msg, version); break; } default: OVS_NOT_REACHED(); } ofpmsg_update_length(msg); return msg; } static enum ofperr ofputil_decode_ofpst10_flow_request(struct ofputil_flow_stats_request *fsr, const struct ofp10_flow_stats_request *ofsr, bool aggregate) { fsr->aggregate = aggregate; ofputil_match_from_ofp10_match(&ofsr->match, &fsr->match); fsr->out_port = u16_to_ofp(ntohs(ofsr->out_port)); fsr->out_group = OFPG_ANY; fsr->table_id 
= ofsr->table_id; fsr->cookie = fsr->cookie_mask = htonll(0); return 0; } static enum ofperr ofputil_decode_ofpst11_flow_request(struct ofputil_flow_stats_request *fsr, struct ofpbuf *b, bool aggregate, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map) { const struct ofp11_flow_stats_request *ofsr; enum ofperr error; ofsr = ofpbuf_pull(b, sizeof *ofsr); fsr->aggregate = aggregate; fsr->table_id = ofsr->table_id; error = ofputil_port_from_ofp11(ofsr->out_port, &fsr->out_port); if (error) { return error; } fsr->out_group = ntohl(ofsr->out_group); fsr->cookie = ofsr->cookie; fsr->cookie_mask = ofsr->cookie_mask; error = ofputil_pull_ofp11_match(b, tun_table, vl_mff_map, &fsr->match, NULL); if (error) { return error; } return 0; } static enum ofperr ofputil_decode_nxst_flow_request(struct ofputil_flow_stats_request *fsr, struct ofpbuf *b, bool aggregate, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map) { const struct nx_flow_stats_request *nfsr; enum ofperr error; nfsr = ofpbuf_pull(b, sizeof *nfsr); error = nx_pull_match(b, ntohs(nfsr->match_len), &fsr->match, &fsr->cookie, &fsr->cookie_mask, tun_table, vl_mff_map); if (error) { return error; } if (b->size) { return OFPERR_OFPBRC_BAD_LEN; } fsr->aggregate = aggregate; fsr->out_port = u16_to_ofp(ntohs(nfsr->out_port)); fsr->out_group = OFPG_ANY; fsr->table_id = nfsr->table_id; return 0; } /* Constructs and returns an OFPT_QUEUE_GET_CONFIG request for the specified * 'port' and 'queue', suitable for OpenFlow version 'version'. * * 'queue' is honored only for OpenFlow 1.4 and later; older versions always * request all queues. */ struct ofpbuf * ofputil_encode_queue_get_config_request(enum ofp_version version, ofp_port_t port, uint32_t queue) { struct ofpbuf *request; if (version == OFP10_VERSION) { struct ofp10_queue_get_config_request *qgcr10; request = ofpraw_alloc(OFPRAW_OFPT10_QUEUE_GET_CONFIG_REQUEST, version, 0); qgcr10 = ofpbuf_put_zeros(request, sizeof *qgcr10); qgcr10->port = htons(ofp_to_u16(port)); } else if (version < OFP14_VERSION) { struct ofp11_queue_get_config_request *qgcr11; request = ofpraw_alloc(OFPRAW_OFPT11_QUEUE_GET_CONFIG_REQUEST, version, 0); qgcr11 = ofpbuf_put_zeros(request, sizeof *qgcr11); qgcr11->port = ofputil_port_to_ofp11(port); } else { struct ofp14_queue_desc_request *qdr14; request = ofpraw_alloc(OFPRAW_OFPST14_QUEUE_DESC_REQUEST, version, 0); qdr14 = ofpbuf_put_zeros(request, sizeof *qdr14); qdr14->port = ofputil_port_to_ofp11(port); qdr14->queue = htonl(queue); } return request; } /* Parses OFPT_QUEUE_GET_CONFIG request 'oh', storing the port specified by the * request into '*port'. Returns 0 if successful, otherwise an OpenFlow error * code. 
*/ enum ofperr ofputil_decode_queue_get_config_request(const struct ofp_header *oh, ofp_port_t *port, uint32_t *queue) { const struct ofp10_queue_get_config_request *qgcr10; const struct ofp11_queue_get_config_request *qgcr11; const struct ofp14_queue_desc_request *qdr14; struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); switch ((int) raw) { case OFPRAW_OFPT10_QUEUE_GET_CONFIG_REQUEST: qgcr10 = b.data; *port = u16_to_ofp(ntohs(qgcr10->port)); *queue = OFPQ_ALL; break; case OFPRAW_OFPT11_QUEUE_GET_CONFIG_REQUEST: qgcr11 = b.data; *queue = OFPQ_ALL; enum ofperr error = ofputil_port_from_ofp11(qgcr11->port, port); if (error || *port == OFPP_ANY) { return error; } break; case OFPRAW_OFPST14_QUEUE_DESC_REQUEST: qdr14 = b.data; *queue = ntohl(qdr14->queue); return ofputil_port_from_ofp11(qdr14->port, port); default: OVS_NOT_REACHED(); } return (ofp_to_u16(*port) < ofp_to_u16(OFPP_MAX) ? 0 : OFPERR_OFPQOFC_BAD_PORT); } /* Constructs and returns the beginning of a reply to * OFPT_QUEUE_GET_CONFIG_REQUEST or OFPMP_QUEUE_DESC request 'oh'. The caller * may append information about individual queues with * ofputil_append_queue_get_config_reply(). */ void ofputil_start_queue_get_config_reply(const struct ofp_header *request, struct ovs_list *replies) { struct ofpbuf *reply; enum ofperr error; ofp_port_t port; uint32_t queue; error = ofputil_decode_queue_get_config_request(request, &port, &queue); ovs_assert(!error); enum ofpraw raw = ofpraw_decode_assert(request); switch ((int) raw) { case OFPRAW_OFPT10_QUEUE_GET_CONFIG_REQUEST: reply = ofpraw_alloc_reply(OFPRAW_OFPT10_QUEUE_GET_CONFIG_REPLY, request, 0); struct ofp10_queue_get_config_reply *qgcr10 = ofpbuf_put_zeros(reply, sizeof *qgcr10); qgcr10->port = htons(ofp_to_u16(port)); break; case OFPRAW_OFPT11_QUEUE_GET_CONFIG_REQUEST: reply = ofpraw_alloc_reply(OFPRAW_OFPT11_QUEUE_GET_CONFIG_REPLY, request, 0); struct ofp11_queue_get_config_reply *qgcr11 = ofpbuf_put_zeros(reply, sizeof *qgcr11); qgcr11->port = ofputil_port_to_ofp11(port); break; case OFPRAW_OFPST14_QUEUE_DESC_REQUEST: reply = ofpraw_alloc_stats_reply(request, 0); break; default: OVS_NOT_REACHED(); } ovs_list_init(replies); ovs_list_push_back(replies, &reply->list_node); } static void put_ofp10_queue_rate(struct ofpbuf *reply, enum ofp10_queue_properties property, uint16_t rate) { if (rate != UINT16_MAX) { struct ofp10_queue_prop_rate *oqpr; oqpr = ofpbuf_put_zeros(reply, sizeof *oqpr); oqpr->prop_header.property = htons(property); oqpr->prop_header.len = htons(sizeof *oqpr); oqpr->rate = htons(rate); } } static void put_ofp14_queue_rate(struct ofpbuf *reply, enum ofp14_queue_desc_prop_type type, uint16_t rate) { if (rate != UINT16_MAX) { ofpprop_put_u16(reply, type, rate); } } void ofputil_append_queue_get_config_reply(const struct ofputil_queue_config *qc, struct ovs_list *replies) { enum ofp_version ofp_version = ofpmp_version(replies); struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); size_t start_ofs = reply->size; size_t len_ofs; ovs_be16 *len; if (ofp_version < OFP14_VERSION) { if (ofp_version < OFP12_VERSION) { struct ofp10_packet_queue *opq10; opq10 = ofpbuf_put_zeros(reply, sizeof *opq10); opq10->queue_id = htonl(qc->queue); len_ofs = (char *) &opq10->len - (char *) reply->data; } else { struct ofp12_packet_queue *opq12; opq12 = ofpbuf_put_zeros(reply, sizeof *opq12); opq12->port = ofputil_port_to_ofp11(qc->port); opq12->queue_id = htonl(qc->queue); len_ofs = (char *) &opq12->len - (char *) reply->data; } 
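        /* Queue properties in every version before OF1.4 use the same
         * OF1.0-style encoding, so one set of rate helpers serves both the
         * OF1.0 and OF1.2 queue structures built above. */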
        put_ofp10_queue_rate(reply, OFPQT10_MIN_RATE, qc->min_rate);
        put_ofp10_queue_rate(reply, OFPQT11_MAX_RATE, qc->max_rate);
    } else {
        struct ofp14_queue_desc *oqd = ofpbuf_put_zeros(reply, sizeof *oqd);
        oqd->port_no = ofputil_port_to_ofp11(qc->port);
        oqd->queue_id = htonl(qc->queue);
        len_ofs = (char *) &oqd->len - (char *) reply->data;
        put_ofp14_queue_rate(reply, OFPQDPT14_MIN_RATE, qc->min_rate);
        put_ofp14_queue_rate(reply, OFPQDPT14_MAX_RATE, qc->max_rate);
    }

    len = ofpbuf_at(reply, len_ofs, sizeof *len);
    *len = htons(reply->size - start_ofs);

    if (ofp_version >= OFP14_VERSION) {
        ofpmp_postappend(replies, start_ofs);
    }
}

static enum ofperr
parse_ofp10_queue_rate(const struct ofp10_queue_prop_header *hdr,
                       uint16_t *rate)
{
    const struct ofp10_queue_prop_rate *oqpr;

    if (hdr->len == htons(sizeof *oqpr)) {
        oqpr = (const struct ofp10_queue_prop_rate *) hdr;
        *rate = ntohs(oqpr->rate);
        return 0;
    } else {
        return OFPERR_OFPBRC_BAD_LEN;
    }
}

static int
ofputil_pull_queue_get_config_reply10(struct ofpbuf *msg,
                                      struct ofputil_queue_config *queue)
{
    const struct ofp_header *oh = msg->header;
    unsigned int opq_len;   /* Length of protocol-specific queue header. */
    unsigned int len;       /* Total length of queue + properties. */

    /* Obtain the port number from the message header. */
    if (oh->version == OFP10_VERSION) {
        const struct ofp10_queue_get_config_reply *oqgcr10 = msg->msg;
        queue->port = u16_to_ofp(ntohs(oqgcr10->port));
    } else {
        const struct ofp11_queue_get_config_reply *oqgcr11 = msg->msg;
        enum ofperr error = ofputil_port_from_ofp11(oqgcr11->port,
                                                    &queue->port);
        if (error) {
            return error;
        }
    }

    /* Pull off the queue header and get the queue number and length. */
    if (oh->version < OFP12_VERSION) {
        const struct ofp10_packet_queue *opq10;
        opq10 = ofpbuf_try_pull(msg, sizeof *opq10);
        if (!opq10) {
            return OFPERR_OFPBRC_BAD_LEN;
        }
        queue->queue = ntohl(opq10->queue_id);
        len = ntohs(opq10->len);
        opq_len = sizeof *opq10;
    } else {
        const struct ofp12_packet_queue *opq12;
        opq12 = ofpbuf_try_pull(msg, sizeof *opq12);
        if (!opq12) {
            return OFPERR_OFPBRC_BAD_LEN;
        }
        queue->queue = ntohl(opq12->queue_id);
        len = ntohs(opq12->len);
        opq_len = sizeof *opq12;
    }

    /* Length check. */
    if (len < opq_len || len > msg->size + opq_len || len % 8) {
        return OFPERR_OFPBRC_BAD_LEN;
    }
    len -= opq_len;

    /* Pull properties.  The format of these properties differs from that
     * used in OF1.4+, so we can't use the common property functions. */
    while (len > 0) {
        const struct ofp10_queue_prop_header *hdr;
        unsigned int property;
        unsigned int prop_len;
        enum ofperr error = 0;

        hdr = ofpbuf_at_assert(msg, 0, sizeof *hdr);
        prop_len = ntohs(hdr->len);
        if (prop_len < sizeof *hdr || prop_len > len || prop_len % 8) {
            return OFPERR_OFPBRC_BAD_LEN;
        }

        property = ntohs(hdr->property);
        switch (property) {
        case OFPQT10_MIN_RATE:
            error = parse_ofp10_queue_rate(hdr, &queue->min_rate);
            break;

        case OFPQT11_MAX_RATE:
            error = parse_ofp10_queue_rate(hdr, &queue->max_rate);
            break;

        default:
            VLOG_INFO_RL(&bad_ofmsg_rl, "unknown queue property %u",
                         property);
            break;
        }
        if (error) {
            return error;
        }

        ofpbuf_pull(msg, prop_len);
        len -= prop_len;
    }

    return 0;
}

static int
ofputil_pull_queue_get_config_reply14(struct ofpbuf *msg,
                                      struct ofputil_queue_config *queue)
{
    struct ofp14_queue_desc *oqd14 = ofpbuf_try_pull(msg, sizeof *oqd14);
    if (!oqd14) {
        return OFPERR_OFPBRC_BAD_LEN;
    }
    enum ofperr error = ofputil_port_from_ofp11(oqd14->port_no, &queue->port);
    if (error) {
        return error;
    }
    queue->queue = ntohl(oqd14->queue_id);

    /* Length check.
*/ unsigned int len = ntohs(oqd14->len); if (len < sizeof *oqd14 || len > msg->size + sizeof *oqd14 || len % 8) { return OFPERR_OFPBRC_BAD_LEN; } len -= sizeof *oqd14; struct ofpbuf properties = ofpbuf_const_initializer(ofpbuf_pull(msg, len), len); while (properties.size > 0) { struct ofpbuf payload; uint64_t type; error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case OFPQDPT14_MIN_RATE: error = ofpprop_parse_u16(&payload, &queue->min_rate); break; case OFPQDPT14_MAX_RATE: error = ofpprop_parse_u16(&payload, &queue->max_rate); break; default: error = OFPPROP_UNKNOWN(true, "queue desc", type); break; } if (error) { return error; } } return 0; } /* Decodes information about a queue from the OFPT_QUEUE_GET_CONFIG_REPLY in * 'reply' and stores it in '*queue'. ofputil_decode_queue_get_config_reply() * must already have pulled off the main header. * * This function returns EOF if the last queue has already been decoded, 0 if a * queue was successfully decoded into '*queue', or an ofperr if there was a * problem decoding 'reply'. */ int ofputil_pull_queue_get_config_reply(struct ofpbuf *msg, struct ofputil_queue_config *queue) { enum ofpraw raw; if (!msg->header) { /* Pull OpenFlow header. */ raw = ofpraw_pull_assert(msg); /* Pull protocol-specific ofp_queue_get_config_reply header (OF1.4 * doesn't have one at all). */ if (raw == OFPRAW_OFPT10_QUEUE_GET_CONFIG_REPLY) { ofpbuf_pull(msg, sizeof(struct ofp10_queue_get_config_reply)); } else if (raw == OFPRAW_OFPT11_QUEUE_GET_CONFIG_REPLY) { ofpbuf_pull(msg, sizeof(struct ofp11_queue_get_config_reply)); } else { ovs_assert(raw == OFPRAW_OFPST14_QUEUE_DESC_REPLY); } } else { raw = ofpraw_decode_assert(msg->header); } queue->min_rate = UINT16_MAX; queue->max_rate = UINT16_MAX; if (!msg->size) { return EOF; } else if (raw == OFPRAW_OFPST14_QUEUE_DESC_REPLY) { return ofputil_pull_queue_get_config_reply14(msg, queue); } else { return ofputil_pull_queue_get_config_reply10(msg, queue); } } /* Converts an OFPST_FLOW, OFPST_AGGREGATE, NXST_FLOW, or NXST_AGGREGATE * request 'oh', into an abstract flow_stats_request in 'fsr'. Returns 0 if * successful, otherwise an OpenFlow error code. * * 'vl_mff_map' is an optional parameter that is used to validate the length * of variable length mf_fields in 'match'. If it is not provided, the * default mf_fields with maximum length will be used. */ enum ofperr ofputil_decode_flow_stats_request(struct ofputil_flow_stats_request *fsr, const struct ofp_header *oh, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); switch ((int) raw) { case OFPRAW_OFPST10_FLOW_REQUEST: return ofputil_decode_ofpst10_flow_request(fsr, b.data, false); case OFPRAW_OFPST10_AGGREGATE_REQUEST: return ofputil_decode_ofpst10_flow_request(fsr, b.data, true); case OFPRAW_OFPST11_FLOW_REQUEST: return ofputil_decode_ofpst11_flow_request(fsr, &b, false, tun_table, vl_mff_map); case OFPRAW_OFPST11_AGGREGATE_REQUEST: return ofputil_decode_ofpst11_flow_request(fsr, &b, true, tun_table, vl_mff_map); case OFPRAW_NXST_FLOW_REQUEST: return ofputil_decode_nxst_flow_request(fsr, &b, false, tun_table, vl_mff_map); case OFPRAW_NXST_AGGREGATE_REQUEST: return ofputil_decode_nxst_flow_request(fsr, &b, true, tun_table, vl_mff_map); default: /* Hey, the caller lied. 
*/ OVS_NOT_REACHED(); } } /* Converts abstract flow_stats_request 'fsr' into an OFPST_FLOW, * OFPST_AGGREGATE, NXST_FLOW, or NXST_AGGREGATE request 'oh' according to * 'protocol', and returns the message. */ struct ofpbuf * ofputil_encode_flow_stats_request(const struct ofputil_flow_stats_request *fsr, enum ofputil_protocol protocol) { struct ofpbuf *msg; enum ofpraw raw; switch (protocol) { case OFPUTIL_P_OF11_STD: case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: { struct ofp11_flow_stats_request *ofsr; raw = (fsr->aggregate ? OFPRAW_OFPST11_AGGREGATE_REQUEST : OFPRAW_OFPST11_FLOW_REQUEST); msg = ofpraw_alloc(raw, ofputil_protocol_to_ofp_version(protocol), ofputil_match_typical_len(protocol)); ofsr = ofpbuf_put_zeros(msg, sizeof *ofsr); ofsr->table_id = fsr->table_id; ofsr->out_port = ofputil_port_to_ofp11(fsr->out_port); ofsr->out_group = htonl(fsr->out_group); ofsr->cookie = fsr->cookie; ofsr->cookie_mask = fsr->cookie_mask; ofputil_put_ofp11_match(msg, &fsr->match, protocol); break; } case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: { struct ofp10_flow_stats_request *ofsr; raw = (fsr->aggregate ? OFPRAW_OFPST10_AGGREGATE_REQUEST : OFPRAW_OFPST10_FLOW_REQUEST); msg = ofpraw_alloc(raw, OFP10_VERSION, 0); ofsr = ofpbuf_put_zeros(msg, sizeof *ofsr); ofputil_match_to_ofp10_match(&fsr->match, &ofsr->match); ofsr->table_id = fsr->table_id; ofsr->out_port = htons(ofp_to_u16(fsr->out_port)); break; } case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: { struct nx_flow_stats_request *nfsr; int match_len; raw = (fsr->aggregate ? OFPRAW_NXST_AGGREGATE_REQUEST : OFPRAW_NXST_FLOW_REQUEST); msg = ofpraw_alloc(raw, OFP10_VERSION, NXM_TYPICAL_LEN); ofpbuf_put_zeros(msg, sizeof *nfsr); match_len = nx_put_match(msg, &fsr->match, fsr->cookie, fsr->cookie_mask); nfsr = msg->msg; nfsr->out_port = htons(ofp_to_u16(fsr->out_port)); nfsr->match_len = htons(match_len); nfsr->table_id = fsr->table_id; break; } default: OVS_NOT_REACHED(); } return msg; } /* Converts an OFPST_FLOW or NXST_FLOW reply in 'msg' into an abstract * ofputil_flow_stats in 'fs'. * * Multiple OFPST_FLOW or NXST_FLOW replies can be packed into a single * OpenFlow message. Calling this function multiple times for a single 'msg' * iterates through the replies. The caller must initially leave 'msg''s layer * pointers null and not modify them between calls. * * Most switches don't send the values needed to populate fs->idle_age and * fs->hard_age, so those members will usually be set to 0. If the switch from * which 'msg' originated is known to implement NXT_FLOW_AGE, then pass * 'flow_age_extension' as true so that the contents of 'msg' determine the * 'idle_age' and 'hard_age' members in 'fs'. * * Uses 'ofpacts' to store the abstract OFPACT_* version of the flow stats * reply's actions. The caller must initialize 'ofpacts' and retains ownership * of it. 'fs->ofpacts' will point into the 'ofpacts' buffer. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_flow_stats_reply(struct ofputil_flow_stats *fs, struct ofpbuf *msg, bool flow_age_extension, struct ofpbuf *ofpacts) { const struct ofp_header *oh; size_t instructions_len; enum ofperr error; enum ofpraw raw; error = (msg->header ? 
ofpraw_decode(&raw, msg->header) : ofpraw_pull(&raw, msg)); if (error) { return error; } oh = msg->header; if (!msg->size) { return EOF; } else if (raw == OFPRAW_OFPST11_FLOW_REPLY || raw == OFPRAW_OFPST13_FLOW_REPLY) { const struct ofp11_flow_stats *ofs; size_t length; uint16_t padded_match_len; ofs = ofpbuf_try_pull(msg, sizeof *ofs); if (!ofs) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply has %"PRIu32" leftover " "bytes at end", msg->size); return EINVAL; } length = ntohs(ofs->length); if (length < sizeof *ofs) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply claims invalid " "length %"PRIuSIZE, length); return EINVAL; } if (ofputil_pull_ofp11_match(msg, NULL, NULL, &fs->match, &padded_match_len)) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply bad match"); return EINVAL; } instructions_len = length - sizeof *ofs - padded_match_len; fs->priority = ntohs(ofs->priority); fs->table_id = ofs->table_id; fs->duration_sec = ntohl(ofs->duration_sec); fs->duration_nsec = ntohl(ofs->duration_nsec); fs->idle_timeout = ntohs(ofs->idle_timeout); fs->hard_timeout = ntohs(ofs->hard_timeout); if (oh->version >= OFP14_VERSION) { fs->importance = ntohs(ofs->importance); } else { fs->importance = 0; } if (raw == OFPRAW_OFPST13_FLOW_REPLY) { error = ofputil_decode_flow_mod_flags(ofs->flags, -1, oh->version, &fs->flags); if (error) { return error; } } else { fs->flags = 0; } fs->idle_age = -1; fs->hard_age = -1; fs->cookie = ofs->cookie; fs->packet_count = ntohll(ofs->packet_count); fs->byte_count = ntohll(ofs->byte_count); } else if (raw == OFPRAW_OFPST10_FLOW_REPLY) { const struct ofp10_flow_stats *ofs; size_t length; ofs = ofpbuf_try_pull(msg, sizeof *ofs); if (!ofs) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply has %"PRIu32" leftover " "bytes at end", msg->size); return EINVAL; } length = ntohs(ofs->length); if (length < sizeof *ofs) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply claims invalid " "length %"PRIuSIZE, length); return EINVAL; } instructions_len = length - sizeof *ofs; fs->cookie = get_32aligned_be64(&ofs->cookie); ofputil_match_from_ofp10_match(&ofs->match, &fs->match); fs->priority = ntohs(ofs->priority); fs->table_id = ofs->table_id; fs->duration_sec = ntohl(ofs->duration_sec); fs->duration_nsec = ntohl(ofs->duration_nsec); fs->idle_timeout = ntohs(ofs->idle_timeout); fs->hard_timeout = ntohs(ofs->hard_timeout); fs->importance = 0; fs->idle_age = -1; fs->hard_age = -1; fs->packet_count = ntohll(get_32aligned_be64(&ofs->packet_count)); fs->byte_count = ntohll(get_32aligned_be64(&ofs->byte_count)); fs->flags = 0; } else if (raw == OFPRAW_NXST_FLOW_REPLY) { const struct nx_flow_stats *nfs; size_t match_len, length; nfs = ofpbuf_try_pull(msg, sizeof *nfs); if (!nfs) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW reply has %"PRIu32" leftover " "bytes at end", msg->size); return EINVAL; } length = ntohs(nfs->length); match_len = ntohs(nfs->match_len); if (length < sizeof *nfs + ROUND_UP(match_len, 8)) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW reply with match_len=%"PRIuSIZE" " "claims invalid length %"PRIuSIZE, match_len, length); return EINVAL; } if (nx_pull_match(msg, match_len, &fs->match, NULL, NULL, NULL, NULL)) { return EINVAL; } instructions_len = length - sizeof *nfs - ROUND_UP(match_len, 8); fs->cookie = nfs->cookie; fs->table_id = nfs->table_id; fs->duration_sec = ntohl(nfs->duration_sec); fs->duration_nsec = ntohl(nfs->duration_nsec); fs->priority = ntohs(nfs->priority); fs->idle_timeout = ntohs(nfs->idle_timeout); fs->hard_timeout = ntohs(nfs->hard_timeout); fs->importance = 0; 
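        /* NXST_FLOW transmits the ages biased by 1 so that zero can mean
         * "unknown"; internally, -1 means "unknown".  (See the matching
         * "+ 1" in ofputil_append_flow_stats_reply() below.) */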
fs->idle_age = -1; fs->hard_age = -1; if (flow_age_extension) { if (nfs->idle_age) { fs->idle_age = ntohs(nfs->idle_age) - 1; } if (nfs->hard_age) { fs->hard_age = ntohs(nfs->hard_age) - 1; } } fs->packet_count = ntohll(nfs->packet_count); fs->byte_count = ntohll(nfs->byte_count); fs->flags = 0; } else { OVS_NOT_REACHED(); } if (ofpacts_pull_openflow_instructions(msg, instructions_len, oh->version, NULL, NULL, ofpacts)) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_FLOW reply bad instructions"); return EINVAL; } fs->ofpacts = ofpacts->data; fs->ofpacts_len = ofpacts->size; return 0; } /* Returns 'count' unchanged except that UINT64_MAX becomes 0. * * We use this in situations where OVS internally uses UINT64_MAX to mean * "value unknown" but OpenFlow 1.0 does not define any unknown value. */ static uint64_t unknown_to_zero(uint64_t count) { return count != UINT64_MAX ? count : 0; } /* Appends an OFPST_FLOW or NXST_FLOW reply that contains the data in 'fs' to * those already present in the list of ofpbufs in 'replies'. 'replies' should * have been initialized with ofpmp_init(). */ void ofputil_append_flow_stats_reply(const struct ofputil_flow_stats *fs, struct ovs_list *replies, const struct tun_table *tun_table) { struct ofputil_flow_stats *fs_ = CONST_CAST(struct ofputil_flow_stats *, fs); const struct tun_table *orig_tun_table; struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); size_t start_ofs = reply->size; enum ofp_version version = ofpmp_version(replies); enum ofpraw raw = ofpmp_decode_raw(replies); orig_tun_table = fs->match.flow.tunnel.metadata.tab; fs_->match.flow.tunnel.metadata.tab = tun_table; if (raw == OFPRAW_OFPST11_FLOW_REPLY || raw == OFPRAW_OFPST13_FLOW_REPLY) { struct ofp11_flow_stats *ofs; ofpbuf_put_uninit(reply, sizeof *ofs); oxm_put_match(reply, &fs->match, version); ofpacts_put_openflow_instructions(fs->ofpacts, fs->ofpacts_len, reply, version); ofs = ofpbuf_at_assert(reply, start_ofs, sizeof *ofs); ofs->length = htons(reply->size - start_ofs); ofs->table_id = fs->table_id; ofs->pad = 0; ofs->duration_sec = htonl(fs->duration_sec); ofs->duration_nsec = htonl(fs->duration_nsec); ofs->priority = htons(fs->priority); ofs->idle_timeout = htons(fs->idle_timeout); ofs->hard_timeout = htons(fs->hard_timeout); if (version >= OFP14_VERSION) { ofs->importance = htons(fs->importance); } else { ofs->importance = 0; } if (raw == OFPRAW_OFPST13_FLOW_REPLY) { ofs->flags = ofputil_encode_flow_mod_flags(fs->flags, version); } else { ofs->flags = 0; } memset(ofs->pad2, 0, sizeof ofs->pad2); ofs->cookie = fs->cookie; ofs->packet_count = htonll(unknown_to_zero(fs->packet_count)); ofs->byte_count = htonll(unknown_to_zero(fs->byte_count)); } else if (raw == OFPRAW_OFPST10_FLOW_REPLY) { struct ofp10_flow_stats *ofs; ofpbuf_put_uninit(reply, sizeof *ofs); ofpacts_put_openflow_actions(fs->ofpacts, fs->ofpacts_len, reply, version); ofs = ofpbuf_at_assert(reply, start_ofs, sizeof *ofs); ofs->length = htons(reply->size - start_ofs); ofs->table_id = fs->table_id; ofs->pad = 0; ofputil_match_to_ofp10_match(&fs->match, &ofs->match); ofs->duration_sec = htonl(fs->duration_sec); ofs->duration_nsec = htonl(fs->duration_nsec); ofs->priority = htons(fs->priority); ofs->idle_timeout = htons(fs->idle_timeout); ofs->hard_timeout = htons(fs->hard_timeout); memset(ofs->pad2, 0, sizeof ofs->pad2); put_32aligned_be64(&ofs->cookie, fs->cookie); put_32aligned_be64(&ofs->packet_count, htonll(unknown_to_zero(fs->packet_count))); put_32aligned_be64(&ofs->byte_count, htonll(unknown_to_zero(fs->byte_count))); 
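        /* (struct ofp10_flow_stats is only 32-bit aligned, which is why the
         * 64-bit cookie and counters above go through
         * put_32aligned_be64().) */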
} else if (raw == OFPRAW_NXST_FLOW_REPLY) { struct nx_flow_stats *nfs; int match_len; ofpbuf_put_uninit(reply, sizeof *nfs); match_len = nx_put_match(reply, &fs->match, 0, 0); ofpacts_put_openflow_actions(fs->ofpacts, fs->ofpacts_len, reply, version); nfs = ofpbuf_at_assert(reply, start_ofs, sizeof *nfs); nfs->length = htons(reply->size - start_ofs); nfs->table_id = fs->table_id; nfs->pad = 0; nfs->duration_sec = htonl(fs->duration_sec); nfs->duration_nsec = htonl(fs->duration_nsec); nfs->priority = htons(fs->priority); nfs->idle_timeout = htons(fs->idle_timeout); nfs->hard_timeout = htons(fs->hard_timeout); nfs->idle_age = htons(fs->idle_age < 0 ? 0 : fs->idle_age < UINT16_MAX ? fs->idle_age + 1 : UINT16_MAX); nfs->hard_age = htons(fs->hard_age < 0 ? 0 : fs->hard_age < UINT16_MAX ? fs->hard_age + 1 : UINT16_MAX); nfs->match_len = htons(match_len); nfs->cookie = fs->cookie; nfs->packet_count = htonll(fs->packet_count); nfs->byte_count = htonll(fs->byte_count); } else { OVS_NOT_REACHED(); } ofpmp_postappend(replies, start_ofs); fs_->match.flow.tunnel.metadata.tab = orig_tun_table; } /* Converts abstract ofputil_aggregate_stats 'stats' into an OFPST_AGGREGATE or * NXST_AGGREGATE reply matching 'request', and returns the message. */ struct ofpbuf * ofputil_encode_aggregate_stats_reply( const struct ofputil_aggregate_stats *stats, const struct ofp_header *request) { struct ofp_aggregate_stats_reply *asr; uint64_t packet_count; uint64_t byte_count; struct ofpbuf *msg; enum ofpraw raw; ofpraw_decode(&raw, request); if (raw == OFPRAW_OFPST10_AGGREGATE_REQUEST) { packet_count = unknown_to_zero(stats->packet_count); byte_count = unknown_to_zero(stats->byte_count); } else { packet_count = stats->packet_count; byte_count = stats->byte_count; } msg = ofpraw_alloc_stats_reply(request, 0); asr = ofpbuf_put_zeros(msg, sizeof *asr); put_32aligned_be64(&asr->packet_count, htonll(packet_count)); put_32aligned_be64(&asr->byte_count, htonll(byte_count)); asr->flow_count = htonl(stats->flow_count); return msg; } enum ofperr ofputil_decode_aggregate_stats_reply(struct ofputil_aggregate_stats *stats, const struct ofp_header *reply) { struct ofpbuf msg = ofpbuf_const_initializer(reply, ntohs(reply->length)); ofpraw_pull_assert(&msg); struct ofp_aggregate_stats_reply *asr = msg.msg; stats->packet_count = ntohll(get_32aligned_be64(&asr->packet_count)); stats->byte_count = ntohll(get_32aligned_be64(&asr->byte_count)); stats->flow_count = ntohl(asr->flow_count); return 0; } /* Converts an OFPT_FLOW_REMOVED or NXT_FLOW_REMOVED message 'oh' into an * abstract ofputil_flow_removed in 'fr'. Returns 0 if successful, otherwise * an OpenFlow error code. 
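 *
 * A minimal usage sketch (illustrative only; receiving and validating 'oh'
 * is assumed to happen elsewhere):
 *
 *     struct ofputil_flow_removed fr;
 *     enum ofperr error = ofputil_decode_flow_removed(&fr, oh);
 *     if (!error) {
 *         (react to the removed flow described by 'fr')
 *     }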
*/ enum ofperr ofputil_decode_flow_removed(struct ofputil_flow_removed *fr, const struct ofp_header *oh) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT11_FLOW_REMOVED) { const struct ofp12_flow_removed *ofr; enum ofperr error; ofr = ofpbuf_pull(&b, sizeof *ofr); error = ofputil_pull_ofp11_match(&b, NULL, NULL, &fr->match, NULL); if (error) { return error; } fr->priority = ntohs(ofr->priority); fr->cookie = ofr->cookie; fr->reason = ofr->reason; fr->table_id = ofr->table_id; fr->duration_sec = ntohl(ofr->duration_sec); fr->duration_nsec = ntohl(ofr->duration_nsec); fr->idle_timeout = ntohs(ofr->idle_timeout); fr->hard_timeout = ntohs(ofr->hard_timeout); fr->packet_count = ntohll(ofr->packet_count); fr->byte_count = ntohll(ofr->byte_count); } else if (raw == OFPRAW_OFPT10_FLOW_REMOVED) { const struct ofp10_flow_removed *ofr; ofr = ofpbuf_pull(&b, sizeof *ofr); ofputil_match_from_ofp10_match(&ofr->match, &fr->match); fr->priority = ntohs(ofr->priority); fr->cookie = ofr->cookie; fr->reason = ofr->reason; fr->table_id = 255; fr->duration_sec = ntohl(ofr->duration_sec); fr->duration_nsec = ntohl(ofr->duration_nsec); fr->idle_timeout = ntohs(ofr->idle_timeout); fr->hard_timeout = 0; fr->packet_count = ntohll(ofr->packet_count); fr->byte_count = ntohll(ofr->byte_count); } else if (raw == OFPRAW_NXT_FLOW_REMOVED) { struct nx_flow_removed *nfr; enum ofperr error; nfr = ofpbuf_pull(&b, sizeof *nfr); error = nx_pull_match(&b, ntohs(nfr->match_len), &fr->match, NULL, NULL, NULL, NULL); if (error) { return error; } if (b.size) { return OFPERR_OFPBRC_BAD_LEN; } fr->priority = ntohs(nfr->priority); fr->cookie = nfr->cookie; fr->reason = nfr->reason; fr->table_id = nfr->table_id ? nfr->table_id - 1 : 255; fr->duration_sec = ntohl(nfr->duration_sec); fr->duration_nsec = ntohl(nfr->duration_nsec); fr->idle_timeout = ntohs(nfr->idle_timeout); fr->hard_timeout = 0; fr->packet_count = ntohll(nfr->packet_count); fr->byte_count = ntohll(nfr->byte_count); } else { OVS_NOT_REACHED(); } return 0; } /* Converts abstract ofputil_flow_removed 'fr' into an OFPT_FLOW_REMOVED or * NXT_FLOW_REMOVED message 'oh' according to 'protocol', and returns the * message. 
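 *
 * Note that OFPRR_METER_DELETE can only be expressed from OF1.4 on; for
 * older protocols this function downgrades that reason to OFPRR_DELETE.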
 */
struct ofpbuf *
ofputil_encode_flow_removed(const struct ofputil_flow_removed *fr,
                            enum ofputil_protocol protocol)
{
    struct ofpbuf *msg;
    enum ofp_flow_removed_reason reason = fr->reason;

    if (reason == OFPRR_METER_DELETE && !(protocol & OFPUTIL_P_OF14_UP)) {
        reason = OFPRR_DELETE;
    }

    switch (protocol) {
    case OFPUTIL_P_OF11_STD:
    case OFPUTIL_P_OF12_OXM:
    case OFPUTIL_P_OF13_OXM:
    case OFPUTIL_P_OF14_OXM:
    case OFPUTIL_P_OF15_OXM:
    case OFPUTIL_P_OF16_OXM: {
        struct ofp12_flow_removed *ofr;

        msg = ofpraw_alloc_xid(OFPRAW_OFPT11_FLOW_REMOVED,
                               ofputil_protocol_to_ofp_version(protocol),
                               htonl(0),
                               ofputil_match_typical_len(protocol));
        ofr = ofpbuf_put_zeros(msg, sizeof *ofr);
        ofr->cookie = fr->cookie;
        ofr->priority = htons(fr->priority);
        ofr->reason = reason;
        ofr->table_id = fr->table_id;
        ofr->duration_sec = htonl(fr->duration_sec);
        ofr->duration_nsec = htonl(fr->duration_nsec);
        ofr->idle_timeout = htons(fr->idle_timeout);
        ofr->hard_timeout = htons(fr->hard_timeout);
        ofr->packet_count = htonll(fr->packet_count);
        ofr->byte_count = htonll(fr->byte_count);
        ofputil_put_ofp11_match(msg, &fr->match, protocol);
        break;
    }

    case OFPUTIL_P_OF10_STD:
    case OFPUTIL_P_OF10_STD_TID: {
        struct ofp10_flow_removed *ofr;

        msg = ofpraw_alloc_xid(OFPRAW_OFPT10_FLOW_REMOVED, OFP10_VERSION,
                               htonl(0), 0);
        ofr = ofpbuf_put_zeros(msg, sizeof *ofr);
        ofputil_match_to_ofp10_match(&fr->match, &ofr->match);
        ofr->cookie = fr->cookie;
        ofr->priority = htons(fr->priority);
        ofr->reason = reason;
        ofr->duration_sec = htonl(fr->duration_sec);
        ofr->duration_nsec = htonl(fr->duration_nsec);
        ofr->idle_timeout = htons(fr->idle_timeout);
        ofr->packet_count = htonll(unknown_to_zero(fr->packet_count));
        ofr->byte_count = htonll(unknown_to_zero(fr->byte_count));
        break;
    }

    case OFPUTIL_P_OF10_NXM:
    case OFPUTIL_P_OF10_NXM_TID: {
        struct nx_flow_removed *nfr;
        int match_len;

        msg = ofpraw_alloc_xid(OFPRAW_NXT_FLOW_REMOVED, OFP10_VERSION,
                               htonl(0), NXM_TYPICAL_LEN);
        ofpbuf_put_zeros(msg, sizeof *nfr);
        match_len = nx_put_match(msg, &fr->match, 0, 0);

        nfr = msg->msg;
        nfr->cookie = fr->cookie;
        nfr->priority = htons(fr->priority);
        nfr->reason = reason;
        nfr->table_id = fr->table_id + 1;
        nfr->duration_sec = htonl(fr->duration_sec);
        nfr->duration_nsec = htonl(fr->duration_nsec);
        nfr->idle_timeout = htons(fr->idle_timeout);
        nfr->match_len = htons(match_len);
        nfr->packet_count = htonll(fr->packet_count);
        nfr->byte_count = htonll(fr->byte_count);
        break;
    }

    default:
        OVS_NOT_REACHED();
    }

    return msg;
}

/* The caller has done basic initialization of '*pin'; the other output
 * arguments need to be initialized.
*/ static enum ofperr decode_nx_packet_in2(const struct ofp_header *oh, bool loose, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct ofputil_packet_in *pin, size_t *total_len, uint32_t *buffer_id, struct ofpbuf *continuation) { *total_len = 0; *buffer_id = UINT32_MAX; struct ofpbuf properties; ofpbuf_use_const(&properties, oh, ntohs(oh->length)); ofpraw_pull_assert(&properties); while (properties.size > 0) { struct ofpbuf payload; uint64_t type; enum ofperr error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case NXPINT_PACKET: pin->packet = payload.msg; pin->packet_len = ofpbuf_msgsize(&payload); break; case NXPINT_FULL_LEN: { uint32_t u32; error = ofpprop_parse_u32(&payload, &u32); *total_len = u32; break; } case NXPINT_BUFFER_ID: error = ofpprop_parse_u32(&payload, buffer_id); break; case NXPINT_TABLE_ID: error = ofpprop_parse_u8(&payload, &pin->table_id); break; case NXPINT_COOKIE: error = ofpprop_parse_be64(&payload, &pin->cookie); break; case NXPINT_REASON: { uint8_t reason; error = ofpprop_parse_u8(&payload, &reason); pin->reason = reason; break; } case NXPINT_METADATA: error = oxm_decode_match(payload.msg, ofpbuf_msgsize(&payload), loose, tun_table, vl_mff_map, &pin->flow_metadata); break; case NXPINT_USERDATA: pin->userdata = payload.msg; pin->userdata_len = ofpbuf_msgsize(&payload); break; case NXPINT_CONTINUATION: if (continuation) { error = ofpprop_parse_nested(&payload, continuation); } break; default: error = OFPPROP_UNKNOWN(loose, "NX_PACKET_IN2", type); break; } if (error) { return error; } } if (!pin->packet_len) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXT_PACKET_IN2 lacks packet"); return OFPERR_OFPBRC_BAD_LEN; } else if (!*total_len) { *total_len = pin->packet_len; } else if (*total_len < pin->packet_len) { VLOG_WARN_RL(&bad_ofmsg_rl, "NXT_PACKET_IN2 claimed full_len < len"); return OFPERR_OFPBRC_BAD_LEN; } return 0; } /* Decodes the packet-in message starting at 'oh' into '*pin'. Populates * 'pin->packet' and 'pin->packet_len' with the part of the packet actually * included in the message. If 'total_lenp' is nonnull, populates * '*total_lenp' with the original length of the packet (which is larger than * 'packet->len' if only part of the packet was included). If 'buffer_idp' is * nonnull, stores the packet's buffer ID in '*buffer_idp' (UINT32_MAX if it * was not buffered). * * Populates 'continuation', if nonnull, with the continuation data from the * packet-in (an empty buffer, if 'oh' did not contain continuation data). The * format of this data is supposed to be opaque to anything other than * ovs-vswitchd, so that in any other process the only reasonable use of this * data is to be copied into an NXT_RESUME message via ofputil_encode_resume(). * * This function points 'pin->packet' into 'oh', so the caller should not free * it separately from the original OpenFlow message. This is also true for * 'pin->userdata' (which could also end up NULL if there is no userdata). * * 'vl_mff_map' is an optional parameter that is used to validate the length * of variable length mf_fields in 'match'. If it is not provided, the * default mf_fields with maximum length will be used. * * Returns 0 if successful, otherwise an OpenFlow error code. 
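 *
 * A typical call looks like this (an illustrative sketch, with 'loose'
 * decoding and NULL for the optional tun_table, vl_mff_map, and
 * continuation arguments):
 *
 *     struct ofputil_packet_in pin;
 *     size_t total_len;
 *     uint32_t buffer_id;
 *     enum ofperr error = ofputil_decode_packet_in(oh, true, NULL, NULL,
 *                                                  &pin, &total_len,
 *                                                  &buffer_id, NULL);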
*/ enum ofperr ofputil_decode_packet_in(const struct ofp_header *oh, bool loose, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct ofputil_packet_in *pin, size_t *total_lenp, uint32_t *buffer_idp, struct ofpbuf *continuation) { uint32_t buffer_id; size_t total_len; memset(pin, 0, sizeof *pin); pin->cookie = OVS_BE64_MAX; if (continuation) { ofpbuf_use_const(continuation, NULL, 0); } struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT13_PACKET_IN || raw == OFPRAW_OFPT12_PACKET_IN) { const struct ofp12_packet_in *opi = ofpbuf_pull(&b, sizeof *opi); const ovs_be64 *cookie = (raw == OFPRAW_OFPT13_PACKET_IN ? ofpbuf_pull(&b, sizeof *cookie) : NULL); enum ofperr error = oxm_pull_match_loose(&b, tun_table, &pin->flow_metadata); if (error) { return error; } if (!ofpbuf_try_pull(&b, 2)) { return OFPERR_OFPBRC_BAD_LEN; } pin->reason = opi->reason; pin->table_id = opi->table_id; buffer_id = ntohl(opi->buffer_id); total_len = ntohs(opi->total_len); if (cookie) { pin->cookie = *cookie; } pin->packet = b.data; pin->packet_len = b.size; } else if (raw == OFPRAW_OFPT10_PACKET_IN) { const struct ofp10_packet_in *opi; opi = ofpbuf_pull(&b, offsetof(struct ofp10_packet_in, data)); pin->packet = CONST_CAST(uint8_t *, opi->data); pin->packet_len = b.size; match_init_catchall(&pin->flow_metadata); match_set_in_port(&pin->flow_metadata, u16_to_ofp(ntohs(opi->in_port))); pin->reason = opi->reason; buffer_id = ntohl(opi->buffer_id); total_len = ntohs(opi->total_len); } else if (raw == OFPRAW_OFPT11_PACKET_IN) { const struct ofp11_packet_in *opi; ofp_port_t in_port; enum ofperr error; opi = ofpbuf_pull(&b, sizeof *opi); pin->packet = b.data; pin->packet_len = b.size; buffer_id = ntohl(opi->buffer_id); error = ofputil_port_from_ofp11(opi->in_port, &in_port); if (error) { return error; } match_init_catchall(&pin->flow_metadata); match_set_in_port(&pin->flow_metadata, in_port); total_len = ntohs(opi->total_len); pin->reason = opi->reason; pin->table_id = opi->table_id; } else if (raw == OFPRAW_NXT_PACKET_IN) { const struct nx_packet_in *npi; int error; npi = ofpbuf_pull(&b, sizeof *npi); error = nx_pull_match_loose(&b, ntohs(npi->match_len), &pin->flow_metadata, NULL, NULL, NULL); if (error) { return error; } if (!ofpbuf_try_pull(&b, 2)) { return OFPERR_OFPBRC_BAD_LEN; } pin->reason = npi->reason; pin->table_id = npi->table_id; pin->cookie = npi->cookie; buffer_id = ntohl(npi->buffer_id); total_len = ntohs(npi->total_len); pin->packet = b.data; pin->packet_len = b.size; } else if (raw == OFPRAW_NXT_PACKET_IN2 || raw == OFPRAW_NXT_RESUME) { enum ofperr error = decode_nx_packet_in2(oh, loose, tun_table, vl_mff_map, pin, &total_len, &buffer_id, continuation); if (error) { return error; } } else { OVS_NOT_REACHED(); } if (total_lenp) { *total_lenp = total_len; } if (buffer_idp) { *buffer_idp = buffer_id; } return 0; } static int encode_packet_in_reason(enum ofp_packet_in_reason reason, enum ofp_version version) { switch (reason) { case OFPR_NO_MATCH: case OFPR_ACTION: case OFPR_INVALID_TTL: return reason; case OFPR_ACTION_SET: case OFPR_GROUP: case OFPR_PACKET_OUT: return version < OFP14_VERSION ? OFPR_ACTION : reason; case OFPR_EXPLICIT_MISS: return version < OFP13_VERSION ? OFPR_ACTION : OFPR_NO_MATCH; case OFPR_IMPLICIT_MISS: return OFPR_NO_MATCH; case OFPR_N_REASONS: default: OVS_NOT_REACHED(); } } /* Only NXT_PACKET_IN2 (not NXT_RESUME) should include NXCPT_USERDATA, so this * function omits it. 
 * The caller can add it itself if desired. */
static void
ofputil_put_packet_in(const struct ofputil_packet_in *pin,
                      enum ofp_version version, size_t include_bytes,
                      struct ofpbuf *msg)
{
    /* Add packet properties. */
    ofpprop_put(msg, NXPINT_PACKET, pin->packet, include_bytes);
    if (include_bytes != pin->packet_len) {
        ofpprop_put_u32(msg, NXPINT_FULL_LEN, pin->packet_len);
    }

    /* Add flow properties. */
    ofpprop_put_u8(msg, NXPINT_TABLE_ID, pin->table_id);
    if (pin->cookie != OVS_BE64_MAX) {
        ofpprop_put_be64(msg, NXPINT_COOKIE, pin->cookie);
    }

    /* Add other properties. */
    ofpprop_put_u8(msg, NXPINT_REASON,
                   encode_packet_in_reason(pin->reason, version));

    size_t start = ofpprop_start(msg, NXPINT_METADATA);
    oxm_put_raw(msg, &pin->flow_metadata, version);
    ofpprop_end(msg, start);
}

static void
put_actions_property(struct ofpbuf *msg, uint64_t prop_type,
                     enum ofp_version version,
                     const struct ofpact *actions, size_t actions_len)
{
    if (actions_len) {
        size_t start = ofpprop_start_nested(msg, prop_type);
        ofpacts_put_openflow_actions(actions, actions_len, msg, version);
        ofpprop_end(msg, start);
    }
}

enum nx_continuation_prop_type {
    NXCPT_BRIDGE = 0x8000,
    NXCPT_STACK,
    NXCPT_MIRRORS,
    NXCPT_CONNTRACKED,
    NXCPT_TABLE_ID,
    NXCPT_COOKIE,
    NXCPT_ACTIONS,
    NXCPT_ACTION_SET,
};

/* Only NXT_PACKET_IN2 (not NXT_RESUME) should include NXCPT_USERDATA, so this
 * function omits it.  The caller can add it itself if desired. */
static void
ofputil_put_packet_in_private(const struct ofputil_packet_in_private *pin,
                              enum ofp_version version, size_t include_bytes,
                              struct ofpbuf *msg)
{
    ofputil_put_packet_in(&pin->public, version, include_bytes, msg);

    size_t continuation_ofs = ofpprop_start_nested(msg, NXPINT_CONTINUATION);
    size_t inner_ofs = msg->size;

    if (!uuid_is_zero(&pin->bridge)) {
        ofpprop_put_uuid(msg, NXCPT_BRIDGE, &pin->bridge);
    }

    struct ofpbuf pin_stack;
    ofpbuf_use_const(&pin_stack, pin->stack, pin->stack_size);

    while (pin_stack.size) {
        uint8_t len;
        uint8_t *val = nx_stack_pop(&pin_stack, &len);
        ofpprop_put(msg, NXCPT_STACK, val, len);
    }

    if (pin->mirrors) {
        ofpprop_put_u32(msg, NXCPT_MIRRORS, pin->mirrors);
    }

    if (pin->conntracked) {
        ofpprop_put_flag(msg, NXCPT_CONNTRACKED);
    }

    if (pin->actions_len) {
        /* Divide 'pin->actions' into groups that begin with an
         * unroll_xlate action.  For each group, emit a NXCPT_TABLE_ID and
         * NXCPT_COOKIE property (if either has changed; each is initially
         * assumed 0), then a NXCPT_ACTIONS property with the grouped
         * actions.
         *
         * The alternative is to make OFPACT_UNROLL_XLATE public.  We can
         * always do that later, since this is a private property. */
        const struct ofpact *const end = ofpact_end(pin->actions,
                                                    pin->actions_len);
        const struct ofpact_unroll_xlate *unroll = NULL;
        uint8_t table_id = 0;
        ovs_be64 cookie = 0;

        const struct ofpact *a;
        for (a = pin->actions; ; a = ofpact_next(a)) {
            if (a == end || a->type == OFPACT_UNROLL_XLATE) {
                if (unroll) {
                    if (table_id != unroll->rule_table_id) {
                        ofpprop_put_u8(msg, NXCPT_TABLE_ID,
                                       unroll->rule_table_id);
                        table_id = unroll->rule_table_id;
                    }
                    if (cookie != unroll->rule_cookie) {
                        ofpprop_put_be64(msg, NXCPT_COOKIE,
                                         unroll->rule_cookie);
                        cookie = unroll->rule_cookie;
                    }
                }

                const struct ofpact *start = unroll ?
ofpact_next(&unroll->ofpact) : pin->actions; put_actions_property(msg, NXCPT_ACTIONS, version, start, (a - start) * sizeof *a); if (a == end) { break; } unroll = ofpact_get_UNROLL_XLATE(a); } } } if (pin->action_set_len) { size_t start = ofpprop_start_nested(msg, NXCPT_ACTION_SET); ofpacts_put_openflow_actions(pin->action_set, pin->action_set_len, msg, version); ofpprop_end(msg, start); } if (msg->size > inner_ofs) { ofpprop_end(msg, continuation_ofs); } else { msg->size = continuation_ofs; } } static struct ofpbuf * ofputil_encode_ofp10_packet_in(const struct ofputil_packet_in *pin) { struct ofp10_packet_in *opi; struct ofpbuf *msg; msg = ofpraw_alloc_xid(OFPRAW_OFPT10_PACKET_IN, OFP10_VERSION, htonl(0), pin->packet_len); opi = ofpbuf_put_zeros(msg, offsetof(struct ofp10_packet_in, data)); opi->total_len = htons(pin->packet_len); opi->in_port = htons(ofp_to_u16(pin->flow_metadata.flow.in_port.ofp_port)); opi->reason = encode_packet_in_reason(pin->reason, OFP10_VERSION); opi->buffer_id = htonl(UINT32_MAX); return msg; } static struct ofpbuf * ofputil_encode_nx_packet_in(const struct ofputil_packet_in *pin, enum ofp_version version) { struct nx_packet_in *npi; struct ofpbuf *msg; size_t match_len; /* The final argument is just an estimate of the space required. */ msg = ofpraw_alloc_xid(OFPRAW_NXT_PACKET_IN, version, htonl(0), NXM_TYPICAL_LEN + 2 + pin->packet_len); ofpbuf_put_zeros(msg, sizeof *npi); match_len = nx_put_match(msg, &pin->flow_metadata, 0, 0); ofpbuf_put_zeros(msg, 2); npi = msg->msg; npi->buffer_id = htonl(UINT32_MAX); npi->total_len = htons(pin->packet_len); npi->reason = encode_packet_in_reason(pin->reason, version); npi->table_id = pin->table_id; npi->cookie = pin->cookie; npi->match_len = htons(match_len); return msg; } static struct ofpbuf * ofputil_encode_nx_packet_in2(const struct ofputil_packet_in_private *pin, enum ofp_version version, size_t include_bytes) { /* 'extra' is just an estimate of the space required. */ size_t extra = (pin->public.packet_len + NXM_TYPICAL_LEN /* flow_metadata */ + pin->stack_size * 4 + pin->actions_len + pin->action_set_len + 256); /* fudge factor */ struct ofpbuf *msg = ofpraw_alloc_xid(OFPRAW_NXT_PACKET_IN2, version, htonl(0), extra); ofputil_put_packet_in_private(pin, version, include_bytes, msg); if (pin->public.userdata_len) { ofpprop_put(msg, NXPINT_USERDATA, pin->public.userdata, pin->public.userdata_len); } ofpmsg_update_length(msg); return msg; } static struct ofpbuf * ofputil_encode_ofp11_packet_in(const struct ofputil_packet_in *pin) { struct ofp11_packet_in *opi; struct ofpbuf *msg; msg = ofpraw_alloc_xid(OFPRAW_OFPT11_PACKET_IN, OFP11_VERSION, htonl(0), pin->packet_len); opi = ofpbuf_put_zeros(msg, sizeof *opi); opi->buffer_id = htonl(UINT32_MAX); opi->in_port = ofputil_port_to_ofp11( pin->flow_metadata.flow.in_port.ofp_port); opi->in_phy_port = opi->in_port; opi->total_len = htons(pin->packet_len); opi->reason = encode_packet_in_reason(pin->reason, OFP11_VERSION); opi->table_id = pin->table_id; return msg; } static struct ofpbuf * ofputil_encode_ofp12_packet_in(const struct ofputil_packet_in *pin, enum ofp_version version) { enum ofpraw raw = (version >= OFP13_VERSION ? OFPRAW_OFPT13_PACKET_IN : OFPRAW_OFPT12_PACKET_IN); struct ofpbuf *msg; /* The final argument is just an estimate of the space required. 
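     * If the estimate turns out to be low, the ofpbuf simply grows as data
     * is appended, so an undersized guess costs a reallocation, not
     * correctness.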
*/ msg = ofpraw_alloc_xid(raw, version, htonl(0), NXM_TYPICAL_LEN + 2 + pin->packet_len); struct ofp12_packet_in *opi = ofpbuf_put_zeros(msg, sizeof *opi); opi->buffer_id = htonl(UINT32_MAX); opi->total_len = htons(pin->packet_len); opi->reason = encode_packet_in_reason(pin->reason, version); opi->table_id = pin->table_id; if (version >= OFP13_VERSION) { ovs_be64 cookie = pin->cookie; ofpbuf_put(msg, &cookie, sizeof cookie); } oxm_put_match(msg, &pin->flow_metadata, version); ofpbuf_put_zeros(msg, 2); return msg; } /* Converts abstract ofputil_packet_in_private 'pin' into a PACKET_IN message * for 'protocol', using the packet-in format specified by 'packet_in_format'. * * This function is really meant only for use by ovs-vswitchd. To any other * code, the "continuation" data, i.e. the data that is in struct * ofputil_packet_in_private but not in struct ofputil_packet_in, is supposed * to be opaque (and it might change from one OVS version to another). Thus, * if any other code wants to encode a packet-in, it should use a non-"private" * version of this function. (Such a version doesn't currently exist because * only ovs-vswitchd currently wants to encode packet-ins. If you need one, * write it...) */ struct ofpbuf * ofputil_encode_packet_in_private(const struct ofputil_packet_in_private *pin, enum ofputil_protocol protocol, enum nx_packet_in_format packet_in_format) { enum ofp_version version = ofputil_protocol_to_ofp_version(protocol); struct ofpbuf *msg; switch (packet_in_format) { case NXPIF_STANDARD: switch (protocol) { case OFPUTIL_P_OF10_STD: case OFPUTIL_P_OF10_STD_TID: case OFPUTIL_P_OF10_NXM: case OFPUTIL_P_OF10_NXM_TID: msg = ofputil_encode_ofp10_packet_in(&pin->public); break; case OFPUTIL_P_OF11_STD: msg = ofputil_encode_ofp11_packet_in(&pin->public); break; case OFPUTIL_P_OF12_OXM: case OFPUTIL_P_OF13_OXM: case OFPUTIL_P_OF14_OXM: case OFPUTIL_P_OF15_OXM: case OFPUTIL_P_OF16_OXM: msg = ofputil_encode_ofp12_packet_in(&pin->public, version); break; default: OVS_NOT_REACHED(); } break; case NXPIF_NXT_PACKET_IN: msg = ofputil_encode_nx_packet_in(&pin->public, version); break; case NXPIF_NXT_PACKET_IN2: return ofputil_encode_nx_packet_in2(pin, version, pin->public.packet_len); default: OVS_NOT_REACHED(); } ofpbuf_put(msg, pin->public.packet, pin->public.packet_len); ofpmsg_update_length(msg); return msg; } /* Returns a string form of 'reason'. The return value is either a statically * allocated constant string or the 'bufsize'-byte buffer 'reasonbuf'. * 'bufsize' should be at least OFPUTIL_PACKET_IN_REASON_BUFSIZE. 
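 *
 * For example (an illustrative sketch):
 *
 *     char buf[OFPUTIL_PACKET_IN_REASON_BUFSIZE];
 *     const char *s = ofputil_packet_in_reason_to_string(OFPR_ACTION, buf,
 *                                                        sizeof buf);
 *
 * leaves 's' pointing at the constant string "action" without touching
 * 'buf'; an out-of-range reason would instead be formatted into 'buf'.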
 */
const char *
ofputil_packet_in_reason_to_string(enum ofp_packet_in_reason reason,
                                   char *reasonbuf, size_t bufsize)
{
    switch (reason) {
    case OFPR_NO_MATCH:
        return "no_match";
    case OFPR_ACTION:
        return "action";
    case OFPR_INVALID_TTL:
        return "invalid_ttl";
    case OFPR_ACTION_SET:
        return "action_set";
    case OFPR_GROUP:
        return "group";
    case OFPR_PACKET_OUT:
        return "packet_out";
    case OFPR_EXPLICIT_MISS:
    case OFPR_IMPLICIT_MISS:
        return "";

    case OFPR_N_REASONS:
    default:
        snprintf(reasonbuf, bufsize, "%d", (int) reason);
        return reasonbuf;
    }
}

bool
ofputil_packet_in_reason_from_string(const char *s,
                                     enum ofp_packet_in_reason *reason)
{
    int i;

    for (i = 0; i < OFPR_N_REASONS; i++) {
        char reasonbuf[OFPUTIL_PACKET_IN_REASON_BUFSIZE];
        const char *reason_s;

        reason_s = ofputil_packet_in_reason_to_string(i, reasonbuf,
                                                      sizeof reasonbuf);
        if (!strcasecmp(s, reason_s)) {
            *reason = i;
            return true;
        }
    }
    return false;
}

/* Returns a newly allocated NXT_RESUME message for 'pin', with the given
 * 'continuation', for 'protocol'.  This message is suitable for resuming the
 * pipeline traversal of the packet represented by 'pin', if sent to the
 * switch from which 'pin' was received. */
struct ofpbuf *
ofputil_encode_resume(const struct ofputil_packet_in *pin,
                      const struct ofpbuf *continuation,
                      enum ofputil_protocol protocol)
{
    enum ofp_version version = ofputil_protocol_to_ofp_version(protocol);
    size_t extra = pin->packet_len + NXM_TYPICAL_LEN + continuation->size;
    struct ofpbuf *msg = ofpraw_alloc_xid(OFPRAW_NXT_RESUME, version,
                                          0, extra);
    ofputil_put_packet_in(pin, version, pin->packet_len, msg);
    ofpprop_put_nested(msg, NXPINT_CONTINUATION, continuation);
    ofpmsg_update_length(msg);
    return msg;
}

static enum ofperr
parse_stack_prop(const struct ofpbuf *property, struct ofpbuf *stack)
{
    unsigned int len = ofpbuf_msgsize(property);
    if (len > sizeof(union mf_subvalue)) {
        VLOG_WARN_RL(&bad_ofmsg_rl, "NXCPT_STACK property has bad length %u",
                     len);
        return OFPERR_OFPBPC_BAD_LEN;
    }
    nx_stack_push_bottom(stack, property->msg, len);
    return 0;
}

static enum ofperr
parse_actions_property(struct ofpbuf *property, enum ofp_version version,
                       struct ofpbuf *ofpacts)
{
    if (!ofpbuf_try_pull(property,
                         ROUND_UP(ofpbuf_headersize(property), 8))) {
        VLOG_WARN_RL(&bad_ofmsg_rl,
                     "actions property has bad length %"PRIu32,
                     property->size);
        return OFPERR_OFPBPC_BAD_LEN;
    }

    return ofpacts_pull_openflow_actions(property, property->size,
                                         version, NULL, NULL, ofpacts);
}

/* This is like ofputil_decode_packet_in(), except that it decodes the
 * continuation data into 'pin'.  The format of this data is supposed to be
 * opaque to any process other than ovs-vswitchd, so this function should not
 * be used outside ovs-vswitchd.
 *
 * 'vl_mff_map' is an optional parameter that is used to validate the length
 * of variable length mf_fields in 'match'.  If it is not provided, the
 * default mf_fields with maximum length will be used.
 *
 * When successful, 'pin' contains some dynamically allocated data.  Call
 * ofputil_packet_in_private_destroy() to free this data.
*/ enum ofperr ofputil_decode_packet_in_private(const struct ofp_header *oh, bool loose, const struct tun_table *tun_table, const struct vl_mff_map *vl_mff_map, struct ofputil_packet_in_private *pin, size_t *total_len, uint32_t *buffer_id) { memset(pin, 0, sizeof *pin); struct ofpbuf continuation; enum ofperr error; error = ofputil_decode_packet_in(oh, loose, tun_table, vl_mff_map, &pin->public, total_len, buffer_id, &continuation); if (error) { return error; } struct ofpbuf actions, action_set; ofpbuf_init(&actions, 0); ofpbuf_init(&action_set, 0); uint8_t table_id = 0; ovs_be64 cookie = 0; struct ofpbuf stack; ofpbuf_init(&stack, 0); while (continuation.size > 0) { struct ofpbuf payload; uint64_t type; error = ofpprop_pull(&continuation, &payload, &type); if (error) { break; } switch (type) { case NXCPT_BRIDGE: error = ofpprop_parse_uuid(&payload, &pin->bridge); break; case NXCPT_STACK: error = parse_stack_prop(&payload, &stack); break; case NXCPT_MIRRORS: error = ofpprop_parse_u32(&payload, &pin->mirrors); break; case NXCPT_CONNTRACKED: pin->conntracked = true; break; case NXCPT_TABLE_ID: error = ofpprop_parse_u8(&payload, &table_id); break; case NXCPT_COOKIE: error = ofpprop_parse_be64(&payload, &cookie); break; case NXCPT_ACTIONS: { struct ofpact_unroll_xlate *unroll = ofpact_put_UNROLL_XLATE(&actions); unroll->rule_table_id = table_id; unroll->rule_cookie = cookie; error = parse_actions_property(&payload, oh->version, &actions); break; } case NXCPT_ACTION_SET: error = parse_actions_property(&payload, oh->version, &action_set); break; default: error = OFPPROP_UNKNOWN(loose, "continuation", type); break; } if (error) { break; } } pin->actions_len = actions.size; pin->actions = ofpbuf_steal_data(&actions); pin->action_set_len = action_set.size; pin->action_set = ofpbuf_steal_data(&action_set); pin->stack_size = stack.size; pin->stack = ofpbuf_steal_data(&stack); if (error) { ofputil_packet_in_private_destroy(pin); } return error; } /* Frees data in 'pin' that is dynamically allocated by * ofputil_decode_packet_in_private(). * * 'pin->public' contains some pointer members that * ofputil_decode_packet_in_private() doesn't initialize to newly allocated * data, so this function doesn't free those. */ void ofputil_packet_in_private_destroy(struct ofputil_packet_in_private *pin) { if (pin) { free(pin->stack); free(pin->actions); free(pin->action_set); } } /* Converts an OFPT_PACKET_OUT in 'opo' into an abstract ofputil_packet_out in * 'po'. * * Uses 'ofpacts' to store the abstract OFPACT_* version of the packet out * message's actions. The caller must initialize 'ofpacts' and retains * ownership of it. 'po->ofpacts' will point into the 'ofpacts' buffer. * * 'po->packet' refers to the packet data in 'oh', so the buffer containing * 'oh' must not be destroyed while 'po' is being used. * * Returns 0 if successful, otherwise an OFPERR_* value. 
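 *
 * A minimal usage sketch (illustrative; what is done with the decoded 'po'
 * is assumed):
 *
 *     struct ofputil_packet_out po;
 *     struct ofpbuf ofpacts;
 *     ofpbuf_init(&ofpacts, 0);
 *     enum ofperr error = ofputil_decode_packet_out(&po, oh, &ofpacts);
 *     (use 'po' here while 'ofpacts' and 'oh' are still live, then:)
 *     ofpbuf_uninit(&ofpacts);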
*/ enum ofperr ofputil_decode_packet_out(struct ofputil_packet_out *po, const struct ofp_header *oh, struct ofpbuf *ofpacts) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); ofpbuf_clear(ofpacts); if (raw == OFPRAW_OFPT11_PACKET_OUT) { enum ofperr error; const struct ofp11_packet_out *opo = ofpbuf_pull(&b, sizeof *opo); po->buffer_id = ntohl(opo->buffer_id); error = ofputil_port_from_ofp11(opo->in_port, &po->in_port); if (error) { return error; } error = ofpacts_pull_openflow_actions(&b, ntohs(opo->actions_len), oh->version, NULL, NULL, ofpacts); if (error) { return error; } } else if (raw == OFPRAW_OFPT10_PACKET_OUT) { enum ofperr error; const struct ofp10_packet_out *opo = ofpbuf_pull(&b, sizeof *opo); po->buffer_id = ntohl(opo->buffer_id); po->in_port = u16_to_ofp(ntohs(opo->in_port)); error = ofpacts_pull_openflow_actions(&b, ntohs(opo->actions_len), oh->version, NULL, NULL, ofpacts); if (error) { return error; } } else { OVS_NOT_REACHED(); } if (ofp_to_u16(po->in_port) >= ofp_to_u16(OFPP_MAX) && po->in_port != OFPP_LOCAL && po->in_port != OFPP_NONE && po->in_port != OFPP_CONTROLLER) { VLOG_WARN_RL(&bad_ofmsg_rl, "packet-out has bad input port %#"PRIx32, po->in_port); return OFPERR_OFPBRC_BAD_PORT; } po->ofpacts = ofpacts->data; po->ofpacts_len = ofpacts->size; if (po->buffer_id == UINT32_MAX) { po->packet = b.data; po->packet_len = b.size; } else { po->packet = NULL; po->packet_len = 0; } return 0; } /* ofputil_phy_port */ /* NETDEV_F_* to and from OFPPF_* and OFPPF10_*. */ BUILD_ASSERT_DECL((int) NETDEV_F_10MB_HD == OFPPF_10MB_HD); /* bit 0 */ BUILD_ASSERT_DECL((int) NETDEV_F_10MB_FD == OFPPF_10MB_FD); /* bit 1 */ BUILD_ASSERT_DECL((int) NETDEV_F_100MB_HD == OFPPF_100MB_HD); /* bit 2 */ BUILD_ASSERT_DECL((int) NETDEV_F_100MB_FD == OFPPF_100MB_FD); /* bit 3 */ BUILD_ASSERT_DECL((int) NETDEV_F_1GB_HD == OFPPF_1GB_HD); /* bit 4 */ BUILD_ASSERT_DECL((int) NETDEV_F_1GB_FD == OFPPF_1GB_FD); /* bit 5 */ BUILD_ASSERT_DECL((int) NETDEV_F_10GB_FD == OFPPF_10GB_FD); /* bit 6 */ /* NETDEV_F_ bits 11...15 are OFPPF10_ bits 7...11: */ BUILD_ASSERT_DECL((int) NETDEV_F_COPPER == (OFPPF10_COPPER << 4)); BUILD_ASSERT_DECL((int) NETDEV_F_FIBER == (OFPPF10_FIBER << 4)); BUILD_ASSERT_DECL((int) NETDEV_F_AUTONEG == (OFPPF10_AUTONEG << 4)); BUILD_ASSERT_DECL((int) NETDEV_F_PAUSE == (OFPPF10_PAUSE << 4)); BUILD_ASSERT_DECL((int) NETDEV_F_PAUSE_ASYM == (OFPPF10_PAUSE_ASYM << 4)); static enum netdev_features netdev_port_features_from_ofp10(ovs_be32 ofp10_) { uint32_t ofp10 = ntohl(ofp10_); return (ofp10 & 0x7f) | ((ofp10 & 0xf80) << 4); } static ovs_be32 netdev_port_features_to_ofp10(enum netdev_features features) { return htonl((features & 0x7f) | ((features & 0xf800) >> 4)); } BUILD_ASSERT_DECL((int) NETDEV_F_10MB_HD == OFPPF_10MB_HD); /* bit 0 */ BUILD_ASSERT_DECL((int) NETDEV_F_10MB_FD == OFPPF_10MB_FD); /* bit 1 */ BUILD_ASSERT_DECL((int) NETDEV_F_100MB_HD == OFPPF_100MB_HD); /* bit 2 */ BUILD_ASSERT_DECL((int) NETDEV_F_100MB_FD == OFPPF_100MB_FD); /* bit 3 */ BUILD_ASSERT_DECL((int) NETDEV_F_1GB_HD == OFPPF_1GB_HD); /* bit 4 */ BUILD_ASSERT_DECL((int) NETDEV_F_1GB_FD == OFPPF_1GB_FD); /* bit 5 */ BUILD_ASSERT_DECL((int) NETDEV_F_10GB_FD == OFPPF_10GB_FD); /* bit 6 */ BUILD_ASSERT_DECL((int) NETDEV_F_40GB_FD == OFPPF11_40GB_FD); /* bit 7 */ BUILD_ASSERT_DECL((int) NETDEV_F_100GB_FD == OFPPF11_100GB_FD); /* bit 8 */ BUILD_ASSERT_DECL((int) NETDEV_F_1TB_FD == OFPPF11_1TB_FD); /* bit 9 */ BUILD_ASSERT_DECL((int) NETDEV_F_OTHER == OFPPF11_OTHER); 
/* bit 10 */ BUILD_ASSERT_DECL((int) NETDEV_F_COPPER == OFPPF11_COPPER); /* bit 11 */ BUILD_ASSERT_DECL((int) NETDEV_F_FIBER == OFPPF11_FIBER); /* bit 12 */ BUILD_ASSERT_DECL((int) NETDEV_F_AUTONEG == OFPPF11_AUTONEG); /* bit 13 */ BUILD_ASSERT_DECL((int) NETDEV_F_PAUSE == OFPPF11_PAUSE); /* bit 14 */ BUILD_ASSERT_DECL((int) NETDEV_F_PAUSE_ASYM == OFPPF11_PAUSE_ASYM);/* bit 15 */ static enum netdev_features netdev_port_features_from_ofp11(ovs_be32 ofp11) { return ntohl(ofp11) & 0xffff; } static ovs_be32 netdev_port_features_to_ofp11(enum netdev_features features) { return htonl(features & 0xffff); } static enum ofperr ofputil_decode_ofp10_phy_port(struct ofputil_phy_port *pp, const struct ofp10_phy_port *opp) { pp->port_no = u16_to_ofp(ntohs(opp->port_no)); pp->hw_addr = opp->hw_addr; ovs_strlcpy(pp->name, opp->name, OFP_MAX_PORT_NAME_LEN); pp->config = ntohl(opp->config) & OFPPC10_ALL; pp->state = ntohl(opp->state) & OFPPS10_ALL; pp->curr = netdev_port_features_from_ofp10(opp->curr); pp->advertised = netdev_port_features_from_ofp10(opp->advertised); pp->supported = netdev_port_features_from_ofp10(opp->supported); pp->peer = netdev_port_features_from_ofp10(opp->peer); pp->curr_speed = netdev_features_to_bps(pp->curr, 0) / 1000; pp->max_speed = netdev_features_to_bps(pp->supported, 0) / 1000; return 0; } static enum ofperr ofputil_decode_ofp11_port(struct ofputil_phy_port *pp, const struct ofp11_port *op) { enum ofperr error; error = ofputil_port_from_ofp11(op->port_no, &pp->port_no); if (error) { return error; } pp->hw_addr = op->hw_addr; ovs_strlcpy(pp->name, op->name, OFP_MAX_PORT_NAME_LEN); pp->config = ntohl(op->config) & OFPPC11_ALL; pp->state = ntohl(op->state) & OFPPS11_ALL; pp->curr = netdev_port_features_from_ofp11(op->curr); pp->advertised = netdev_port_features_from_ofp11(op->advertised); pp->supported = netdev_port_features_from_ofp11(op->supported); pp->peer = netdev_port_features_from_ofp11(op->peer); pp->curr_speed = ntohl(op->curr_speed); pp->max_speed = ntohl(op->max_speed); return 0; } static enum ofperr parse_ofp14_port_ethernet_property(const struct ofpbuf *payload, struct ofputil_phy_port *pp) { struct ofp14_port_desc_prop_ethernet *eth = payload->data; if (payload->size != sizeof *eth) { return OFPERR_OFPBPC_BAD_LEN; } pp->curr = netdev_port_features_from_ofp11(eth->curr); pp->advertised = netdev_port_features_from_ofp11(eth->advertised); pp->supported = netdev_port_features_from_ofp11(eth->supported); pp->peer = netdev_port_features_from_ofp11(eth->peer); pp->curr_speed = ntohl(eth->curr_speed); pp->max_speed = ntohl(eth->max_speed); return 0; } static enum ofperr ofputil_pull_ofp14_port(struct ofputil_phy_port *pp, struct ofpbuf *msg) { struct ofp14_port *op = ofpbuf_try_pull(msg, sizeof *op); if (!op) { return OFPERR_OFPBRC_BAD_LEN; } size_t len = ntohs(op->length); if (len < sizeof *op || len - sizeof *op > msg->size) { return OFPERR_OFPBRC_BAD_LEN; } len -= sizeof *op; enum ofperr error = ofputil_port_from_ofp11(op->port_no, &pp->port_no); if (error) { return error; } pp->hw_addr = op->hw_addr; ovs_strlcpy(pp->name, op->name, OFP_MAX_PORT_NAME_LEN); pp->config = ntohl(op->config) & OFPPC11_ALL; pp->state = ntohl(op->state) & OFPPS11_ALL; struct ofpbuf properties = ofpbuf_const_initializer(ofpbuf_pull(msg, len), len); while (properties.size > 0) { struct ofpbuf payload; enum ofperr error; uint64_t type; error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case OFPPDPT14_ETHERNET: error = 
parse_ofp14_port_ethernet_property(&payload, pp); break; default: error = OFPPROP_UNKNOWN(true, "port", type); break; } if (error) { return error; } } return 0; } static void ofputil_encode_ofp10_phy_port(const struct ofputil_phy_port *pp, struct ofp10_phy_port *opp) { memset(opp, 0, sizeof *opp); opp->port_no = htons(ofp_to_u16(pp->port_no)); opp->hw_addr = pp->hw_addr; ovs_strlcpy(opp->name, pp->name, OFP_MAX_PORT_NAME_LEN); opp->config = htonl(pp->config & OFPPC10_ALL); opp->state = htonl(pp->state & OFPPS10_ALL); opp->curr = netdev_port_features_to_ofp10(pp->curr); opp->advertised = netdev_port_features_to_ofp10(pp->advertised); opp->supported = netdev_port_features_to_ofp10(pp->supported); opp->peer = netdev_port_features_to_ofp10(pp->peer); } static void ofputil_encode_ofp11_port(const struct ofputil_phy_port *pp, struct ofp11_port *op) { memset(op, 0, sizeof *op); op->port_no = ofputil_port_to_ofp11(pp->port_no); op->hw_addr = pp->hw_addr; ovs_strlcpy(op->name, pp->name, OFP_MAX_PORT_NAME_LEN); op->config = htonl(pp->config & OFPPC11_ALL); op->state = htonl(pp->state & OFPPS11_ALL); op->curr = netdev_port_features_to_ofp11(pp->curr); op->advertised = netdev_port_features_to_ofp11(pp->advertised); op->supported = netdev_port_features_to_ofp11(pp->supported); op->peer = netdev_port_features_to_ofp11(pp->peer); op->curr_speed = htonl(pp->curr_speed); op->max_speed = htonl(pp->max_speed); } static void ofputil_put_ofp14_port(const struct ofputil_phy_port *pp, struct ofpbuf *b) { struct ofp14_port *op; struct ofp14_port_desc_prop_ethernet *eth; ofpbuf_prealloc_tailroom(b, sizeof *op + sizeof *eth); op = ofpbuf_put_zeros(b, sizeof *op); op->port_no = ofputil_port_to_ofp11(pp->port_no); op->length = htons(sizeof *op + sizeof *eth); op->hw_addr = pp->hw_addr; ovs_strlcpy(op->name, pp->name, sizeof op->name); op->config = htonl(pp->config & OFPPC11_ALL); op->state = htonl(pp->state & OFPPS11_ALL); eth = ofpprop_put_zeros(b, OFPPDPT14_ETHERNET, sizeof *eth); eth->curr = netdev_port_features_to_ofp11(pp->curr); eth->advertised = netdev_port_features_to_ofp11(pp->advertised); eth->supported = netdev_port_features_to_ofp11(pp->supported); eth->peer = netdev_port_features_to_ofp11(pp->peer); eth->curr_speed = htonl(pp->curr_speed); eth->max_speed = htonl(pp->max_speed); } static void ofputil_put_phy_port(enum ofp_version ofp_version, const struct ofputil_phy_port *pp, struct ofpbuf *b) { switch (ofp_version) { case OFP10_VERSION: { struct ofp10_phy_port *opp = ofpbuf_put_uninit(b, sizeof *opp); ofputil_encode_ofp10_phy_port(pp, opp); break; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: { struct ofp11_port *op = ofpbuf_put_uninit(b, sizeof *op); ofputil_encode_ofp11_port(pp, op); break; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: ofputil_put_ofp14_port(pp, b); break; default: OVS_NOT_REACHED(); } } enum ofperr ofputil_decode_port_desc_stats_request(const struct ofp_header *request, ofp_port_t *port) { struct ofpbuf b = ofpbuf_const_initializer(request, ntohs(request->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPST10_PORT_DESC_REQUEST) { *port = OFPP_ANY; return 0; } else if (raw == OFPRAW_OFPST15_PORT_DESC_REQUEST) { ovs_be32 *ofp11_port; ofp11_port = ofpbuf_pull(&b, sizeof *ofp11_port); return ofputil_port_from_ofp11(*ofp11_port, port); } else { OVS_NOT_REACHED(); } } struct ofpbuf * ofputil_encode_port_desc_stats_request(enum ofp_version ofp_version, ofp_port_t port) { struct ofpbuf *request; switch (ofp_version) { case OFP10_VERSION: 
case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: request = ofpraw_alloc(OFPRAW_OFPST10_PORT_DESC_REQUEST, ofp_version, 0); break; case OFP15_VERSION: case OFP16_VERSION:{ struct ofp15_port_desc_request *req; request = ofpraw_alloc(OFPRAW_OFPST15_PORT_DESC_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->port_no = ofputil_port_to_ofp11(port); break; } default: OVS_NOT_REACHED(); } return request; } void ofputil_append_port_desc_stats_reply(const struct ofputil_phy_port *pp, struct ovs_list *replies) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); size_t start_ofs = reply->size; ofputil_put_phy_port(ofpmp_version(replies), pp, reply); ofpmp_postappend(replies, start_ofs); } /* ofputil_switch_config */ /* Decodes 'oh', which must be an OFPT_GET_CONFIG_REPLY or OFPT_SET_CONFIG * message, into 'config'. Returns false if 'oh' contained any flags that * aren't specified in its version of OpenFlow, true otherwise. */ static bool ofputil_decode_switch_config(const struct ofp_header *oh, struct ofputil_switch_config *config) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); const struct ofp_switch_config *osc = ofpbuf_pull(&b, sizeof *osc); config->frag = ntohs(osc->flags) & OFPC_FRAG_MASK; config->miss_send_len = ntohs(osc->miss_send_len); ovs_be16 valid_mask = htons(OFPC_FRAG_MASK); if (oh->version < OFP13_VERSION) { const ovs_be16 ttl_bit = htons(OFPC_INVALID_TTL_TO_CONTROLLER); valid_mask |= ttl_bit; config->invalid_ttl_to_controller = (osc->flags & ttl_bit) != 0; } else { config->invalid_ttl_to_controller = -1; } return !(osc->flags & ~valid_mask); } void ofputil_decode_get_config_reply(const struct ofp_header *oh, struct ofputil_switch_config *config) { ofputil_decode_switch_config(oh, config); } enum ofperr ofputil_decode_set_config(const struct ofp_header *oh, struct ofputil_switch_config *config) { return (ofputil_decode_switch_config(oh, config) ? 
0 : OFPERR_OFPSCFC_BAD_FLAGS); } static struct ofpbuf * ofputil_put_switch_config(const struct ofputil_switch_config *config, struct ofpbuf *b) { const struct ofp_header *oh = b->data; struct ofp_switch_config *osc = ofpbuf_put_zeros(b, sizeof *osc); osc->flags = htons(config->frag); if (config->invalid_ttl_to_controller > 0 && oh->version < OFP13_VERSION) { osc->flags |= htons(OFPC_INVALID_TTL_TO_CONTROLLER); } osc->miss_send_len = htons(config->miss_send_len); return b; } struct ofpbuf * ofputil_encode_get_config_reply(const struct ofp_header *request, const struct ofputil_switch_config *config) { struct ofpbuf *b = ofpraw_alloc_reply(OFPRAW_OFPT_GET_CONFIG_REPLY, request, 0); return ofputil_put_switch_config(config, b); } struct ofpbuf * ofputil_encode_set_config(const struct ofputil_switch_config *config, enum ofp_version version) { struct ofpbuf *b = ofpraw_alloc(OFPRAW_OFPT_SET_CONFIG, version, 0); return ofputil_put_switch_config(config, b); } /* ofputil_switch_features */ #define OFPC_COMMON (OFPC_FLOW_STATS | OFPC_TABLE_STATS | OFPC_PORT_STATS | \ OFPC_IP_REASM | OFPC_QUEUE_STATS) BUILD_ASSERT_DECL((int) OFPUTIL_C_FLOW_STATS == OFPC_FLOW_STATS); BUILD_ASSERT_DECL((int) OFPUTIL_C_TABLE_STATS == OFPC_TABLE_STATS); BUILD_ASSERT_DECL((int) OFPUTIL_C_PORT_STATS == OFPC_PORT_STATS); BUILD_ASSERT_DECL((int) OFPUTIL_C_IP_REASM == OFPC_IP_REASM); BUILD_ASSERT_DECL((int) OFPUTIL_C_QUEUE_STATS == OFPC_QUEUE_STATS); BUILD_ASSERT_DECL((int) OFPUTIL_C_ARP_MATCH_IP == OFPC_ARP_MATCH_IP); BUILD_ASSERT_DECL((int) OFPUTIL_C_PORT_BLOCKED == OFPC12_PORT_BLOCKED); BUILD_ASSERT_DECL((int) OFPUTIL_C_BUNDLES == OFPC14_BUNDLES); BUILD_ASSERT_DECL((int) OFPUTIL_C_FLOW_MONITORING == OFPC14_FLOW_MONITORING); static uint32_t ofputil_capabilities_mask(enum ofp_version ofp_version) { /* Handle capabilities whose bit is unique for all OpenFlow versions */ switch (ofp_version) { case OFP10_VERSION: case OFP11_VERSION: return OFPC_COMMON | OFPC_ARP_MATCH_IP; case OFP12_VERSION: case OFP13_VERSION: return OFPC_COMMON | OFPC12_PORT_BLOCKED; case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: return OFPC_COMMON | OFPC12_PORT_BLOCKED | OFPC14_BUNDLES | OFPC14_FLOW_MONITORING; default: /* Caller needs to check osf->header.version itself */ return 0; } } /* Pulls an OpenFlow "switch_features" structure from 'b' and decodes it into * an abstract representation in '*features', readying 'b' to iterate over the * OpenFlow port structures following 'osf' with later calls to * ofputil_pull_phy_port(). Returns 0 if successful, otherwise an OFPERR_* * value. 
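 *
 * A typical decode then looks like (an illustrative sketch):
 *
 *     struct ofputil_switch_features features;
 *     enum ofperr error = ofputil_pull_switch_features(b, &features);
 *     (on success, iterate the remaining port structures in 'b' with
 *     ofputil_pull_phy_port())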
 */
enum ofperr
ofputil_pull_switch_features(struct ofpbuf *b,
                             struct ofputil_switch_features *features)
{
    const struct ofp_header *oh = b->data;
    enum ofpraw raw = ofpraw_pull_assert(b);
    const struct ofp_switch_features *osf = ofpbuf_pull(b, sizeof *osf);

    features->datapath_id = ntohll(osf->datapath_id);
    features->n_buffers = ntohl(osf->n_buffers);
    features->n_tables = osf->n_tables;
    features->auxiliary_id = 0;

    features->capabilities = ntohl(osf->capabilities) &
        ofputil_capabilities_mask(oh->version);

    if (raw == OFPRAW_OFPT10_FEATURES_REPLY) {
        if (osf->capabilities & htonl(OFPC10_STP)) {
            features->capabilities |= OFPUTIL_C_STP;
        }
        features->ofpacts = ofpact_bitmap_from_openflow(osf->actions,
                                                        OFP10_VERSION);
    } else if (raw == OFPRAW_OFPT11_FEATURES_REPLY
               || raw == OFPRAW_OFPT13_FEATURES_REPLY) {
        if (osf->capabilities & htonl(OFPC11_GROUP_STATS)) {
            features->capabilities |= OFPUTIL_C_GROUP_STATS;
        }
        features->ofpacts = 0;
        if (raw == OFPRAW_OFPT13_FEATURES_REPLY) {
            features->auxiliary_id = osf->auxiliary_id;
        }
    } else {
        return OFPERR_OFPBRC_BAD_VERSION;
    }

    return 0;
}

/* In OpenFlow 1.0, 1.1, and 1.2, an OFPT_FEATURES_REPLY message lists all the
 * switch's ports, unless there are too many to fit.  In OpenFlow 1.3 and
 * later, an OFPT_FEATURES_REPLY does not list ports at all.
 *
 * Given a buffer 'b' that contains a Features Reply message, this function
 * checks whether it contains a complete list of the switch's ports.  Returns
 * true if so.  Returns false if the list is missing (OF1.3+) or incomplete
 * (OF1.0/1.1/1.2), and in the latter case removes all of the ports from the
 * message.
 *
 * When this function returns false, the caller should send an OFPST_PORT_DESC
 * stats request to get the ports. */
bool
ofputil_switch_features_has_ports(struct ofpbuf *b)
{
    struct ofp_header *oh = b->data;
    size_t phy_port_size;

    if (oh->version >= OFP13_VERSION) {
        /* OpenFlow 1.3+ never has ports in the feature reply. */
        return false;
    }

    phy_port_size = (oh->version == OFP10_VERSION
                     ? sizeof(struct ofp10_phy_port)
                     : sizeof(struct ofp11_port));
    if (ntohs(oh->length) + phy_port_size <= UINT16_MAX) {
        /* There's room for additional ports in the feature reply.
         * Assume that the list is complete. */
        return true;
    }

    /* The feature reply has no room for more ports.  Probably the list is
     * truncated.  Drop the ports and tell the caller to retrieve them with
     * OFPST_PORT_DESC. */
    b->size = sizeof *oh + sizeof(struct ofp_switch_features);
    ofpmsg_update_length(b);
    return false;
}

/* Returns a buffer owned by the caller that encodes 'features' in the format
 * required by 'protocol' with the given 'xid'.  The caller should append port
 * information to the buffer with subsequent calls to
 * ofputil_put_switch_features_port().
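 *
 * Example (an illustrative sketch; 'features', 'ports', 'n_ports',
 * 'protocol' and 'xid' are assumed to come from the caller):
 *
 *     struct ofpbuf *b = ofputil_encode_switch_features(&features, protocol,
 *                                                       xid);
 *     for (size_t i = 0; i < n_ports; i++) {
 *         ofputil_put_switch_features_port(&ports[i], b);
 *     }
 *     ...send 'b'...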
*/ struct ofpbuf * ofputil_encode_switch_features(const struct ofputil_switch_features *features, enum ofputil_protocol protocol, ovs_be32 xid) { struct ofp_switch_features *osf; struct ofpbuf *b; enum ofp_version version; enum ofpraw raw; version = ofputil_protocol_to_ofp_version(protocol); switch (version) { case OFP10_VERSION: raw = OFPRAW_OFPT10_FEATURES_REPLY; break; case OFP11_VERSION: case OFP12_VERSION: raw = OFPRAW_OFPT11_FEATURES_REPLY; break; case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: raw = OFPRAW_OFPT13_FEATURES_REPLY; break; default: OVS_NOT_REACHED(); } b = ofpraw_alloc_xid(raw, version, xid, 0); osf = ofpbuf_put_zeros(b, sizeof *osf); osf->datapath_id = htonll(features->datapath_id); osf->n_buffers = htonl(features->n_buffers); osf->n_tables = features->n_tables; osf->capabilities = htonl(features->capabilities & ofputil_capabilities_mask(version)); switch (version) { case OFP10_VERSION: if (features->capabilities & OFPUTIL_C_STP) { osf->capabilities |= htonl(OFPC10_STP); } osf->actions = ofpact_bitmap_to_openflow(features->ofpacts, OFP10_VERSION); break; case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: osf->auxiliary_id = features->auxiliary_id; /* fall through */ case OFP11_VERSION: case OFP12_VERSION: if (features->capabilities & OFPUTIL_C_GROUP_STATS) { osf->capabilities |= htonl(OFPC11_GROUP_STATS); } break; default: OVS_NOT_REACHED(); } return b; } /* Encodes 'pp' into the format required by the switch_features message already * in 'b', which should have been returned by ofputil_encode_switch_features(), * and appends the encoded version to 'b'. */ void ofputil_put_switch_features_port(const struct ofputil_phy_port *pp, struct ofpbuf *b) { const struct ofp_header *oh = b->data; if (oh->version < OFP13_VERSION) { /* Try adding a port description to the message, but drop it again if * the buffer overflows. (This possibility for overflow is why * OpenFlow 1.3+ moved port descriptions into a multipart message.) */ size_t start_ofs = b->size; ofputil_put_phy_port(oh->version, pp, b); if (b->size > UINT16_MAX) { b->size = start_ofs; } } } /* ofputil_port_status */ /* Decodes the OpenFlow "port status" message in '*ops' into an abstract form * in '*ps'. Returns 0 if successful, otherwise an OFPERR_* value. */ enum ofperr ofputil_decode_port_status(const struct ofp_header *oh, struct ofputil_port_status *ps) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); const struct ofp_port_status *ops = ofpbuf_pull(&b, sizeof *ops); if (ops->reason != OFPPR_ADD && ops->reason != OFPPR_DELETE && ops->reason != OFPPR_MODIFY) { return OFPERR_NXBRC_BAD_REASON; } ps->reason = ops->reason; int retval = ofputil_pull_phy_port(oh->version, &b, &ps->desc); ovs_assert(retval != EOF); return retval; } /* Converts the abstract form of a "port status" message in '*ps' into an * OpenFlow message suitable for 'protocol', and returns that encoded form in * a buffer owned by the caller. 
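 *
 * Example (an illustrative sketch; 'desc' and 'protocol' are assumed to be
 * supplied by the caller):
 *
 *     struct ofputil_port_status ps = { .reason = OFPPR_MODIFY,
 *                                       .desc = desc };
 *     struct ofpbuf *msg = ofputil_encode_port_status(&ps, protocol);
 *     ...send 'msg' to each controller that asked for port status...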
*/ struct ofpbuf * ofputil_encode_port_status(const struct ofputil_port_status *ps, enum ofputil_protocol protocol) { struct ofp_port_status *ops; struct ofpbuf *b; enum ofp_version version; enum ofpraw raw; version = ofputil_protocol_to_ofp_version(protocol); switch (version) { case OFP10_VERSION: raw = OFPRAW_OFPT10_PORT_STATUS; break; case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: raw = OFPRAW_OFPT11_PORT_STATUS; break; case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: raw = OFPRAW_OFPT14_PORT_STATUS; break; default: OVS_NOT_REACHED(); } b = ofpraw_alloc_xid(raw, version, htonl(0), 0); ops = ofpbuf_put_zeros(b, sizeof *ops); ops->reason = ps->reason; ofputil_put_phy_port(version, &ps->desc, b); ofpmsg_update_length(b); return b; } /* ofputil_port_mod */ static enum ofperr parse_port_mod_ethernet_property(struct ofpbuf *property, struct ofputil_port_mod *pm) { ovs_be32 advertise; enum ofperr error; error = ofpprop_parse_be32(property, &advertise); if (!error) { pm->advertise = netdev_port_features_from_ofp11(advertise); } return error; } /* Decodes the OpenFlow "port mod" message in '*oh' into an abstract form in * '*pm'. Returns 0 if successful, otherwise an OFPERR_* value. */ enum ofperr ofputil_decode_port_mod(const struct ofp_header *oh, struct ofputil_port_mod *pm, bool loose) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT10_PORT_MOD) { const struct ofp10_port_mod *opm = b.data; pm->port_no = u16_to_ofp(ntohs(opm->port_no)); pm->hw_addr = opm->hw_addr; pm->config = ntohl(opm->config) & OFPPC10_ALL; pm->mask = ntohl(opm->mask) & OFPPC10_ALL; pm->advertise = netdev_port_features_from_ofp10(opm->advertise); } else if (raw == OFPRAW_OFPT11_PORT_MOD) { const struct ofp11_port_mod *opm = b.data; enum ofperr error; error = ofputil_port_from_ofp11(opm->port_no, &pm->port_no); if (error) { return error; } pm->hw_addr = opm->hw_addr; pm->config = ntohl(opm->config) & OFPPC11_ALL; pm->mask = ntohl(opm->mask) & OFPPC11_ALL; pm->advertise = netdev_port_features_from_ofp11(opm->advertise); } else if (raw == OFPRAW_OFPT14_PORT_MOD) { const struct ofp14_port_mod *opm = ofpbuf_pull(&b, sizeof *opm); enum ofperr error; memset(pm, 0, sizeof *pm); error = ofputil_port_from_ofp11(opm->port_no, &pm->port_no); if (error) { return error; } pm->hw_addr = opm->hw_addr; pm->config = ntohl(opm->config) & OFPPC11_ALL; pm->mask = ntohl(opm->mask) & OFPPC11_ALL; while (b.size > 0) { struct ofpbuf property; enum ofperr error; uint64_t type; error = ofpprop_pull(&b, &property, &type); if (error) { return error; } switch (type) { case OFPPMPT14_ETHERNET: error = parse_port_mod_ethernet_property(&property, pm); break; default: error = OFPPROP_UNKNOWN(loose, "port_mod", type); break; } if (error) { return error; } } } else { return OFPERR_OFPBRC_BAD_TYPE; } pm->config &= pm->mask; return 0; } /* Converts the abstract form of a "port mod" message in '*pm' into an OpenFlow * message suitable for 'protocol', and returns that encoded form in a buffer * owned by the caller. 
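 *
 * Example (an illustrative sketch that administratively brings a port down;
 * 'port_no', 'hw_addr' and 'protocol' are assumed to come from the caller):
 *
 *     struct ofputil_port_mod pm = { .port_no = port_no,
 *                                    .hw_addr = hw_addr,
 *                                    .config = OFPUTIL_PC_PORT_DOWN,
 *                                    .mask = OFPUTIL_PC_PORT_DOWN };
 *     struct ofpbuf *msg = ofputil_encode_port_mod(&pm, protocol);
 *     ...send 'msg'...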
*/ struct ofpbuf * ofputil_encode_port_mod(const struct ofputil_port_mod *pm, enum ofputil_protocol protocol) { enum ofp_version ofp_version = ofputil_protocol_to_ofp_version(protocol); struct ofpbuf *b; switch (ofp_version) { case OFP10_VERSION: { struct ofp10_port_mod *opm; b = ofpraw_alloc(OFPRAW_OFPT10_PORT_MOD, ofp_version, 0); opm = ofpbuf_put_zeros(b, sizeof *opm); opm->port_no = htons(ofp_to_u16(pm->port_no)); opm->hw_addr = pm->hw_addr; opm->config = htonl(pm->config & OFPPC10_ALL); opm->mask = htonl(pm->mask & OFPPC10_ALL); opm->advertise = netdev_port_features_to_ofp10(pm->advertise); break; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: { struct ofp11_port_mod *opm; b = ofpraw_alloc(OFPRAW_OFPT11_PORT_MOD, ofp_version, 0); opm = ofpbuf_put_zeros(b, sizeof *opm); opm->port_no = ofputil_port_to_ofp11(pm->port_no); opm->hw_addr = pm->hw_addr; opm->config = htonl(pm->config & OFPPC11_ALL); opm->mask = htonl(pm->mask & OFPPC11_ALL); opm->advertise = netdev_port_features_to_ofp11(pm->advertise); break; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp14_port_mod *opm; b = ofpraw_alloc(OFPRAW_OFPT14_PORT_MOD, ofp_version, 0); opm = ofpbuf_put_zeros(b, sizeof *opm); opm->port_no = ofputil_port_to_ofp11(pm->port_no); opm->hw_addr = pm->hw_addr; opm->config = htonl(pm->config & OFPPC11_ALL); opm->mask = htonl(pm->mask & OFPPC11_ALL); if (pm->advertise) { ofpprop_put_be32(b, OFPPMPT14_ETHERNET, netdev_port_features_to_ofp11(pm->advertise)); } break; } default: OVS_NOT_REACHED(); } return b; } /* Table features. */ static enum ofperr pull_table_feature_property(struct ofpbuf *msg, struct ofpbuf *payload, uint64_t *typep) { enum ofperr error; error = ofpprop_pull(msg, payload, typep); if (payload && !error) { ofpbuf_pull(payload, (char *)payload->msg - (char *)payload->header); } return error; } static enum ofperr parse_action_bitmap(struct ofpbuf *payload, enum ofp_version ofp_version, uint64_t *ofpacts) { uint32_t types = 0; while (payload->size > 0) { enum ofperr error; uint64_t type; error = ofpprop_pull__(payload, NULL, 1, 0x10000, &type); if (error) { return error; } if (type < CHAR_BIT * sizeof types) { types |= 1u << type; } } *ofpacts = ofpact_bitmap_from_openflow(htonl(types), ofp_version); return 0; } static enum ofperr parse_instruction_ids(struct ofpbuf *payload, bool loose, uint32_t *insts) { *insts = 0; while (payload->size > 0) { enum ovs_instruction_type inst; enum ofperr error; uint64_t ofpit; /* OF1.3 and OF1.4 aren't clear about padding in the instruction IDs. * It seems clear that they aren't padded to 8 bytes, though, because * both standards say that "non-experimenter instructions are 4 bytes" * and do not mention any padding before the first instruction ID. * (There wouldn't be any point in padding to 8 bytes if the IDs were * aligned on an odd 4-byte boundary.) * * Anyway, we just assume they're all glommed together on byte * boundaries. 
*/ error = ofpprop_pull__(payload, NULL, 1, 0x10000, &ofpit); if (error) { return error; } error = ovs_instruction_type_from_inst_type(&inst, ofpit); if (!error) { *insts |= 1u << inst; } else if (!loose) { return error; } } return 0; } static enum ofperr parse_table_features_next_table(struct ofpbuf *payload, unsigned long int *next_tables) { size_t i; memset(next_tables, 0, bitmap_n_bytes(255)); for (i = 0; i < payload->size; i++) { uint8_t id = ((const uint8_t *) payload->data)[i]; if (id >= 255) { return OFPERR_OFPBPC_BAD_VALUE; } bitmap_set1(next_tables, id); } return 0; } static enum ofperr parse_oxms(struct ofpbuf *payload, bool loose, struct mf_bitmap *exactp, struct mf_bitmap *maskedp) { struct mf_bitmap exact = MF_BITMAP_INITIALIZER; struct mf_bitmap masked = MF_BITMAP_INITIALIZER; while (payload->size > 0) { const struct mf_field *field; enum ofperr error; bool hasmask; error = nx_pull_header(payload, NULL, &field, &hasmask); if (!error) { bitmap_set1(hasmask ? masked.bm : exact.bm, field->id); } else if (error != OFPERR_OFPBMC_BAD_FIELD || !loose) { return error; } } if (exactp) { *exactp = exact; } else if (!bitmap_is_all_zeros(exact.bm, MFF_N_IDS)) { return OFPERR_OFPBMC_BAD_MASK; } if (maskedp) { *maskedp = masked; } else if (!bitmap_is_all_zeros(masked.bm, MFF_N_IDS)) { return OFPERR_OFPBMC_BAD_MASK; } return 0; } /* Converts an OFPMP_TABLE_FEATURES request or reply in 'msg' into an abstract * ofputil_table_features in 'tf'. * * If 'loose' is true, this function ignores properties and values that it does * not understand, as a controller would want to do when interpreting * capabilities provided by a switch. If 'loose' is false, this function * treats unknown properties and values as an error, as a switch would want to * do when interpreting a configuration request made by a controller. * * A single OpenFlow message can specify features for multiple tables. Calling * this function multiple times for a single 'msg' iterates through the tables * in the message. The caller must initially leave 'msg''s layer pointers null * and not modify them between calls. * * Returns 0 if successful, EOF if no tables were left in this 'msg', otherwise * a positive "enum ofperr" value. 
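 *
 * Example (an illustrative sketch of the iteration pattern; 'msg' is assumed
 * to hold a received reply whose layer pointers are still null):
 *
 *     struct ofputil_table_features tf;
 *     int retval;
 *
 *     while (!(retval = ofputil_decode_table_features(msg, &tf, true))) {
 *         ...use 'tf'...
 *     }
 *     if (retval != EOF) {
 *         ...handle the OFPERR_* value in 'retval'...
 *     }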
*/ int ofputil_decode_table_features(struct ofpbuf *msg, struct ofputil_table_features *tf, bool loose) { memset(tf, 0, sizeof *tf); if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } const struct ofp_header *oh = msg->header; struct ofp13_table_features *otf = msg->data; if (msg->size < sizeof *otf) { return OFPERR_OFPBPC_BAD_LEN; } unsigned int len = ntohs(otf->length); if (len < sizeof *otf || len % 8 || len > msg->size) { return OFPERR_OFPBPC_BAD_LEN; } tf->table_id = otf->table_id; if (tf->table_id == OFPTT_ALL) { return OFPERR_OFPTFFC_BAD_TABLE; } ovs_strlcpy(tf->name, otf->name, OFP_MAX_TABLE_NAME_LEN); tf->metadata_match = otf->metadata_match; tf->metadata_write = otf->metadata_write; tf->miss_config = OFPUTIL_TABLE_MISS_DEFAULT; if (oh->version >= OFP14_VERSION) { uint32_t caps = ntohl(otf->capabilities); tf->supports_eviction = (caps & OFPTC14_EVICTION) != 0; tf->supports_vacancy_events = (caps & OFPTC14_VACANCY_EVENTS) != 0; } else { tf->supports_eviction = -1; tf->supports_vacancy_events = -1; } tf->max_entries = ntohl(otf->max_entries); struct ofpbuf properties = ofpbuf_const_initializer(ofpbuf_pull(msg, len), len); ofpbuf_pull(&properties, sizeof *otf); while (properties.size > 0) { struct ofpbuf payload; enum ofperr error; uint64_t type; error = pull_table_feature_property(&properties, &payload, &type); if (error) { return error; } switch ((enum ofp13_table_feature_prop_type) type) { case OFPTFPT13_INSTRUCTIONS: error = parse_instruction_ids(&payload, loose, &tf->nonmiss.instructions); break; case OFPTFPT13_INSTRUCTIONS_MISS: error = parse_instruction_ids(&payload, loose, &tf->miss.instructions); break; case OFPTFPT13_NEXT_TABLES: error = parse_table_features_next_table(&payload, tf->nonmiss.next); break; case OFPTFPT13_NEXT_TABLES_MISS: error = parse_table_features_next_table(&payload, tf->miss.next); break; case OFPTFPT13_WRITE_ACTIONS: error = parse_action_bitmap(&payload, oh->version, &tf->nonmiss.write.ofpacts); break; case OFPTFPT13_WRITE_ACTIONS_MISS: error = parse_action_bitmap(&payload, oh->version, &tf->miss.write.ofpacts); break; case OFPTFPT13_APPLY_ACTIONS: error = parse_action_bitmap(&payload, oh->version, &tf->nonmiss.apply.ofpacts); break; case OFPTFPT13_APPLY_ACTIONS_MISS: error = parse_action_bitmap(&payload, oh->version, &tf->miss.apply.ofpacts); break; case OFPTFPT13_MATCH: error = parse_oxms(&payload, loose, &tf->match, &tf->mask); break; case OFPTFPT13_WILDCARDS: error = parse_oxms(&payload, loose, &tf->wildcard, NULL); break; case OFPTFPT13_WRITE_SETFIELD: error = parse_oxms(&payload, loose, &tf->nonmiss.write.set_fields, NULL); break; case OFPTFPT13_WRITE_SETFIELD_MISS: error = parse_oxms(&payload, loose, &tf->miss.write.set_fields, NULL); break; case OFPTFPT13_APPLY_SETFIELD: error = parse_oxms(&payload, loose, &tf->nonmiss.apply.set_fields, NULL); break; case OFPTFPT13_APPLY_SETFIELD_MISS: error = parse_oxms(&payload, loose, &tf->miss.apply.set_fields, NULL); break; case OFPTFPT13_EXPERIMENTER: case OFPTFPT13_EXPERIMENTER_MISS: default: error = OFPPROP_UNKNOWN(loose, "table features", type); break; } if (error) { return error; } } /* Fix inconsistencies: * * - Turn on 'match' bits that are set in 'mask', because maskable * fields are matchable. * * - Turn on 'wildcard' bits that are set in 'mask', because a field * that is arbitrarily maskable can be wildcarded entirely. * * - Turn off 'wildcard' bits that are not in 'match', because a field * must be matchable for it to be meaningfully wildcarded. 
*/ bitmap_or(tf->match.bm, tf->mask.bm, MFF_N_IDS); bitmap_or(tf->wildcard.bm, tf->mask.bm, MFF_N_IDS); bitmap_and(tf->wildcard.bm, tf->match.bm, MFF_N_IDS); return 0; } /* Encodes and returns a request to obtain the table features of a switch. * The message is encoded for OpenFlow version 'ofp_version'. */ struct ofpbuf * ofputil_encode_table_features_request(enum ofp_version ofp_version) { struct ofpbuf *request = NULL; switch (ofp_version) { case OFP10_VERSION: case OFP11_VERSION: case OFP12_VERSION: ovs_fatal(0, "dump-table-features needs OpenFlow 1.3 or later " "(\'-O OpenFlow13\')"); case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: request = ofpraw_alloc(OFPRAW_OFPST13_TABLE_FEATURES_REQUEST, ofp_version, 0); break; default: OVS_NOT_REACHED(); } return request; } static void put_fields_property(struct ofpbuf *reply, const struct mf_bitmap *fields, const struct mf_bitmap *masks, enum ofp13_table_feature_prop_type property, enum ofp_version version) { size_t start_ofs; int field; start_ofs = ofpprop_start(reply, property); BITMAP_FOR_EACH_1 (field, MFF_N_IDS, fields->bm) { nx_put_header(reply, field, version, masks && bitmap_is_set(masks->bm, field)); } ofpprop_end(reply, start_ofs); } static void put_table_action_features(struct ofpbuf *reply, const struct ofputil_table_action_features *taf, enum ofp13_table_feature_prop_type actions_type, enum ofp13_table_feature_prop_type set_fields_type, int miss_offset, enum ofp_version version) { ofpprop_put_bitmap(reply, actions_type + miss_offset, ntohl(ofpact_bitmap_to_openflow(taf->ofpacts, version))); put_fields_property(reply, &taf->set_fields, NULL, set_fields_type + miss_offset, version); } static void put_table_instruction_features( struct ofpbuf *reply, const struct ofputil_table_instruction_features *tif, int miss_offset, enum ofp_version version) { size_t start_ofs; uint8_t table_id; ofpprop_put_bitmap(reply, OFPTFPT13_INSTRUCTIONS + miss_offset, ntohl(ovsinst_bitmap_to_openflow(tif->instructions, version))); start_ofs = ofpprop_start(reply, OFPTFPT13_NEXT_TABLES + miss_offset); BITMAP_FOR_EACH_1 (table_id, 255, tif->next) { ofpbuf_put(reply, &table_id, 1); } ofpprop_end(reply, start_ofs); put_table_action_features(reply, &tif->write, OFPTFPT13_WRITE_ACTIONS, OFPTFPT13_WRITE_SETFIELD, miss_offset, version); put_table_action_features(reply, &tif->apply, OFPTFPT13_APPLY_ACTIONS, OFPTFPT13_APPLY_SETFIELD, miss_offset, version); } void ofputil_append_table_features_reply(const struct ofputil_table_features *tf, struct ovs_list *replies) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); enum ofp_version version = ofpmp_version(replies); size_t start_ofs = reply->size; struct ofp13_table_features *otf; otf = ofpbuf_put_zeros(reply, sizeof *otf); otf->table_id = tf->table_id; ovs_strlcpy(otf->name, tf->name, sizeof otf->name); otf->metadata_match = tf->metadata_match; otf->metadata_write = tf->metadata_write; if (version >= OFP14_VERSION) { if (tf->supports_eviction) { otf->capabilities |= htonl(OFPTC14_EVICTION); } if (tf->supports_vacancy_events) { otf->capabilities |= htonl(OFPTC14_VACANCY_EVENTS); } } otf->max_entries = htonl(tf->max_entries); put_table_instruction_features(reply, &tf->nonmiss, 0, version); put_table_instruction_features(reply, &tf->miss, 1, version); put_fields_property(reply, &tf->match, &tf->mask, OFPTFPT13_MATCH, version); put_fields_property(reply, &tf->wildcard, NULL, OFPTFPT13_WILDCARDS, version); otf = ofpbuf_at_assert(reply, start_ofs, sizeof *otf); otf->length = 
htons(reply->size - start_ofs); ofpmp_postappend(replies, start_ofs); } static enum ofperr parse_table_desc_vacancy_property(struct ofpbuf *property, struct ofputil_table_desc *td) { struct ofp14_table_mod_prop_vacancy *otv = property->data; if (property->size != sizeof *otv) { return OFPERR_OFPBPC_BAD_LEN; } td->table_vacancy.vacancy_down = otv->vacancy_down; td->table_vacancy.vacancy_up = otv->vacancy_up; td->table_vacancy.vacancy = otv->vacancy; return 0; } /* Decodes the next OpenFlow "table desc" message (of possibly several) from * 'msg' into an abstract form in '*td'. Returns 0 if successful, EOF if the * last "table desc" in 'msg' was already decoded, otherwise an OFPERR_* * value. */ int ofputil_decode_table_desc(struct ofpbuf *msg, struct ofputil_table_desc *td, enum ofp_version version) { memset(td, 0, sizeof *td); if (!msg->header) { ofpraw_pull_assert(msg); } if (!msg->size) { return EOF; } struct ofp14_table_desc *otd = ofpbuf_try_pull(msg, sizeof *otd); if (!otd) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFP14_TABLE_DESC reply has %"PRIu32" " "leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } td->table_id = otd->table_id; size_t length = ntohs(otd->length); if (length < sizeof *otd || length - sizeof *otd > msg->size) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFP14_TABLE_DESC reply claims invalid " "length %"PRIuSIZE, length); return OFPERR_OFPBRC_BAD_LEN; } length -= sizeof *otd; td->eviction = ofputil_decode_table_eviction(otd->config, version); td->vacancy = ofputil_decode_table_vacancy(otd->config, version); td->eviction_flags = UINT32_MAX; struct ofpbuf properties = ofpbuf_const_initializer( ofpbuf_pull(msg, length), length); while (properties.size > 0) { struct ofpbuf payload; enum ofperr error; uint64_t type; error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case OFPTMPT14_EVICTION: error = ofpprop_parse_u32(&payload, &td->eviction_flags); break; case OFPTMPT14_VACANCY: error = parse_table_desc_vacancy_property(&payload, td); break; default: error = OFPPROP_UNKNOWN(true, "table_desc", type); break; } if (error) { return error; } } return 0; } /* Encodes and returns a request to obtain description of tables of a switch. * The message is encoded for OpenFlow version 'ofp_version'. */ struct ofpbuf * ofputil_encode_table_desc_request(enum ofp_version ofp_version) { struct ofpbuf *request = NULL; if (ofp_version >= OFP14_VERSION) { request = ofpraw_alloc(OFPRAW_OFPST14_TABLE_DESC_REQUEST, ofp_version, 0); } else { ovs_fatal(0, "dump-table-desc needs OpenFlow 1.4 or later " "(\'-O OpenFlow14\')"); } return request; } /* Function to append Table desc information in a reply list. 
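 *
 * Example (an illustrative sketch; 'request' is assumed to be the received
 * table desc request and 'td' a filled-in description):
 *
 *     struct ovs_list replies;
 *     ofpmp_init(&replies, request);
 *     ofputil_append_table_desc_reply(&td, &replies, request->version);
 *     ...send every message on 'replies'...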
*/ void ofputil_append_table_desc_reply(const struct ofputil_table_desc *td, struct ovs_list *replies, enum ofp_version version) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); size_t start_otd; struct ofp14_table_desc *otd; start_otd = reply->size; ofpbuf_put_zeros(reply, sizeof *otd); if (td->eviction_flags != UINT32_MAX) { ofpprop_put_u32(reply, OFPTMPT14_EVICTION, td->eviction_flags); } if (td->vacancy == OFPUTIL_TABLE_VACANCY_ON) { struct ofp14_table_mod_prop_vacancy *otv; otv = ofpprop_put_zeros(reply, OFPTMPT14_VACANCY, sizeof *otv); otv->vacancy_down = td->table_vacancy.vacancy_down; otv->vacancy_up = td->table_vacancy.vacancy_up; otv->vacancy = td->table_vacancy.vacancy; } otd = ofpbuf_at_assert(reply, start_otd, sizeof *otd); otd->length = htons(reply->size - start_otd); otd->table_id = td->table_id; otd->config = ofputil_encode_table_config(OFPUTIL_TABLE_MISS_DEFAULT, td->eviction, td->vacancy, version); ofpmp_postappend(replies, start_otd); } /* This function parses Vacancy property, and decodes the * ofp14_table_mod_prop_vacancy in ofputil_table_mod. * Returns OFPERR_OFPBPC_BAD_VALUE error code when vacancy_down is * greater than vacancy_up and also when current vacancy has non-zero * value. Returns 0 on success. */ static enum ofperr parse_table_mod_vacancy_property(struct ofpbuf *property, struct ofputil_table_mod *tm) { struct ofp14_table_mod_prop_vacancy *otv = property->data; if (property->size != sizeof *otv) { return OFPERR_OFPBPC_BAD_LEN; } tm->table_vacancy.vacancy_down = otv->vacancy_down; tm->table_vacancy.vacancy_up = otv->vacancy_up; if (tm->table_vacancy.vacancy_down > tm->table_vacancy.vacancy_up) { OFPPROP_LOG(&bad_ofmsg_rl, false, "Value of vacancy_down is greater than vacancy_up"); return OFPERR_OFPBPC_BAD_VALUE; } if (tm->table_vacancy.vacancy_down > 100 || tm->table_vacancy.vacancy_up > 100) { OFPPROP_LOG(&bad_ofmsg_rl, false, "Vacancy threshold percentage " "should not be greater than 100"); return OFPERR_OFPBPC_BAD_VALUE; } tm->table_vacancy.vacancy = otv->vacancy; if (tm->table_vacancy.vacancy) { OFPPROP_LOG(&bad_ofmsg_rl, false, "Vacancy value should be zero for table-mod messages"); return OFPERR_OFPBPC_BAD_VALUE; } return 0; } /* Given 'config', taken from an OpenFlow 'version' message that specifies * table configuration (a table mod, table stats, or table features message), * returns the table vacancy configuration that it specifies. * * Only OpenFlow 1.4 and later specify table vacancy configuration this way, * so for other 'version' this function always returns * OFPUTIL_TABLE_VACANCY_DEFAULT. */ static enum ofputil_table_vacancy ofputil_decode_table_vacancy(ovs_be32 config, enum ofp_version version) { return (version < OFP14_VERSION ? OFPUTIL_TABLE_VACANCY_DEFAULT : config & htonl(OFPTC14_VACANCY_EVENTS) ? OFPUTIL_TABLE_VACANCY_ON : OFPUTIL_TABLE_VACANCY_OFF); } /* Given 'config', taken from an OpenFlow 'version' message that specifies * table configuration (a table mod, table stats, or table features message), * returns the table eviction configuration that it specifies. * * Only OpenFlow 1.4 and later specify table eviction configuration this way, * so for other 'version' values this function always returns * OFPUTIL_TABLE_EVICTION_DEFAULT. */ static enum ofputil_table_eviction ofputil_decode_table_eviction(ovs_be32 config, enum ofp_version version) { return (version < OFP14_VERSION ? OFPUTIL_TABLE_EVICTION_DEFAULT : config & htonl(OFPTC14_EVICTION) ? 
OFPUTIL_TABLE_EVICTION_ON : OFPUTIL_TABLE_EVICTION_OFF); } /* Returns a bitmap of OFPTC* values suitable for 'config' fields in various * OpenFlow messages of the given 'version', based on the provided 'miss' and * 'eviction' values. */ static ovs_be32 ofputil_encode_table_config(enum ofputil_table_miss miss, enum ofputil_table_eviction eviction, enum ofputil_table_vacancy vacancy, enum ofp_version version) { uint32_t config = 0; /* Search for "OFPTC_* Table Configuration" in the documentation for more * information on the crazy evolution of this field. */ switch (version) { case OFP10_VERSION: /* OpenFlow 1.0 didn't have such a field, any value ought to do. */ return htonl(0); case OFP11_VERSION: case OFP12_VERSION: /* OpenFlow 1.1 and 1.2 define only OFPTC11_TABLE_MISS_*. */ switch (miss) { case OFPUTIL_TABLE_MISS_DEFAULT: /* Really this shouldn't be used for encoding (the caller should * provide a specific value) but I can't imagine that defaulting to * the fall-through case here will hurt. */ case OFPUTIL_TABLE_MISS_CONTROLLER: default: return htonl(OFPTC11_TABLE_MISS_CONTROLLER); case OFPUTIL_TABLE_MISS_CONTINUE: return htonl(OFPTC11_TABLE_MISS_CONTINUE); case OFPUTIL_TABLE_MISS_DROP: return htonl(OFPTC11_TABLE_MISS_DROP); } OVS_NOT_REACHED(); case OFP13_VERSION: /* OpenFlow 1.3 removed OFPTC11_TABLE_MISS_* and didn't define any new * flags, so this is correct. */ return htonl(0); case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: /* OpenFlow 1.4 introduced OFPTC14_EVICTION and * OFPTC14_VACANCY_EVENTS. */ if (eviction == OFPUTIL_TABLE_EVICTION_ON) { config |= OFPTC14_EVICTION; } if (vacancy == OFPUTIL_TABLE_VACANCY_ON) { config |= OFPTC14_VACANCY_EVENTS; } return htonl(config); } OVS_NOT_REACHED(); } /* Given 'config', taken from an OpenFlow 'version' message that specifies * table configuration (a table mod, table stats, or table features message), * returns the table miss configuration that it specifies. * * Only OpenFlow 1.1 and 1.2 specify table miss configurations this way, so for * other 'version' values this function always returns * OFPUTIL_TABLE_MISS_DEFAULT. */ static enum ofputil_table_miss ofputil_decode_table_miss(ovs_be32 config_, enum ofp_version version) { uint32_t config = ntohl(config_); if (version == OFP11_VERSION || version == OFP12_VERSION) { switch (config & OFPTC11_TABLE_MISS_MASK) { case OFPTC11_TABLE_MISS_CONTROLLER: return OFPUTIL_TABLE_MISS_CONTROLLER; case OFPTC11_TABLE_MISS_CONTINUE: return OFPUTIL_TABLE_MISS_CONTINUE; case OFPTC11_TABLE_MISS_DROP: return OFPUTIL_TABLE_MISS_DROP; default: VLOG_WARN_RL(&bad_ofmsg_rl, "bad table miss config %d", config); return OFPUTIL_TABLE_MISS_CONTROLLER; } } else { return OFPUTIL_TABLE_MISS_DEFAULT; } } /* Decodes the OpenFlow "table mod" message in '*oh' into an abstract form in * '*pm'. Returns 0 if successful, otherwise an OFPERR_* value. 
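 *
 * Example (an illustrative sketch of a message handler; 'oh' is assumed to
 * be a received OFPT_TABLE_MOD):
 *
 *     struct ofputil_table_mod tm;
 *     enum ofperr error = ofputil_decode_table_mod(oh, &tm);
 *     if (error) {
 *         ...send an OpenFlow error reply built from 'error'...
 *     } else {
 *         ...apply 'tm' to table 'tm.table_id'...
 *     }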
*/ enum ofperr ofputil_decode_table_mod(const struct ofp_header *oh, struct ofputil_table_mod *pm) { memset(pm, 0, sizeof *pm); pm->miss = OFPUTIL_TABLE_MISS_DEFAULT; pm->eviction = OFPUTIL_TABLE_EVICTION_DEFAULT; pm->eviction_flags = UINT32_MAX; pm->vacancy = OFPUTIL_TABLE_VACANCY_DEFAULT; struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT11_TABLE_MOD) { const struct ofp11_table_mod *otm = b.data; pm->table_id = otm->table_id; pm->miss = ofputil_decode_table_miss(otm->config, oh->version); } else if (raw == OFPRAW_OFPT14_TABLE_MOD) { const struct ofp14_table_mod *otm = ofpbuf_pull(&b, sizeof *otm); pm->table_id = otm->table_id; pm->miss = ofputil_decode_table_miss(otm->config, oh->version); pm->eviction = ofputil_decode_table_eviction(otm->config, oh->version); pm->vacancy = ofputil_decode_table_vacancy(otm->config, oh->version); while (b.size > 0) { struct ofpbuf property; enum ofperr error; uint64_t type; error = ofpprop_pull(&b, &property, &type); if (error) { return error; } switch (type) { case OFPTMPT14_EVICTION: error = ofpprop_parse_u32(&property, &pm->eviction); break; case OFPTMPT14_VACANCY: error = parse_table_mod_vacancy_property(&property, pm); break; default: error = OFPERR_OFPBRC_BAD_TYPE; break; } if (error) { return error; } } } else { return OFPERR_OFPBRC_BAD_TYPE; } return 0; } /* Converts the abstract form of a "table mod" message in '*tm' into an * OpenFlow message suitable for 'protocol', and returns that encoded form in a * buffer owned by the caller. */ struct ofpbuf * ofputil_encode_table_mod(const struct ofputil_table_mod *tm, enum ofputil_protocol protocol) { enum ofp_version ofp_version = ofputil_protocol_to_ofp_version(protocol); struct ofpbuf *b; switch (ofp_version) { case OFP10_VERSION: { ovs_fatal(0, "table mod needs OpenFlow 1.1 or later " "(\'-O OpenFlow11\')"); break; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: { struct ofp11_table_mod *otm; b = ofpraw_alloc(OFPRAW_OFPT11_TABLE_MOD, ofp_version, 0); otm = ofpbuf_put_zeros(b, sizeof *otm); otm->table_id = tm->table_id; otm->config = ofputil_encode_table_config(tm->miss, tm->eviction, tm->vacancy, ofp_version); break; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp14_table_mod *otm; b = ofpraw_alloc(OFPRAW_OFPT14_TABLE_MOD, ofp_version, 0); otm = ofpbuf_put_zeros(b, sizeof *otm); otm->table_id = tm->table_id; otm->config = ofputil_encode_table_config(tm->miss, tm->eviction, tm->vacancy, ofp_version); if (tm->eviction_flags != UINT32_MAX) { ofpprop_put_u32(b, OFPTMPT14_EVICTION, tm->eviction_flags); } if (tm->vacancy == OFPUTIL_TABLE_VACANCY_ON) { struct ofp14_table_mod_prop_vacancy *otv; otv = ofpprop_put_zeros(b, OFPTMPT14_VACANCY, sizeof *otv); otv->vacancy_down = tm->table_vacancy.vacancy_down; otv->vacancy_up = tm->table_vacancy.vacancy_up; } break; } default: OVS_NOT_REACHED(); } return b; } /* ofputil_role_request */ /* Decodes the OpenFlow "role request" or "role reply" message in '*oh' into * an abstract form in '*rr'. Returns 0 if successful, otherwise an * OFPERR_* value. 
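 *
 * Example (an illustrative sketch; 'oh' is assumed to be a received
 * OFPT_ROLE_REQUEST):
 *
 *     struct ofputil_role_request rr;
 *     enum ofperr error = ofputil_decode_role_message(oh, &rr);
 *     if (!error && rr.role == OFPCR12_ROLE_SLAVE) {
 *         ...stop sending asynchronous messages on this connection...
 *     }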
*/ enum ofperr ofputil_decode_role_message(const struct ofp_header *oh, struct ofputil_role_request *rr) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); if (raw == OFPRAW_OFPT12_ROLE_REQUEST || raw == OFPRAW_OFPT12_ROLE_REPLY) { const struct ofp12_role_request *orr = b.msg; if (orr->role != htonl(OFPCR12_ROLE_NOCHANGE) && orr->role != htonl(OFPCR12_ROLE_EQUAL) && orr->role != htonl(OFPCR12_ROLE_MASTER) && orr->role != htonl(OFPCR12_ROLE_SLAVE)) { return OFPERR_OFPRRFC_BAD_ROLE; } rr->role = ntohl(orr->role); if (raw == OFPRAW_OFPT12_ROLE_REQUEST ? orr->role == htonl(OFPCR12_ROLE_NOCHANGE) : orr->generation_id == OVS_BE64_MAX) { rr->have_generation_id = false; rr->generation_id = 0; } else { rr->have_generation_id = true; rr->generation_id = ntohll(orr->generation_id); } } else if (raw == OFPRAW_NXT_ROLE_REQUEST || raw == OFPRAW_NXT_ROLE_REPLY) { const struct nx_role_request *nrr = b.msg; BUILD_ASSERT(NX_ROLE_OTHER + 1 == OFPCR12_ROLE_EQUAL); BUILD_ASSERT(NX_ROLE_MASTER + 1 == OFPCR12_ROLE_MASTER); BUILD_ASSERT(NX_ROLE_SLAVE + 1 == OFPCR12_ROLE_SLAVE); if (nrr->role != htonl(NX_ROLE_OTHER) && nrr->role != htonl(NX_ROLE_MASTER) && nrr->role != htonl(NX_ROLE_SLAVE)) { return OFPERR_OFPRRFC_BAD_ROLE; } rr->role = ntohl(nrr->role) + 1; rr->have_generation_id = false; rr->generation_id = 0; } else { OVS_NOT_REACHED(); } return 0; } /* Returns an encoded form of a role reply suitable for the "request" in a * buffer owned by the caller. */ struct ofpbuf * ofputil_encode_role_reply(const struct ofp_header *request, const struct ofputil_role_request *rr) { struct ofpbuf *buf; enum ofpraw raw; raw = ofpraw_decode_assert(request); if (raw == OFPRAW_OFPT12_ROLE_REQUEST) { struct ofp12_role_request *orr; buf = ofpraw_alloc_reply(OFPRAW_OFPT12_ROLE_REPLY, request, 0); orr = ofpbuf_put_zeros(buf, sizeof *orr); orr->role = htonl(rr->role); orr->generation_id = htonll(rr->have_generation_id ? rr->generation_id : UINT64_MAX); } else if (raw == OFPRAW_NXT_ROLE_REQUEST) { struct nx_role_request *nrr; BUILD_ASSERT(NX_ROLE_OTHER == OFPCR12_ROLE_EQUAL - 1); BUILD_ASSERT(NX_ROLE_MASTER == OFPCR12_ROLE_MASTER - 1); BUILD_ASSERT(NX_ROLE_SLAVE == OFPCR12_ROLE_SLAVE - 1); buf = ofpraw_alloc_reply(OFPRAW_NXT_ROLE_REPLY, request, 0); nrr = ofpbuf_put_zeros(buf, sizeof *nrr); nrr->role = htonl(rr->role - 1); } else { OVS_NOT_REACHED(); } return buf; } /* Encodes "role status" message 'status' for sending in the given * 'protocol'. Returns the role status message, if 'protocol' supports them, * otherwise a null pointer. 
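 *
 * Example (an illustrative sketch; 'generation_id' and 'protocol' are
 * assumed to come from the caller):
 *
 *     struct ofputil_role_status status = { .role = OFPCR12_ROLE_SLAVE,
 *                                           .reason = OFPCRR_CONFIG,
 *                                           .generation_id = generation_id };
 *     struct ofpbuf *msg = ofputil_encode_role_status(&status, protocol);
 *     if (msg) {
 *         ...send 'msg'...
 *     }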
 */
struct ofpbuf *
ofputil_encode_role_status(const struct ofputil_role_status *status,
                           enum ofputil_protocol protocol)
{
    enum ofp_version version;

    version = ofputil_protocol_to_ofp_version(protocol);
    if (version >= OFP14_VERSION) {
        struct ofp14_role_status *rstatus;
        struct ofpbuf *buf;

        buf = ofpraw_alloc_xid(OFPRAW_OFPT14_ROLE_STATUS, version, htonl(0),
                               0);
        rstatus = ofpbuf_put_zeros(buf, sizeof *rstatus);
        rstatus->role = htonl(status->role);
        rstatus->reason = status->reason;
        rstatus->generation_id = htonll(status->generation_id);

        return buf;
    } else {
        return NULL;
    }
}

enum ofperr
ofputil_decode_role_status(const struct ofp_header *oh,
                           struct ofputil_role_status *rs)
{
    struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
    enum ofpraw raw = ofpraw_pull_assert(&b);
    ovs_assert(raw == OFPRAW_OFPT14_ROLE_STATUS);

    const struct ofp14_role_status *r = b.msg;
    if (r->role != htonl(OFPCR12_ROLE_NOCHANGE) &&
        r->role != htonl(OFPCR12_ROLE_EQUAL) &&
        r->role != htonl(OFPCR12_ROLE_MASTER) &&
        r->role != htonl(OFPCR12_ROLE_SLAVE)) {
        return OFPERR_OFPRRFC_BAD_ROLE;
    }

    /* Reject undefined reason codes up front, the same way that
     * ofputil_decode_port_status() validates its reason field, so that
     * consumers of 'rs' only ever see the defined OFPCRR_* values. */
    if (r->reason != OFPCRR_MASTER_REQUEST
        && r->reason != OFPCRR_CONFIG
        && r->reason != OFPCRR_EXPERIMENTER) {
        return OFPERR_NXBRC_BAD_REASON;
    }

    rs->role = ntohl(r->role);
    rs->generation_id = ntohll(r->generation_id);
    rs->reason = r->reason;

    return 0;
}

/* Encodes 'rf' according to 'protocol', and returns the encoded message.
 * 'protocol' must be for OpenFlow 1.4 or later. */
struct ofpbuf *
ofputil_encode_requestforward(const struct ofputil_requestforward *rf,
                              enum ofputil_protocol protocol)
{
    enum ofp_version ofp_version = ofputil_protocol_to_ofp_version(protocol);
    struct ofpbuf *inner;

    switch (rf->reason) {
    case OFPRFR_GROUP_MOD:
        inner = ofputil_encode_group_mod(ofp_version, rf->group_mod);
        break;

    case OFPRFR_METER_MOD:
        inner = ofputil_encode_meter_mod(ofp_version, rf->meter_mod);
        break;

    case OFPRFR_N_REASONS:
    default:
        OVS_NOT_REACHED();
    }

    struct ofp_header *inner_oh = inner->data;
    inner_oh->xid = rf->xid;
    inner_oh->length = htons(inner->size);

    struct ofpbuf *outer = ofpraw_alloc_xid(OFPRAW_OFPT14_REQUESTFORWARD,
                                            ofp_version, htonl(0),
                                            inner->size);
    ofpbuf_put(outer, inner->data, inner->size);
    ofpbuf_delete(inner);

    return outer;
}

/* Decodes OFPT_REQUESTFORWARD message 'outer'.  On success, puts the decoded
 * form into '*rf' and returns 0, and the caller is later responsible for
 * freeing the content of 'rf', with ofputil_destroy_requestforward(rf).  On
 * failure, returns an ofperr and '*rf' is indeterminate. */
enum ofperr
ofputil_decode_requestforward(const struct ofp_header *outer,
                              struct ofputil_requestforward *rf)
{
    struct ofpbuf b = ofpbuf_const_initializer(outer, ntohs(outer->length));

    /* Skip past outer message. */
    enum ofpraw outer_raw = ofpraw_pull_assert(&b);
    ovs_assert(outer_raw == OFPRAW_OFPT14_REQUESTFORWARD);

    /* Validate inner message. */
    if (b.size < sizeof(struct ofp_header)) {
        return OFPERR_OFPBFC_MSG_BAD_LEN;
    }
    const struct ofp_header *inner = b.data;
    unsigned int inner_len = ntohs(inner->length);
    if (inner_len < sizeof(struct ofp_header) || inner_len > b.size) {
        return OFPERR_OFPBFC_MSG_BAD_LEN;
    }
    if (inner->version != outer->version) {
        return OFPERR_OFPBRC_BAD_VERSION;
    }

    /* Parse inner message.
*/ enum ofptype type; enum ofperr error = ofptype_decode(&type, inner); if (error) { return error; } rf->xid = inner->xid; if (type == OFPTYPE_GROUP_MOD) { rf->reason = OFPRFR_GROUP_MOD; rf->group_mod = xmalloc(sizeof *rf->group_mod); error = ofputil_decode_group_mod(inner, rf->group_mod); if (error) { free(rf->group_mod); return error; } } else if (type == OFPTYPE_METER_MOD) { rf->reason = OFPRFR_METER_MOD; rf->meter_mod = xmalloc(sizeof *rf->meter_mod); ofpbuf_init(&rf->bands, 64); error = ofputil_decode_meter_mod(inner, rf->meter_mod, &rf->bands); if (error) { free(rf->meter_mod); ofpbuf_uninit(&rf->bands); return error; } } else { return OFPERR_OFPBFC_MSG_UNSUP; } return 0; } /* Frees the content of 'rf', which should have been initialized through a * successful call to ofputil_decode_requestforward(). */ void ofputil_destroy_requestforward(struct ofputil_requestforward *rf) { if (!rf) { return; } switch (rf->reason) { case OFPRFR_GROUP_MOD: ofputil_uninit_group_mod(rf->group_mod); free(rf->group_mod); break; case OFPRFR_METER_MOD: ofpbuf_uninit(&rf->bands); free(rf->meter_mod); break; case OFPRFR_N_REASONS: OVS_NOT_REACHED(); } } /* Table stats. */ /* OpenFlow 1.0 and 1.1 don't distinguish between a field that cannot be * matched and a field that must be wildcarded. This function returns a bitmap * that contains both kinds of fields. */ static struct mf_bitmap wild_or_nonmatchable_fields(const struct ofputil_table_features *features) { struct mf_bitmap wc = features->match; bitmap_not(wc.bm, MFF_N_IDS); bitmap_or(wc.bm, features->wildcard.bm, MFF_N_IDS); return wc; } struct ofp10_wc_map { enum ofp10_flow_wildcards wc10; enum mf_field_id mf; }; static const struct ofp10_wc_map ofp10_wc_map[] = { { OFPFW10_IN_PORT, MFF_IN_PORT }, { OFPFW10_DL_VLAN, MFF_VLAN_VID }, { OFPFW10_DL_SRC, MFF_ETH_SRC }, { OFPFW10_DL_DST, MFF_ETH_DST}, { OFPFW10_DL_TYPE, MFF_ETH_TYPE }, { OFPFW10_NW_PROTO, MFF_IP_PROTO }, { OFPFW10_TP_SRC, MFF_TCP_SRC }, { OFPFW10_TP_DST, MFF_TCP_DST }, { OFPFW10_NW_SRC_MASK, MFF_IPV4_SRC }, { OFPFW10_NW_DST_MASK, MFF_IPV4_DST }, { OFPFW10_DL_VLAN_PCP, MFF_VLAN_PCP }, { OFPFW10_NW_TOS, MFF_IP_DSCP }, }; static ovs_be32 mf_bitmap_to_of10(const struct mf_bitmap *fields) { const struct ofp10_wc_map *p; uint32_t wc10 = 0; for (p = ofp10_wc_map; p < &ofp10_wc_map[ARRAY_SIZE(ofp10_wc_map)]; p++) { if (bitmap_is_set(fields->bm, p->mf)) { wc10 |= p->wc10; } } return htonl(wc10); } static struct mf_bitmap mf_bitmap_from_of10(ovs_be32 wc10_) { struct mf_bitmap fields = MF_BITMAP_INITIALIZER; const struct ofp10_wc_map *p; uint32_t wc10 = ntohl(wc10_); for (p = ofp10_wc_map; p < &ofp10_wc_map[ARRAY_SIZE(ofp10_wc_map)]; p++) { if (wc10 & p->wc10) { bitmap_set1(fields.bm, p->mf); } } return fields; } static void ofputil_put_ofp10_table_stats(const struct ofputil_table_stats *stats, const struct ofputil_table_features *features, struct ofpbuf *buf) { struct mf_bitmap wc = wild_or_nonmatchable_fields(features); struct ofp10_table_stats *out; out = ofpbuf_put_zeros(buf, sizeof *out); out->table_id = features->table_id; ovs_strlcpy(out->name, features->name, sizeof out->name); out->wildcards = mf_bitmap_to_of10(&wc); out->max_entries = htonl(features->max_entries); out->active_count = htonl(stats->active_count); put_32aligned_be64(&out->lookup_count, htonll(stats->lookup_count)); put_32aligned_be64(&out->matched_count, htonll(stats->matched_count)); } struct ofp11_wc_map { enum ofp11_flow_match_fields wc11; enum mf_field_id mf; }; static const struct ofp11_wc_map ofp11_wc_map[] = { { 
OFPFMF11_IN_PORT, MFF_IN_PORT }, { OFPFMF11_DL_VLAN, MFF_VLAN_VID }, { OFPFMF11_DL_VLAN_PCP, MFF_VLAN_PCP }, { OFPFMF11_DL_TYPE, MFF_ETH_TYPE }, { OFPFMF11_NW_TOS, MFF_IP_DSCP }, { OFPFMF11_NW_PROTO, MFF_IP_PROTO }, { OFPFMF11_TP_SRC, MFF_TCP_SRC }, { OFPFMF11_TP_DST, MFF_TCP_DST }, { OFPFMF11_MPLS_LABEL, MFF_MPLS_LABEL }, { OFPFMF11_MPLS_TC, MFF_MPLS_TC }, /* I don't know what OFPFMF11_TYPE means. */ { OFPFMF11_DL_SRC, MFF_ETH_SRC }, { OFPFMF11_DL_DST, MFF_ETH_DST }, { OFPFMF11_NW_SRC, MFF_IPV4_SRC }, { OFPFMF11_NW_DST, MFF_IPV4_DST }, { OFPFMF11_METADATA, MFF_METADATA }, }; static ovs_be32 mf_bitmap_to_of11(const struct mf_bitmap *fields) { const struct ofp11_wc_map *p; uint32_t wc11 = 0; for (p = ofp11_wc_map; p < &ofp11_wc_map[ARRAY_SIZE(ofp11_wc_map)]; p++) { if (bitmap_is_set(fields->bm, p->mf)) { wc11 |= p->wc11; } } return htonl(wc11); } static struct mf_bitmap mf_bitmap_from_of11(ovs_be32 wc11_) { struct mf_bitmap fields = MF_BITMAP_INITIALIZER; const struct ofp11_wc_map *p; uint32_t wc11 = ntohl(wc11_); for (p = ofp11_wc_map; p < &ofp11_wc_map[ARRAY_SIZE(ofp11_wc_map)]; p++) { if (wc11 & p->wc11) { bitmap_set1(fields.bm, p->mf); } } return fields; } static void ofputil_put_ofp11_table_stats(const struct ofputil_table_stats *stats, const struct ofputil_table_features *features, struct ofpbuf *buf) { struct mf_bitmap wc = wild_or_nonmatchable_fields(features); struct ofp11_table_stats *out; out = ofpbuf_put_zeros(buf, sizeof *out); out->table_id = features->table_id; ovs_strlcpy(out->name, features->name, sizeof out->name); out->wildcards = mf_bitmap_to_of11(&wc); out->match = mf_bitmap_to_of11(&features->match); out->instructions = ovsinst_bitmap_to_openflow( features->nonmiss.instructions, OFP11_VERSION); out->write_actions = ofpact_bitmap_to_openflow( features->nonmiss.write.ofpacts, OFP11_VERSION); out->apply_actions = ofpact_bitmap_to_openflow( features->nonmiss.apply.ofpacts, OFP11_VERSION); out->config = htonl(features->miss_config); out->max_entries = htonl(features->max_entries); out->active_count = htonl(stats->active_count); out->lookup_count = htonll(stats->lookup_count); out->matched_count = htonll(stats->matched_count); } static void ofputil_put_ofp12_table_stats(const struct ofputil_table_stats *stats, const struct ofputil_table_features *features, struct ofpbuf *buf) { struct ofp12_table_stats *out; out = ofpbuf_put_zeros(buf, sizeof *out); out->table_id = features->table_id; ovs_strlcpy(out->name, features->name, sizeof out->name); out->match = oxm_bitmap_from_mf_bitmap(&features->match, OFP12_VERSION); out->wildcards = oxm_bitmap_from_mf_bitmap(&features->wildcard, OFP12_VERSION); out->write_actions = ofpact_bitmap_to_openflow( features->nonmiss.write.ofpacts, OFP12_VERSION); out->apply_actions = ofpact_bitmap_to_openflow( features->nonmiss.apply.ofpacts, OFP12_VERSION); out->write_setfields = oxm_bitmap_from_mf_bitmap( &features->nonmiss.write.set_fields, OFP12_VERSION); out->apply_setfields = oxm_bitmap_from_mf_bitmap( &features->nonmiss.apply.set_fields, OFP12_VERSION); out->metadata_match = features->metadata_match; out->metadata_write = features->metadata_write; out->instructions = ovsinst_bitmap_to_openflow( features->nonmiss.instructions, OFP12_VERSION); out->config = ofputil_encode_table_config(features->miss_config, OFPUTIL_TABLE_EVICTION_DEFAULT, OFPUTIL_TABLE_VACANCY_DEFAULT, OFP12_VERSION); out->max_entries = htonl(features->max_entries); out->active_count = htonl(stats->active_count); out->lookup_count = htonll(stats->lookup_count); out->matched_count 
    = htonll(stats->matched_count);
}

static void
ofputil_put_ofp13_table_stats(const struct ofputil_table_stats *stats,
                              struct ofpbuf *buf)
{
    struct ofp13_table_stats *out;

    out = ofpbuf_put_zeros(buf, sizeof *out);
    out->table_id = stats->table_id;
    out->active_count = htonl(stats->active_count);
    out->lookup_count = htonll(stats->lookup_count);
    out->matched_count = htonll(stats->matched_count);
}

struct ofpbuf *
ofputil_encode_table_stats_reply(const struct ofp_header *request)
{
    return ofpraw_alloc_stats_reply(request, 0);
}

void
ofputil_append_table_stats_reply(struct ofpbuf *reply,
                                 const struct ofputil_table_stats *stats,
                                 const struct ofputil_table_features *features)
{
    struct ofp_header *oh = reply->header;

    ovs_assert(stats->table_id == features->table_id);

    switch ((enum ofp_version) oh->version) {
    case OFP10_VERSION:
        ofputil_put_ofp10_table_stats(stats, features, reply);
        break;

    case OFP11_VERSION:
        ofputil_put_ofp11_table_stats(stats, features, reply);
        break;

    case OFP12_VERSION:
        ofputil_put_ofp12_table_stats(stats, features, reply);
        break;

    case OFP13_VERSION:
    case OFP14_VERSION:
    case OFP15_VERSION:
    case OFP16_VERSION:
        ofputil_put_ofp13_table_stats(stats, reply);
        break;

    default:
        OVS_NOT_REACHED();
    }
}

static int
ofputil_decode_ofp10_table_stats(struct ofpbuf *msg,
                                 struct ofputil_table_stats *stats,
                                 struct ofputil_table_features *features)
{
    struct ofp10_table_stats *ots;

    ots = ofpbuf_try_pull(msg, sizeof *ots);
    if (!ots) {
        return OFPERR_OFPBRC_BAD_LEN;
    }

    features->table_id = ots->table_id;
    ovs_strlcpy(features->name, ots->name, sizeof features->name);
    features->max_entries = ntohl(ots->max_entries);
    features->match = features->wildcard = mf_bitmap_from_of10(ots->wildcards);

    stats->table_id = ots->table_id;
    stats->active_count = ntohl(ots->active_count);
    stats->lookup_count = ntohll(get_32aligned_be64(&ots->lookup_count));
    stats->matched_count = ntohll(get_32aligned_be64(&ots->matched_count));

    return 0;
}

static int
ofputil_decode_ofp11_table_stats(struct ofpbuf *msg,
                                 struct ofputil_table_stats *stats,
                                 struct ofputil_table_features *features)
{
    struct ofp11_table_stats *ots;

    ots = ofpbuf_try_pull(msg, sizeof *ots);
    if (!ots) {
        return OFPERR_OFPBRC_BAD_LEN;
    }

    features->table_id = ots->table_id;
    ovs_strlcpy(features->name, ots->name, sizeof features->name);
    features->max_entries = ntohl(ots->max_entries);
    features->nonmiss.instructions = ovsinst_bitmap_from_openflow(
        ots->instructions, OFP11_VERSION);
    features->nonmiss.write.ofpacts = ofpact_bitmap_from_openflow(
        ots->write_actions, OFP11_VERSION);
    features->nonmiss.apply.ofpacts = ofpact_bitmap_from_openflow(
        ots->apply_actions, OFP11_VERSION);
    features->miss = features->nonmiss;
    features->miss_config = ofputil_decode_table_miss(ots->config,
                                                      OFP11_VERSION);
    features->match = mf_bitmap_from_of11(ots->match);
    features->wildcard = mf_bitmap_from_of11(ots->wildcards);
    bitmap_or(features->match.bm, features->wildcard.bm, MFF_N_IDS);

    stats->table_id = ots->table_id;
    stats->active_count = ntohl(ots->active_count);
    stats->lookup_count = ntohll(ots->lookup_count);
    stats->matched_count = ntohll(ots->matched_count);

    return 0;
}

static int
ofputil_decode_ofp12_table_stats(struct ofpbuf *msg,
                                 struct ofputil_table_stats *stats,
                                 struct ofputil_table_features *features)
{
    struct ofp12_table_stats *ots;

    ots = ofpbuf_try_pull(msg, sizeof *ots);
    if (!ots) {
        return OFPERR_OFPBRC_BAD_LEN;
    }

    features->table_id = ots->table_id;
    ovs_strlcpy(features->name, ots->name, sizeof features->name);
    features->metadata_match = ots->metadata_match;
    features->metadata_write =
ots->metadata_write; features->miss_config = ofputil_decode_table_miss(ots->config, OFP12_VERSION); features->max_entries = ntohl(ots->max_entries); features->nonmiss.instructions = ovsinst_bitmap_from_openflow( ots->instructions, OFP12_VERSION); features->nonmiss.write.ofpacts = ofpact_bitmap_from_openflow( ots->write_actions, OFP12_VERSION); features->nonmiss.apply.ofpacts = ofpact_bitmap_from_openflow( ots->apply_actions, OFP12_VERSION); features->nonmiss.write.set_fields = oxm_bitmap_to_mf_bitmap( ots->write_setfields, OFP12_VERSION); features->nonmiss.apply.set_fields = oxm_bitmap_to_mf_bitmap( ots->apply_setfields, OFP12_VERSION); features->miss = features->nonmiss; features->match = oxm_bitmap_to_mf_bitmap(ots->match, OFP12_VERSION); features->wildcard = oxm_bitmap_to_mf_bitmap(ots->wildcards, OFP12_VERSION); bitmap_or(features->match.bm, features->wildcard.bm, MFF_N_IDS); stats->table_id = ots->table_id; stats->active_count = ntohl(ots->active_count); stats->lookup_count = ntohll(ots->lookup_count); stats->matched_count = ntohll(ots->matched_count); return 0; } static int ofputil_decode_ofp13_table_stats(struct ofpbuf *msg, struct ofputil_table_stats *stats, struct ofputil_table_features *features) { struct ofp13_table_stats *ots; ots = ofpbuf_try_pull(msg, sizeof *ots); if (!ots) { return OFPERR_OFPBRC_BAD_LEN; } features->table_id = ots->table_id; stats->table_id = ots->table_id; stats->active_count = ntohl(ots->active_count); stats->lookup_count = ntohll(ots->lookup_count); stats->matched_count = ntohll(ots->matched_count); return 0; } int ofputil_decode_table_stats_reply(struct ofpbuf *msg, struct ofputil_table_stats *stats, struct ofputil_table_features *features) { const struct ofp_header *oh; if (!msg->header) { ofpraw_pull_assert(msg); } oh = msg->header; if (!msg->size) { return EOF; } memset(stats, 0, sizeof *stats); memset(features, 0, sizeof *features); features->supports_eviction = -1; features->supports_vacancy_events = -1; switch ((enum ofp_version) oh->version) { case OFP10_VERSION: return ofputil_decode_ofp10_table_stats(msg, stats, features); case OFP11_VERSION: return ofputil_decode_ofp11_table_stats(msg, stats, features); case OFP12_VERSION: return ofputil_decode_ofp12_table_stats(msg, stats, features); case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: return ofputil_decode_ofp13_table_stats(msg, stats, features); default: OVS_NOT_REACHED(); } } /* ofputil_flow_monitor_request */ /* Converts an NXST_FLOW_MONITOR request in 'msg' into an abstract * ofputil_flow_monitor_request in 'rq'. * * Multiple NXST_FLOW_MONITOR requests can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the requests. The caller must initially leave 'msg''s layer * pointers null and not modify them between calls. * * Returns 0 if successful, EOF if no requests were left in this 'msg', * otherwise an OFPERR_* value. 
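 *
 * Example (an illustrative sketch of the iteration pattern; 'msg' is assumed
 * to hold the received request, with its layer pointers still null):
 *
 *     struct ofputil_flow_monitor_request rq;
 *     int retval;
 *
 *     while (!(retval = ofputil_decode_flow_monitor_request(&rq, msg))) {
 *         ...create a monitor according to 'rq'...
 *     }
 *     if (retval != EOF) {
 *         ...handle the OFPERR_* value in 'retval'...
 *     }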
 */
int
ofputil_decode_flow_monitor_request(struct ofputil_flow_monitor_request *rq,
                                    struct ofpbuf *msg)
{
    struct nx_flow_monitor_request *nfmr;
    uint16_t flags;

    if (!msg->header) {
        ofpraw_pull_assert(msg);
    }

    if (!msg->size) {
        return EOF;
    }

    nfmr = ofpbuf_try_pull(msg, sizeof *nfmr);
    if (!nfmr) {
        VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW_MONITOR request has %"PRIu32" "
                     "leftover bytes at end", msg->size);
        return OFPERR_OFPBRC_BAD_LEN;
    }

    flags = ntohs(nfmr->flags);
    if (!(flags & (NXFMF_ADD | NXFMF_DELETE | NXFMF_MODIFY))
        || flags & ~(NXFMF_INITIAL | NXFMF_ADD | NXFMF_DELETE
                     | NXFMF_MODIFY | NXFMF_ACTIONS | NXFMF_OWN)) {
        VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW_MONITOR has bad flags %#"PRIx16,
                     flags);
        return OFPERR_OFPMOFC_BAD_FLAGS;
    }

    if (!is_all_zeros(nfmr->zeros, sizeof nfmr->zeros)) {
        return OFPERR_NXBRC_MUST_BE_ZERO;
    }

    rq->id = ntohl(nfmr->id);
    rq->flags = flags;
    rq->out_port = u16_to_ofp(ntohs(nfmr->out_port));
    rq->table_id = nfmr->table_id;

    return nx_pull_match(msg, ntohs(nfmr->match_len), &rq->match,
                         NULL, NULL, NULL, NULL);
}

void
ofputil_append_flow_monitor_request(
    const struct ofputil_flow_monitor_request *rq, struct ofpbuf *msg)
{
    struct nx_flow_monitor_request *nfmr;
    size_t start_ofs;
    int match_len;

    if (!msg->size) {
        ofpraw_put(OFPRAW_NXST_FLOW_MONITOR_REQUEST, OFP10_VERSION, msg);
    }

    start_ofs = msg->size;
    ofpbuf_put_zeros(msg, sizeof *nfmr);
    match_len = nx_put_match(msg, &rq->match, htonll(0), htonll(0));

    nfmr = ofpbuf_at_assert(msg, start_ofs, sizeof *nfmr);
    nfmr->id = htonl(rq->id);
    nfmr->flags = htons(rq->flags);
    nfmr->out_port = htons(ofp_to_u16(rq->out_port));
    nfmr->match_len = htons(match_len);
    nfmr->table_id = rq->table_id;
}

/* Converts an NXST_FLOW_MONITOR reply (also known as a flow update) in 'msg'
 * into an abstract ofputil_flow_update in 'update'.  'update->match' is an
 * embedded 'struct match' that this function fills in directly, so the caller
 * does not need to allocate separate match storage.
 *
 * Uses 'ofpacts' to store the abstract OFPACT_* version of the update's
 * actions (except for NXFME_ABBREV, which never includes actions).  The
 * caller must initialize 'ofpacts' and retains ownership of it.
 * 'update->ofpacts' will point into the 'ofpacts' buffer.
 *
 * Multiple flow updates can be packed into a single OpenFlow message.
 * Calling this function multiple times for a single 'msg' iterates through
 * the updates.  The caller must initially leave 'msg''s layer pointers null
 * and not modify them between calls.
 *
 * Returns 0 if successful, EOF if no updates were left in this 'msg',
 * otherwise an OFPERR_* value.
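 *
 * Example (an illustrative sketch of the decode loop):
 *
 *     struct ofputil_flow_update update;
 *     struct ofpbuf ofpacts;
 *     int retval;
 *
 *     ofpbuf_init(&ofpacts, 64);
 *     while (!(retval = ofputil_decode_flow_update(&update, msg,
 *                                                  &ofpacts))) {
 *         ...use 'update'...
 *     }
 *     ofpbuf_uninit(&ofpacts);
 *     if (retval != EOF) {
 *         ...handle the OFPERR_* value in 'retval'...
 *     }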
*/ int ofputil_decode_flow_update(struct ofputil_flow_update *update, struct ofpbuf *msg, struct ofpbuf *ofpacts) { struct nx_flow_update_header *nfuh; unsigned int length; struct ofp_header *oh; if (!msg->header) { ofpraw_pull_assert(msg); } ofpbuf_clear(ofpacts); if (!msg->size) { return EOF; } if (msg->size < sizeof(struct nx_flow_update_header)) { goto bad_len; } oh = msg->header; nfuh = msg->data; update->event = ntohs(nfuh->event); length = ntohs(nfuh->length); if (length > msg->size || length % 8) { goto bad_len; } if (update->event == NXFME_ABBREV) { struct nx_flow_update_abbrev *nfua; if (length != sizeof *nfua) { goto bad_len; } nfua = ofpbuf_pull(msg, sizeof *nfua); update->xid = nfua->xid; return 0; } else if (update->event == NXFME_ADDED || update->event == NXFME_DELETED || update->event == NXFME_MODIFIED) { struct nx_flow_update_full *nfuf; unsigned int actions_len; unsigned int match_len; enum ofperr error; if (length < sizeof *nfuf) { goto bad_len; } nfuf = ofpbuf_pull(msg, sizeof *nfuf); match_len = ntohs(nfuf->match_len); if (sizeof *nfuf + match_len > length) { goto bad_len; } update->reason = ntohs(nfuf->reason); update->idle_timeout = ntohs(nfuf->idle_timeout); update->hard_timeout = ntohs(nfuf->hard_timeout); update->table_id = nfuf->table_id; update->cookie = nfuf->cookie; update->priority = ntohs(nfuf->priority); error = nx_pull_match(msg, match_len, &update->match, NULL, NULL, NULL, NULL); if (error) { return error; } actions_len = length - sizeof *nfuf - ROUND_UP(match_len, 8); error = ofpacts_pull_openflow_actions(msg, actions_len, oh->version, NULL, NULL, ofpacts); if (error) { return error; } update->ofpacts = ofpacts->data; update->ofpacts_len = ofpacts->size; return 0; } else { VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW_MONITOR reply has bad event %"PRIu16, ntohs(nfuh->event)); return OFPERR_NXBRC_FM_BAD_EVENT; } bad_len: VLOG_WARN_RL(&bad_ofmsg_rl, "NXST_FLOW_MONITOR reply has %"PRIu32" " "leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } uint32_t ofputil_decode_flow_monitor_cancel(const struct ofp_header *oh) { const struct nx_flow_monitor_cancel *cancel = ofpmsg_body(oh); return ntohl(cancel->id); } struct ofpbuf * ofputil_encode_flow_monitor_cancel(uint32_t id) { struct nx_flow_monitor_cancel *nfmc; struct ofpbuf *msg; msg = ofpraw_alloc(OFPRAW_NXT_FLOW_MONITOR_CANCEL, OFP10_VERSION, 0); nfmc = ofpbuf_put_uninit(msg, sizeof *nfmc); nfmc->id = htonl(id); return msg; } void ofputil_start_flow_update(struct ovs_list *replies) { struct ofpbuf *msg; msg = ofpraw_alloc_xid(OFPRAW_NXST_FLOW_MONITOR_REPLY, OFP10_VERSION, htonl(0), 1024); ovs_list_init(replies); ovs_list_push_back(replies, &msg->list_node); } void ofputil_append_flow_update(const struct ofputil_flow_update *update, struct ovs_list *replies, const struct tun_table *tun_table) { struct ofputil_flow_update *update_ = CONST_CAST(struct ofputil_flow_update *, update); const struct tun_table *orig_tun_table; enum ofp_version version = ofpmp_version(replies); struct nx_flow_update_header *nfuh; struct ofpbuf *msg; size_t start_ofs; orig_tun_table = update->match.flow.tunnel.metadata.tab; update_->match.flow.tunnel.metadata.tab = tun_table; msg = ofpbuf_from_list(ovs_list_back(replies)); start_ofs = msg->size; if (update->event == NXFME_ABBREV) { struct nx_flow_update_abbrev *nfua; nfua = ofpbuf_put_zeros(msg, sizeof *nfua); nfua->xid = update->xid; } else { struct nx_flow_update_full *nfuf; int match_len; ofpbuf_put_zeros(msg, sizeof *nfuf); match_len = nx_put_match(msg, &update->match, 
htonll(0), htonll(0)); ofpacts_put_openflow_actions(update->ofpacts, update->ofpacts_len, msg, version); nfuf = ofpbuf_at_assert(msg, start_ofs, sizeof *nfuf); nfuf->reason = htons(update->reason); nfuf->priority = htons(update->priority); nfuf->idle_timeout = htons(update->idle_timeout); nfuf->hard_timeout = htons(update->hard_timeout); nfuf->match_len = htons(match_len); nfuf->table_id = update->table_id; nfuf->cookie = update->cookie; } nfuh = ofpbuf_at_assert(msg, start_ofs, sizeof *nfuh); nfuh->length = htons(msg->size - start_ofs); nfuh->event = htons(update->event); ofpmp_postappend(replies, start_ofs); update_->match.flow.tunnel.metadata.tab = orig_tun_table; } struct ofpbuf * ofputil_encode_packet_out(const struct ofputil_packet_out *po, enum ofputil_protocol protocol) { enum ofp_version ofp_version = ofputil_protocol_to_ofp_version(protocol); struct ofpbuf *msg; size_t size; size = po->ofpacts_len; if (po->buffer_id == UINT32_MAX) { size += po->packet_len; } switch (ofp_version) { case OFP10_VERSION: { struct ofp10_packet_out *opo; size_t actions_ofs; msg = ofpraw_alloc(OFPRAW_OFPT10_PACKET_OUT, OFP10_VERSION, size); ofpbuf_put_zeros(msg, sizeof *opo); actions_ofs = msg->size; ofpacts_put_openflow_actions(po->ofpacts, po->ofpacts_len, msg, ofp_version); opo = msg->msg; opo->buffer_id = htonl(po->buffer_id); opo->in_port = htons(ofp_to_u16(po->in_port)); opo->actions_len = htons(msg->size - actions_ofs); break; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp11_packet_out *opo; size_t len; msg = ofpraw_alloc(OFPRAW_OFPT11_PACKET_OUT, ofp_version, size); ofpbuf_put_zeros(msg, sizeof *opo); len = ofpacts_put_openflow_actions(po->ofpacts, po->ofpacts_len, msg, ofp_version); opo = msg->msg; opo->buffer_id = htonl(po->buffer_id); opo->in_port = ofputil_port_to_ofp11(po->in_port); opo->actions_len = htons(len); break; } default: OVS_NOT_REACHED(); } if (po->buffer_id == UINT32_MAX) { ofpbuf_put(msg, po->packet, po->packet_len); } ofpmsg_update_length(msg); return msg; } /* Creates and returns an OFPT_ECHO_REQUEST message with an empty payload. */ struct ofpbuf * make_echo_request(enum ofp_version ofp_version) { return ofpraw_alloc_xid(OFPRAW_OFPT_ECHO_REQUEST, ofp_version, htonl(0), 0); } /* Creates and returns an OFPT_ECHO_REPLY message matching the * OFPT_ECHO_REQUEST message in 'rq'. 
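 *
 * Sketch of the usual turnaround (the connection handling is an assumption,
 * not part of this file):
 *
 *     struct ofpbuf *reply = make_echo_reply(rq);
 *     ...send 'reply' on the connection that delivered 'rq', then free it...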
*/ struct ofpbuf * make_echo_reply(const struct ofp_header *rq) { struct ofpbuf rq_buf = ofpbuf_const_initializer(rq, ntohs(rq->length)); ofpraw_pull_assert(&rq_buf); struct ofpbuf *reply = ofpraw_alloc_reply(OFPRAW_OFPT_ECHO_REPLY, rq, rq_buf.size); ofpbuf_put(reply, rq_buf.data, rq_buf.size); return reply; } struct ofpbuf * ofputil_encode_barrier_request(enum ofp_version ofp_version) { enum ofpraw type; switch (ofp_version) { case OFP16_VERSION: case OFP15_VERSION: case OFP14_VERSION: case OFP13_VERSION: case OFP12_VERSION: case OFP11_VERSION: type = OFPRAW_OFPT11_BARRIER_REQUEST; break; case OFP10_VERSION: type = OFPRAW_OFPT10_BARRIER_REQUEST; break; default: OVS_NOT_REACHED(); } return ofpraw_alloc(type, ofp_version, 0); } const char * ofputil_frag_handling_to_string(enum ofputil_frag_handling frag) { switch (frag) { case OFPUTIL_FRAG_NORMAL: return "normal"; case OFPUTIL_FRAG_DROP: return "drop"; case OFPUTIL_FRAG_REASM: return "reassemble"; case OFPUTIL_FRAG_NX_MATCH: return "nx-match"; } OVS_NOT_REACHED(); } bool ofputil_frag_handling_from_string(const char *s, enum ofputil_frag_handling *frag) { if (!strcasecmp(s, "normal")) { *frag = OFPUTIL_FRAG_NORMAL; } else if (!strcasecmp(s, "drop")) { *frag = OFPUTIL_FRAG_DROP; } else if (!strcasecmp(s, "reassemble")) { *frag = OFPUTIL_FRAG_REASM; } else if (!strcasecmp(s, "nx-match")) { *frag = OFPUTIL_FRAG_NX_MATCH; } else { return false; } return true; } /* Converts the OpenFlow 1.1+ port number 'ofp11_port' into an OpenFlow 1.0 * port number and stores the latter in '*ofp10_port', for the purpose of * decoding OpenFlow 1.1+ protocol messages. Returns 0 if successful, * otherwise an OFPERR_* number. On error, stores OFPP_NONE in '*ofp10_port'. * * See the definition of OFP11_MAX for an explanation of the mapping. */ enum ofperr ofputil_port_from_ofp11(ovs_be32 ofp11_port, ofp_port_t *ofp10_port) { uint32_t ofp11_port_h = ntohl(ofp11_port); if (ofp11_port_h < ofp_to_u16(OFPP_MAX)) { *ofp10_port = u16_to_ofp(ofp11_port_h); return 0; } else if (ofp11_port_h >= ofp11_to_u32(OFPP11_MAX)) { *ofp10_port = u16_to_ofp(ofp11_port_h - OFPP11_OFFSET); return 0; } else { *ofp10_port = OFPP_NONE; VLOG_WARN_RL(&bad_ofmsg_rl, "port %"PRIu32" is outside the supported " "range 0 through %d or 0x%"PRIx32" through 0x%"PRIx32, ofp11_port_h, ofp_to_u16(OFPP_MAX) - 1, ofp11_to_u32(OFPP11_MAX), UINT32_MAX); return OFPERR_OFPBAC_BAD_OUT_PORT; } } /* Returns the OpenFlow 1.1+ port number equivalent to the OpenFlow 1.0 port * number 'ofp10_port', for encoding OpenFlow 1.1+ protocol messages. * * See the definition of OFP11_MAX for an explanation of the mapping. */ ovs_be32 ofputil_port_to_ofp11(ofp_port_t ofp10_port) { return htonl(ofp_to_u16(ofp10_port) < ofp_to_u16(OFPP_MAX) ? ofp_to_u16(ofp10_port) : ofp_to_u16(ofp10_port) + OFPP11_OFFSET); } #define OFPUTIL_NAMED_PORTS \ OFPUTIL_NAMED_PORT(IN_PORT) \ OFPUTIL_NAMED_PORT(TABLE) \ OFPUTIL_NAMED_PORT(NORMAL) \ OFPUTIL_NAMED_PORT(FLOOD) \ OFPUTIL_NAMED_PORT(ALL) \ OFPUTIL_NAMED_PORT(CONTROLLER) \ OFPUTIL_NAMED_PORT(LOCAL) \ OFPUTIL_NAMED_PORT(ANY) \ OFPUTIL_NAMED_PORT(UNSET) /* For backwards compatibility, so that "none" is recognized as OFPP_ANY */ #define OFPUTIL_NAMED_PORTS_WITH_NONE \ OFPUTIL_NAMED_PORTS \ OFPUTIL_NAMED_PORT(NONE) /* Stores the port number represented by 's' into '*portp'. 's' may be an * integer or, for reserved ports, the standard OpenFlow name for the port * (e.g. "LOCAL"). * * Returns true if successful, false if 's' is not a valid OpenFlow port number * or name. 
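 *
 * A minimal calling sketch ('s' is assumed to come from user input):
 *
 *     ofp_port_t port;
 *
 *     if (!ofputil_port_from_string(s, &port)) {
 *         ...'s' is not a recognized port number or name...
 *     }
 *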
The caller should issue an error message in this case, because * this function usually does not. (This gives the caller an opportunity to * look up the port name another way, e.g. by contacting the switch and listing * the names of all its ports). * * This function accepts OpenFlow 1.0 port numbers. It also accepts a subset * of OpenFlow 1.1+ port numbers, mapping those port numbers into the 16-bit * range as described in include/openflow/openflow-1.1.h. */ bool ofputil_port_from_string(const char *s, ofp_port_t *portp) { unsigned int port32; /* int is at least 32 bits wide. */ if (*s == '-') { VLOG_WARN("Negative value %s is not a valid port number.", s); return false; } *portp = 0; if (str_to_uint(s, 10, &port32)) { if (port32 < ofp_to_u16(OFPP_MAX)) { /* Pass. */ } else if (port32 < ofp_to_u16(OFPP_FIRST_RESV)) { VLOG_WARN("port %u is a reserved OF1.0 port number that will " "be translated to %u when talking to an OF1.1 or " "later controller", port32, port32 + OFPP11_OFFSET); } else if (port32 <= ofp_to_u16(OFPP_LAST_RESV)) { char name[OFP_MAX_PORT_NAME_LEN]; ofputil_port_to_string(u16_to_ofp(port32), name, sizeof name); VLOG_WARN_ONCE("referring to port %s as %"PRIu32" is deprecated " "for compatibility with OpenFlow 1.1 and later", name, port32); } else if (port32 < ofp11_to_u32(OFPP11_MAX)) { VLOG_WARN("port %u is outside the supported range 0 through " "%"PRIx16" or 0x%x through 0x%"PRIx32, port32, UINT16_MAX, ofp11_to_u32(OFPP11_MAX), UINT32_MAX); return false; } else { port32 -= OFPP11_OFFSET; } *portp = u16_to_ofp(port32); return true; } else { struct pair { const char *name; ofp_port_t value; }; static const struct pair pairs[] = { #define OFPUTIL_NAMED_PORT(NAME) {#NAME, OFPP_##NAME}, OFPUTIL_NAMED_PORTS_WITH_NONE #undef OFPUTIL_NAMED_PORT }; const struct pair *p; for (p = pairs; p < &pairs[ARRAY_SIZE(pairs)]; p++) { if (!strcasecmp(s, p->name)) { *portp = p->value; return true; } } return false; } } /* Appends to 's' a string representation of the OpenFlow port number 'port'. * Most ports' string representation is just the port number, but for special * ports, e.g. OFPP_LOCAL, it is the name, e.g. "LOCAL". */ void ofputil_format_port(ofp_port_t port, struct ds *s) { char name[OFP_MAX_PORT_NAME_LEN]; ofputil_port_to_string(port, name, sizeof name); ds_put_cstr(s, name); } /* Puts in the 'bufsize' byte in 'namebuf' a null-terminated string * representation of OpenFlow port number 'port'. Most ports are represented * as just the port number, but special ports, e.g. OFPP_LOCAL, are represented * by name, e.g. "LOCAL". */ void ofputil_port_to_string(ofp_port_t port, char namebuf[OFP_MAX_PORT_NAME_LEN], size_t bufsize) { switch (port) { #define OFPUTIL_NAMED_PORT(NAME) \ case OFPP_##NAME: \ ovs_strlcpy(namebuf, #NAME, bufsize); \ break; OFPUTIL_NAMED_PORTS #undef OFPUTIL_NAMED_PORT default: snprintf(namebuf, bufsize, "%"PRIu32, port); break; } } /* Stores the group id represented by 's' into '*group_idp'. 's' may be an * integer or, for reserved group IDs, the standard OpenFlow name for the group * (either "ANY" or "ALL"). * * Returns true if successful, false if 's' is not a valid OpenFlow group ID or * name. */ bool ofputil_group_from_string(const char *s, uint32_t *group_idp) { if (!strcasecmp(s, "any")) { *group_idp = OFPG_ANY; } else if (!strcasecmp(s, "all")) { *group_idp = OFPG_ALL; } else if (!str_to_uint(s, 10, group_idp)) { VLOG_WARN("%s is not a valid group ID. 
(Valid group IDs are " "32-bit nonnegative integers or the keywords ANY or " "ALL.)", s); return false; } return true; } /* Appends to 's' a string representation of the OpenFlow group ID 'group_id'. * Most groups' string representation is just the number, but for special * groups, e.g. OFPG_ALL, it is the name, e.g. "ALL". */ void ofputil_format_group(uint32_t group_id, struct ds *s) { char name[MAX_GROUP_NAME_LEN]; ofputil_group_to_string(group_id, name, sizeof name); ds_put_cstr(s, name); } /* Puts in the 'bufsize' byte in 'namebuf' a null-terminated string * representation of OpenFlow group ID 'group_id'. Most group are represented * as just their number, but special groups, e.g. OFPG_ALL, are represented * by name, e.g. "ALL". */ void ofputil_group_to_string(uint32_t group_id, char namebuf[MAX_GROUP_NAME_LEN + 1], size_t bufsize) { switch (group_id) { case OFPG_ALL: ovs_strlcpy(namebuf, "ALL", bufsize); break; case OFPG_ANY: ovs_strlcpy(namebuf, "ANY", bufsize); break; default: snprintf(namebuf, bufsize, "%"PRIu32, group_id); break; } } /* Given a buffer 'b' that contains an array of OpenFlow ports of type * 'ofp_version', tries to pull the first element from the array. If * successful, initializes '*pp' with an abstract representation of the * port and returns 0. If no ports remain to be decoded, returns EOF. * On an error, returns a positive OFPERR_* value. */ int ofputil_pull_phy_port(enum ofp_version ofp_version, struct ofpbuf *b, struct ofputil_phy_port *pp) { memset(pp, 0, sizeof *pp); switch (ofp_version) { case OFP10_VERSION: { const struct ofp10_phy_port *opp = ofpbuf_try_pull(b, sizeof *opp); return opp ? ofputil_decode_ofp10_phy_port(pp, opp) : EOF; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: { const struct ofp11_port *op = ofpbuf_try_pull(b, sizeof *op); return op ? ofputil_decode_ofp11_port(pp, op) : EOF; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: return b->size ? ofputil_pull_ofp14_port(pp, b) : EOF; default: OVS_NOT_REACHED(); } } static void ofputil_normalize_match__(struct match *match, bool may_log) { enum { MAY_NW_ADDR = 1 << 0, /* nw_src, nw_dst */ MAY_TP_ADDR = 1 << 1, /* tp_src, tp_dst */ MAY_NW_PROTO = 1 << 2, /* nw_proto */ MAY_IPVx = 1 << 3, /* tos, frag, ttl */ MAY_ARP_SHA = 1 << 4, /* arp_sha */ MAY_ARP_THA = 1 << 5, /* arp_tha */ MAY_IPV6 = 1 << 6, /* ipv6_src, ipv6_dst, ipv6_label */ MAY_ND_TARGET = 1 << 7, /* nd_target */ MAY_MPLS = 1 << 8, /* mpls label and tc */ } may_match; struct flow_wildcards wc; /* Figure out what fields may be matched. 
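     * For example, an ARP flow has no transport ports and no IP TOS/TTL, so
     * the ETH_TYPE_ARP case below sets only MAY_NW_PROTO, MAY_NW_ADDR, and
     * the ARP hardware-address bits.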
     */
    if (match->flow.dl_type == htons(ETH_TYPE_IP)) {
        may_match = MAY_NW_PROTO | MAY_IPVx | MAY_NW_ADDR;
        if (match->flow.nw_proto == IPPROTO_TCP ||
            match->flow.nw_proto == IPPROTO_UDP ||
            match->flow.nw_proto == IPPROTO_SCTP ||
            match->flow.nw_proto == IPPROTO_ICMP) {
            may_match |= MAY_TP_ADDR;
        }
    } else if (match->flow.dl_type == htons(ETH_TYPE_IPV6)) {
        may_match = MAY_NW_PROTO | MAY_IPVx | MAY_IPV6;
        if (match->flow.nw_proto == IPPROTO_TCP ||
            match->flow.nw_proto == IPPROTO_UDP ||
            match->flow.nw_proto == IPPROTO_SCTP) {
            may_match |= MAY_TP_ADDR;
        } else if (match->flow.nw_proto == IPPROTO_ICMPV6) {
            may_match |= MAY_TP_ADDR;
            if (match->flow.tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
                may_match |= MAY_ND_TARGET | MAY_ARP_SHA;
            } else if (match->flow.tp_src == htons(ND_NEIGHBOR_ADVERT)) {
                may_match |= MAY_ND_TARGET | MAY_ARP_THA;
            }
        }
    } else if (match->flow.dl_type == htons(ETH_TYPE_ARP) ||
               match->flow.dl_type == htons(ETH_TYPE_RARP)) {
        may_match = MAY_NW_PROTO | MAY_NW_ADDR | MAY_ARP_SHA | MAY_ARP_THA;
    } else if (eth_type_mpls(match->flow.dl_type)) {
        may_match = MAY_MPLS;
    } else {
        may_match = 0;
    }

    /* Clear the fields that may not be matched. */
    wc = match->wc;
    if (!(may_match & MAY_NW_ADDR)) {
        wc.masks.nw_src = wc.masks.nw_dst = htonl(0);
    }
    if (!(may_match & MAY_TP_ADDR)) {
        wc.masks.tp_src = wc.masks.tp_dst = htons(0);
    }
    if (!(may_match & MAY_NW_PROTO)) {
        wc.masks.nw_proto = 0;
    }
    if (!(may_match & MAY_IPVx)) {
        wc.masks.nw_tos = 0;
        wc.masks.nw_ttl = 0;
    }
    if (!(may_match & MAY_ARP_SHA)) {
        WC_UNMASK_FIELD(&wc, arp_sha);
    }
    if (!(may_match & MAY_ARP_THA)) {
        WC_UNMASK_FIELD(&wc, arp_tha);
    }
    if (!(may_match & MAY_IPV6)) {
        wc.masks.ipv6_src = wc.masks.ipv6_dst = in6addr_any;
        wc.masks.ipv6_label = htonl(0);
    }
    if (!(may_match & MAY_ND_TARGET)) {
        wc.masks.nd_target = in6addr_any;
    }
    if (!(may_match & MAY_MPLS)) {
        memset(wc.masks.mpls_lse, 0, sizeof wc.masks.mpls_lse);
    }

    /* Log any changes. */
    if (!flow_wildcards_equal(&wc, &match->wc)) {
        bool log = may_log && !VLOG_DROP_INFO(&bad_ofmsg_rl);
        char *pre = log ? match_to_string(match, OFP_DEFAULT_PRIORITY) : NULL;

        match->wc = wc;
        match_zero_wildcarded_fields(match);

        if (log) {
            char *post = match_to_string(match, OFP_DEFAULT_PRIORITY);
            VLOG_INFO("normalization changed ofp_match, details:");
            VLOG_INFO(" pre: %s", pre);
            VLOG_INFO("post: %s", post);
            free(pre);
            free(post);
        }
    }
}

/* "Normalizes" the wildcards in 'match'.  That means:
 *
 *    1. If the type of level N is known, then only the valid fields for that
 *       level may be specified.  For example, ARP does not have a TOS field,
 *       so nw_tos must be wildcarded if 'match' specifies an ARP flow.
 *       Similarly, IPv4 does not have any IPv6 addresses, so ipv6_src and
 *       ipv6_dst (and other fields) must be wildcarded if 'match' specifies
 *       an IPv4 flow.
 *
 *    2. If the type of level N is not known (or not understood by Open
 *       vSwitch), then no fields at all for that level may be specified.
 *       For example, if Open vSwitch does not understand the L4 protocol in
 *       use, then the L4 fields tp_src and tp_dst must be wildcarded.
 *
 * If this function changes 'match', it logs a rate-limited informational
 * message. */
void
ofputil_normalize_match(struct match *match)
{
    ofputil_normalize_match__(match, true);
}

/* Same as ofputil_normalize_match() without the logging.  Thus, this function
 * is suitable for a program's internal use, whereas ofputil_normalize_match()
 * makes sense for use on flows received from elsewhere (so that a bug in the
 * program that sent them can be reported and corrected).
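 *
 * A small sketch of the effect (the match_set_*() calls here are purely
 * illustrative):
 *
 *     struct match match;
 *
 *     match_init_catchall(&match);
 *     match_set_dl_type(&match, htons(ETH_TYPE_ARP));
 *     match_set_tp_dst(&match, htons(80));    (meaningless for ARP)
 *     ofputil_normalize_match_quiet(&match);  (re-wildcards tp_dst, silently)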
*/ void ofputil_normalize_match_quiet(struct match *match) { ofputil_normalize_match__(match, false); } static size_t parse_value(const char *s, const char *delimiters) { size_t n = 0; /* Iterate until we reach a delimiter. * * strchr(s, '\0') returns s+strlen(s), so this test handles the null * terminator at the end of 's'. */ while (!strchr(delimiters, s[n])) { if (s[n] == '(') { int level = 0; do { switch (s[n]) { case '\0': return n; case '(': level++; break; case ')': level--; break; } n++; } while (level > 0); } else { n++; } } return n; } /* Parses a key or a key-value pair from '*stringp'. * * On success: Stores the key into '*keyp'. Stores the value, if present, into * '*valuep', otherwise an empty string. Advances '*stringp' past the end of * the key-value pair, preparing it for another call. '*keyp' and '*valuep' * are substrings of '*stringp' created by replacing some of its bytes by null * terminators. Returns true. * * If '*stringp' is just white space or commas, sets '*keyp' and '*valuep' to * NULL and returns false. */ bool ofputil_parse_key_value(char **stringp, char **keyp, char **valuep) { /* Skip white space and delimiters. If that brings us to the end of the * input string, we are done and there are no more key-value pairs. */ *stringp += strspn(*stringp, ", \t\r\n"); if (**stringp == '\0') { *keyp = *valuep = NULL; return false; } /* Extract the key and the delimiter that ends the key-value pair or begins * the value. Advance the input position past the key and delimiter. */ char *key = *stringp; size_t key_len = strcspn(key, ":=(, \t\r\n"); char key_delim = key[key_len]; key[key_len] = '\0'; *stringp += key_len + (key_delim != '\0'); /* Figure out what delimiter ends the value: * * - If key_delim is ":" or "=", the value extends until white space * or a comma. * * - If key_delim is "(", the value extends until ")". * * If there is no value, we are done. */ const char *value_delims; if (key_delim == ':' || key_delim == '=') { value_delims = ", \t\r\n"; } else if (key_delim == '(') { value_delims = ")"; } else { *keyp = key; *valuep = key + key_len; /* Empty string. */ return true; } /* Extract the value. Advance the input position past the value and * delimiter. */ char *value = *stringp; size_t value_len = parse_value(value, value_delims); char value_delim = value[value_len]; value[value_len] = '\0'; *stringp += value_len + (value_delim != '\0'); *keyp = key; *valuep = value; return true; } /* Encode a dump ports request for 'port', the encoded message * will be for OpenFlow version 'ofp_version'. Returns message * as a struct ofpbuf. 
Returns encoded message on success, NULL on error */ struct ofpbuf * ofputil_encode_dump_ports_request(enum ofp_version ofp_version, ofp_port_t port) { struct ofpbuf *request; switch (ofp_version) { case OFP10_VERSION: { struct ofp10_port_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST10_PORT_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->port_no = htons(ofp_to_u16(port)); break; } case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp11_port_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST11_PORT_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->port_no = ofputil_port_to_ofp11(port); break; } default: OVS_NOT_REACHED(); } return request; } static void ofputil_port_stats_to_ofp10(const struct ofputil_port_stats *ops, struct ofp10_port_stats *ps10) { ps10->port_no = htons(ofp_to_u16(ops->port_no)); memset(ps10->pad, 0, sizeof ps10->pad); put_32aligned_be64(&ps10->rx_packets, htonll(ops->stats.rx_packets)); put_32aligned_be64(&ps10->tx_packets, htonll(ops->stats.tx_packets)); put_32aligned_be64(&ps10->rx_bytes, htonll(ops->stats.rx_bytes)); put_32aligned_be64(&ps10->tx_bytes, htonll(ops->stats.tx_bytes)); put_32aligned_be64(&ps10->rx_dropped, htonll(ops->stats.rx_dropped)); put_32aligned_be64(&ps10->tx_dropped, htonll(ops->stats.tx_dropped)); put_32aligned_be64(&ps10->rx_errors, htonll(ops->stats.rx_errors)); put_32aligned_be64(&ps10->tx_errors, htonll(ops->stats.tx_errors)); put_32aligned_be64(&ps10->rx_frame_err, htonll(ops->stats.rx_frame_errors)); put_32aligned_be64(&ps10->rx_over_err, htonll(ops->stats.rx_over_errors)); put_32aligned_be64(&ps10->rx_crc_err, htonll(ops->stats.rx_crc_errors)); put_32aligned_be64(&ps10->collisions, htonll(ops->stats.collisions)); } static void ofputil_port_stats_to_ofp11(const struct ofputil_port_stats *ops, struct ofp11_port_stats *ps11) { ps11->port_no = ofputil_port_to_ofp11(ops->port_no); memset(ps11->pad, 0, sizeof ps11->pad); ps11->rx_packets = htonll(ops->stats.rx_packets); ps11->tx_packets = htonll(ops->stats.tx_packets); ps11->rx_bytes = htonll(ops->stats.rx_bytes); ps11->tx_bytes = htonll(ops->stats.tx_bytes); ps11->rx_dropped = htonll(ops->stats.rx_dropped); ps11->tx_dropped = htonll(ops->stats.tx_dropped); ps11->rx_errors = htonll(ops->stats.rx_errors); ps11->tx_errors = htonll(ops->stats.tx_errors); ps11->rx_frame_err = htonll(ops->stats.rx_frame_errors); ps11->rx_over_err = htonll(ops->stats.rx_over_errors); ps11->rx_crc_err = htonll(ops->stats.rx_crc_errors); ps11->collisions = htonll(ops->stats.collisions); } static void ofputil_port_stats_to_ofp13(const struct ofputil_port_stats *ops, struct ofp13_port_stats *ps13) { ofputil_port_stats_to_ofp11(ops, &ps13->ps); ps13->duration_sec = htonl(ops->duration_sec); ps13->duration_nsec = htonl(ops->duration_nsec); } static void ofputil_append_ofp14_port_stats(const struct ofputil_port_stats *ops, struct ovs_list *replies) { struct ofp14_port_stats_prop_ethernet *eth; struct intel_port_stats_rfc2819 *stats_rfc2819; struct ofp14_port_stats *ps14; struct ofpbuf *reply; reply = ofpmp_reserve(replies, sizeof *ps14 + sizeof *eth + sizeof *stats_rfc2819); ps14 = ofpbuf_put_uninit(reply, sizeof *ps14); ps14->length = htons(sizeof *ps14 + sizeof *eth + sizeof *stats_rfc2819); memset(ps14->pad, 0, sizeof ps14->pad); ps14->port_no = ofputil_port_to_ofp11(ops->port_no); ps14->duration_sec = htonl(ops->duration_sec); ps14->duration_nsec = 
htonl(ops->duration_nsec); ps14->rx_packets = htonll(ops->stats.rx_packets); ps14->tx_packets = htonll(ops->stats.tx_packets); ps14->rx_bytes = htonll(ops->stats.rx_bytes); ps14->tx_bytes = htonll(ops->stats.tx_bytes); ps14->rx_dropped = htonll(ops->stats.rx_dropped); ps14->tx_dropped = htonll(ops->stats.tx_dropped); ps14->rx_errors = htonll(ops->stats.rx_errors); ps14->tx_errors = htonll(ops->stats.tx_errors); eth = ofpprop_put_zeros(reply, OFPPSPT14_ETHERNET, sizeof *eth); eth->rx_frame_err = htonll(ops->stats.rx_frame_errors); eth->rx_over_err = htonll(ops->stats.rx_over_errors); eth->rx_crc_err = htonll(ops->stats.rx_crc_errors); eth->collisions = htonll(ops->stats.collisions); uint64_t prop_type = OFPPROP_EXP(INTEL_VENDOR_ID, INTEL_PORT_STATS_RFC2819); stats_rfc2819 = ofpprop_put_zeros(reply, prop_type, sizeof *stats_rfc2819); memset(stats_rfc2819->pad, 0, sizeof stats_rfc2819->pad); stats_rfc2819->rx_1_to_64_packets = htonll(ops->stats.rx_1_to_64_packets); stats_rfc2819->rx_65_to_127_packets = htonll(ops->stats.rx_65_to_127_packets); stats_rfc2819->rx_128_to_255_packets = htonll(ops->stats.rx_128_to_255_packets); stats_rfc2819->rx_256_to_511_packets = htonll(ops->stats.rx_256_to_511_packets); stats_rfc2819->rx_512_to_1023_packets = htonll(ops->stats.rx_512_to_1023_packets); stats_rfc2819->rx_1024_to_1522_packets = htonll(ops->stats.rx_1024_to_1522_packets); stats_rfc2819->rx_1523_to_max_packets = htonll(ops->stats.rx_1523_to_max_packets); stats_rfc2819->tx_1_to_64_packets = htonll(ops->stats.tx_1_to_64_packets); stats_rfc2819->tx_65_to_127_packets = htonll(ops->stats.tx_65_to_127_packets); stats_rfc2819->tx_128_to_255_packets = htonll(ops->stats.tx_128_to_255_packets); stats_rfc2819->tx_256_to_511_packets = htonll(ops->stats.tx_256_to_511_packets); stats_rfc2819->tx_512_to_1023_packets = htonll(ops->stats.tx_512_to_1023_packets); stats_rfc2819->tx_1024_to_1522_packets = htonll(ops->stats.tx_1024_to_1522_packets); stats_rfc2819->tx_1523_to_max_packets = htonll(ops->stats.tx_1523_to_max_packets); stats_rfc2819->tx_multicast_packets = htonll(ops->stats.tx_multicast_packets); stats_rfc2819->rx_broadcast_packets = htonll(ops->stats.rx_broadcast_packets); stats_rfc2819->tx_broadcast_packets = htonll(ops->stats.tx_broadcast_packets); stats_rfc2819->rx_undersized_errors = htonll(ops->stats.rx_undersized_errors); stats_rfc2819->rx_oversize_errors = htonll(ops->stats.rx_oversize_errors); stats_rfc2819->rx_fragmented_errors = htonll(ops->stats.rx_fragmented_errors); stats_rfc2819->rx_jabber_errors = htonll(ops->stats.rx_jabber_errors); } /* Encode a ports stat for 'ops' and append it to 'replies'. 
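 *
 * 'replies' is expected to have been initialized with ofpmp_init(), as for
 * the other multipart appenders in this file.  A sketch (the request header
 * 'oh' is an assumption):
 *
 *     struct ovs_list replies;
 *
 *     ofpmp_init(&replies, oh);
 *     ofputil_append_port_stat(&replies, &ops);
 *     ...one call per port, then send every ofpbuf left in 'replies'...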
*/ void ofputil_append_port_stat(struct ovs_list *replies, const struct ofputil_port_stats *ops) { switch (ofpmp_version(replies)) { case OFP13_VERSION: { struct ofp13_port_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_port_stats_to_ofp13(ops, reply); break; } case OFP12_VERSION: case OFP11_VERSION: { struct ofp11_port_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_port_stats_to_ofp11(ops, reply); break; } case OFP10_VERSION: { struct ofp10_port_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_port_stats_to_ofp10(ops, reply); break; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: ofputil_append_ofp14_port_stats(ops, replies); break; default: OVS_NOT_REACHED(); } } static enum ofperr ofputil_port_stats_from_ofp10(struct ofputil_port_stats *ops, const struct ofp10_port_stats *ps10) { ops->port_no = u16_to_ofp(ntohs(ps10->port_no)); ops->stats.rx_packets = ntohll(get_32aligned_be64(&ps10->rx_packets)); ops->stats.tx_packets = ntohll(get_32aligned_be64(&ps10->tx_packets)); ops->stats.rx_bytes = ntohll(get_32aligned_be64(&ps10->rx_bytes)); ops->stats.tx_bytes = ntohll(get_32aligned_be64(&ps10->tx_bytes)); ops->stats.rx_dropped = ntohll(get_32aligned_be64(&ps10->rx_dropped)); ops->stats.tx_dropped = ntohll(get_32aligned_be64(&ps10->tx_dropped)); ops->stats.rx_errors = ntohll(get_32aligned_be64(&ps10->rx_errors)); ops->stats.tx_errors = ntohll(get_32aligned_be64(&ps10->tx_errors)); ops->stats.rx_frame_errors = ntohll(get_32aligned_be64(&ps10->rx_frame_err)); ops->stats.rx_over_errors = ntohll(get_32aligned_be64(&ps10->rx_over_err)); ops->stats.rx_crc_errors = ntohll(get_32aligned_be64(&ps10->rx_crc_err)); ops->stats.collisions = ntohll(get_32aligned_be64(&ps10->collisions)); ops->duration_sec = ops->duration_nsec = UINT32_MAX; return 0; } static enum ofperr ofputil_port_stats_from_ofp11(struct ofputil_port_stats *ops, const struct ofp11_port_stats *ps11) { enum ofperr error; error = ofputil_port_from_ofp11(ps11->port_no, &ops->port_no); if (error) { return error; } ops->stats.rx_packets = ntohll(ps11->rx_packets); ops->stats.tx_packets = ntohll(ps11->tx_packets); ops->stats.rx_bytes = ntohll(ps11->rx_bytes); ops->stats.tx_bytes = ntohll(ps11->tx_bytes); ops->stats.rx_dropped = ntohll(ps11->rx_dropped); ops->stats.tx_dropped = ntohll(ps11->tx_dropped); ops->stats.rx_errors = ntohll(ps11->rx_errors); ops->stats.tx_errors = ntohll(ps11->tx_errors); ops->stats.rx_frame_errors = ntohll(ps11->rx_frame_err); ops->stats.rx_over_errors = ntohll(ps11->rx_over_err); ops->stats.rx_crc_errors = ntohll(ps11->rx_crc_err); ops->stats.collisions = ntohll(ps11->collisions); ops->duration_sec = ops->duration_nsec = UINT32_MAX; return 0; } static enum ofperr ofputil_port_stats_from_ofp13(struct ofputil_port_stats *ops, const struct ofp13_port_stats *ps13) { enum ofperr error = ofputil_port_stats_from_ofp11(ops, &ps13->ps); if (!error) { ops->duration_sec = ntohl(ps13->duration_sec); ops->duration_nsec = ntohl(ps13->duration_nsec); } return error; } static enum ofperr parse_ofp14_port_stats_ethernet_property(const struct ofpbuf *payload, struct ofputil_port_stats *ops) { const struct ofp14_port_stats_prop_ethernet *eth = payload->data; if (payload->size != sizeof *eth) { return OFPERR_OFPBPC_BAD_LEN; } ops->stats.rx_frame_errors = ntohll(eth->rx_frame_err); ops->stats.rx_over_errors = ntohll(eth->rx_over_err); ops->stats.rx_crc_errors = ntohll(eth->rx_crc_err); ops->stats.collisions = ntohll(eth->collisions); return 0; } static enum ofperr 
parse_intel_port_stats_rfc2819_property(const struct ofpbuf *payload, struct ofputil_port_stats *ops) { const struct intel_port_stats_rfc2819 *rfc2819 = payload->data; if (payload->size != sizeof *rfc2819) { return OFPERR_OFPBPC_BAD_LEN; } ops->stats.rx_1_to_64_packets = ntohll(rfc2819->rx_1_to_64_packets); ops->stats.rx_65_to_127_packets = ntohll(rfc2819->rx_65_to_127_packets); ops->stats.rx_128_to_255_packets = ntohll(rfc2819->rx_128_to_255_packets); ops->stats.rx_256_to_511_packets = ntohll(rfc2819->rx_256_to_511_packets); ops->stats.rx_512_to_1023_packets = ntohll(rfc2819->rx_512_to_1023_packets); ops->stats.rx_1024_to_1522_packets = ntohll(rfc2819->rx_1024_to_1522_packets); ops->stats.rx_1523_to_max_packets = ntohll(rfc2819->rx_1523_to_max_packets); ops->stats.tx_1_to_64_packets = ntohll(rfc2819->tx_1_to_64_packets); ops->stats.tx_65_to_127_packets = ntohll(rfc2819->tx_65_to_127_packets); ops->stats.tx_128_to_255_packets = ntohll(rfc2819->tx_128_to_255_packets); ops->stats.tx_256_to_511_packets = ntohll(rfc2819->tx_256_to_511_packets); ops->stats.tx_512_to_1023_packets = ntohll(rfc2819->tx_512_to_1023_packets); ops->stats.tx_1024_to_1522_packets = ntohll(rfc2819->tx_1024_to_1522_packets); ops->stats.tx_1523_to_max_packets = ntohll(rfc2819->tx_1523_to_max_packets); ops->stats.tx_multicast_packets = ntohll(rfc2819->tx_multicast_packets); ops->stats.rx_broadcast_packets = ntohll(rfc2819->rx_broadcast_packets); ops->stats.tx_broadcast_packets = ntohll(rfc2819->tx_broadcast_packets); ops->stats.rx_undersized_errors = ntohll(rfc2819->rx_undersized_errors); ops->stats.rx_oversize_errors = ntohll(rfc2819->rx_oversize_errors); ops->stats.rx_fragmented_errors = ntohll(rfc2819->rx_fragmented_errors); ops->stats.rx_jabber_errors = ntohll(rfc2819->rx_jabber_errors); return 0; } static enum ofperr parse_intel_port_stats_property(const struct ofpbuf *payload, uint32_t exp_type, struct ofputil_port_stats *ops) { enum ofperr error; switch (exp_type) { case INTEL_PORT_STATS_RFC2819: error = parse_intel_port_stats_rfc2819_property(payload, ops); break; default: error = OFPERR_OFPBPC_BAD_EXP_TYPE; break; } return error; } static enum ofperr ofputil_pull_ofp14_port_stats(struct ofputil_port_stats *ops, struct ofpbuf *msg) { const struct ofp14_port_stats *ps14 = ofpbuf_try_pull(msg, sizeof *ps14); if (!ps14) { return OFPERR_OFPBRC_BAD_LEN; } size_t len = ntohs(ps14->length); if (len < sizeof *ps14 || len - sizeof *ps14 > msg->size) { return OFPERR_OFPBRC_BAD_LEN; } len -= sizeof *ps14; enum ofperr error = ofputil_port_from_ofp11(ps14->port_no, &ops->port_no); if (error) { return error; } ops->duration_sec = ntohl(ps14->duration_sec); ops->duration_nsec = ntohl(ps14->duration_nsec); ops->stats.rx_packets = ntohll(ps14->rx_packets); ops->stats.tx_packets = ntohll(ps14->tx_packets); ops->stats.rx_bytes = ntohll(ps14->rx_bytes); ops->stats.tx_bytes = ntohll(ps14->tx_bytes); ops->stats.rx_dropped = ntohll(ps14->rx_dropped); ops->stats.tx_dropped = ntohll(ps14->tx_dropped); ops->stats.rx_errors = ntohll(ps14->rx_errors); ops->stats.tx_errors = ntohll(ps14->tx_errors); struct ofpbuf properties = ofpbuf_const_initializer(ofpbuf_pull(msg, len), len); while (properties.size > 0) { struct ofpbuf payload; enum ofperr error; uint64_t type = 0; error = ofpprop_pull(&properties, &payload, &type); if (error) { return error; } switch (type) { case OFPPSPT14_ETHERNET: error = parse_ofp14_port_stats_ethernet_property(&payload, ops); break; case OFPPROP_EXP(INTEL_VENDOR_ID, INTEL_PORT_STATS_RFC2819): error = 
parse_intel_port_stats_property(&payload, INTEL_PORT_STATS_RFC2819, ops); break; default: error = OFPPROP_UNKNOWN(true, "port stats", type); break; } if (error) { return error; } } return 0; } /* Returns the number of port stats elements in OFPTYPE_PORT_STATS_REPLY * message 'oh'. */ size_t ofputil_count_port_stats(const struct ofp_header *oh) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); for (size_t n = 0; ; n++) { struct ofputil_port_stats ps; if (ofputil_decode_port_stats(&ps, &b)) { return n; } } } /* Converts an OFPST_PORT_STATS reply in 'msg' into an abstract * ofputil_port_stats in 'ps'. * * Multiple OFPST_PORT_STATS replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. The caller must initially leave 'msg''s layer pointers * null and not modify them between calls. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_port_stats(struct ofputil_port_stats *ps, struct ofpbuf *msg) { enum ofperr error; enum ofpraw raw; memset(&(ps->stats), 0xFF, sizeof (ps->stats)); error = (msg->header ? ofpraw_decode(&raw, msg->header) : ofpraw_pull(&raw, msg)); if (error) { return error; } if (!msg->size) { return EOF; } else if (raw == OFPRAW_OFPST14_PORT_REPLY) { return ofputil_pull_ofp14_port_stats(ps, msg); } else if (raw == OFPRAW_OFPST13_PORT_REPLY) { const struct ofp13_port_stats *ps13; ps13 = ofpbuf_try_pull(msg, sizeof *ps13); if (!ps13) { goto bad_len; } return ofputil_port_stats_from_ofp13(ps, ps13); } else if (raw == OFPRAW_OFPST11_PORT_REPLY) { const struct ofp11_port_stats *ps11; ps11 = ofpbuf_try_pull(msg, sizeof *ps11); if (!ps11) { goto bad_len; } return ofputil_port_stats_from_ofp11(ps, ps11); } else if (raw == OFPRAW_OFPST10_PORT_REPLY) { const struct ofp10_port_stats *ps10; ps10 = ofpbuf_try_pull(msg, sizeof *ps10); if (!ps10) { goto bad_len; } return ofputil_port_stats_from_ofp10(ps, ps10); } else { OVS_NOT_REACHED(); } bad_len: VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_PORT reply has %"PRIu32" leftover " "bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } /* Parse a port status request message into a 16 bit OpenFlow 1.0 * port number and stores the latter in '*ofp10_port'. * Returns 0 if successful, otherwise an OFPERR_* number. 
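 *
 * Sketch of a request handler (the reply path is an assumption):
 *
 *     ofp_port_t port;
 *     enum ofperr error = ofputil_decode_port_stats_request(oh, &port);
 *
 *     if (error) {
 *         ...send an error reply...
 *     }
 *     ...otherwise dump statistics for 'port'...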
 */
enum ofperr
ofputil_decode_port_stats_request(const struct ofp_header *request,
                                  ofp_port_t *ofp10_port)
{
    switch ((enum ofp_version)request->version) {
    case OFP16_VERSION:
    case OFP15_VERSION:
    case OFP14_VERSION:
    case OFP13_VERSION:
    case OFP12_VERSION:
    case OFP11_VERSION: {
        const struct ofp11_port_stats_request *psr11 = ofpmsg_body(request);
        return ofputil_port_from_ofp11(psr11->port_no, ofp10_port);
    }

    case OFP10_VERSION: {
        const struct ofp10_port_stats_request *psr10 = ofpmsg_body(request);
        *ofp10_port = u16_to_ofp(ntohs(psr10->port_no));
        return 0;
    }

    default:
        OVS_NOT_REACHED();
    }
}

static void
ofputil_ipfix_stats_to_reply(const struct ofputil_ipfix_stats *ois,
                             struct nx_ipfix_stats_reply *reply)
{
    reply->collector_set_id = htonl(ois->collector_set_id);
    reply->total_flows = htonll(ois->total_flows);
    reply->current_flows = htonll(ois->current_flows);
    reply->pkts = htonll(ois->pkts);
    reply->ipv4_pkts = htonll(ois->ipv4_pkts);
    reply->ipv6_pkts = htonll(ois->ipv6_pkts);
    reply->error_pkts = htonll(ois->error_pkts);
    reply->ipv4_error_pkts = htonll(ois->ipv4_error_pkts);
    reply->ipv6_error_pkts = htonll(ois->ipv6_error_pkts);
    reply->tx_pkts = htonll(ois->tx_pkts);
    reply->tx_errors = htonll(ois->tx_errors);
    memset(reply->pad, 0, sizeof reply->pad);
}

/* Encodes an IPFIX stat for 'ois' and appends it to 'replies'. */
void
ofputil_append_ipfix_stat(struct ovs_list *replies,
                          const struct ofputil_ipfix_stats *ois)
{
    struct nx_ipfix_stats_reply *reply = ofpmp_append(replies, sizeof *reply);
    ofputil_ipfix_stats_to_reply(ois, reply);
}

static enum ofperr
ofputil_ipfix_stats_from_nx(struct ofputil_ipfix_stats *is,
                            const struct nx_ipfix_stats_reply *reply)
{
    is->collector_set_id = ntohl(reply->collector_set_id);
    is->total_flows = ntohll(reply->total_flows);
    is->current_flows = ntohll(reply->current_flows);
    is->pkts = ntohll(reply->pkts);
    is->ipv4_pkts = ntohll(reply->ipv4_pkts);
    is->ipv6_pkts = ntohll(reply->ipv6_pkts);
    is->error_pkts = ntohll(reply->error_pkts);
    is->ipv4_error_pkts = ntohll(reply->ipv4_error_pkts);
    is->ipv6_error_pkts = ntohll(reply->ipv6_error_pkts);
    is->tx_pkts = ntohll(reply->tx_pkts);
    is->tx_errors = ntohll(reply->tx_errors);

    return 0;
}

int
ofputil_pull_ipfix_stats(struct ofputil_ipfix_stats *is, struct ofpbuf *msg)
{
    enum ofperr error;
    enum ofpraw raw;

    memset(is, 0xFF, sizeof (*is));
    error = (msg->header ? ofpraw_decode(&raw, msg->header)
             : ofpraw_pull(&raw, msg));
    if (error) {
        return error;
    }

    if (!msg->size) {
        return EOF;
    } else if (raw == OFPRAW_NXST_IPFIX_BRIDGE_REPLY ||
               raw == OFPRAW_NXST_IPFIX_FLOW_REPLY) {
        struct nx_ipfix_stats_reply *reply;

        reply = ofpbuf_try_pull(msg, sizeof *reply);
        if (!reply) {
            /* The remaining bytes are too few for a full stats entry. */
            return OFPERR_OFPBRC_BAD_LEN;
        }
        return ofputil_ipfix_stats_from_nx(is, reply);
    } else {
        OVS_NOT_REACHED();
    }
}

/* Returns the number of ipfix stats elements in
 * OFPTYPE_IPFIX_BRIDGE_STATS_REPLY or OFPTYPE_IPFIX_FLOW_STATS_REPLY
 * message 'oh'. */
size_t
ofputil_count_ipfix_stats(const struct ofp_header *oh)
{
    struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
    ofpraw_pull_assert(&b);

    return b.size / sizeof(struct ofputil_ipfix_stats);
}

/* Frees all of the "struct ofputil_bucket"s in the 'buckets' list.
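 * Afterward 'buckets' is an empty, still-valid list, so it is also safe to
 * call this on a list that is already empty (but initialized).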
 */
void
ofputil_bucket_list_destroy(struct ovs_list *buckets)
{
    struct ofputil_bucket *bucket;

    LIST_FOR_EACH_POP (bucket, list_node, buckets) {
        free(bucket->ofpacts);
        free(bucket);
    }
}

/* Clones 'bucket' and its ofpacts data. */
static struct ofputil_bucket *
ofputil_bucket_clone_data(const struct ofputil_bucket *bucket)
{
    struct ofputil_bucket *new;

    new = xmemdup(bucket, sizeof *bucket);
    new->ofpacts = xmemdup(bucket->ofpacts, bucket->ofpacts_len);

    return new;
}

/* Clones each of the buckets in the list 'src', appending them
 * in turn to 'dest', which should be an initialised list.
 * An exception is that if the pointer value of a bucket in 'src'
 * matches 'skip' then it is not cloned or appended to 'dest'.
 * This allows all of 'src', or all of 'src' except 'skip', to
 * be cloned and appended to 'dest'. */
void
ofputil_bucket_clone_list(struct ovs_list *dest, const struct ovs_list *src,
                          const struct ofputil_bucket *skip)
{
    struct ofputil_bucket *bucket;

    LIST_FOR_EACH (bucket, list_node, src) {
        struct ofputil_bucket *new_bucket;

        if (bucket == skip) {
            continue;
        }

        new_bucket = ofputil_bucket_clone_data(bucket);
        ovs_list_push_back(dest, &new_bucket->list_node);
    }
}

/* Finds a bucket in the list 'buckets' whose bucket id is 'bucket_id'.
 * Returns the first bucket found or NULL if no buckets are found. */
struct ofputil_bucket *
ofputil_bucket_find(const struct ovs_list *buckets, uint32_t bucket_id)
{
    struct ofputil_bucket *bucket;

    if (bucket_id > OFPG15_BUCKET_MAX) {
        return NULL;
    }

    LIST_FOR_EACH (bucket, list_node, buckets) {
        if (bucket->bucket_id == bucket_id) {
            return bucket;
        }
    }

    return NULL;
}

/* Returns true if more than one bucket in the list 'buckets'
 * has the same bucket id.  Returns false otherwise. */
bool
ofputil_bucket_check_duplicate_id(const struct ovs_list *buckets)
{
    struct ofputil_bucket *i, *j;

    LIST_FOR_EACH (i, list_node, buckets) {
        LIST_FOR_EACH_REVERSE (j, list_node, buckets) {
            if (i == j) {
                break;
            }
            if (i->bucket_id == j->bucket_id) {
                return true;
            }
        }
    }

    return false;
}

/* Returns the bucket at the front of the list 'buckets'.
 * Undefined if 'buckets' is empty. */
struct ofputil_bucket *
ofputil_bucket_list_front(const struct ovs_list *buckets)
{
    struct ofputil_bucket *bucket;

    ASSIGN_CONTAINER(bucket, ovs_list_front(buckets), list_node);

    return bucket;
}

/* Returns the bucket at the back of the list 'buckets'.
 * Undefined if 'buckets' is empty. */
struct ofputil_bucket *
ofputil_bucket_list_back(const struct ovs_list *buckets)
{
    struct ofputil_bucket *bucket;

    ASSIGN_CONTAINER(bucket, ovs_list_back(buckets), list_node);

    return bucket;
}

/* Returns an OpenFlow group stats request for OpenFlow version 'ofp_version',
 * that requests stats for group 'group_id'.  (Use OFPG_ALL to request stats
 * for all groups.)
 *
 * Group statistics include packet and byte counts for each group.
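 *
 * A sketch (version and group chosen by the caller):
 *
 *     struct ofpbuf *request
 *         = ofputil_encode_group_stats_request(OFP13_VERSION, OFPG_ALL);
 *     ...send 'request' to the switch...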
*/ struct ofpbuf * ofputil_encode_group_stats_request(enum ofp_version ofp_version, uint32_t group_id) { struct ofpbuf *request; switch (ofp_version) { case OFP10_VERSION: ovs_fatal(0, "dump-group-stats needs OpenFlow 1.1 or later " "(\'-O OpenFlow11\')"); case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp11_group_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST11_GROUP_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->group_id = htonl(group_id); break; } default: OVS_NOT_REACHED(); } return request; } void ofputil_uninit_group_desc(struct ofputil_group_desc *gd) { ofputil_bucket_list_destroy(&gd->buckets); ofputil_group_properties_destroy(&gd->props); } /* Decodes the OpenFlow group description request in 'oh', returning the group * whose description is requested, or OFPG_ALL if stats for all groups was * requested. */ uint32_t ofputil_decode_group_desc_request(const struct ofp_header *oh) { struct ofpbuf request = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&request); if (raw == OFPRAW_OFPST11_GROUP_DESC_REQUEST) { return OFPG_ALL; } else if (raw == OFPRAW_OFPST15_GROUP_DESC_REQUEST) { ovs_be32 *group_id = ofpbuf_pull(&request, sizeof *group_id); return ntohl(*group_id); } else { OVS_NOT_REACHED(); } } /* Returns an OpenFlow group description request for OpenFlow version * 'ofp_version', that requests stats for group 'group_id'. Use OFPG_ALL to * request stats for all groups (OpenFlow 1.4 and earlier always request all * groups). * * Group descriptions include the bucket and action configuration for each * group. */ struct ofpbuf * ofputil_encode_group_desc_request(enum ofp_version ofp_version, uint32_t group_id) { struct ofpbuf *request; switch (ofp_version) { case OFP10_VERSION: ovs_fatal(0, "dump-groups needs OpenFlow 1.1 or later " "(\'-O OpenFlow11\')"); case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: request = ofpraw_alloc(OFPRAW_OFPST11_GROUP_DESC_REQUEST, ofp_version, 0); break; case OFP15_VERSION: case OFP16_VERSION: { struct ofp15_group_desc_request *req; request = ofpraw_alloc(OFPRAW_OFPST15_GROUP_DESC_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->group_id = htonl(group_id); break; } default: OVS_NOT_REACHED(); } return request; } static void ofputil_group_bucket_counters_to_ofp11(const struct ofputil_group_stats *gs, struct ofp11_bucket_counter bucket_cnts[]) { int i; for (i = 0; i < gs->n_buckets; i++) { bucket_cnts[i].packet_count = htonll(gs->bucket_stats[i].packet_count); bucket_cnts[i].byte_count = htonll(gs->bucket_stats[i].byte_count); } } static void ofputil_group_stats_to_ofp11(const struct ofputil_group_stats *gs, struct ofp11_group_stats *gs11, size_t length, struct ofp11_bucket_counter bucket_cnts[]) { memset(gs11, 0, sizeof *gs11); gs11->length = htons(length); gs11->group_id = htonl(gs->group_id); gs11->ref_count = htonl(gs->ref_count); gs11->packet_count = htonll(gs->packet_count); gs11->byte_count = htonll(gs->byte_count); ofputil_group_bucket_counters_to_ofp11(gs, bucket_cnts); } static void ofputil_group_stats_to_ofp13(const struct ofputil_group_stats *gs, struct ofp13_group_stats *gs13, size_t length, struct ofp11_bucket_counter bucket_cnts[]) { ofputil_group_stats_to_ofp11(gs, &gs13->gs, length, bucket_cnts); gs13->duration_sec = htonl(gs->duration_sec); gs13->duration_nsec = htonl(gs->duration_nsec); } /* Encodes 'gs' properly for the 
format of the list of group statistics * replies already begun in 'replies' and appends it to the list. 'replies' * must have originally been initialized with ofpmp_init(). */ void ofputil_append_group_stats(struct ovs_list *replies, const struct ofputil_group_stats *gs) { size_t bucket_counter_size; struct ofp11_bucket_counter *bucket_counters; size_t length; bucket_counter_size = gs->n_buckets * sizeof(struct ofp11_bucket_counter); switch (ofpmp_version(replies)) { case OFP11_VERSION: case OFP12_VERSION:{ struct ofp11_group_stats *gs11; length = sizeof *gs11 + bucket_counter_size; gs11 = ofpmp_append(replies, length); bucket_counters = (struct ofp11_bucket_counter *)(gs11 + 1); ofputil_group_stats_to_ofp11(gs, gs11, length, bucket_counters); break; } case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp13_group_stats *gs13; length = sizeof *gs13 + bucket_counter_size; gs13 = ofpmp_append(replies, length); bucket_counters = (struct ofp11_bucket_counter *)(gs13 + 1); ofputil_group_stats_to_ofp13(gs, gs13, length, bucket_counters); break; } case OFP10_VERSION: default: OVS_NOT_REACHED(); } } /* Returns an OpenFlow group features request for OpenFlow version * 'ofp_version'. */ struct ofpbuf * ofputil_encode_group_features_request(enum ofp_version ofp_version) { struct ofpbuf *request = NULL; switch (ofp_version) { case OFP10_VERSION: case OFP11_VERSION: ovs_fatal(0, "dump-group-features needs OpenFlow 1.2 or later " "(\'-O OpenFlow12\')"); case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: request = ofpraw_alloc(OFPRAW_OFPST12_GROUP_FEATURES_REQUEST, ofp_version, 0); break; default: OVS_NOT_REACHED(); } return request; } /* Returns a OpenFlow message that encodes 'features' properly as a reply to * group features request 'request'. */ struct ofpbuf * ofputil_encode_group_features_reply( const struct ofputil_group_features *features, const struct ofp_header *request) { struct ofp12_group_features_stats *ogf; struct ofpbuf *reply; int i; reply = ofpraw_alloc_xid(OFPRAW_OFPST12_GROUP_FEATURES_REPLY, request->version, request->xid, 0); ogf = ofpbuf_put_zeros(reply, sizeof *ogf); ogf->types = htonl(features->types); ogf->capabilities = htonl(features->capabilities); for (i = 0; i < OFPGT12_N_TYPES; i++) { ogf->max_groups[i] = htonl(features->max_groups[i]); ogf->actions[i] = ofpact_bitmap_to_openflow(features->ofpacts[i], request->version); } return reply; } /* Decodes group features reply 'oh' into 'features'. */ void ofputil_decode_group_features_reply(const struct ofp_header *oh, struct ofputil_group_features *features) { const struct ofp12_group_features_stats *ogf = ofpmsg_body(oh); int i; features->types = ntohl(ogf->types); features->capabilities = ntohl(ogf->capabilities); for (i = 0; i < OFPGT12_N_TYPES; i++) { features->max_groups[i] = ntohl(ogf->max_groups[i]); features->ofpacts[i] = ofpact_bitmap_from_openflow( ogf->actions[i], oh->version); } } /* Parse a group status request message into a 32 bit OpenFlow 1.1 * group ID and stores the latter in '*group_id'. * Returns 0 if successful, otherwise an OFPERR_* number. */ enum ofperr ofputil_decode_group_stats_request(const struct ofp_header *request, uint32_t *group_id) { const struct ofp11_group_stats_request *gsr11 = ofpmsg_body(request); *group_id = ntohl(gsr11->group_id); return 0; } /* Converts a group stats reply in 'msg' into an abstract ofputil_group_stats * in 'gs'. 
Assigns freshly allocated memory to gs->bucket_stats for the
 * caller to eventually free.
 *
 * Multiple group stats replies can be packed into a single OpenFlow message.
 * Calling this function multiple times for a single 'msg' iterates through
 * the replies.  The caller must initially leave 'msg''s layer pointers null
 * and not modify them between calls.
 *
 * Returns 0 if successful, EOF if no replies were left in this 'msg',
 * otherwise a positive errno value. */
int
ofputil_decode_group_stats_reply(struct ofpbuf *msg,
                                 struct ofputil_group_stats *gs)
{
    struct ofp11_bucket_counter *obc;
    struct ofp11_group_stats *ogs11;
    enum ofpraw raw;
    enum ofperr error;
    size_t base_len;
    size_t length;
    size_t i;

    gs->bucket_stats = NULL;
    error = (msg->header ? ofpraw_decode(&raw, msg->header)
             : ofpraw_pull(&raw, msg));
    if (error) {
        return error;
    }

    if (!msg->size) {
        return EOF;
    }

    if (raw == OFPRAW_OFPST11_GROUP_REPLY) {
        base_len = sizeof *ogs11;
        ogs11 = ofpbuf_try_pull(msg, sizeof *ogs11);
        gs->duration_sec = gs->duration_nsec = UINT32_MAX;
    } else if (raw == OFPRAW_OFPST13_GROUP_REPLY) {
        struct ofp13_group_stats *ogs13;

        base_len = sizeof *ogs13;
        ogs13 = ofpbuf_try_pull(msg, sizeof *ogs13);
        if (ogs13) {
            ogs11 = &ogs13->gs;
            gs->duration_sec = ntohl(ogs13->duration_sec);
            gs->duration_nsec = ntohl(ogs13->duration_nsec);
        } else {
            ogs11 = NULL;
        }
    } else {
        OVS_NOT_REACHED();
    }

    if (!ogs11) {
        VLOG_WARN_RL(&bad_ofmsg_rl,
                     "%s reply has %"PRIu32" leftover bytes at end",
                     ofpraw_get_name(raw), msg->size);
        return OFPERR_OFPBRC_BAD_LEN;
    }

    length = ntohs(ogs11->length);
    if (length < base_len) {
        /* The claimed length must cover at least the fixed-size part of the
         * stats entry. */
        VLOG_WARN_RL(&bad_ofmsg_rl,
                     "%s reply claims invalid length %"PRIuSIZE,
                     ofpraw_get_name(raw), length);
        return OFPERR_OFPBRC_BAD_LEN;
    }

    gs->group_id = ntohl(ogs11->group_id);
    gs->ref_count = ntohl(ogs11->ref_count);
    gs->packet_count = ntohll(ogs11->packet_count);
    gs->byte_count = ntohll(ogs11->byte_count);

    gs->n_buckets = (length - base_len) / sizeof *obc;
    obc = ofpbuf_try_pull(msg, gs->n_buckets * sizeof *obc);
    if (!obc) {
        VLOG_WARN_RL(&bad_ofmsg_rl,
                     "%s reply has %"PRIu32" leftover bytes at end",
                     ofpraw_get_name(raw), msg->size);
        return OFPERR_OFPBRC_BAD_LEN;
    }

    gs->bucket_stats = xmalloc(gs->n_buckets * sizeof *gs->bucket_stats);
    for (i = 0; i < gs->n_buckets; i++) {
        gs->bucket_stats[i].packet_count = ntohll(obc[i].packet_count);
        gs->bucket_stats[i].byte_count = ntohll(obc[i].byte_count);
    }

    return 0;
}

static void
ofputil_put_ofp11_bucket(const struct ofputil_bucket *bucket,
                         struct ofpbuf *openflow, enum ofp_version ofp_version)
{
    struct ofp11_bucket *ob;
    size_t start;

    start = openflow->size;
    ofpbuf_put_zeros(openflow, sizeof *ob);
    ofpacts_put_openflow_actions(bucket->ofpacts, bucket->ofpacts_len,
                                 openflow, ofp_version);
    ob = ofpbuf_at_assert(openflow, start, sizeof *ob);
    ob->len = htons(openflow->size - start);
    ob->weight = htons(bucket->weight);
    ob->watch_port = ofputil_port_to_ofp11(bucket->watch_port);
    ob->watch_group = htonl(bucket->watch_group);
}

static void
ofputil_put_ofp15_bucket(const struct ofputil_bucket *bucket,
                         uint32_t bucket_id, enum ofp11_group_type group_type,
                         struct ofpbuf *openflow, enum ofp_version ofp_version)
{
    struct ofp15_bucket *ob;
    size_t start, actions_start, actions_len;

    start = openflow->size;
    ofpbuf_put_zeros(openflow, sizeof *ob);

    actions_start = openflow->size;
    ofpacts_put_openflow_actions(bucket->ofpacts, bucket->ofpacts_len,
                                 openflow, ofp_version);
    actions_len = openflow->size - actions_start;

    if (group_type == OFPGT11_SELECT) {
        ofpprop_put_u16(openflow, OFPGBPT15_WEIGHT, bucket->weight);
    }
    if
(bucket->watch_port != OFPP_ANY) { ofpprop_put_be32(openflow, OFPGBPT15_WATCH_PORT, ofputil_port_to_ofp11(bucket->watch_port)); } if (bucket->watch_group != OFPG_ANY) { ofpprop_put_u32(openflow, OFPGBPT15_WATCH_GROUP, bucket->watch_group); } ob = ofpbuf_at_assert(openflow, start, sizeof *ob); ob->len = htons(openflow->size - start); ob->action_array_len = htons(actions_len); ob->bucket_id = htonl(bucket_id); } static void ofputil_put_group_prop_ntr_selection_method(enum ofp_version ofp_version, const struct ofputil_group_props *gp, struct ofpbuf *openflow) { struct ntr_group_prop_selection_method *prop; size_t start; start = openflow->size; ofpbuf_put_zeros(openflow, sizeof *prop); oxm_put_field_array(openflow, &gp->fields, ofp_version); prop = ofpbuf_at_assert(openflow, start, sizeof *prop); prop->type = htons(OFPGPT15_EXPERIMENTER); prop->experimenter = htonl(NTR_VENDOR_ID); prop->exp_type = htonl(NTRT_SELECTION_METHOD); strcpy(prop->selection_method, gp->selection_method); prop->selection_method_param = htonll(gp->selection_method_param); ofpprop_end(openflow, start); } static void ofputil_append_ofp11_group_desc_reply(const struct ofputil_group_desc *gds, const struct ovs_list *buckets, struct ovs_list *replies, enum ofp_version version) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); struct ofp11_group_desc_stats *ogds; struct ofputil_bucket *bucket; size_t start_ogds; start_ogds = reply->size; ofpbuf_put_zeros(reply, sizeof *ogds); LIST_FOR_EACH (bucket, list_node, buckets) { ofputil_put_ofp11_bucket(bucket, reply, version); } ogds = ofpbuf_at_assert(reply, start_ogds, sizeof *ogds); ogds->length = htons(reply->size - start_ogds); ogds->type = gds->type; ogds->group_id = htonl(gds->group_id); ofpmp_postappend(replies, start_ogds); } static void ofputil_append_ofp15_group_desc_reply(const struct ofputil_group_desc *gds, const struct ovs_list *buckets, struct ovs_list *replies, enum ofp_version version) { struct ofpbuf *reply = ofpbuf_from_list(ovs_list_back(replies)); struct ofp15_group_desc_stats *ogds; struct ofputil_bucket *bucket; size_t start_ogds, start_buckets; start_ogds = reply->size; ofpbuf_put_zeros(reply, sizeof *ogds); start_buckets = reply->size; LIST_FOR_EACH (bucket, list_node, buckets) { ofputil_put_ofp15_bucket(bucket, bucket->bucket_id, gds->type, reply, version); } ogds = ofpbuf_at_assert(reply, start_ogds, sizeof *ogds); ogds->type = gds->type; ogds->group_id = htonl(gds->group_id); ogds->bucket_list_len = htons(reply->size - start_buckets); /* Add group properties */ if (gds->props.selection_method[0]) { ofputil_put_group_prop_ntr_selection_method(version, &gds->props, reply); } ogds = ofpbuf_at_assert(reply, start_ogds, sizeof *ogds); ogds->length = htons(reply->size - start_ogds); ofpmp_postappend(replies, start_ogds); } /* Appends a group stats reply that contains the data in 'gds' to those already * present in the list of ofpbufs in 'replies'. 'replies' should have been * initialized with ofpmp_init(). 
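 *
 * A sketch (the request header 'oh' and the per-group loop are assumptions):
 *
 *     struct ovs_list replies;
 *
 *     ofpmp_init(&replies, oh);
 *     ...for each group, fill in 'gds' and call
 *        ofputil_append_group_desc_reply(&gds, &gds.buckets, &replies)...
 *     ...then send every ofpbuf left in 'replies'...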
*/ void ofputil_append_group_desc_reply(const struct ofputil_group_desc *gds, const struct ovs_list *buckets, struct ovs_list *replies) { enum ofp_version version = ofpmp_version(replies); switch (version) { case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: ofputil_append_ofp11_group_desc_reply(gds, buckets, replies, version); break; case OFP15_VERSION: case OFP16_VERSION: ofputil_append_ofp15_group_desc_reply(gds, buckets, replies, version); break; case OFP10_VERSION: default: OVS_NOT_REACHED(); } } static enum ofperr ofputil_pull_ofp11_buckets(struct ofpbuf *msg, size_t buckets_length, enum ofp_version version, struct ovs_list *buckets) { struct ofp11_bucket *ob; uint32_t bucket_id = 0; ovs_list_init(buckets); while (buckets_length > 0) { struct ofputil_bucket *bucket; struct ofpbuf ofpacts; enum ofperr error; size_t ob_len; ob = (buckets_length >= sizeof *ob ? ofpbuf_try_pull(msg, sizeof *ob) : NULL); if (!ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "buckets end with %"PRIuSIZE" leftover bytes", buckets_length); ofputil_bucket_list_destroy(buckets); return OFPERR_OFPGMFC_BAD_BUCKET; } ob_len = ntohs(ob->len); if (ob_len < sizeof *ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length " "%"PRIuSIZE" is not valid", ob_len); ofputil_bucket_list_destroy(buckets); return OFPERR_OFPGMFC_BAD_BUCKET; } else if (ob_len > buckets_length) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length " "%"PRIuSIZE" exceeds remaining buckets data size %"PRIuSIZE, ob_len, buckets_length); ofputil_bucket_list_destroy(buckets); return OFPERR_OFPGMFC_BAD_BUCKET; } buckets_length -= ob_len; ofpbuf_init(&ofpacts, 0); error = ofpacts_pull_openflow_actions(msg, ob_len - sizeof *ob, version, NULL, NULL, &ofpacts); if (error) { ofpbuf_uninit(&ofpacts); ofputil_bucket_list_destroy(buckets); return error; } bucket = xzalloc(sizeof *bucket); bucket->weight = ntohs(ob->weight); error = ofputil_port_from_ofp11(ob->watch_port, &bucket->watch_port); if (error) { ofpbuf_uninit(&ofpacts); ofputil_bucket_list_destroy(buckets); free(bucket); return OFPERR_OFPGMFC_BAD_WATCH; } bucket->watch_group = ntohl(ob->watch_group); bucket->bucket_id = bucket_id++; bucket->ofpacts = ofpbuf_steal_data(&ofpacts); bucket->ofpacts_len = ofpacts.size; ovs_list_push_back(buckets, &bucket->list_node); } return 0; } static enum ofperr ofputil_pull_ofp15_buckets(struct ofpbuf *msg, size_t buckets_length, enum ofp_version version, uint8_t group_type, struct ovs_list *buckets) { struct ofp15_bucket *ob; ovs_list_init(buckets); while (buckets_length > 0) { struct ofputil_bucket *bucket = NULL; struct ofpbuf ofpacts; enum ofperr err = OFPERR_OFPGMFC_BAD_BUCKET; size_t ob_len, actions_len, properties_len; ovs_be32 watch_port = ofputil_port_to_ofp11(OFPP_ANY); ovs_be32 watch_group = htonl(OFPG_ANY); ovs_be16 weight = htons(group_type == OFPGT11_SELECT ? 
1 : 0); ofpbuf_init(&ofpacts, 0); ob = ofpbuf_try_pull(msg, sizeof *ob); if (!ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "buckets end with %"PRIuSIZE " leftover bytes", buckets_length); goto err; } ob_len = ntohs(ob->len); actions_len = ntohs(ob->action_array_len); if (ob_len < sizeof *ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length " "%"PRIuSIZE" is not valid", ob_len); goto err; } else if (ob_len > buckets_length) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket length " "%"PRIuSIZE" exceeds remaining buckets data size %" PRIuSIZE, ob_len, buckets_length); goto err; } else if (actions_len > ob_len - sizeof *ob) { VLOG_WARN_RL(&bad_ofmsg_rl, "OpenFlow message bucket actions " "length %"PRIuSIZE" exceeds remaining bucket " "data size %"PRIuSIZE, actions_len, ob_len - sizeof *ob); goto err; } buckets_length -= ob_len; err = ofpacts_pull_openflow_actions(msg, actions_len, version, NULL, NULL, &ofpacts); if (err) { goto err; } properties_len = ob_len - sizeof *ob - actions_len; struct ofpbuf properties = ofpbuf_const_initializer( ofpbuf_pull(msg, properties_len), properties_len); while (properties.size > 0) { struct ofpbuf payload; uint64_t type; err = ofpprop_pull(&properties, &payload, &type); if (err) { goto err; } switch (type) { case OFPGBPT15_WEIGHT: err = ofpprop_parse_be16(&payload, &weight); break; case OFPGBPT15_WATCH_PORT: err = ofpprop_parse_be32(&payload, &watch_port); break; case OFPGBPT15_WATCH_GROUP: err = ofpprop_parse_be32(&payload, &watch_group); break; default: err = OFPPROP_UNKNOWN(false, "group bucket", type); break; } if (err) { goto err; } } bucket = xzalloc(sizeof *bucket); bucket->weight = ntohs(weight); err = ofputil_port_from_ofp11(watch_port, &bucket->watch_port); if (err) { err = OFPERR_OFPGMFC_BAD_WATCH; goto err; } bucket->watch_group = ntohl(watch_group); bucket->bucket_id = ntohl(ob->bucket_id); if (bucket->bucket_id > OFPG15_BUCKET_MAX) { VLOG_WARN_RL(&bad_ofmsg_rl, "bucket id (%u) is out of range", bucket->bucket_id); err = OFPERR_OFPGMFC_BAD_BUCKET; goto err; } bucket->ofpacts = ofpbuf_steal_data(&ofpacts); bucket->ofpacts_len = ofpacts.size; ovs_list_push_back(buckets, &bucket->list_node); continue; err: free(bucket); ofpbuf_uninit(&ofpacts); ofputil_bucket_list_destroy(buckets); return err; } if (ofputil_bucket_check_duplicate_id(buckets)) { VLOG_WARN_RL(&bad_ofmsg_rl, "Duplicate bucket id"); ofputil_bucket_list_destroy(buckets); return OFPERR_OFPGMFC_BAD_BUCKET; } return 0; } static void ofputil_init_group_properties(struct ofputil_group_props *gp) { memset(gp, 0, sizeof *gp); } void ofputil_group_properties_copy(struct ofputil_group_props *to, const struct ofputil_group_props *from) { *to = *from; to->fields.values = xmemdup(from->fields.values, from->fields.values_size); } void ofputil_group_properties_destroy(struct ofputil_group_props *gp) { free(gp->fields.values); } static enum ofperr parse_group_prop_ntr_selection_method(struct ofpbuf *payload, enum ofp11_group_type group_type, enum ofp15_group_mod_command group_cmd, struct ofputil_group_props *gp) { struct ntr_group_prop_selection_method *prop = payload->data; size_t fields_len, method_len; enum ofperr error; switch (group_type) { case OFPGT11_SELECT: break; case OFPGT11_ALL: case OFPGT11_INDIRECT: case OFPGT11_FF: OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method property is " "only allowed for select groups"); return OFPERR_OFPBPC_BAD_VALUE; default: return OFPERR_OFPGMFC_BAD_TYPE; } switch (group_cmd) { case OFPGC15_ADD: case OFPGC15_MODIFY: case OFPGC15_ADD_OR_MOD: 
        break;
    case OFPGC15_DELETE:
    case OFPGC15_INSERT_BUCKET:
    case OFPGC15_REMOVE_BUCKET:
        OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method property is "
                    "only allowed for add and modify group modifications");
        return OFPERR_OFPBPC_BAD_VALUE;
    default:
        return OFPERR_OFPGMFC_BAD_COMMAND;
    }

    if (payload->size < sizeof *prop) {
        OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method property "
                    "length %u is not valid", payload->size);
        return OFPERR_OFPBPC_BAD_LEN;
    }

    method_len = strnlen(prop->selection_method, NTR_MAX_SELECTION_METHOD_LEN);
    if (method_len == NTR_MAX_SELECTION_METHOD_LEN) {
        OFPPROP_LOG(&bad_ofmsg_rl, false,
                    "ntr selection method is not null terminated");
        return OFPERR_OFPBPC_BAD_VALUE;
    }

    if (strcmp("hash", prop->selection_method)
        && strcmp("dp_hash", prop->selection_method)) {
        OFPPROP_LOG(&bad_ofmsg_rl, false,
                    "ntr selection method '%s' is not supported",
                    prop->selection_method);
        return OFPERR_OFPBPC_BAD_VALUE;
    }

    /* 'selection_method' is now known to be NUL terminated and non-empty. */
    strcpy(gp->selection_method, prop->selection_method);
    gp->selection_method_param = ntohll(prop->selection_method_param);

    ofpbuf_pull(payload, sizeof *prop);

    fields_len = ntohs(prop->length) - sizeof *prop;
    if (fields_len && strcmp("hash", gp->selection_method)) {
        OFPPROP_LOG(&bad_ofmsg_rl, false, "ntr selection method %s "
                    "does not support fields", gp->selection_method);
        return OFPERR_OFPBPC_BAD_VALUE;
    }

    error = oxm_pull_field_array(payload->data, fields_len, &gp->fields);
    if (error) {
        OFPPROP_LOG(&bad_ofmsg_rl, false,
                    "ntr selection method fields are invalid");
        return error;
    }

    return 0;
}

static enum ofperr
parse_ofp15_group_properties(struct ofpbuf *msg,
                             enum ofp11_group_type group_type,
                             enum ofp15_group_mod_command group_cmd,
                             struct ofputil_group_props *gp,
                             size_t properties_len)
{
    struct ofpbuf properties = ofpbuf_const_initializer(
        ofpbuf_pull(msg, properties_len), properties_len);

    while (properties.size > 0) {
        struct ofpbuf payload;
        enum ofperr error;
        uint64_t type;

        error = ofpprop_pull(&properties, &payload, &type);
        if (error) {
            return error;
        }

        switch (type) {
        case OFPPROP_EXP(NTR_VENDOR_ID, NTRT_SELECTION_METHOD):
        case OFPPROP_EXP(NTR_COMPAT_VENDOR_ID, NTRT_SELECTION_METHOD):
            error = parse_group_prop_ntr_selection_method(&payload, group_type,
                                                          group_cmd, gp);
            break;

        default:
            error = OFPPROP_UNKNOWN(false, "group", type);
            break;
        }

        if (error) {
            return error;
        }
    }

    return 0;
}

static int
ofputil_decode_ofp11_group_desc_reply(struct ofputil_group_desc *gd,
                                      struct ofpbuf *msg,
                                      enum ofp_version version)
{
    struct ofp11_group_desc_stats *ogds;
    size_t length;

    if (!msg->header) {
        ofpraw_pull_assert(msg);
    }

    if (!msg->size) {
        return EOF;
    }

    ogds = ofpbuf_try_pull(msg, sizeof *ogds);
    if (!ogds) {
        VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST11_GROUP_DESC reply has %"PRIu32" "
                     "leftover bytes at end", msg->size);
        return OFPERR_OFPBRC_BAD_LEN;
    }
    gd->type = ogds->type;
    gd->group_id = ntohl(ogds->group_id);

    length = ntohs(ogds->length);
    if (length < sizeof *ogds || length - sizeof *ogds > msg->size) {
        VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST11_GROUP_DESC reply claims invalid "
                     "length %"PRIuSIZE, length);
        return OFPERR_OFPBRC_BAD_LEN;
    }

    return ofputil_pull_ofp11_buckets(msg, length - sizeof *ogds, version,
                                      &gd->buckets);
}

static int
ofputil_decode_ofp15_group_desc_reply(struct ofputil_group_desc *gd,
                                      struct ofpbuf *msg,
                                      enum ofp_version version)
{
    struct ofp15_group_desc_stats *ogds;
    uint16_t length, bucket_list_len;
    int error;

    if (!msg->header) {
        ofpraw_pull_assert(msg);
    }

    if (!msg->size) {
        return EOF;
    }

    ogds = ofpbuf_try_pull(msg, sizeof *ogds);
    if (!ogds) {
VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST11_GROUP_DESC reply has %"PRIu32" " "leftover bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } gd->type = ogds->type; gd->group_id = ntohl(ogds->group_id); length = ntohs(ogds->length); if (length < sizeof *ogds || length - sizeof *ogds > msg->size) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST11_GROUP_DESC reply claims invalid " "length %u", length); return OFPERR_OFPBRC_BAD_LEN; } bucket_list_len = ntohs(ogds->bucket_list_len); if (length < bucket_list_len + sizeof *ogds) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST11_GROUP_DESC reply claims invalid " "bucket list length %u", bucket_list_len); return OFPERR_OFPBRC_BAD_LEN; } error = ofputil_pull_ofp15_buckets(msg, bucket_list_len, version, gd->type, &gd->buckets); if (error) { return error; } /* By definition group desc messages don't have a group mod command. * However, parse_group_prop_ntr_selection_method() checks to make sure * that the command is OFPGC15_ADD or OFPGC15_DELETE to guard * against group mod messages with other commands supplying * a NTR selection method group experimenter property. * Such properties are valid for group desc replies so * claim that the group mod command is OFPGC15_ADD to * satisfy the check in parse_group_prop_ntr_selection_method() */ error = parse_ofp15_group_properties( msg, gd->type, OFPGC15_ADD, &gd->props, length - sizeof *ogds - bucket_list_len); if (error) { ofputil_bucket_list_destroy(&gd->buckets); } return error; } /* Converts a group description reply in 'msg' into an abstract * ofputil_group_desc in 'gd'. * * Multiple group description replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. The caller must initially leave 'msg''s layer pointers * null and not modify them between calls. * * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. 
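 *
 * One plausible caller loop, as a sketch (EOF and hard errors are folded
 * together here, the use of 'gd' is elided, and the cleanup helpers shown
 * are the ones defined in this file):
 *
 *     struct ofputil_group_desc gd;
 *     while (!ofputil_decode_group_desc_reply(&gd, msg, version)) {
 *         ...use 'gd'...
 *         ofputil_bucket_list_destroy(&gd.buckets);
 *         ofputil_group_properties_destroy(&gd.props);
 *     }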
*/ int ofputil_decode_group_desc_reply(struct ofputil_group_desc *gd, struct ofpbuf *msg, enum ofp_version version) { ofputil_init_group_properties(&gd->props); switch (version) { case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: return ofputil_decode_ofp11_group_desc_reply(gd, msg, version); case OFP15_VERSION: case OFP16_VERSION: return ofputil_decode_ofp15_group_desc_reply(gd, msg, version); case OFP10_VERSION: default: OVS_NOT_REACHED(); } } void ofputil_uninit_group_mod(struct ofputil_group_mod *gm) { ofputil_bucket_list_destroy(&gm->buckets); ofputil_group_properties_destroy(&gm->props); } static struct ofpbuf * ofputil_encode_ofp11_group_mod(enum ofp_version ofp_version, const struct ofputil_group_mod *gm) { struct ofpbuf *b; struct ofp11_group_mod *ogm; size_t start_ogm; struct ofputil_bucket *bucket; b = ofpraw_alloc(OFPRAW_OFPT11_GROUP_MOD, ofp_version, 0); start_ogm = b->size; ofpbuf_put_zeros(b, sizeof *ogm); LIST_FOR_EACH (bucket, list_node, &gm->buckets) { ofputil_put_ofp11_bucket(bucket, b, ofp_version); } ogm = ofpbuf_at_assert(b, start_ogm, sizeof *ogm); ogm->command = htons(gm->command); ogm->type = gm->type; ogm->group_id = htonl(gm->group_id); return b; } static struct ofpbuf * ofputil_encode_ofp15_group_mod(enum ofp_version ofp_version, const struct ofputil_group_mod *gm) { struct ofpbuf *b; struct ofp15_group_mod *ogm; size_t start_ogm; struct ofputil_bucket *bucket; struct id_pool *bucket_ids = NULL; b = ofpraw_alloc(OFPRAW_OFPT15_GROUP_MOD, ofp_version, 0); start_ogm = b->size; ofpbuf_put_zeros(b, sizeof *ogm); LIST_FOR_EACH (bucket, list_node, &gm->buckets) { uint32_t bucket_id; /* Generate a bucket id if none was supplied */ if (bucket->bucket_id > OFPG15_BUCKET_MAX) { if (!bucket_ids) { const struct ofputil_bucket *bkt; bucket_ids = id_pool_create(0, OFPG15_BUCKET_MAX + 1); /* Mark all bucket_ids that are present in gm * as used in the pool. 
*/ LIST_FOR_EACH_REVERSE (bkt, list_node, &gm->buckets) { if (bkt == bucket) { break; } if (bkt->bucket_id <= OFPG15_BUCKET_MAX) { id_pool_add(bucket_ids, bkt->bucket_id); } } } if (!id_pool_alloc_id(bucket_ids, &bucket_id)) { OVS_NOT_REACHED(); } } else { bucket_id = bucket->bucket_id; } ofputil_put_ofp15_bucket(bucket, bucket_id, gm->type, b, ofp_version); } ogm = ofpbuf_at_assert(b, start_ogm, sizeof *ogm); ogm->command = htons(gm->command); ogm->type = gm->type; ogm->group_id = htonl(gm->group_id); ogm->command_bucket_id = htonl(gm->command_bucket_id); ogm->bucket_array_len = htons(b->size - start_ogm - sizeof *ogm); /* Add group properties */ if (gm->props.selection_method[0]) { ofputil_put_group_prop_ntr_selection_method(ofp_version, &gm->props, b); } id_pool_destroy(bucket_ids); return b; } static void bad_group_cmd(enum ofp15_group_mod_command cmd) { const char *opt_version; const char *version; const char *cmd_str; switch (cmd) { case OFPGC15_ADD: case OFPGC15_MODIFY: case OFPGC15_ADD_OR_MOD: case OFPGC15_DELETE: version = "1.1"; opt_version = "11"; break; case OFPGC15_INSERT_BUCKET: case OFPGC15_REMOVE_BUCKET: version = "1.5"; opt_version = "15"; break; default: OVS_NOT_REACHED(); } switch (cmd) { case OFPGC15_ADD: cmd_str = "add-group"; break; case OFPGC15_MODIFY: case OFPGC15_ADD_OR_MOD: cmd_str = "mod-group"; break; case OFPGC15_DELETE: cmd_str = "del-group"; break; case OFPGC15_INSERT_BUCKET: cmd_str = "insert-bucket"; break; case OFPGC15_REMOVE_BUCKET: cmd_str = "remove-bucket"; break; default: OVS_NOT_REACHED(); } ovs_fatal(0, "%s needs OpenFlow %s or later (\'-O OpenFlow%s\')", cmd_str, version, opt_version); } /* Converts abstract group mod 'gm' into a message for OpenFlow version * 'ofp_version' and returns the message. */ struct ofpbuf * ofputil_encode_group_mod(enum ofp_version ofp_version, const struct ofputil_group_mod *gm) { switch (ofp_version) { case OFP10_VERSION: bad_group_cmd(gm->command); case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: if (gm->command > OFPGC11_DELETE && gm->command != OFPGC11_ADD_OR_MOD) { bad_group_cmd(gm->command); } return ofputil_encode_ofp11_group_mod(ofp_version, gm); case OFP15_VERSION: case OFP16_VERSION: return ofputil_encode_ofp15_group_mod(ofp_version, gm); default: OVS_NOT_REACHED(); } } static enum ofperr ofputil_pull_ofp11_group_mod(struct ofpbuf *msg, enum ofp_version ofp_version, struct ofputil_group_mod *gm) { const struct ofp11_group_mod *ogm; enum ofperr error; ogm = ofpbuf_pull(msg, sizeof *ogm); gm->command = ntohs(ogm->command); gm->type = ogm->type; gm->group_id = ntohl(ogm->group_id); gm->command_bucket_id = OFPG15_BUCKET_ALL; error = ofputil_pull_ofp11_buckets(msg, msg->size, ofp_version, &gm->buckets); /* OF1.3.5+ prescribes an error when an OFPGC_DELETE includes buckets. 
*/ if (!error && ofp_version >= OFP13_VERSION && gm->command == OFPGC11_DELETE && !ovs_list_is_empty(&gm->buckets)) { error = OFPERR_OFPGMFC_INVALID_GROUP; ofputil_bucket_list_destroy(&gm->buckets); } return error; } static enum ofperr ofputil_pull_ofp15_group_mod(struct ofpbuf *msg, enum ofp_version ofp_version, struct ofputil_group_mod *gm) { const struct ofp15_group_mod *ogm; uint16_t bucket_list_len; enum ofperr error = OFPERR_OFPGMFC_BAD_BUCKET; ogm = ofpbuf_pull(msg, sizeof *ogm); gm->command = ntohs(ogm->command); gm->type = ogm->type; gm->group_id = ntohl(ogm->group_id); gm->command_bucket_id = ntohl(ogm->command_bucket_id); switch (gm->command) { case OFPGC15_REMOVE_BUCKET: if (gm->command_bucket_id == OFPG15_BUCKET_ALL) { error = 0; } /* Fall through */ case OFPGC15_INSERT_BUCKET: if (gm->command_bucket_id <= OFPG15_BUCKET_MAX || gm->command_bucket_id == OFPG15_BUCKET_FIRST || gm->command_bucket_id == OFPG15_BUCKET_LAST) { error = 0; } break; case OFPGC11_ADD: case OFPGC11_MODIFY: case OFPGC11_ADD_OR_MOD: case OFPGC11_DELETE: default: if (gm->command_bucket_id == OFPG15_BUCKET_ALL) { error = 0; } break; } if (error) { VLOG_WARN_RL(&bad_ofmsg_rl, "group command bucket id (%u) is out of range", gm->command_bucket_id); return OFPERR_OFPGMFC_BAD_BUCKET; } bucket_list_len = ntohs(ogm->bucket_array_len); if (bucket_list_len > msg->size) { return OFPERR_OFPBRC_BAD_LEN; } error = ofputil_pull_ofp15_buckets(msg, bucket_list_len, ofp_version, gm->type, &gm->buckets); if (error) { return error; } error = parse_ofp15_group_properties(msg, gm->type, gm->command, &gm->props, msg->size); if (error) { ofputil_bucket_list_destroy(&gm->buckets); } return error; } static enum ofperr ofputil_check_group_mod(const struct ofputil_group_mod *gm) { switch (gm->type) { case OFPGT11_INDIRECT: if (gm->command != OFPGC11_DELETE && !ovs_list_is_singleton(&gm->buckets) ) { return OFPERR_OFPGMFC_INVALID_GROUP; } break; case OFPGT11_ALL: case OFPGT11_SELECT: case OFPGT11_FF: break; default: return OFPERR_OFPGMFC_BAD_TYPE; } switch (gm->command) { case OFPGC11_ADD: case OFPGC11_MODIFY: case OFPGC11_ADD_OR_MOD: case OFPGC11_DELETE: case OFPGC15_INSERT_BUCKET: break; case OFPGC15_REMOVE_BUCKET: if (!ovs_list_is_empty(&gm->buckets)) { return OFPERR_OFPGMFC_BAD_BUCKET; } break; default: return OFPERR_OFPGMFC_BAD_COMMAND; } struct ofputil_bucket *bucket; LIST_FOR_EACH (bucket, list_node, &gm->buckets) { if (bucket->weight && gm->type != OFPGT11_SELECT) { return OFPERR_OFPGMFC_INVALID_GROUP; } switch (gm->type) { case OFPGT11_ALL: case OFPGT11_INDIRECT: if (ofputil_bucket_has_liveness(bucket)) { return OFPERR_OFPGMFC_WATCH_UNSUPPORTED; } break; case OFPGT11_SELECT: break; case OFPGT11_FF: if (!ofputil_bucket_has_liveness(bucket)) { return OFPERR_OFPGMFC_INVALID_GROUP; } break; default: /* Returning BAD TYPE to be consistent * though gm->type has been checked already. */ return OFPERR_OFPGMFC_BAD_TYPE; } } return 0; } /* Converts OpenFlow group mod message 'oh' into an abstract group mod in * 'gm'. Returns 0 if successful, otherwise an OpenFlow error code. 
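 *
 * On success the caller owns the buckets and properties stored into 'gm' and
 * should eventually release them with ofputil_uninit_group_mod().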
*/ enum ofperr ofputil_decode_group_mod(const struct ofp_header *oh, struct ofputil_group_mod *gm) { ofputil_init_group_properties(&gm->props); enum ofp_version ofp_version = oh->version; struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&msg); enum ofperr err; switch (ofp_version) { case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: err = ofputil_pull_ofp11_group_mod(&msg, ofp_version, gm); break; case OFP15_VERSION: case OFP16_VERSION: err = ofputil_pull_ofp15_group_mod(&msg, ofp_version, gm); break; case OFP10_VERSION: default: OVS_NOT_REACHED(); } if (err) { return err; } err = ofputil_check_group_mod(gm); if (err) { ofputil_uninit_group_mod(gm); } return err; } /* Destroys 'bms'. */ void ofputil_free_bundle_msgs(struct ofputil_bundle_msg *bms, size_t n_bms) { for (size_t i = 0; i < n_bms; i++) { switch ((int)bms[i].type) { case OFPTYPE_FLOW_MOD: free(CONST_CAST(struct ofpact *, bms[i].fm.ofpacts)); break; case OFPTYPE_GROUP_MOD: ofputil_uninit_group_mod(&bms[i].gm); break; case OFPTYPE_PACKET_OUT: free(bms[i].po.ofpacts); free(CONST_CAST(void *, bms[i].po.packet)); break; default: break; } } free(bms); } void ofputil_encode_bundle_msgs(const struct ofputil_bundle_msg *bms, size_t n_bms, struct ovs_list *requests, enum ofputil_protocol protocol) { enum ofp_version version = ofputil_protocol_to_ofp_version(protocol); for (size_t i = 0; i < n_bms; i++) { struct ofpbuf *request = NULL; switch ((int)bms[i].type) { case OFPTYPE_FLOW_MOD: request = ofputil_encode_flow_mod(&bms[i].fm, protocol); break; case OFPTYPE_GROUP_MOD: request = ofputil_encode_group_mod(version, &bms[i].gm); break; case OFPTYPE_PACKET_OUT: request = ofputil_encode_packet_out(&bms[i].po, protocol); break; default: break; } if (request) { ovs_list_push_back(requests, &request->list_node); } } } /* Parse a queue status request message into 'oqsr'. * Returns 0 if successful, otherwise an OFPERR_* number. */ enum ofperr ofputil_decode_queue_stats_request(const struct ofp_header *request, struct ofputil_queue_stats_request *oqsr) { switch ((enum ofp_version)request->version) { case OFP16_VERSION: case OFP15_VERSION: case OFP14_VERSION: case OFP13_VERSION: case OFP12_VERSION: case OFP11_VERSION: { const struct ofp11_queue_stats_request *qsr11 = ofpmsg_body(request); oqsr->queue_id = ntohl(qsr11->queue_id); return ofputil_port_from_ofp11(qsr11->port_no, &oqsr->port_no); } case OFP10_VERSION: { const struct ofp10_queue_stats_request *qsr10 = ofpmsg_body(request); oqsr->queue_id = ntohl(qsr10->queue_id); oqsr->port_no = u16_to_ofp(ntohs(qsr10->port_no)); /* OF 1.0 uses OFPP_ALL for OFPP_ANY */ if (oqsr->port_no == OFPP_ALL) { oqsr->port_no = OFPP_ANY; } return 0; } default: OVS_NOT_REACHED(); } } /* Encode a queue stats request for 'oqsr', the encoded message * will be for OpenFlow version 'ofp_version'. Returns message * as a struct ofpbuf. Returns encoded message on success, NULL on error. 
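 *
 * For example, a request covering all queues on all ports might be built
 * like this (a sketch; OFPQ_ALL is the OpenFlow "all queues" id):
 *
 *     struct ofputil_queue_stats_request oqsr = {
 *         .port_no = OFPP_ANY,
 *         .queue_id = OFPQ_ALL,
 *     };
 *     struct ofpbuf *request
 *         = ofputil_encode_queue_stats_request(OFP13_VERSION, &oqsr);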
*/ struct ofpbuf * ofputil_encode_queue_stats_request(enum ofp_version ofp_version, const struct ofputil_queue_stats_request *oqsr) { struct ofpbuf *request; switch (ofp_version) { case OFP11_VERSION: case OFP12_VERSION: case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp11_queue_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST11_QUEUE_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); req->port_no = ofputil_port_to_ofp11(oqsr->port_no); req->queue_id = htonl(oqsr->queue_id); break; } case OFP10_VERSION: { struct ofp10_queue_stats_request *req; request = ofpraw_alloc(OFPRAW_OFPST10_QUEUE_REQUEST, ofp_version, 0); req = ofpbuf_put_zeros(request, sizeof *req); /* OpenFlow 1.0 needs OFPP_ALL instead of OFPP_ANY */ req->port_no = htons(ofp_to_u16(oqsr->port_no == OFPP_ANY ? OFPP_ALL : oqsr->port_no)); req->queue_id = htonl(oqsr->queue_id); break; } default: OVS_NOT_REACHED(); } return request; } /* Returns the number of queue stats elements in OFPTYPE_QUEUE_STATS_REPLY * message 'oh'. */ size_t ofputil_count_queue_stats(const struct ofp_header *oh) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&b); for (size_t n = 0; ; n++) { struct ofputil_queue_stats qs; if (ofputil_decode_queue_stats(&qs, &b)) { return n; } } } static enum ofperr ofputil_queue_stats_from_ofp10(struct ofputil_queue_stats *oqs, const struct ofp10_queue_stats *qs10) { oqs->port_no = u16_to_ofp(ntohs(qs10->port_no)); oqs->queue_id = ntohl(qs10->queue_id); oqs->tx_bytes = ntohll(get_32aligned_be64(&qs10->tx_bytes)); oqs->tx_packets = ntohll(get_32aligned_be64(&qs10->tx_packets)); oqs->tx_errors = ntohll(get_32aligned_be64(&qs10->tx_errors)); oqs->duration_sec = oqs->duration_nsec = UINT32_MAX; return 0; } static enum ofperr ofputil_queue_stats_from_ofp11(struct ofputil_queue_stats *oqs, const struct ofp11_queue_stats *qs11) { enum ofperr error; error = ofputil_port_from_ofp11(qs11->port_no, &oqs->port_no); if (error) { return error; } oqs->queue_id = ntohl(qs11->queue_id); oqs->tx_bytes = ntohll(qs11->tx_bytes); oqs->tx_packets = ntohll(qs11->tx_packets); oqs->tx_errors = ntohll(qs11->tx_errors); oqs->duration_sec = oqs->duration_nsec = UINT32_MAX; return 0; } static enum ofperr ofputil_queue_stats_from_ofp13(struct ofputil_queue_stats *oqs, const struct ofp13_queue_stats *qs13) { enum ofperr error = ofputil_queue_stats_from_ofp11(oqs, &qs13->qs); if (!error) { oqs->duration_sec = ntohl(qs13->duration_sec); oqs->duration_nsec = ntohl(qs13->duration_nsec); } return error; } static enum ofperr ofputil_pull_ofp14_queue_stats(struct ofputil_queue_stats *oqs, struct ofpbuf *msg) { const struct ofp14_queue_stats *qs14; size_t len; qs14 = ofpbuf_try_pull(msg, sizeof *qs14); if (!qs14) { return OFPERR_OFPBRC_BAD_LEN; } len = ntohs(qs14->length); if (len < sizeof *qs14 || len - sizeof *qs14 > msg->size) { return OFPERR_OFPBRC_BAD_LEN; } ofpbuf_pull(msg, len - sizeof *qs14); /* No properties yet defined, so ignore them for now. */ return ofputil_queue_stats_from_ofp13(oqs, &qs14->qs); } /* Converts an OFPST_QUEUE_STATS reply in 'msg' into an abstract * ofputil_queue_stats in 'qs'. * * Multiple OFPST_QUEUE_STATS replies can be packed into a single OpenFlow * message. Calling this function multiple times for a single 'msg' iterates * through the replies. The caller must initially leave 'msg''s layer pointers * null and not modify them between calls. 
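 * (ofputil_count_queue_stats() above uses exactly this iteration pattern.)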
* * Returns 0 if successful, EOF if no replies were left in this 'msg', * otherwise a positive errno value. */ int ofputil_decode_queue_stats(struct ofputil_queue_stats *qs, struct ofpbuf *msg) { enum ofperr error; enum ofpraw raw; error = (msg->header ? ofpraw_decode(&raw, msg->header) : ofpraw_pull(&raw, msg)); if (error) { return error; } if (!msg->size) { return EOF; } else if (raw == OFPRAW_OFPST14_QUEUE_REPLY) { return ofputil_pull_ofp14_queue_stats(qs, msg); } else if (raw == OFPRAW_OFPST13_QUEUE_REPLY) { const struct ofp13_queue_stats *qs13; qs13 = ofpbuf_try_pull(msg, sizeof *qs13); if (!qs13) { goto bad_len; } return ofputil_queue_stats_from_ofp13(qs, qs13); } else if (raw == OFPRAW_OFPST11_QUEUE_REPLY) { const struct ofp11_queue_stats *qs11; qs11 = ofpbuf_try_pull(msg, sizeof *qs11); if (!qs11) { goto bad_len; } return ofputil_queue_stats_from_ofp11(qs, qs11); } else if (raw == OFPRAW_OFPST10_QUEUE_REPLY) { const struct ofp10_queue_stats *qs10; qs10 = ofpbuf_try_pull(msg, sizeof *qs10); if (!qs10) { goto bad_len; } return ofputil_queue_stats_from_ofp10(qs, qs10); } else { OVS_NOT_REACHED(); } bad_len: VLOG_WARN_RL(&bad_ofmsg_rl, "OFPST_QUEUE reply has %"PRIu32" leftover " "bytes at end", msg->size); return OFPERR_OFPBRC_BAD_LEN; } static void ofputil_queue_stats_to_ofp10(const struct ofputil_queue_stats *oqs, struct ofp10_queue_stats *qs10) { qs10->port_no = htons(ofp_to_u16(oqs->port_no)); memset(qs10->pad, 0, sizeof qs10->pad); qs10->queue_id = htonl(oqs->queue_id); put_32aligned_be64(&qs10->tx_bytes, htonll(oqs->tx_bytes)); put_32aligned_be64(&qs10->tx_packets, htonll(oqs->tx_packets)); put_32aligned_be64(&qs10->tx_errors, htonll(oqs->tx_errors)); } static void ofputil_queue_stats_to_ofp11(const struct ofputil_queue_stats *oqs, struct ofp11_queue_stats *qs11) { qs11->port_no = ofputil_port_to_ofp11(oqs->port_no); qs11->queue_id = htonl(oqs->queue_id); qs11->tx_bytes = htonll(oqs->tx_bytes); qs11->tx_packets = htonll(oqs->tx_packets); qs11->tx_errors = htonll(oqs->tx_errors); } static void ofputil_queue_stats_to_ofp13(const struct ofputil_queue_stats *oqs, struct ofp13_queue_stats *qs13) { ofputil_queue_stats_to_ofp11(oqs, &qs13->qs); if (oqs->duration_sec != UINT32_MAX) { qs13->duration_sec = htonl(oqs->duration_sec); qs13->duration_nsec = htonl(oqs->duration_nsec); } else { qs13->duration_sec = OVS_BE32_MAX; qs13->duration_nsec = OVS_BE32_MAX; } } static void ofputil_queue_stats_to_ofp14(const struct ofputil_queue_stats *oqs, struct ofp14_queue_stats *qs14) { qs14->length = htons(sizeof *qs14); memset(qs14->pad, 0, sizeof qs14->pad); ofputil_queue_stats_to_ofp13(oqs, &qs14->qs); } /* Encode a queue stat for 'oqs' and append it to 'replies'. 
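 * 'replies' should be a multipart reply list started with ofpmp_init() or
 * similar, since the encoding is chosen from ofpmp_version(replies).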
*/ void ofputil_append_queue_stat(struct ovs_list *replies, const struct ofputil_queue_stats *oqs) { switch (ofpmp_version(replies)) { case OFP13_VERSION: { struct ofp13_queue_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_queue_stats_to_ofp13(oqs, reply); break; } case OFP12_VERSION: case OFP11_VERSION: { struct ofp11_queue_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_queue_stats_to_ofp11(oqs, reply); break; } case OFP10_VERSION: { struct ofp10_queue_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_queue_stats_to_ofp10(oqs, reply); break; } case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: { struct ofp14_queue_stats *reply = ofpmp_append(replies, sizeof *reply); ofputil_queue_stats_to_ofp14(oqs, reply); break; } default: OVS_NOT_REACHED(); } } enum ofperr ofputil_decode_bundle_ctrl(const struct ofp_header *oh, struct ofputil_bundle_ctrl_msg *msg) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); enum ofpraw raw = ofpraw_pull_assert(&b); ovs_assert(raw == OFPRAW_OFPT14_BUNDLE_CONTROL || raw == OFPRAW_ONFT13_BUNDLE_CONTROL); const struct ofp14_bundle_ctrl_msg *m = b.msg; msg->bundle_id = ntohl(m->bundle_id); msg->type = ntohs(m->type); msg->flags = ntohs(m->flags); return 0; } struct ofpbuf * ofputil_encode_bundle_ctrl_request(enum ofp_version ofp_version, struct ofputil_bundle_ctrl_msg *bc) { struct ofpbuf *request; struct ofp14_bundle_ctrl_msg *m; switch (ofp_version) { case OFP10_VERSION: case OFP11_VERSION: case OFP12_VERSION: ovs_fatal(0, "bundles need OpenFlow 1.3 or later " "(\'-O OpenFlow14\')"); case OFP13_VERSION: case OFP14_VERSION: case OFP15_VERSION: case OFP16_VERSION: request = ofpraw_alloc(ofp_version == OFP13_VERSION ? OFPRAW_ONFT13_BUNDLE_CONTROL : OFPRAW_OFPT14_BUNDLE_CONTROL, ofp_version, 0); m = ofpbuf_put_zeros(request, sizeof *m); m->bundle_id = htonl(bc->bundle_id); m->type = htons(bc->type); m->flags = htons(bc->flags); break; default: OVS_NOT_REACHED(); } return request; } struct ofpbuf * ofputil_encode_bundle_ctrl_reply(const struct ofp_header *oh, struct ofputil_bundle_ctrl_msg *msg) { struct ofpbuf *buf; struct ofp14_bundle_ctrl_msg *m; buf = ofpraw_alloc_reply(oh->version == OFP13_VERSION ? OFPRAW_ONFT13_BUNDLE_CONTROL : OFPRAW_OFPT14_BUNDLE_CONTROL, oh, 0); m = ofpbuf_put_zeros(buf, sizeof *m); m->bundle_id = htonl(msg->bundle_id); m->type = htons(msg->type); m->flags = htons(msg->flags); return buf; } /* Return true for bundlable state change requests, false for other messages. */ static bool ofputil_is_bundlable(enum ofptype type) { switch (type) { /* Minimum required by OpenFlow 1.4. */ case OFPTYPE_PORT_MOD: case OFPTYPE_FLOW_MOD: /* Other supported types. */ case OFPTYPE_GROUP_MOD: case OFPTYPE_PACKET_OUT: return true; /* Nice to have later. */ case OFPTYPE_FLOW_MOD_TABLE_ID: case OFPTYPE_TABLE_MOD: case OFPTYPE_METER_MOD: case OFPTYPE_NXT_TLV_TABLE_MOD: /* Not to be bundlable. 
*/ case OFPTYPE_ECHO_REQUEST: case OFPTYPE_FEATURES_REQUEST: case OFPTYPE_GET_CONFIG_REQUEST: case OFPTYPE_SET_CONFIG: case OFPTYPE_BARRIER_REQUEST: case OFPTYPE_ROLE_REQUEST: case OFPTYPE_ECHO_REPLY: case OFPTYPE_SET_FLOW_FORMAT: case OFPTYPE_SET_PACKET_IN_FORMAT: case OFPTYPE_SET_CONTROLLER_ID: case OFPTYPE_FLOW_AGE: case OFPTYPE_FLOW_MONITOR_CANCEL: case OFPTYPE_SET_ASYNC_CONFIG: case OFPTYPE_GET_ASYNC_REQUEST: case OFPTYPE_DESC_STATS_REQUEST: case OFPTYPE_FLOW_STATS_REQUEST: case OFPTYPE_AGGREGATE_STATS_REQUEST: case OFPTYPE_TABLE_STATS_REQUEST: case OFPTYPE_TABLE_FEATURES_STATS_REQUEST: case OFPTYPE_TABLE_DESC_REQUEST: case OFPTYPE_PORT_STATS_REQUEST: case OFPTYPE_QUEUE_STATS_REQUEST: case OFPTYPE_PORT_DESC_STATS_REQUEST: case OFPTYPE_FLOW_MONITOR_STATS_REQUEST: case OFPTYPE_METER_STATS_REQUEST: case OFPTYPE_METER_CONFIG_STATS_REQUEST: case OFPTYPE_METER_FEATURES_STATS_REQUEST: case OFPTYPE_GROUP_STATS_REQUEST: case OFPTYPE_GROUP_DESC_STATS_REQUEST: case OFPTYPE_GROUP_FEATURES_STATS_REQUEST: case OFPTYPE_QUEUE_GET_CONFIG_REQUEST: case OFPTYPE_BUNDLE_CONTROL: case OFPTYPE_BUNDLE_ADD_MESSAGE: case OFPTYPE_HELLO: case OFPTYPE_ERROR: case OFPTYPE_FEATURES_REPLY: case OFPTYPE_GET_CONFIG_REPLY: case OFPTYPE_PACKET_IN: case OFPTYPE_FLOW_REMOVED: case OFPTYPE_PORT_STATUS: case OFPTYPE_BARRIER_REPLY: case OFPTYPE_QUEUE_GET_CONFIG_REPLY: case OFPTYPE_DESC_STATS_REPLY: case OFPTYPE_FLOW_STATS_REPLY: case OFPTYPE_QUEUE_STATS_REPLY: case OFPTYPE_PORT_STATS_REPLY: case OFPTYPE_TABLE_STATS_REPLY: case OFPTYPE_AGGREGATE_STATS_REPLY: case OFPTYPE_PORT_DESC_STATS_REPLY: case OFPTYPE_ROLE_REPLY: case OFPTYPE_FLOW_MONITOR_PAUSED: case OFPTYPE_FLOW_MONITOR_RESUMED: case OFPTYPE_FLOW_MONITOR_STATS_REPLY: case OFPTYPE_GET_ASYNC_REPLY: case OFPTYPE_GROUP_STATS_REPLY: case OFPTYPE_GROUP_DESC_STATS_REPLY: case OFPTYPE_GROUP_FEATURES_STATS_REPLY: case OFPTYPE_METER_STATS_REPLY: case OFPTYPE_METER_CONFIG_STATS_REPLY: case OFPTYPE_METER_FEATURES_STATS_REPLY: case OFPTYPE_TABLE_FEATURES_STATS_REPLY: case OFPTYPE_TABLE_DESC_REPLY: case OFPTYPE_ROLE_STATUS: case OFPTYPE_REQUESTFORWARD: case OFPTYPE_TABLE_STATUS: case OFPTYPE_NXT_TLV_TABLE_REQUEST: case OFPTYPE_NXT_TLV_TABLE_REPLY: case OFPTYPE_NXT_RESUME: case OFPTYPE_IPFIX_BRIDGE_STATS_REQUEST: case OFPTYPE_IPFIX_BRIDGE_STATS_REPLY: case OFPTYPE_IPFIX_FLOW_STATS_REQUEST: case OFPTYPE_IPFIX_FLOW_STATS_REPLY: case OFPTYPE_CT_FLUSH_ZONE: break; } return false; } enum ofperr ofputil_decode_bundle_add(const struct ofp_header *oh, struct ofputil_bundle_add_msg *msg, enum ofptype *typep) { struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length)); /* Pull the outer ofp_header. */ enum ofpraw raw = ofpraw_pull_assert(&b); ovs_assert(raw == OFPRAW_OFPT14_BUNDLE_ADD_MESSAGE || raw == OFPRAW_ONFT13_BUNDLE_ADD_MESSAGE); /* Pull the bundle_ctrl header. */ const struct ofp14_bundle_ctrl_msg *m = ofpbuf_pull(&b, sizeof *m); msg->bundle_id = ntohl(m->bundle_id); msg->flags = ntohs(m->flags); /* Pull the inner ofp_header. */ if (b.size < sizeof(struct ofp_header)) { return OFPERR_OFPBFC_MSG_BAD_LEN; } msg->msg = b.data; if (msg->msg->version != oh->version) { return OFPERR_OFPBFC_BAD_VERSION; } size_t inner_len = ntohs(msg->msg->length); if (inner_len < sizeof(struct ofp_header) || inner_len > b.size) { return OFPERR_OFPBFC_MSG_BAD_LEN; } if (msg->msg->xid != oh->xid) { return OFPERR_OFPBFC_MSG_BAD_XID; } /* Reject unbundlable messages. 
*/ enum ofptype type; enum ofperr error = ofptype_decode(&type, msg->msg); if (error) { VLOG_WARN_RL(&bad_ofmsg_rl, "OFPT14_BUNDLE_ADD_MESSAGE contained " "message is unparsable (%s)", ofperr_get_name(error)); return OFPERR_OFPBFC_MSG_UNSUP; /* 'error' would be confusing. */ } if (!ofputil_is_bundlable(type)) { VLOG_WARN_RL(&bad_ofmsg_rl, "%s message not allowed inside " "OFPT14_BUNDLE_ADD_MESSAGE", ofptype_get_name(type)); return OFPERR_OFPBFC_MSG_UNSUP; } if (typep) { *typep = type; } return 0; } struct ofpbuf * ofputil_encode_bundle_add(enum ofp_version ofp_version, struct ofputil_bundle_add_msg *msg) { struct ofpbuf *request; struct ofp14_bundle_ctrl_msg *m; /* Must use the same xid as the embedded message. */ request = ofpraw_alloc_xid(ofp_version == OFP13_VERSION ? OFPRAW_ONFT13_BUNDLE_ADD_MESSAGE : OFPRAW_OFPT14_BUNDLE_ADD_MESSAGE, ofp_version, msg->msg->xid, ntohs(msg->msg->length)); m = ofpbuf_put_zeros(request, sizeof *m); m->bundle_id = htonl(msg->bundle_id); m->flags = htons(msg->flags); ofpbuf_put(request, msg->msg, ntohs(msg->msg->length)); ofpmsg_update_length(request); return request; } static void encode_tlv_table_mappings(struct ofpbuf *b, struct ovs_list *mappings) { struct ofputil_tlv_map *map; LIST_FOR_EACH (map, list_node, mappings) { struct nx_tlv_map *nx_map; nx_map = ofpbuf_put_zeros(b, sizeof *nx_map); nx_map->option_class = htons(map->option_class); nx_map->option_type = map->option_type; nx_map->option_len = map->option_len; nx_map->index = htons(map->index); } } struct ofpbuf * ofputil_encode_tlv_table_mod(enum ofp_version ofp_version, struct ofputil_tlv_table_mod *ttm) { struct ofpbuf *b; struct nx_tlv_table_mod *nx_ttm; b = ofpraw_alloc(OFPRAW_NXT_TLV_TABLE_MOD, ofp_version, 0); nx_ttm = ofpbuf_put_zeros(b, sizeof *nx_ttm); nx_ttm->command = htons(ttm->command); encode_tlv_table_mappings(b, &ttm->mappings); return b; } static enum ofperr decode_tlv_table_mappings(struct ofpbuf *msg, unsigned int max_fields, struct ovs_list *mappings) { ovs_list_init(mappings); while (msg->size) { struct nx_tlv_map *nx_map; struct ofputil_tlv_map *map; nx_map = ofpbuf_pull(msg, sizeof *nx_map); map = xmalloc(sizeof *map); ovs_list_push_back(mappings, &map->list_node); map->option_class = ntohs(nx_map->option_class); map->option_type = nx_map->option_type; map->option_len = nx_map->option_len; if (map->option_len % 4 || map->option_len > TLV_MAX_OPT_SIZE) { VLOG_WARN_RL(&bad_ofmsg_rl, "tlv table option length (%u) is not a valid option size", map->option_len); ofputil_uninit_tlv_table(mappings); return OFPERR_NXTTMFC_BAD_OPT_LEN; } map->index = ntohs(nx_map->index); if (map->index >= max_fields) { VLOG_WARN_RL(&bad_ofmsg_rl, "tlv table field index (%u) is too large (max %u)", map->index, max_fields - 1); ofputil_uninit_tlv_table(mappings); return OFPERR_NXTTMFC_BAD_FIELD_IDX; } } return 0; } enum ofperr ofputil_decode_tlv_table_mod(const struct ofp_header *oh, struct ofputil_tlv_table_mod *ttm) { struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&msg); struct nx_tlv_table_mod *nx_ttm = ofpbuf_pull(&msg, sizeof *nx_ttm); ttm->command = ntohs(nx_ttm->command); if (ttm->command > NXTTMC_CLEAR) { VLOG_WARN_RL(&bad_ofmsg_rl, "tlv table mod command (%u) is out of range", ttm->command); return OFPERR_NXTTMFC_BAD_COMMAND; } return decode_tlv_table_mappings(&msg, TUN_METADATA_NUM_OPTS, &ttm->mappings); } struct ofpbuf * ofputil_encode_tlv_table_reply(const struct ofp_header *oh, struct ofputil_tlv_table_reply *ttr) { struct ofpbuf *b; struct 
nx_tlv_table_reply *nx_ttr; b = ofpraw_alloc_reply(OFPRAW_NXT_TLV_TABLE_REPLY, oh, 0); nx_ttr = ofpbuf_put_zeros(b, sizeof *nx_ttr); nx_ttr->max_option_space = htonl(ttr->max_option_space); nx_ttr->max_fields = htons(ttr->max_fields); encode_tlv_table_mappings(b, &ttr->mappings); return b; } /* Decodes the NXT_TLV_TABLE_REPLY message in 'oh' into '*ttr'. Returns 0 * if successful, otherwise an ofperr. * * The decoder verifies that the indexes in 'ttr->mappings' are less than * 'ttr->max_fields', but the caller must ensure, if necessary, that they are * less than TUN_METADATA_NUM_OPTS. */ enum ofperr ofputil_decode_tlv_table_reply(const struct ofp_header *oh, struct ofputil_tlv_table_reply *ttr) { struct ofpbuf msg = ofpbuf_const_initializer(oh, ntohs(oh->length)); ofpraw_pull_assert(&msg); struct nx_tlv_table_reply *nx_ttr = ofpbuf_pull(&msg, sizeof *nx_ttr); ttr->max_option_space = ntohl(nx_ttr->max_option_space); ttr->max_fields = ntohs(nx_ttr->max_fields); return decode_tlv_table_mappings(&msg, ttr->max_fields, &ttr->mappings); } void ofputil_uninit_tlv_table(struct ovs_list *mappings) { struct ofputil_tlv_map *map; LIST_FOR_EACH_POP (map, list_node, mappings) { free(map); } } const char * ofputil_async_msg_type_to_string(enum ofputil_async_msg_type type) { switch (type) { case OAM_PACKET_IN: return "PACKET_IN"; case OAM_PORT_STATUS: return "PORT_STATUS"; case OAM_FLOW_REMOVED: return "FLOW_REMOVED"; case OAM_ROLE_STATUS: return "ROLE_STATUS"; case OAM_TABLE_STATUS: return "TABLE_STATUS"; case OAM_REQUESTFORWARD: return "REQUESTFORWARD"; case OAM_N_TYPES: default: OVS_NOT_REACHED(); } } struct ofp14_async_prop { uint64_t prop_type; enum ofputil_async_msg_type oam; bool master; uint32_t allowed10, allowed14; }; #define AP_PAIR(SLAVE_PROP_TYPE, OAM, A10, A14) \ { SLAVE_PROP_TYPE, OAM, false, A10, (A14) ? (A14) : (A10) }, \ { (SLAVE_PROP_TYPE + 1), OAM, true, A10, (A14) ? (A14) : (A10) } static const struct ofp14_async_prop async_props[] = { AP_PAIR( 0, OAM_PACKET_IN, OFPR10_BITS, OFPR14_BITS), AP_PAIR( 2, OAM_PORT_STATUS, (1 << OFPPR_N_REASONS) - 1, 0), AP_PAIR( 4, OAM_FLOW_REMOVED, (1 << OVS_OFPRR_NONE) - 1, 0), AP_PAIR( 6, OAM_ROLE_STATUS, (1 << OFPCRR_N_REASONS) - 1, 0), AP_PAIR( 8, OAM_TABLE_STATUS, OFPTR_BITS, 0), AP_PAIR(10, OAM_REQUESTFORWARD, (1 << OFPRFR_N_REASONS) - 1, 0), }; #define FOR_EACH_ASYNC_PROP(VAR) \ for (const struct ofp14_async_prop *VAR = async_props; \ VAR < &async_props[ARRAY_SIZE(async_props)]; VAR++) static const struct ofp14_async_prop * get_ofp14_async_config_prop_by_prop_type(uint64_t prop_type) { FOR_EACH_ASYNC_PROP (ap) { if (prop_type == ap->prop_type) { return ap; } } return NULL; } static const struct ofp14_async_prop * get_ofp14_async_config_prop_by_oam(enum ofputil_async_msg_type oam, bool master) { FOR_EACH_ASYNC_PROP (ap) { if (ap->oam == oam && ap->master == master) { return ap; } } return NULL; } static uint32_t ofp14_async_prop_allowed(const struct ofp14_async_prop *prop, enum ofp_version version) { return version >= OFP14_VERSION ? prop->allowed14 : prop->allowed10; } static ovs_be32 encode_async_mask(const struct ofputil_async_cfg *src, const struct ofp14_async_prop *ap, enum ofp_version version) { uint32_t mask = ap->master ? 
src->master[ap->oam] : src->slave[ap->oam]; return htonl(mask & ofp14_async_prop_allowed(ap, version)); } static enum ofperr decode_async_mask(ovs_be32 src, const struct ofp14_async_prop *ap, enum ofp_version version, bool loose, struct ofputil_async_cfg *dst) { uint32_t mask = ntohl(src); uint32_t allowed = ofp14_async_prop_allowed(ap, version); if (mask & ~allowed) { OFPPROP_LOG(&bad_ofmsg_rl, loose, "bad value %#x for %s (allowed mask %#x)", mask, ofputil_async_msg_type_to_string(ap->oam), allowed); mask &= allowed; if (!loose) { return OFPERR_OFPACFC_INVALID; } } if (ap->oam == OAM_PACKET_IN) { if (mask & (1u << OFPR_NO_MATCH)) { mask |= 1u << OFPR_EXPLICIT_MISS; if (version < OFP13_VERSION) { mask |= 1u << OFPR_IMPLICIT_MISS; } } } uint32_t *array = ap->master ? dst->master : dst->slave; array[ap->oam] = mask; return 0; } static enum ofperr parse_async_tlv(const struct ofpbuf *property, const struct ofp14_async_prop *ap, struct ofputil_async_cfg *ac, enum ofp_version version, bool loose) { enum ofperr error; ovs_be32 mask; error = ofpprop_parse_be32(property, &mask); if (error) { return error; } if (ofpprop_is_experimenter(ap->prop_type)) { /* For experimenter properties, whether a property is for the master or * slave role is indicated by both 'type' and 'exp_type' in struct * ofp_prop_experimenter. Check that these are consistent. */ const struct ofp_prop_experimenter *ope = property->data; bool should_be_master = ope->type == htons(0xffff); if (should_be_master != ap->master) { VLOG_WARN_RL(&bad_ofmsg_rl, "async property type %#"PRIx16" " "indicates %s role but exp_type %"PRIu32" indicates " "%s role", ntohs(ope->type), should_be_master ? "master" : "slave", ntohl(ope->exp_type), ap->master ? "master" : "slave"); return OFPERR_OFPBPC_BAD_EXP_TYPE; } } return decode_async_mask(mask, ap, version, loose, ac); } static void decode_legacy_async_masks(const ovs_be32 masks[2], enum ofputil_async_msg_type oam, enum ofp_version version, struct ofputil_async_cfg *dst) { for (int i = 0; i < 2; i++) { bool master = i == 0; const struct ofp14_async_prop *ap = get_ofp14_async_config_prop_by_oam(oam, master); decode_async_mask(masks[i], ap, version, true, dst); } } /* Decodes the OpenFlow "set async config" request and "get async config * reply" message in '*oh' into an abstract form in 'ac'. * * Some versions of the "set async config" request change only some of the * settings and leave the others alone. This function uses 'basis' as the * initial state for decoding these. Other versions of the request change all * the settings; this function ignores 'basis' when decoding these. * * If 'loose' is true, this function ignores properties and values that it does * not understand, as a controller would want to do when interpreting * capabilities provided by a switch. If 'loose' is false, this function * treats unknown properties and values as an error, as a switch would want to * do when interpreting a configuration request made by a controller. * * Returns 0 if successful, otherwise an OFPERR_* value. * * Returns error code OFPERR_OFPACFC_INVALID if the value of mask is not in * the valid range of mask. 
 *
 * Returns error code OFPERR_OFPACFC_UNSUPPORTED if the configuration is not
 * supported. */
enum ofperr
ofputil_decode_set_async_config(const struct ofp_header *oh, bool loose,
                                const struct ofputil_async_cfg *basis,
                                struct ofputil_async_cfg *ac)
{
    struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
    enum ofpraw raw = ofpraw_pull_assert(&b);
    if (raw == OFPRAW_OFPT13_SET_ASYNC ||
        raw == OFPRAW_NXT_SET_ASYNC_CONFIG ||
        raw == OFPRAW_OFPT13_GET_ASYNC_REPLY) {
        const struct nx_async_config *msg = ofpmsg_body(oh);

        *ac = OFPUTIL_ASYNC_CFG_INIT;
        decode_legacy_async_masks(msg->packet_in_mask, OAM_PACKET_IN,
                                  oh->version, ac);
        decode_legacy_async_masks(msg->port_status_mask, OAM_PORT_STATUS,
                                  oh->version, ac);
        decode_legacy_async_masks(msg->flow_removed_mask, OAM_FLOW_REMOVED,
                                  oh->version, ac);
    } else if (raw == OFPRAW_OFPT14_SET_ASYNC ||
               raw == OFPRAW_OFPT14_GET_ASYNC_REPLY ||
               raw == OFPRAW_NXT_SET_ASYNC_CONFIG2) {
        *ac = *basis;
        while (b.size > 0) {
            struct ofpbuf property;
            enum ofperr error;
            uint64_t type;

            error = ofpprop_pull__(&b, &property, 8, 0xfffe, &type);
            if (error) {
                return error;
            }

            const struct ofp14_async_prop *ap
                = get_ofp14_async_config_prop_by_prop_type(type);
            error = (ap
                     ? parse_async_tlv(&property, ap, ac, oh->version, loose)
                     : OFPPROP_UNKNOWN(loose, "async config", type));
            if (error) {
                /* Most messages use OFPBPC_BAD_TYPE but async has its own
                 * (who knows why; it's OpenFlow). */
                if (error == OFPERR_OFPBPC_BAD_TYPE) {
                    error = OFPERR_OFPACFC_UNSUPPORTED;
                }
                return error;
            }
        }
    } else {
        return OFPERR_OFPBRC_BAD_VERSION;
    }
    return 0;
}

static void
encode_legacy_async_masks(const struct ofputil_async_cfg *ac,
                          enum ofputil_async_msg_type oam,
                          enum ofp_version version,
                          ovs_be32 masks[2])
{
    for (int i = 0; i < 2; i++) {
        bool master = i == 0;
        const struct ofp14_async_prop *ap
            = get_ofp14_async_config_prop_by_oam(oam, master);
        masks[i] = encode_async_mask(ac, ap, version);
    }
}

static void
ofputil_put_async_config__(const struct ofputil_async_cfg *ac,
                           struct ofpbuf *buf, bool tlv,
                           enum ofp_version version, uint32_t oams)
{
    if (!tlv) {
        struct nx_async_config *msg = ofpbuf_put_zeros(buf, sizeof *msg);
        encode_legacy_async_masks(ac, OAM_PACKET_IN, version,
                                  msg->packet_in_mask);
        encode_legacy_async_masks(ac, OAM_PORT_STATUS, version,
                                  msg->port_status_mask);
        encode_legacy_async_masks(ac, OAM_FLOW_REMOVED, version,
                                  msg->flow_removed_mask);
    } else {
        FOR_EACH_ASYNC_PROP (ap) {
            if (oams & (1u << ap->oam)) {
                size_t ofs = buf->size;
                ofpprop_put_be32(buf, ap->prop_type,
                                 encode_async_mask(ac, ap, version));

                /* For experimenter properties, we need to use type 0xffff
                 * for master and 0xfffe for slaves. */
                if (ofpprop_is_experimenter(ap->prop_type)) {
                    struct ofp_prop_experimenter *ope
                        = ofpbuf_at_assert(buf, ofs, sizeof *ope);
                    ope->type = ap->master ? htons(0xffff) : htons(0xfffe);
                }
            }
        }
    }
}

/* Encodes and returns a reply to the OFPT_GET_ASYNC_REQUEST in 'oh' that
 * states that the asynchronous message configuration is 'ac'. */
struct ofpbuf *
ofputil_encode_get_async_reply(const struct ofp_header *oh,
                               const struct ofputil_async_cfg *ac)
{
    enum ofpraw raw = (oh->version < OFP14_VERSION
                       ? OFPRAW_OFPT13_GET_ASYNC_REPLY
                       : OFPRAW_OFPT14_GET_ASYNC_REPLY);
    struct ofpbuf *reply = ofpraw_alloc_reply(raw, oh, 0);
    ofputil_put_async_config__(ac, reply,
                               raw == OFPRAW_OFPT14_GET_ASYNC_REPLY,
                               oh->version, UINT32_MAX);
    return reply;
}

/* Encodes and returns a message, in a format appropriate for OpenFlow version
 * 'ofp_version', that sets the asynchronous message configuration to 'ac'.
 *
 * Specify 'oams' as a bitmap of OAM_* that indicate the asynchronous messages
 * to configure.  OF1.0 through OF1.3 can't natively configure a subset of
 * messages, so more messages than requested may be configured.  OF1.0 through
 * OF1.3 also can't configure OVS extension OAM_* values, so if 'oams'
 * includes any extensions then this function encodes an Open vSwitch
 * extension message that does support configuring OVS extension OAM_*. */
struct ofpbuf *
ofputil_encode_set_async_config(const struct ofputil_async_cfg *ac,
                                uint32_t oams, enum ofp_version ofp_version)
{
    enum ofpraw raw =
        (ofp_version >= OFP14_VERSION ? OFPRAW_OFPT14_SET_ASYNC
         : oams & OAM_EXTENSIONS ? OFPRAW_NXT_SET_ASYNC_CONFIG2
         : ofp_version >= OFP13_VERSION ? OFPRAW_OFPT13_SET_ASYNC
         : OFPRAW_NXT_SET_ASYNC_CONFIG);
    struct ofpbuf *request = ofpraw_alloc(raw, ofp_version, 0);
    ofputil_put_async_config__(ac, request,
                               (raw == OFPRAW_OFPT14_SET_ASYNC ||
                                raw == OFPRAW_NXT_SET_ASYNC_CONFIG2),
                               ofp_version, oams);
    return request;
}

struct ofputil_async_cfg
ofputil_async_cfg_default(enum ofp_version version)
{
    /* We enable all of the OF1.4 reasons regardless of 'version' because the
     * reasons added in OF1.4 are just refinements of the OFPR_ACTION
     * introduced in OF1.0, breaking it into more specific categories.  When
     * we encode these for earlier OpenFlow versions, we translate them into
     * OFPR_ACTION. */
    uint32_t pin = OFPR14_BITS & ~(1u << OFPR_INVALID_TTL);
    pin |= 1u << OFPR_EXPLICIT_MISS;
    if (version <= OFP12_VERSION) {
        pin |= 1u << OFPR_IMPLICIT_MISS;
    }

    return (struct ofputil_async_cfg) {
        .master[OAM_PACKET_IN] = pin,
        .master[OAM_FLOW_REMOVED]
            = (version >= OFP14_VERSION ? OFPRR14_BITS : OFPRR10_BITS),
        .master[OAM_PORT_STATUS] = OFPPR_BITS,
        .slave[OAM_PORT_STATUS] = OFPPR_BITS,
    };
}

static void
ofputil_put_ofp14_table_desc(const struct ofputil_table_desc *td,
                             struct ofpbuf *b, enum ofp_version version)
{
    struct ofp14_table_desc *otd;
    struct ofp14_table_mod_prop_vacancy *otv;
    size_t start_otd;

    start_otd = b->size;
    ofpbuf_put_zeros(b, sizeof *otd);

    ofpprop_put_u32(b, OFPTMPT14_EVICTION, td->eviction_flags);

    otv = ofpbuf_put_zeros(b, sizeof *otv);
    otv->type = htons(OFPTMPT14_VACANCY);
    otv->length = htons(sizeof *otv);
    otv->vacancy_down = td->table_vacancy.vacancy_down;
    otv->vacancy_up = td->table_vacancy.vacancy_up;
    otv->vacancy = td->table_vacancy.vacancy;

    otd = ofpbuf_at_assert(b, start_otd, sizeof *otd);
    otd->length = htons(b->size - start_otd);
    otd->table_id = td->table_id;
    otd->config = ofputil_encode_table_config(OFPUTIL_TABLE_MISS_DEFAULT,
                                              td->eviction, td->vacancy,
                                              version);
}

/* Converts the abstract form of a "table status" message in '*ts' into an
 * OpenFlow message suitable for 'protocol', and returns that encoded form in
 * a buffer owned by the caller. */
struct ofpbuf *
ofputil_encode_table_status(const struct ofputil_table_status *ts,
                            enum ofputil_protocol protocol)
{
    enum ofp_version version;
    struct ofpbuf *b;

    version = ofputil_protocol_to_ofp_version(protocol);
    if (version >= OFP14_VERSION) {
        enum ofpraw raw;
        struct ofp14_table_status *ots;

        raw = OFPRAW_OFPT14_TABLE_STATUS;
        b = ofpraw_alloc_xid(raw, version, htonl(0), 0);
        ots = ofpbuf_put_zeros(b, sizeof *ots);
        ots->reason = ts->reason;
        ofputil_put_ofp14_table_desc(&ts->desc, b, version);
        ofpmsg_update_length(b);
        return b;
    } else {
        return NULL;
    }
}

/* Decodes the OpenFlow "table status" message in 'oh' into an abstract form
 * in '*ts'.  Returns 0 if successful, otherwise an OFPERR_* value.
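 *
 * Only the vacancy reasons (OFPTR_VACANCY_DOWN and OFPTR_VACANCY_UP) are
 * accepted; any other reason is rejected with OFPERR_OFPBPC_BAD_VALUE.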
 */
enum ofperr
ofputil_decode_table_status(const struct ofp_header *oh,
                            struct ofputil_table_status *ts)
{
    const struct ofp14_table_status *ots;
    struct ofpbuf b;
    enum ofperr error;
    enum ofpraw raw;

    ofpbuf_use_const(&b, oh, ntohs(oh->length));
    raw = ofpraw_pull_assert(&b);
    ots = ofpbuf_pull(&b, sizeof *ots);

    if (raw == OFPRAW_OFPT14_TABLE_STATUS) {
        if (ots->reason != OFPTR_VACANCY_DOWN
            && ots->reason != OFPTR_VACANCY_UP) {
            return OFPERR_OFPBPC_BAD_VALUE;
        }
        ts->reason = ots->reason;

        error = ofputil_decode_table_desc(&b, &ts->desc, oh->version);
        return error;
    } else {
        return OFPERR_OFPBRC_BAD_VERSION;
    }
}
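/* Illustrative sketch only, not part of the original file: one way a caller
 * might use the bundle helpers above to wrap a single inner OpenFlow message
 * in a bundle add message (valid for OpenFlow 1.3 and later).  The function
 * name, the bundle id, and the flag choice are hypothetical examples;
 * OFPBF_ATOMIC and OFPBF_ORDERED are the OpenFlow 1.4 bundle flag names as
 * used elsewhere in OVS.  Error handling and the surrounding bundle
 * OPEN/COMMIT control messages are elided. */
static struct ofpbuf *
example_encode_bundle_add(enum ofp_version version,
                          const struct ofp_header *inner)
{
    struct ofputil_bundle_add_msg add = {
        .bundle_id = 1,                         /* hypothetical bundle id */
        .flags = OFPBF_ATOMIC | OFPBF_ORDERED,  /* example flag choice */
        .msg = inner,
    };

    /* Reuses 'inner''s xid and appends the full inner message, as
     * ofputil_encode_bundle_add() requires. */
    return ofputil_encode_bundle_add(version, &add);
}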
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* kdc/kdc_util.c - Utility functions for the KDC implementation */ /* * Copyright 1990,1991,2007,2008,2009 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ /* * Copyright (c) 2006-2008, Novell, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The copyright holder's name is not used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "k5-int.h" #include "kdc_util.h" #include "extern.h" #include <stdio.h> #include <ctype.h> #include <syslog.h> #include <kadm5/admin.h> #include "adm_proto.h" #include "net-server.h" #include <limits.h> #ifdef KRBCONF_VAGUE_ERRORS const int vague_errors = 1; #else const int vague_errors = 0; #endif static krb5_error_code kdc_rd_ap_req(kdc_realm_t *kdc_active_realm, krb5_ap_req *apreq, krb5_auth_context auth_context, krb5_db_entry **server, krb5_keyblock **tgskey); static krb5_error_code find_server_key(krb5_context, krb5_db_entry *, krb5_enctype, krb5_kvno, krb5_keyblock **, krb5_kvno *); /* * concatenate first two authdata arrays, returning an allocated replacement. 
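 * Either input array may be NULL, in which case it contributes nothing.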
* The replacement should be freed with krb5_free_authdata(). */ krb5_error_code concat_authorization_data(krb5_context context, krb5_authdata **first, krb5_authdata **second, krb5_authdata ***output) { int i, j; krb5_authdata **ptr, **retdata; /* count up the entries */ i = 0; if (first) for (ptr = first; *ptr; ptr++) i++; if (second) for (ptr = second; *ptr; ptr++) i++; retdata = (krb5_authdata **)malloc((i+1)*sizeof(*retdata)); if (!retdata) return ENOMEM; retdata[i] = 0; /* null-terminated array */ for (i = 0, j = 0, ptr = first; j < 2 ; ptr = second, j++) while (ptr && *ptr) { /* now walk & copy */ retdata[i] = (krb5_authdata *)malloc(sizeof(*retdata[i])); if (!retdata[i]) { krb5_free_authdata(context, retdata); return ENOMEM; } *retdata[i] = **ptr; if (!(retdata[i]->contents = (krb5_octet *)malloc(retdata[i]->length))) { free(retdata[i]); retdata[i] = 0; krb5_free_authdata(context, retdata); return ENOMEM; } memcpy(retdata[i]->contents, (*ptr)->contents, retdata[i]->length); ptr++; i++; } *output = retdata; return 0; } krb5_boolean is_local_principal(kdc_realm_t *kdc_active_realm, krb5_const_principal princ1) { return krb5_realm_compare(kdc_context, princ1, tgs_server); } /* * Returns TRUE if the kerberos principal is the name of a Kerberos ticket * service. */ krb5_boolean krb5_is_tgs_principal(krb5_const_principal principal) { if (krb5_princ_size(kdc_context, principal) != 2) return FALSE; if (data_eq_string(*krb5_princ_component(kdc_context, principal, 0), KRB5_TGS_NAME)) return TRUE; else return FALSE; } /* Returns TRUE if principal is the name of a cross-realm TGS. */ krb5_boolean is_cross_tgs_principal(krb5_const_principal principal) { if (!krb5_is_tgs_principal(principal)) return FALSE; if (!data_eq(*krb5_princ_component(kdc_context, principal, 1), *krb5_princ_realm(kdc_context, principal))) return TRUE; else return FALSE; } /* * given authentication data (provides seed for checksum), verify checksum * for source data. */ static krb5_error_code comp_cksum(krb5_context kcontext, krb5_data *source, krb5_ticket *ticket, krb5_checksum *his_cksum) { krb5_error_code retval; krb5_boolean valid; if (!krb5_c_valid_cksumtype(his_cksum->checksum_type)) return KRB5KDC_ERR_SUMTYPE_NOSUPP; /* must be collision proof */ if (!krb5_c_is_coll_proof_cksum(his_cksum->checksum_type)) return KRB5KRB_AP_ERR_INAPP_CKSUM; /* verify checksum */ if ((retval = krb5_c_verify_checksum(kcontext, ticket->enc_part2->session, KRB5_KEYUSAGE_TGS_REQ_AUTH_CKSUM, source, his_cksum, &valid))) return(retval); if (!valid) return(KRB5KRB_AP_ERR_BAD_INTEGRITY); return(0); } /* If a header ticket is decrypted, *ticket_out is filled in even on error. 
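 * (This lets the caller inspect the ticket, e.g. for logging, even when the
 * request fails.)  The caller releases it with krb5_free_ticket().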
 */
krb5_error_code
kdc_process_tgs_req(kdc_realm_t *kdc_active_realm,
                    krb5_kdc_req *request, const krb5_fulladdr *from,
                    krb5_data *pkt, krb5_ticket **ticket_out,
                    krb5_db_entry **krbtgt_ptr,
                    krb5_keyblock **tgskey,
                    krb5_keyblock **subkey,
                    krb5_pa_data **pa_tgs_req)
{
    krb5_pa_data         *tmppa;
    krb5_ap_req          *apreq;
    krb5_error_code       retval;
    krb5_authdata       **authdata = NULL;
    krb5_data             scratch1;
    krb5_data            *scratch = NULL;
    krb5_boolean          foreign_server = FALSE;
    krb5_auth_context     auth_context = NULL;
    krb5_authenticator   *authenticator = NULL;
    krb5_checksum        *his_cksum = NULL;
    krb5_db_entry        *krbtgt = NULL;
    krb5_ticket          *ticket;

    *ticket_out = NULL;
    *krbtgt_ptr = NULL;
    *tgskey = NULL;

    tmppa = krb5int_find_pa_data(kdc_context,
                                 request->padata, KRB5_PADATA_AP_REQ);
    if (!tmppa)
        return KRB5KDC_ERR_PADATA_TYPE_NOSUPP;

    scratch1.length = tmppa->length;
    scratch1.data = (char *)tmppa->contents;
    if ((retval = decode_krb5_ap_req(&scratch1, &apreq)))
        return retval;
    ticket = apreq->ticket;

    if (isflagset(apreq->ap_options, AP_OPTS_USE_SESSION_KEY) ||
        isflagset(apreq->ap_options, AP_OPTS_MUTUAL_REQUIRED)) {
        krb5_klog_syslog(LOG_INFO, _("TGS_REQ: SESSION KEY or MUTUAL"));
        retval = KRB5KDC_ERR_POLICY;
        goto cleanup;
    }

    /* If the "server" principal in the ticket is not something in the local
       realm, then we must refuse to service the request if the client
       claims to be from the local realm.  If we don't do this, then some
       other realm's nasty KDC can claim to be authenticating a client from
       our realm, and we'll give out tickets concurring with it!  We set a
       flag here for checking below. */
    foreign_server = !is_local_principal(kdc_active_realm,
                                         apreq->ticket->server);

    if ((retval = krb5_auth_con_init(kdc_context, &auth_context)))
        goto cleanup;

    /* Don't use a replay cache. */
    if ((retval = krb5_auth_con_setflags(kdc_context, auth_context, 0)))
        goto cleanup;

    if ((retval = krb5_auth_con_setaddrs(kdc_context, auth_context, NULL,
                                         from->address)))
        goto cleanup_auth_context;

    retval = kdc_rd_ap_req(kdc_active_realm, apreq, auth_context, &krbtgt,
                           tgskey);
    if (retval)
        goto cleanup_auth_context;

    /* Tickets with the "invalid" flag set may only be used to validate. */
    if (isflagset(ticket->enc_part2->flags, TKT_FLG_INVALID)
        && !isflagset(request->kdc_options, KDC_OPT_VALIDATE)) {
        retval = KRB5KRB_AP_ERR_TKT_INVALID;
        goto cleanup_auth_context;
    }

    if ((retval = krb5_auth_con_getrecvsubkey(kdc_context,
                                              auth_context, subkey)))
        goto cleanup_auth_context;

    if ((retval = krb5_auth_con_getauthenticator(kdc_context, auth_context,
                                                 &authenticator)))
        goto cleanup_auth_context;

    retval = krb5_find_authdata(kdc_context,
                                ticket->enc_part2->authorization_data,
                                authenticator->authorization_data,
                                KRB5_AUTHDATA_FX_ARMOR, &authdata);
    if (retval != 0)
        goto cleanup_authenticator;
    if (authdata && authdata[0]) {
        k5_setmsg(kdc_context, KRB5KDC_ERR_POLICY,
                  "ticket valid only as FAST armor");
        retval = KRB5KDC_ERR_POLICY;
        krb5_free_authdata(kdc_context, authdata);
        goto cleanup_authenticator;
    }
    krb5_free_authdata(kdc_context, authdata);

    /* Check for a checksum */
    if (!(his_cksum = authenticator->checksum)) {
        retval = KRB5KRB_AP_ERR_INAPP_CKSUM;
        goto cleanup_authenticator;
    }

    /* make sure the client is of proper lineage (see above) */
    if (foreign_server &&
        !krb5int_find_pa_data(kdc_context, request->padata,
                              KRB5_PADATA_FOR_USER)) {
        if (is_local_principal(kdc_active_realm,
                               ticket->enc_part2->client)) {
            /* someone in a foreign realm claiming to be local */
            krb5_klog_syslog(LOG_INFO, _("PROCESS_TGS: failed lineage check"));
            retval = KRB5KDC_ERR_POLICY;
            goto cleanup_authenticator;
        }
    }

    /*
     * Check
application checksum vs. tgs request * * We try checksumming the req-body two different ways: first we * try reaching into the raw asn.1 stream (if available), and * checksum that directly; if that fails, then we try encoding * using our local asn.1 library. */ if (pkt && (fetch_asn1_field((unsigned char *) pkt->data, 1, 4, &scratch1) >= 0)) { if (comp_cksum(kdc_context, &scratch1, ticket, his_cksum)) { if (!(retval = encode_krb5_kdc_req_body(request, &scratch))) retval = comp_cksum(kdc_context, scratch, ticket, his_cksum); krb5_free_data(kdc_context, scratch); if (retval) goto cleanup_authenticator; } } *pa_tgs_req = tmppa; *krbtgt_ptr = krbtgt; krbtgt = NULL; cleanup_authenticator: krb5_free_authenticator(kdc_context, authenticator); cleanup_auth_context: krb5_auth_con_free(kdc_context, auth_context); cleanup: if (retval != 0) { krb5_free_keyblock(kdc_context, *tgskey); *tgskey = NULL; } if (apreq->ticket->enc_part2 != NULL) { /* Steal the decrypted ticket pointer, even on error. */ *ticket_out = apreq->ticket; apreq->ticket = NULL; } krb5_free_ap_req(kdc_context, apreq); krb5_db_free_principal(kdc_context, krbtgt); return retval; } /* * This is a KDC wrapper around krb5_rd_req_decoded_anyflag(). * * We can't depend on KDB-as-keytab for handling the AP-REQ here for * optimization reasons: we want to minimize the number of KDB lookups. We'll * need the KDB entry for the TGS principal, and the TGS key used to decrypt * the TGT, elsewhere in the TGS code. * * This function also implements key rollover support for kvno 0 cross-realm * TGTs issued by AD. */ static krb5_error_code kdc_rd_ap_req(kdc_realm_t *kdc_active_realm, krb5_ap_req *apreq, krb5_auth_context auth_context, krb5_db_entry **server, krb5_keyblock **tgskey) { krb5_error_code retval; krb5_enctype search_enctype = apreq->ticket->enc_part.enctype; krb5_boolean match_enctype = 1; krb5_kvno kvno; size_t tries = 3; /* * When we issue tickets we use the first key in the principals' highest * kvno keyset. For non-cross-realm krbtgt principals we want to only * allow the use of the first key of the principal's keyset that matches * the given kvno. */ if (krb5_is_tgs_principal(apreq->ticket->server) && !is_cross_tgs_principal(apreq->ticket->server)) { search_enctype = -1; match_enctype = 0; } retval = kdc_get_server_key(kdc_context, apreq->ticket, KRB5_KDB_FLAG_ALIAS_OK, match_enctype, server, NULL, NULL); if (retval) return retval; *tgskey = NULL; kvno = apreq->ticket->enc_part.kvno; do { krb5_free_keyblock(kdc_context, *tgskey); retval = find_server_key(kdc_context, *server, search_enctype, kvno, tgskey, &kvno); if (retval) continue; /* Make the TGS key available to krb5_rd_req_decoded_anyflag() */ retval = krb5_auth_con_setuseruserkey(kdc_context, auth_context, *tgskey); if (retval) return retval; retval = krb5_rd_req_decoded_anyflag(kdc_context, &auth_context, apreq, apreq->ticket->server, kdc_active_realm->realm_keytab, NULL, NULL); /* If the ticket was decrypted, don't try any more keys. */ if (apreq->ticket->enc_part2 != NULL) break; } while (retval && apreq->ticket->enc_part.kvno == 0 && kvno-- > 1 && --tries > 0); return retval; } /* * The KDC should take the keytab associated with the realm and pass * that to the krb5_rd_req_decoded_anyflag(), but we still need to use * the service (TGS, here) key elsewhere. This approach is faster than * the KDB keytab approach too. * * This is also used by do_tgs_req() for u2u auth. 
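 *
 * On success the caller owns *server_ptr and must release it with
 * krb5_db_free_principal(); if 'key' is non-null, the keyblock returned
 * through it must likewise be freed with krb5_free_keyblock().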
*/ krb5_error_code kdc_get_server_key(krb5_context context, krb5_ticket *ticket, unsigned int flags, krb5_boolean match_enctype, krb5_db_entry **server_ptr, krb5_keyblock **key, krb5_kvno *kvno) { krb5_error_code retval; krb5_db_entry * server = NULL; krb5_enctype search_enctype = -1; krb5_kvno search_kvno = -1; if (match_enctype) search_enctype = ticket->enc_part.enctype; if (ticket->enc_part.kvno) search_kvno = ticket->enc_part.kvno; *server_ptr = NULL; retval = krb5_db_get_principal(context, ticket->server, flags, &server); if (retval == KRB5_KDB_NOENTRY) { char *sname; if (!krb5_unparse_name(context, ticket->server, &sname)) { limit_string(sname); krb5_klog_syslog(LOG_ERR, _("TGS_REQ: UNKNOWN SERVER: server='%s'"), sname); free(sname); } return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; } else if (retval) return retval; if (server->attributes & KRB5_KDB_DISALLOW_SVR || server->attributes & KRB5_KDB_DISALLOW_ALL_TIX) { retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto errout; } if (key) { retval = find_server_key(context, server, search_enctype, search_kvno, key, kvno); if (retval) goto errout; } *server_ptr = server; server = NULL; return 0; errout: krb5_db_free_principal(context, server); return retval; } /* * A utility function to get the right key from a KDB entry. Used in handling * of kvno 0 TGTs, for example. */ static krb5_error_code find_server_key(krb5_context context, krb5_db_entry *server, krb5_enctype enctype, krb5_kvno kvno, krb5_keyblock **key_out, krb5_kvno *kvno_out) { krb5_error_code retval; krb5_key_data * server_key; krb5_keyblock * key; *key_out = NULL; retval = krb5_dbe_find_enctype(context, server, enctype, -1, kvno ? (krb5_int32)kvno : -1, &server_key); if (retval) return retval; if (!server_key) return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; if ((key = (krb5_keyblock *)malloc(sizeof *key)) == NULL) return ENOMEM; retval = krb5_dbe_decrypt_key_data(context, NULL, server_key, key, NULL); if (retval) goto errout; if (enctype != -1) { krb5_boolean similar; retval = krb5_c_enctype_compare(context, enctype, key->enctype, &similar); if (retval) goto errout; if (!similar) { retval = KRB5_KDB_NO_PERMITTED_KEY; goto errout; } key->enctype = enctype; } *key_out = key; key = NULL; if (kvno_out) *kvno_out = server_key->key_data_kvno; errout: krb5_free_keyblock(context, key); return retval; } /* * If candidate is the local TGT for realm, set *alias_out to candidate and * *storage_out to NULL. Otherwise, load the local TGT into *storage_out and * set *alias_out to *storage_out. * * In the future we might generalize this to a small per-request principal * cache. For now, it saves a load operation in the common case where the AS * server or TGS header ticket server is the local TGT. 
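 *
 * Usage sketch (added note, mirroring the call in process_tgs_req()):
 *
 *     ret = get_local_tgt(kdc_context, &sprinc->realm, header_server,
 *                         &local_tgt, &local_tgt_storage);
 *
 * Only *storage_out is owned by the caller and must eventually be freed
 * with krb5_db_free_principal(); *alias_out may point at either candidate
 * or *storage_out and is never freed separately.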
 */
krb5_error_code
get_local_tgt(krb5_context context, const krb5_data *realm,
              krb5_db_entry *candidate, krb5_db_entry **alias_out,
              krb5_db_entry **storage_out)
{
    krb5_error_code ret;
    krb5_principal princ;
    krb5_db_entry *tgt;

    *alias_out = NULL;
    *storage_out = NULL;

    ret = krb5_build_principal_ext(context, &princ, realm->length,
                                   realm->data, KRB5_TGS_NAME_SIZE,
                                   KRB5_TGS_NAME, realm->length, realm->data,
                                   0);
    if (ret)
        return ret;

    if (!krb5_principal_compare(context, candidate->princ, princ)) {
        ret = krb5_db_get_principal(context, princ, 0, &tgt);
        if (!ret)
            *storage_out = *alias_out = tgt;
    } else {
        *alias_out = candidate;
    }

    krb5_free_principal(context, princ);
    return ret;
}

/* This probably wants to be updated if you support last_req stuff */

static krb5_last_req_entry nolrentry = { KV5M_LAST_REQ_ENTRY, KRB5_LRQ_NONE, 0 };
static krb5_last_req_entry *nolrarray[] = { &nolrentry, 0 };

krb5_error_code
fetch_last_req_info(krb5_db_entry *dbentry, krb5_last_req_entry ***lrentry)
{
    *lrentry = nolrarray;
    return 0;
}

/* XXX!  This is a temporary place-holder */

krb5_error_code
check_hot_list(krb5_ticket *ticket)
{
    return 0;
}

/* Convert an API error code to a protocol error code. */
int
errcode_to_protocol(krb5_error_code code)
{
    int protcode;

    protcode = code - ERROR_TABLE_BASE_krb5;
    return (protcode >= 0 && protcode <= 128) ? protcode : KRB_ERR_GENERIC;
}

/* Return -1 if the AS or TGS request is disallowed due to KDC policy on
 * anonymous tickets. */
int
check_anon(kdc_realm_t *kdc_active_realm,
           krb5_principal client, krb5_principal server)
{
    /* If restrict_anon is set, reject requests from anonymous to principals
     * other than the local TGT. */
    if (kdc_active_realm->realm_restrict_anon &&
        krb5_principal_compare_any_realm(kdc_context, client,
                                         krb5_anonymous_principal()) &&
        !krb5_principal_compare(kdc_context, server, tgs_server))
        return -1;
    return 0;
}

/*
 * Routines that validate an AS request; checks a lot of things.  :-)
 *
 * Returns a Kerberos protocol error number, which is _not_ the same
 * as a com_err error number!
 */
#define AS_INVALID_OPTIONS (KDC_OPT_FORWARDED | KDC_OPT_PROXY |         \
                            KDC_OPT_VALIDATE | KDC_OPT_RENEW |          \
                            KDC_OPT_ENC_TKT_IN_SKEY | KDC_OPT_CNAME_IN_ADDL_TKT)
int
validate_as_request(kdc_realm_t *kdc_active_realm,
                    krb5_kdc_req *request, krb5_db_entry client,
                    krb5_db_entry server, krb5_timestamp kdc_time,
                    const char **status, krb5_pa_data ***e_data)
{
    krb5_error_code ret;

    /*
     * If an option is set that is only allowed in TGS requests, complain.
     */
    if (request->kdc_options & AS_INVALID_OPTIONS) {
        *status = "INVALID AS OPTIONS";
        return KDC_ERR_BADOPTION;
    }

    /* The client must not be expired */
    if (client.expiration && ts_after(kdc_time, client.expiration)) {
        *status = "CLIENT EXPIRED";
        if (vague_errors)
            return(KRB_ERR_GENERIC);
        else
            return(KDC_ERR_NAME_EXP);
    }

    /* The client's password must not be expired, unless the server is
       marked KRB5_KDB_PWCHANGE_SERVICE. */
    if (client.pw_expiration && ts_after(kdc_time, client.pw_expiration) &&
        !isflagset(server.attributes, KRB5_KDB_PWCHANGE_SERVICE)) {
        *status = "CLIENT KEY EXPIRED";
        if (vague_errors)
            return(KRB_ERR_GENERIC);
        else
            return(KDC_ERR_KEY_EXP);
    }

    /* The server must not be expired */
    if (server.expiration && ts_after(kdc_time, server.expiration)) {
        *status = "SERVICE EXPIRED";
        return(KDC_ERR_SERVICE_EXP);
    }

    /*
     * If the client requires password changing, then only allow the
     * pwchange service.
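     * (Added note: in practice this lets a principal flagged
     * KRB5_KDB_REQUIRES_PWCHANGE obtain a ticket only for a service whose
     * entry carries the KRB5_KDB_PWCHANGE_SERVICE attribute, conventionally
     * the kadmin/changepw password-changing service.)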
*/ if (isflagset(client.attributes, KRB5_KDB_REQUIRES_PWCHANGE) && !isflagset(server.attributes, KRB5_KDB_PWCHANGE_SERVICE)) { *status = "REQUIRED PWCHANGE"; return(KDC_ERR_KEY_EXP); } /* Client and server must allow postdating tickets */ if ((isflagset(request->kdc_options, KDC_OPT_ALLOW_POSTDATE) || isflagset(request->kdc_options, KDC_OPT_POSTDATED)) && (isflagset(client.attributes, KRB5_KDB_DISALLOW_POSTDATED) || isflagset(server.attributes, KRB5_KDB_DISALLOW_POSTDATED))) { *status = "POSTDATE NOT ALLOWED"; return(KDC_ERR_CANNOT_POSTDATE); } /* Check to see if client is locked out */ if (isflagset(client.attributes, KRB5_KDB_DISALLOW_ALL_TIX)) { *status = "CLIENT LOCKED OUT"; return(KDC_ERR_CLIENT_REVOKED); } /* Check to see if server is locked out */ if (isflagset(server.attributes, KRB5_KDB_DISALLOW_ALL_TIX)) { *status = "SERVICE LOCKED OUT"; return(KDC_ERR_S_PRINCIPAL_UNKNOWN); } /* Check to see if server is allowed to be a service */ if (isflagset(server.attributes, KRB5_KDB_DISALLOW_SVR)) { *status = "SERVICE NOT ALLOWED"; return(KDC_ERR_MUST_USE_USER2USER); } if (check_anon(kdc_active_realm, client.princ, request->server) != 0) { *status = "ANONYMOUS NOT ALLOWED"; return(KDC_ERR_POLICY); } /* Perform KDB module policy checks. */ ret = krb5_db_check_policy_as(kdc_context, request, &client, &server, kdc_time, status, e_data); if (ret && ret != KRB5_PLUGIN_OP_NOTSUPP) return errcode_to_protocol(ret); return 0; } /* * Compute ticket flags based on the request, the client and server DB entry * (which may prohibit forwardable or proxiable tickets), and the header * ticket. client may be NULL for a TGS request (although it may be set, such * as for an S4U2Self request). header_enc may be NULL for an AS request. */ krb5_flags get_ticket_flags(krb5_flags reqflags, krb5_db_entry *client, krb5_db_entry *server, krb5_enc_tkt_part *header_enc) { krb5_flags flags; /* Indicate support for encrypted padata (RFC 6806), and set flags based on * request options and the header ticket. */ flags = OPTS2FLAGS(reqflags) | TKT_FLG_ENC_PA_REP; if (reqflags & KDC_OPT_POSTDATED) flags |= TKT_FLG_INVALID; if (header_enc != NULL) flags |= COPY_TKT_FLAGS(header_enc->flags); if (header_enc == NULL) flags |= TKT_FLG_INITIAL; /* For TGS requests, indicate if the service is marked ok-as-delegate. */ if (header_enc != NULL && (server->attributes & KRB5_KDB_OK_AS_DELEGATE)) flags |= TKT_FLG_OK_AS_DELEGATE; /* Unset PROXIABLE if it is disallowed. */ if (client != NULL && (client->attributes & KRB5_KDB_DISALLOW_PROXIABLE)) flags &= ~TKT_FLG_PROXIABLE; if (server->attributes & KRB5_KDB_DISALLOW_PROXIABLE) flags &= ~TKT_FLG_PROXIABLE; if (header_enc != NULL && !(header_enc->flags & TKT_FLG_PROXIABLE)) flags &= ~TKT_FLG_PROXIABLE; /* Unset FORWARDABLE if it is disallowed. */ if (client != NULL && (client->attributes & KRB5_KDB_DISALLOW_FORWARDABLE)) flags &= ~TKT_FLG_FORWARDABLE; if (server->attributes & KRB5_KDB_DISALLOW_FORWARDABLE) flags &= ~TKT_FLG_FORWARDABLE; if (header_enc != NULL && !(header_enc->flags & TKT_FLG_FORWARDABLE)) flags &= ~TKT_FLG_FORWARDABLE; /* We don't currently handle issuing anonymous tickets based on * non-anonymous ones. */ if (header_enc != NULL && !(header_enc->flags & TKT_FLG_ANONYMOUS)) flags &= ~TKT_FLG_ANONYMOUS; return flags; } /* Return KRB5KDC_ERR_POLICY if indicators does not contain the required auth * indicators for server, ENOMEM on allocation error, 0 otherwise. 
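 *
 * Added example (not in the original comment): if the server's
 * KRB5_KDB_SK_REQUIRE_AUTH string attribute is "otp pkinit", a ticket
 * whose authentication indicators include "otp" passes, since matching
 * any one of the space-separated values is sufficient; a ticket carrying
 * neither value fails with KRB5KDC_ERR_POLICY.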
 */
krb5_error_code
check_indicators(krb5_context context, krb5_db_entry *server,
                 krb5_data *const *indicators)
{
    krb5_error_code ret;
    char *str = NULL, *copy = NULL, *save, *ind;

    ret = krb5_dbe_get_string(context, server, KRB5_KDB_SK_REQUIRE_AUTH,
                              &str);
    if (ret || str == NULL)
        goto cleanup;

    copy = strdup(str);
    if (copy == NULL) {
        ret = ENOMEM;
        goto cleanup;
    }

    /* Look for any of the space-separated strings in indicators. */
    ind = strtok_r(copy, " ", &save);
    while (ind != NULL) {
        if (authind_contains(indicators, ind))
            goto cleanup;
        ind = strtok_r(NULL, " ", &save);
    }

    ret = KRB5KDC_ERR_POLICY;
    k5_setmsg(context, ret,
              _("Required auth indicators not present in ticket: %s"), str);

cleanup:
    krb5_dbe_free_string(context, str);
    free(copy);
    return ret;
}

#define ASN1_ID_CLASS   (0xc0)
#define ASN1_ID_TYPE    (0x20)
#define ASN1_ID_TAG     (0x1f)
#define ASN1_CLASS_UNIV (0)
#define ASN1_CLASS_APP  (1)
#define ASN1_CLASS_CTX  (2)
#define ASN1_CLASS_PRIV (3)
#define asn1_id_constructed(x)  (x & ASN1_ID_TYPE)
#define asn1_id_primitive(x)    (!asn1_id_constructed(x))
#define asn1_id_class(x)        ((x & ASN1_ID_CLASS) >> 6)
#define asn1_id_tag(x)          (x & ASN1_ID_TAG)

/*
 * asn1length - return encoded length of value.
 *
 * passed a pointer into the asn.1 stream, which is updated
 * to point right after the length bits.
 *
 * returns -1 on failure.
 */
static int
asn1length(unsigned char **astream)
{
    int length;         /* resulting length */
    int sublen;         /* sublengths */
    int blen;           /* bytes of length */
    unsigned char *p;   /* substring searching */

    if (**astream & 0x80) {
        blen = **astream & 0x7f;
        if (blen > 3) {
            return(-1);
        }
        for (++*astream, length = 0; blen; ++*astream, blen--) {
            length = (length << 8) | **astream;
        }
        if (length == 0) {
            /* indefinite length, figure out by hand */
            p = *astream;
            p++;
            while (1) {
                /* compute value length. */
                if ((sublen = asn1length(&p)) < 0) {
                    return(-1);
                }
                p += sublen;
                /* check for termination */
                if ((!*p++) && (!*p)) {
                    p++;
                    break;
                }
            }
            length = p - *astream;
        }
    } else {
        length = **astream;
        ++*astream;
    }
    return(length);
}

/*
 * fetch_asn1_field - return raw asn.1 stream of subfield.
 *
 * this routine is passed a context-dependent tag number ("field") and
 * "level", and returns the contents and length of the corresponding
 * subfield.
 *
 * levels and fields are numbered starting from 1.
 *
 * returns 0 on success, -1 otherwise.
 */
int
fetch_asn1_field(unsigned char *astream, unsigned int level,
                 unsigned int field, krb5_data *data)
{
    unsigned char *estream;     /* end of stream */
    int classes;                /* # classes seen so far this level */
    unsigned int levels = 0;    /* levels seen so far */
    int lastlevel = 1000;       /* last level seen */
    int length;                 /* various lengths */
    int tag;                    /* tag number */
    unsigned char savelen;      /* saved length of our field */

    classes = -1;
    /* we assume that the first identifier/length will tell us
       how long the entire stream is. */
    astream++;
    estream = astream;
    if ((length = asn1length(&astream)) < 0) {
        return(-1);
    }
    estream += length;
    /* search down the stream, checking identifiers.  we process identifiers
       until we hit the "level" we want, and then process that level for our
       subfield, always making sure we don't go off the end of the stream.  */
    while (astream < estream) {
        if (!asn1_id_constructed(*astream)) {
            return(-1);
        }
        if (asn1_id_class(*astream) == ASN1_CLASS_CTX) {
            if ((tag = (int)asn1_id_tag(*astream)) <= lastlevel) {
                levels++;
                classes = -1;
            }
            lastlevel = tag;
            if (levels == level) {
                /* in our context-dependent class, is this the one we're
                   looking for?
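                   (e.g., kdc_process_tgs_req() above asks for level 1,
                   field 4; in a DER-encoded KDC-REQ the context tags at
                   that level are pvno [1], msg-type [2], padata [3] and
                   req-body [4], so that call pulls out the raw encoding
                   of the request body for checksumming)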
                */
                if (tag == (int)field) {
                    /* return length and data */
                    astream++;
                    savelen = *astream;
                    if ((length = asn1length(&astream)) < 0) {
                        return(-1);
                    }
                    data->length = length;
                    /* if the field length is indefinite, we will have to
                       subtract two (terminating octets) from the length
                       returned since we don't want to pass any info from the
                       "wrapper" back.  asn1length will always return the
                       *total* length of the field, not just what's contained
                       in it */
                    if ((savelen & 0xff) == 0x80) {
                        data->length -= 2;
                    }
                    data->data = (char *)astream;
                    return(0);
                } else if (tag <= classes) {
                    /* we've seen this class before, something must be
                       wrong */
                    return(-1);
                } else {
                    classes = tag;
                }
            }
        }
        /* if we're not on our level yet, process this value.  otherwise
           skip over it */
        astream++;
        if ((length = asn1length(&astream)) < 0) {
            return(-1);
        }
        if (levels == level) {
            astream += length;
        }
    }
    return(-1);
}

/* Return true if we believe server can support enctype as a session key. */
static krb5_boolean
dbentry_supports_enctype(kdc_realm_t *kdc_active_realm, krb5_db_entry *server,
                         krb5_enctype enctype)
{
    krb5_error_code     retval;
    krb5_key_data       *datap;
    char                *etypes_str = NULL;
    krb5_enctype        default_enctypes[1] = { 0 };
    krb5_enctype        *etypes = NULL;
    krb5_boolean        in_list;

    /* Look up the supported session key enctypes list in the KDB. */
    retval = krb5_dbe_get_string(kdc_context, server,
                                 KRB5_KDB_SK_SESSION_ENCTYPES,
                                 &etypes_str);
    if (retval == 0 && etypes_str != NULL && *etypes_str != '\0') {
        /* Pass a fake profile key for tracing of unrecognized tokens. */
        retval = krb5int_parse_enctype_list(kdc_context, "KDB-session_etypes",
                                            etypes_str, default_enctypes,
                                            &etypes);
        if (retval == 0 && etypes != NULL && etypes[0]) {
            in_list = k5_etypes_contains(etypes, enctype);
            free(etypes_str);
            free(etypes);
            return in_list;
        }
        /* Fall through on error or empty list */
    }
    free(etypes_str);
    free(etypes);

    /* If configured to, assume every server without a session_enctypes
     * attribute supports DES_CBC_CRC. */
    if (kdc_active_realm->realm_assume_des_crc_sess &&
        enctype == ENCTYPE_DES_CBC_CRC)
        return TRUE;

    /* Due to an ancient interop problem, assume nothing supports des-cbc-md5
     * unless there's a session_enctypes explicitly saying that it does. */
    if (enctype == ENCTYPE_DES_CBC_MD5)
        return FALSE;

    /* Assume the server supports any enctype it has a long-term key for. */
    return !krb5_dbe_find_enctype(kdc_context, server, enctype, -1, 0, &datap);
}

/*
 * This function returns the keytype which should be selected for the
 * session key.  It is based on the ordered list which the user
 * requested, and what the KDC and the application server can support.
 */
krb5_enctype
select_session_keytype(kdc_realm_t *kdc_active_realm, krb5_db_entry *server,
                       int nktypes, krb5_enctype *ktype)
{
    int         i;

    for (i = 0; i < nktypes; i++) {
        if (!krb5_c_valid_enctype(ktype[i]))
            continue;

        if (!krb5_is_permitted_enctype(kdc_context, ktype[i]))
            continue;

        if (dbentry_supports_enctype(kdc_active_realm, server, ktype[i]))
            return ktype[i];
    }

    return 0;
}

/*
 * Limit strings to a "reasonable" length to prevent crowding out of
 * other useful information in the log entry
 */
#define NAME_LENGTH_LIMIT 128

void
limit_string(char *name)
{
    int     i;

    if (!name)
        return;

    if (strlen(name) < NAME_LENGTH_LIMIT)
        return;

    i = NAME_LENGTH_LIMIT-4;
    name[i++] = '.';
    name[i++] = '.';
    name[i++] = '.';
    name[i] = '\0';
    return;
}

/*
 * L10_2 = log10(2**x), rounded up; log10(2) ~= 0.301.
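 *
 * Worked example (added note): for a 32-bit type, L10_2(32) below is
 * (32*301 + 999)/1000 = 10, matching the 10 decimal digits of 2**32 - 1
 * (4294967295); D_LEN() then adds two for a leading minus sign and the
 * terminating NUL.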
*/ #define L10_2(x) ((int)(((x * 301) + 999) / 1000)) /* * Max length of sprintf("%ld") for an int of type T; includes leading * minus sign and terminating NUL. */ #define D_LEN(t) (L10_2(sizeof(t) * CHAR_BIT) + 2) void ktypes2str(char *s, size_t len, int nktypes, krb5_enctype *ktype) { int i; char stmp[D_LEN(krb5_enctype) + 1]; char *p; if (nktypes < 0 || len < (sizeof(" etypes {...}") + D_LEN(int))) { *s = '\0'; return; } snprintf(s, len, "%d etypes {", nktypes); for (i = 0; i < nktypes; i++) { snprintf(stmp, sizeof(stmp), "%s%ld", i ? " " : "", (long)ktype[i]); if (strlen(s) + strlen(stmp) + sizeof("}") > len) break; strlcat(s, stmp, len); } if (i < nktypes) { /* * We broke out of the loop. Try to truncate the list. */ p = s + strlen(s); while (p - s + sizeof("...}") > len) { while (p > s && *p != ' ' && *p != '{') *p-- = '\0'; if (p > s && *p == ' ') { *p-- = '\0'; continue; } } strlcat(s, "...", len); } strlcat(s, "}", len); return; } void rep_etypes2str(char *s, size_t len, krb5_kdc_rep *rep) { char stmp[sizeof("ses=") + D_LEN(krb5_enctype)]; if (len < (3 * D_LEN(krb5_enctype) + sizeof("etypes {rep= tkt= ses=}"))) { *s = '\0'; return; } snprintf(s, len, "etypes {rep=%ld", (long)rep->enc_part.enctype); if (rep->ticket != NULL) { snprintf(stmp, sizeof(stmp), " tkt=%ld", (long)rep->ticket->enc_part.enctype); strlcat(s, stmp, len); } if (rep->ticket != NULL && rep->ticket->enc_part2 != NULL && rep->ticket->enc_part2->session != NULL) { snprintf(stmp, sizeof(stmp), " ses=%ld", (long)rep->ticket->enc_part2->session->enctype); strlcat(s, stmp, len); } strlcat(s, "}", len); return; } static krb5_error_code verify_for_user_checksum(krb5_context context, krb5_keyblock *key, krb5_pa_for_user *req) { krb5_error_code code; int i; krb5_int32 name_type; char *p; krb5_data data; krb5_boolean valid = FALSE; if (!krb5_c_is_keyed_cksum(req->cksum.checksum_type)) { return KRB5KRB_AP_ERR_INAPP_CKSUM; } /* * Checksum is over name type and string components of * client principal name and auth_package. 
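     * (Layout sketch, added to match the code below: a 4-byte little-endian
     * name type, then the bytes of each principal name component in order,
     * then the realm, then the auth_package string, concatenated with no
     * length prefixes or separators in between.)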
*/ data.length = 4; for (i = 0; i < krb5_princ_size(context, req->user); i++) { data.length += krb5_princ_component(context, req->user, i)->length; } data.length += krb5_princ_realm(context, req->user)->length; data.length += req->auth_package.length; p = data.data = malloc(data.length); if (data.data == NULL) { return ENOMEM; } name_type = krb5_princ_type(context, req->user); p[0] = (name_type >> 0 ) & 0xFF; p[1] = (name_type >> 8 ) & 0xFF; p[2] = (name_type >> 16) & 0xFF; p[3] = (name_type >> 24) & 0xFF; p += 4; for (i = 0; i < krb5_princ_size(context, req->user); i++) { if (krb5_princ_component(context, req->user, i)->length > 0) { memcpy(p, krb5_princ_component(context, req->user, i)->data, krb5_princ_component(context, req->user, i)->length); } p += krb5_princ_component(context, req->user, i)->length; } if (krb5_princ_realm(context, req->user)->length > 0) { memcpy(p, krb5_princ_realm(context, req->user)->data, krb5_princ_realm(context, req->user)->length); } p += krb5_princ_realm(context, req->user)->length; if (req->auth_package.length > 0) memcpy(p, req->auth_package.data, req->auth_package.length); p += req->auth_package.length; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_APP_DATA_CKSUM, &data, &req->cksum, &valid); if (code == 0 && valid == FALSE) code = KRB5KRB_AP_ERR_MODIFIED; free(data.data); return code; } /* * Legacy protocol transition (Windows 2003 and above) */ static krb5_error_code kdc_process_for_user(kdc_realm_t *kdc_active_realm, krb5_pa_data *pa_data, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user **s4u_x509_user, const char **status) { krb5_error_code code; krb5_pa_for_user *for_user; krb5_data req_data; req_data.length = pa_data->length; req_data.data = (char *)pa_data->contents; code = decode_krb5_pa_for_user(&req_data, &for_user); if (code) { *status = "DECODE_PA_FOR_USER"; return code; } code = verify_for_user_checksum(kdc_context, tgs_session, for_user); if (code) { *status = "INVALID_S4U2SELF_CHECKSUM"; krb5_free_pa_for_user(kdc_context, for_user); return code; } *s4u_x509_user = calloc(1, sizeof(krb5_pa_s4u_x509_user)); if (*s4u_x509_user == NULL) { krb5_free_pa_for_user(kdc_context, for_user); return ENOMEM; } (*s4u_x509_user)->user_id.user = for_user->user; for_user->user = NULL; krb5_free_pa_for_user(kdc_context, for_user); return 0; } static krb5_error_code verify_s4u_x509_user_checksum(krb5_context context, krb5_keyblock *key, krb5_data *req_data, krb5_int32 kdc_req_nonce, krb5_pa_s4u_x509_user *req) { krb5_error_code code; krb5_data scratch; krb5_boolean valid = FALSE; if (enctype_requires_etype_info_2(key->enctype) && !krb5_c_is_keyed_cksum(req->cksum.checksum_type)) return KRB5KRB_AP_ERR_INAPP_CKSUM; if (req->user_id.nonce != kdc_req_nonce) return KRB5KRB_AP_ERR_MODIFIED; /* * Verify checksum over the encoded userid. If that fails, * re-encode, and verify that. This is similar to the * behaviour in kdc_process_tgs_req(). */ if (fetch_asn1_field((unsigned char *)req_data->data, 1, 0, &scratch) < 0) return ASN1_PARSE_ERROR; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, &scratch, &req->cksum, &valid); if (code != 0) return code; if (valid == FALSE) { krb5_data *data; code = encode_krb5_s4u_userid(&req->user_id, &data); if (code != 0) return code; code = krb5_c_verify_checksum(context, key, KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST, data, &req->cksum, &valid); krb5_free_data(context, data); if (code != 0) return code; } return valid ? 
0 : KRB5KRB_AP_ERR_MODIFIED; } /* * New protocol transition request (Windows 2008 and above) */ static krb5_error_code kdc_process_s4u_x509_user(krb5_context context, krb5_kdc_req *request, krb5_pa_data *pa_data, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user **s4u_x509_user, const char **status) { krb5_error_code code; krb5_data req_data; req_data.length = pa_data->length; req_data.data = (char *)pa_data->contents; code = decode_krb5_pa_s4u_x509_user(&req_data, s4u_x509_user); if (code) { *status = "DECODE_PA_S4U_X509_USER"; return code; } code = verify_s4u_x509_user_checksum(context, tgs_subkey ? tgs_subkey : tgs_session, &req_data, request->nonce, *s4u_x509_user); if (code) { *status = "INVALID_S4U2SELF_CHECKSUM"; krb5_free_pa_s4u_x509_user(context, *s4u_x509_user); *s4u_x509_user = NULL; return code; } if (krb5_princ_size(context, (*s4u_x509_user)->user_id.user) == 0 || (*s4u_x509_user)->user_id.subject_cert.length != 0) { *status = "INVALID_S4U2SELF_REQUEST"; krb5_free_pa_s4u_x509_user(context, *s4u_x509_user); *s4u_x509_user = NULL; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; } return 0; } krb5_error_code kdc_make_s4u2self_rep(krb5_context context, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_pa_s4u_x509_user *req_s4u_user, krb5_kdc_rep *reply, krb5_enc_kdc_rep_part *reply_encpart) { krb5_error_code code; krb5_data *der_user_id = NULL, *der_s4u_x509_user = NULL; krb5_pa_s4u_x509_user rep_s4u_user; krb5_pa_data *pa; krb5_enctype enctype; krb5_keyusage usage; memset(&rep_s4u_user, 0, sizeof(rep_s4u_user)); rep_s4u_user.user_id.nonce = req_s4u_user->user_id.nonce; rep_s4u_user.user_id.user = req_s4u_user->user_id.user; rep_s4u_user.user_id.options = req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE; code = encode_krb5_s4u_userid(&rep_s4u_user.user_id, &der_user_id); if (code != 0) goto cleanup; if (req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY; else usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST; code = krb5_c_make_checksum(context, req_s4u_user->cksum.checksum_type, tgs_subkey != NULL ? tgs_subkey : tgs_session, usage, der_user_id, &rep_s4u_user.cksum); if (code != 0) goto cleanup; code = encode_krb5_pa_s4u_x509_user(&rep_s4u_user, &der_s4u_x509_user); if (code != 0) goto cleanup; /* Add a padata element, stealing memory from der_s4u_x509_user. */ code = alloc_pa_data(KRB5_PADATA_S4U_X509_USER, 0, &pa); if (code != 0) goto cleanup; pa->length = der_s4u_x509_user->length; pa->contents = (uint8_t *)der_s4u_x509_user->data; der_s4u_x509_user->data = NULL; /* add_pa_data_element() claims pa on success or failure. */ code = add_pa_data_element(&reply->padata, pa); if (code != 0) goto cleanup; if (tgs_subkey != NULL) enctype = tgs_subkey->enctype; else enctype = tgs_session->enctype; /* * Owing to a bug in Windows, unkeyed checksums were used for older * enctypes, including rc4-hmac. A forthcoming workaround for this * includes the checksum bytes in the encrypted padata. 
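     * (Added note: as the code below shows, the workaround element simply
     * concatenates the request checksum bytes and the reply checksum bytes
     * into one KRB5_PADATA_S4U_X509_USER element in the encrypted padata,
     * and is emitted only when the reply-key usage option is set and
     * enctype_requires_etype_info_2() is false for the reply enctype.)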
*/ if ((req_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) && enctype_requires_etype_info_2(enctype) == FALSE) { code = alloc_pa_data(KRB5_PADATA_S4U_X509_USER, req_s4u_user->cksum.length + rep_s4u_user.cksum.length, &pa); if (code != 0) goto cleanup; memcpy(pa->contents, req_s4u_user->cksum.contents, req_s4u_user->cksum.length); memcpy(&pa->contents[req_s4u_user->cksum.length], rep_s4u_user.cksum.contents, rep_s4u_user.cksum.length); /* add_pa_data_element() claims pa on success or failure. */ code = add_pa_data_element(&reply_encpart->enc_padata, pa); if (code != 0) goto cleanup; } cleanup: if (rep_s4u_user.cksum.contents != NULL) krb5_free_checksum_contents(context, &rep_s4u_user.cksum); krb5_free_data(context, der_user_id); krb5_free_data(context, der_s4u_x509_user); return code; } /* * Protocol transition (S4U2Self) */ krb5_error_code kdc_process_s4u2self_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, krb5_const_principal client_princ, krb5_const_principal header_srv_princ, krb5_boolean issuing_referral, const krb5_db_entry *server, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_timestamp kdc_time, krb5_pa_s4u_x509_user **s4u_x509_user, krb5_db_entry **princ_ptr, const char **status) { krb5_error_code code; krb5_boolean is_local_tgt; krb5_pa_data *pa_data; int flags; krb5_db_entry *princ; *princ_ptr = NULL; pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_S4U_X509_USER); if (pa_data != NULL) { code = kdc_process_s4u_x509_user(kdc_context, request, pa_data, tgs_subkey, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else { pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_FOR_USER); if (pa_data != NULL) { code = kdc_process_for_user(kdc_active_realm, pa_data, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else return 0; } /* * We need to compare the client name in the TGT with the requested * server name. Supporting server name aliases without assuming a * global name service makes this difficult to do. * * The comparison below handles the following cases (note that the * term "principal name" below excludes the realm). * * (1) The requested service is a host-based service with two name * components, in which case we assume the principal name to * contain sufficient qualifying information. The realm is * ignored for the purpose of comparison. * * (2) The requested service name is an enterprise principal name: * the service principal name is compared with the unparsed * form of the client name (including its realm). * * (3) The requested service is some other name type: an exact * match is required. * * An alternative would be to look up the server once again with * FLAG_CANONICALIZE | FLAG_CLIENT_REFERRALS_ONLY set, do an exact * match between the returned name and client_princ. However, this * assumes that the client set FLAG_CANONICALIZE when requesting * the TGT and that we have a global name service. 
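 *
 * (Added examples for the cases above: under (1), a two-component
 * host-based name such as HTTP/www.example.com can match a TGT client
 * from any realm, since the realm is ignored; under (2), an enterprise
 * server name such as svc@ad.example is compared against the unparsed
 * form of the TGT client name, realm included; under (3), any other name
 * type must match the client exactly.)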
*/ flags = 0; switch (krb5_princ_type(kdc_context, request->server)) { case KRB5_NT_SRV_HST: /* (1) */ if (krb5_princ_size(kdc_context, request->server) == 2) flags |= KRB5_PRINCIPAL_COMPARE_IGNORE_REALM; break; case KRB5_NT_ENTERPRISE_PRINCIPAL: /* (2) */ flags |= KRB5_PRINCIPAL_COMPARE_ENTERPRISE; break; default: /* (3) */ break; } if (!krb5_principal_compare_flags(kdc_context, request->server, client_princ, flags)) { *status = "INVALID_S4U2SELF_REQUEST"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; /* match Windows error code */ } /* * Protocol transition is mutually exclusive with renew/forward/etc * as well as user-to-user and constrained delegation. This check * is also made in validate_as_request(). * * We can assert from this check that the header ticket was a TGT, as * that is validated previously in validate_tgs_request(). */ if (request->kdc_options & AS_INVALID_OPTIONS) { *status = "INVALID AS OPTIONS"; return KRB5KDC_ERR_BADOPTION; } /* * Valid S4U2Self requests can occur in the following combinations: * * (1) local TGT, local user, local server * (2) cross TGT, local user, issuing referral * (3) cross TGT, non-local user, issuing referral * (4) cross TGT, non-local user, local server * * The first case is for a single-realm S4U2Self scenario; the second, * third, and fourth cases are for the initial, intermediate (if any), and * final cross-realm requests in a multi-realm scenario. */ is_local_tgt = !is_cross_tgs_principal(header_srv_princ); if (is_local_tgt && issuing_referral) { /* The requesting server appears to no longer exist, and we found * a referral instead. Treat this as a server lookup failure. */ *status = "LOOKING_UP_SERVER"; return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; } /* * Do not attempt to lookup principals in foreign realms. */ if (is_local_principal(kdc_active_realm, (*s4u_x509_user)->user_id.user)) { krb5_db_entry no_server; krb5_pa_data **e_data = NULL; if (!is_local_tgt && !issuing_referral) { /* A local server should not need a cross-realm TGT to impersonate * a local principal. */ *status = "NOT_CROSS_REALM_REQUEST"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; /* match Windows error */ } code = krb5_db_get_principal(kdc_context, (*s4u_x509_user)->user_id.user, KRB5_KDB_FLAG_INCLUDE_PAC, &princ); if (code == KRB5_KDB_NOENTRY) { *status = "UNKNOWN_S4U2SELF_PRINCIPAL"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; } else if (code) { *status = "LOOKING_UP_S4U2SELF_PRINCIPAL"; return code; /* caller can free for_user */ } memset(&no_server, 0, sizeof(no_server)); code = validate_as_request(kdc_active_realm, request, *princ, no_server, kdc_time, status, &e_data); if (code) { krb5_db_free_principal(kdc_context, princ); krb5_free_pa_data(kdc_context, e_data); return code; } *princ_ptr = princ; } else if (is_local_tgt) { /* * The server is asking to impersonate a principal from another realm, * using a local TGT. It should instead ask that principal's realm and * follow referrals back to us. 
*/ *status = "S4U2SELF_CLIENT_NOT_OURS"; return KRB5KDC_ERR_POLICY; /* match Windows error */ } return 0; } static krb5_error_code check_allowed_to_delegate_to(krb5_context context, krb5_const_principal client, const krb5_db_entry *server, krb5_const_principal proxy) { /* Can't get a TGT (otherwise it would be unconstrained delegation) */ if (krb5_is_tgs_principal(proxy)) return KRB5KDC_ERR_POLICY; /* Must be in same realm */ if (!krb5_realm_compare(context, server->princ, proxy)) return KRB5KDC_ERR_POLICY; return krb5_db_check_allowed_to_delegate(context, client, server, proxy); } krb5_error_code kdc_process_s4u2proxy_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, const krb5_enc_tkt_part *t2enc, const krb5_db_entry *server, krb5_const_principal server_princ, krb5_const_principal proxy_princ, const char **status) { krb5_error_code errcode; /* * Constrained delegation is mutually exclusive with renew/forward/etc. * We can assert from this check that the header ticket was a TGT, as * that is validated previously in validate_tgs_request(). */ if (request->kdc_options & (NON_TGT_OPTION | KDC_OPT_ENC_TKT_IN_SKEY)) { *status = "INVALID_S4U2PROXY_OPTIONS"; return KRB5KDC_ERR_BADOPTION; } /* Ensure that evidence ticket server matches TGT client */ if (!krb5_principal_compare(kdc_context, server->princ, /* after canon */ server_princ)) { *status = "EVIDENCE_TICKET_MISMATCH"; return KRB5KDC_ERR_SERVER_NOMATCH; } if (!isflagset(t2enc->flags, TKT_FLG_FORWARDABLE)) { *status = "EVIDENCE_TKT_NOT_FORWARDABLE"; return KRB5_TKT_NOT_FORWARDABLE; } /* Backend policy check */ errcode = check_allowed_to_delegate_to(kdc_context, t2enc->client, server, proxy_princ); if (errcode) { *status = "NOT_ALLOWED_TO_DELEGATE"; return errcode; } return 0; } krb5_error_code kdc_check_transited_list(kdc_realm_t *kdc_active_realm, const krb5_data *trans, const krb5_data *realm1, const krb5_data *realm2) { krb5_error_code code; /* Check against the KDB module. Treat this answer as authoritative if the * method is supported and doesn't explicitly pass control. */ code = krb5_db_check_transited_realms(kdc_context, trans, realm1, realm2); if (code != KRB5_PLUGIN_OP_NOTSUPP && code != KRB5_PLUGIN_NO_HANDLE) return code; /* Check using krb5.conf [capaths] or hierarchical relationships. */ return krb5_check_transited_list(kdc_context, trans, realm1, realm2); } krb5_error_code validate_transit_path(krb5_context context, krb5_const_principal client, krb5_db_entry *server, krb5_db_entry *header_srv) { /* Incoming */ if (isflagset(server->attributes, KRB5_KDB_XREALM_NON_TRANSITIVE)) { return KRB5KDC_ERR_PATH_NOT_ACCEPTED; } /* Outgoing */ if (isflagset(header_srv->attributes, KRB5_KDB_XREALM_NON_TRANSITIVE) && (!krb5_principal_compare(context, server->princ, header_srv->princ) || !krb5_realm_compare(context, client, header_srv->princ))) { return KRB5KDC_ERR_PATH_NOT_ACCEPTED; } return 0; } krb5_boolean enctype_requires_etype_info_2(krb5_enctype enctype) { switch(enctype) { case ENCTYPE_DES_CBC_CRC: case ENCTYPE_DES_CBC_MD4: case ENCTYPE_DES_CBC_MD5: case ENCTYPE_DES3_CBC_SHA1: case ENCTYPE_DES3_CBC_RAW: case ENCTYPE_ARCFOUR_HMAC: case ENCTYPE_ARCFOUR_HMAC_EXP : return 0; default: return krb5_c_valid_enctype(enctype); } } /* Allocate a pa-data entry with an uninitialized buffer of size len. 
*/ krb5_error_code alloc_pa_data(krb5_preauthtype pa_type, size_t len, krb5_pa_data **out) { krb5_pa_data *pa; uint8_t *buf = NULL; *out = NULL; if (len > 0) { buf = malloc(len); if (buf == NULL) return ENOMEM; } pa = malloc(sizeof(*pa)); if (pa == NULL) { free(buf); return ENOMEM; } pa->magic = KV5M_PA_DATA; pa->pa_type = pa_type; pa->length = len; pa->contents = buf; *out = pa; return 0; } /* Add pa to list, claiming its memory. Free pa on failure. */ krb5_error_code add_pa_data_element(krb5_pa_data ***list, krb5_pa_data *pa) { size_t count; krb5_pa_data **newlist; for (count = 0; *list != NULL && (*list)[count] != NULL; count++); newlist = realloc(*list, (count + 2) * sizeof(*newlist)); if (newlist == NULL) { free(pa->contents); free(pa); return ENOMEM; } newlist[count] = pa; newlist[count + 1] = NULL; *list = newlist; return 0; } void kdc_get_ticket_endtime(kdc_realm_t *kdc_active_realm, krb5_timestamp starttime, krb5_timestamp endtime, krb5_timestamp till, krb5_db_entry *client, krb5_db_entry *server, krb5_timestamp *out_endtime) { krb5_timestamp until; krb5_deltat life; if (till == 0) till = kdc_infinity; until = ts_min(till, endtime); /* Determine the requested lifetime, capped at the maximum valid time * interval. */ life = ts_delta(until, starttime); if (ts_after(until, starttime) && life < 0) life = INT32_MAX; if (client != NULL && client->max_life != 0) life = min(life, client->max_life); if (server->max_life != 0) life = min(life, server->max_life); if (kdc_active_realm->realm_maxlife != 0) life = min(life, kdc_active_realm->realm_maxlife); *out_endtime = ts_incr(starttime, life); } /* * Set tkt->renew_till to the requested renewable lifetime as modified by * policy. Set the TKT_FLG_RENEWABLE flag if we set a nonzero renew_till. * client and tgt may be NULL. */ void kdc_get_ticket_renewtime(kdc_realm_t *realm, krb5_kdc_req *request, krb5_enc_tkt_part *tgt, krb5_db_entry *client, krb5_db_entry *server, krb5_enc_tkt_part *tkt) { krb5_timestamp rtime, max_rlife; clear(tkt->flags, TKT_FLG_RENEWABLE); tkt->times.renew_till = 0; /* Don't issue renewable tickets if the client or server don't allow it, * or if this is a TGS request and the TGT isn't renewable. */ if (server->attributes & KRB5_KDB_DISALLOW_RENEWABLE) return; if (client != NULL && (client->attributes & KRB5_KDB_DISALLOW_RENEWABLE)) return; if (tgt != NULL && !(tgt->flags & TKT_FLG_RENEWABLE)) return; /* Determine the requested renewable time. */ if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE)) rtime = request->rtime ? request->rtime : kdc_infinity; else if (isflagset(request->kdc_options, KDC_OPT_RENEWABLE_OK) && ts_after(request->till, tkt->times.endtime)) rtime = request->till; else return; /* Truncate it to the allowable renewable time. */ if (tgt != NULL) rtime = ts_min(rtime, tgt->times.renew_till); max_rlife = min(server->max_renewable_life, realm->realm_maxrlife); if (client != NULL) max_rlife = min(max_rlife, client->max_renewable_life); rtime = ts_min(rtime, ts_incr(tkt->times.starttime, max_rlife)); /* If the client only specified renewable-ok, don't issue a renewable * ticket unless the truncated renew time exceeds the ticket end time. 
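     * (Worked example, added here and assuming no tighter client or realm
     * caps: with KDC_OPT_RENEWABLE_OK set, till = now+10h and a server
     * max_life of 8h, the endtime is now+8h; rtime becomes now+10h and
     * survives truncation against a 24h max renewable life, and since it
     * exceeds the endtime the ticket is issued renewable until now+10h.)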
     */
    if (!isflagset(request->kdc_options, KDC_OPT_RENEWABLE) &&
        !ts_after(rtime, tkt->times.endtime))
        return;

    setflag(tkt->flags, TKT_FLG_RENEWABLE);
    tkt->times.renew_till = rtime;
}

/**
 * Handle protected negotiation of FAST using enc_padata
 * - If ENCPADATA_REQ_ENC_PA_REP is present, then:
 *   - Return ENCPADATA_REQ_ENC_PA_REP with checksum of AS-REQ from client
 *   - Include PADATA_FX_FAST in the enc_padata to indicate FAST
 * Both elements are appended to @c out_enc_padata, which is grown as
 * needed by add_pa_data_element().
 */
krb5_error_code
kdc_handle_protected_negotiation(krb5_context context,
                                 krb5_data *req_pkt, krb5_kdc_req *request,
                                 const krb5_keyblock *reply_key,
                                 krb5_pa_data ***out_enc_padata)
{
    krb5_error_code retval = 0;
    krb5_checksum checksum;
    krb5_data *der_cksum = NULL;
    krb5_pa_data *pa, *pa_in;

    memset(&checksum, 0, sizeof(checksum));

    pa_in = krb5int_find_pa_data(context, request->padata,
                                 KRB5_ENCPADATA_REQ_ENC_PA_REP);
    if (pa_in == NULL)
        return 0;

    /* Compute and encode a checksum over the AS-REQ. */
    retval = krb5_c_make_checksum(context, 0, reply_key, KRB5_KEYUSAGE_AS_REQ,
                                  req_pkt, &checksum);
    if (retval != 0)
        goto cleanup;
    retval = encode_krb5_checksum(&checksum, &der_cksum);
    if (retval != 0)
        goto cleanup;

    /* Add a pa-data element to the list, stealing memory from der_cksum. */
    retval = alloc_pa_data(KRB5_ENCPADATA_REQ_ENC_PA_REP, 0, &pa);
    if (retval)
        goto cleanup;
    pa->length = der_cksum->length;
    pa->contents = (uint8_t *)der_cksum->data;
    der_cksum->data = NULL;
    /* add_pa_data_element() claims pa on success or failure. */
    retval = add_pa_data_element(out_enc_padata, pa);
    if (retval)
        goto cleanup;

    /* Add a zero-length PA-FX-FAST element to the list. */
    retval = alloc_pa_data(KRB5_PADATA_FX_FAST, 0, &pa);
    if (retval)
        goto cleanup;
    /* add_pa_data_element() claims pa on success or failure. */
    retval = add_pa_data_element(out_enc_padata, pa);

cleanup:
    krb5_free_checksum_contents(context, &checksum);
    krb5_free_data(context, der_cksum);
    return retval;
}

/*
 * Although the KDC doesn't call this function directly,
 * process_tcp_connection_read() in net-server.c does call it.
 */
krb5_error_code
make_toolong_error (void *handle, krb5_data **out)
{
    krb5_error errpkt;
    krb5_error_code retval;
    krb5_data *scratch;
    struct server_handle *h = handle;

    retval = krb5_us_timeofday(h->kdc_err_context,
                               &errpkt.stime, &errpkt.susec);
    if (retval)
        return retval;
    errpkt.error = KRB_ERR_FIELD_TOOLONG;
    errpkt.server = h->kdc_realmlist[0]->realm_tgsprinc;
    errpkt.client = NULL;
    errpkt.cusec = 0;
    errpkt.ctime = 0;
    errpkt.text.length = 0;
    errpkt.text.data = 0;
    errpkt.e_data.length = 0;
    errpkt.e_data.data = 0;
    scratch = malloc(sizeof(*scratch));
    if (scratch == NULL)
        return ENOMEM;
    retval = krb5_mk_error(h->kdc_err_context, &errpkt, scratch);
    if (retval) {
        free(scratch);
        return retval;
    }

    *out = scratch;
    return 0;
}

void reset_for_hangup(void *ctx)
{
    int k;
    struct server_handle *h = ctx;

    for (k = 0; k < h->kdc_numrealms; k++)
        krb5_db_refresh_config(h->kdc_realmlist[k]->realm_context);
}
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* kdc/do_tgs_req.c - KDC Routines to deal with TGS_REQ's */ /* * Copyright 1990, 1991, 2001, 2007, 2008, 2009, 2013, 2014 by the * Massachusetts Institute of Technology. All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ /* * Copyright (c) 2006-2008, Novell, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The copyright holder's name is not used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "k5-int.h" #include <syslog.h> #ifdef HAVE_NETINET_IN_H #include <sys/types.h> #include <netinet/in.h> #ifndef hpux #include <arpa/inet.h> #endif #endif #include "kdc_util.h" #include "kdc_audit.h" #include "policy.h" #include "extern.h" #include "adm_proto.h" #include <ctype.h> static krb5_error_code find_alternate_tgs(kdc_realm_t *, krb5_principal, krb5_db_entry **, const char**); static krb5_error_code prepare_error_tgs(struct kdc_request_state *, krb5_kdc_req *,krb5_ticket *,int, krb5_principal,krb5_data **,const char *, krb5_pa_data **); static krb5_error_code decrypt_2ndtkt(kdc_realm_t *, krb5_kdc_req *, krb5_flags, krb5_db_entry **, const char **); static krb5_error_code gen_session_key(kdc_realm_t *, krb5_kdc_req *, krb5_db_entry *, krb5_keyblock *, const char **); static krb5_int32 find_referral_tgs(kdc_realm_t *, krb5_kdc_req *, krb5_principal *); static krb5_error_code db_get_svc_princ(krb5_context, krb5_principal, krb5_flags, krb5_db_entry **, const char **); static krb5_error_code search_sprinc(kdc_realm_t *, krb5_kdc_req *, krb5_flags, krb5_db_entry **, const char **); /*ARGSUSED*/ krb5_error_code process_tgs_req(struct server_handle *handle, krb5_data *pkt, const krb5_fulladdr *from, krb5_data **response) { krb5_keyblock * subkey = 0; krb5_keyblock *header_key = NULL; krb5_kdc_req *request = 0; krb5_db_entry *server = NULL; krb5_db_entry *stkt_server = NULL; krb5_kdc_rep reply; krb5_enc_kdc_rep_part reply_encpart; krb5_ticket ticket_reply, *header_ticket = 0; int st_idx = 0; krb5_enc_tkt_part enc_tkt_reply; int newtransited = 0; krb5_error_code retval = 0; krb5_keyblock encrypting_key; krb5_timestamp kdc_time, authtime = 0; krb5_keyblock session_key; krb5_keyblock *reply_key = NULL; krb5_key_data *server_key; krb5_principal cprinc = NULL, sprinc = NULL, altcprinc = NULL; krb5_last_req_entry *nolrarray[2], nolrentry; int errcode; const char *status = 0; krb5_enc_tkt_part *header_enc_tkt = NULL; /* TGT */ krb5_enc_tkt_part *subject_tkt = NULL; /* TGT or evidence ticket */ krb5_db_entry *client = NULL, *header_server = NULL; krb5_db_entry *local_tgt, *local_tgt_storage = NULL; krb5_pa_s4u_x509_user *s4u_x509_user = NULL; /* protocol transition request */ krb5_authdata **kdc_issued_auth_data = NULL; /* auth data issued by KDC */ unsigned int c_flags = 0, s_flags = 0; /* client/server KDB flags */ krb5_boolean is_referral; const char *emsg = NULL; krb5_kvno ticket_kvno = 0; struct kdc_request_state *state = NULL; krb5_pa_data *pa_tgs_req; /*points into request*/ krb5_data scratch; krb5_pa_data **e_data = NULL; kdc_realm_t *kdc_active_realm = NULL; krb5_audit_state *au_state = NULL; krb5_data **auth_indicators = NULL; memset(&reply, 0, sizeof(reply)); memset(&reply_encpart, 0, sizeof(reply_encpart)); memset(&ticket_reply, 0, sizeof(ticket_reply)); memset(&enc_tkt_reply, 0, sizeof(enc_tkt_reply)); session_key.contents = NULL; retval = decode_krb5_tgs_req(pkt, &request); if (retval) return retval; /* Save pointer to client-requested service principal, in case of * errors before a successful call to search_sprinc(). */ sprinc = request->server; if (request->msg_type != KRB5_TGS_REQ) { krb5_free_kdc_req(handle->kdc_err_context, request); return KRB5_BADMSGTYPE; } /* * setup_server_realm() sets up the global realm-specific data pointer. 
*/ kdc_active_realm = setup_server_realm(handle, request->server); if (kdc_active_realm == NULL) { krb5_free_kdc_req(handle->kdc_err_context, request); return KRB5KDC_ERR_WRONG_REALM; } errcode = kdc_make_rstate(kdc_active_realm, &state); if (errcode !=0) { krb5_free_kdc_req(handle->kdc_err_context, request); return errcode; } /* Initialize audit state. */ errcode = kau_init_kdc_req(kdc_context, request, from, &au_state); if (errcode) { krb5_free_kdc_req(handle->kdc_err_context, request); return errcode; } /* Seed the audit trail with the request ID and basic information. */ kau_tgs_req(kdc_context, TRUE, au_state); errcode = kdc_process_tgs_req(kdc_active_realm, request, from, pkt, &header_ticket, &header_server, &header_key, &subkey, &pa_tgs_req); if (header_ticket && header_ticket->enc_part2) cprinc = header_ticket->enc_part2->client; if (errcode) { status = "PROCESS_TGS"; goto cleanup; } if (!header_ticket) { errcode = KRB5_NO_TKT_SUPPLIED; /* XXX? */ status="UNEXPECTED NULL in header_ticket"; goto cleanup; } errcode = kau_make_tkt_id(kdc_context, header_ticket, &au_state->tkt_in_id); if (errcode) { status = "GENERATE_TICKET_ID"; goto cleanup; } scratch.length = pa_tgs_req->length; scratch.data = (char *) pa_tgs_req->contents; errcode = kdc_find_fast(&request, &scratch, subkey, header_ticket->enc_part2->session, state, NULL); /* Reset sprinc because kdc_find_fast() can replace request. */ sprinc = request->server; if (errcode !=0) { status = "FIND_FAST"; goto cleanup; } errcode = get_local_tgt(kdc_context, &sprinc->realm, header_server, &local_tgt, &local_tgt_storage); if (errcode) { status = "GET_LOCAL_TGT"; goto cleanup; } /* Ignore (for now) the request modification due to FAST processing. */ au_state->request = request; /* * Pointer to the encrypted part of the header ticket, which may be * replaced to point to the encrypted part of the evidence ticket * if constrained delegation is used. This simplifies the number of * special cases for constrained delegation. */ header_enc_tkt = header_ticket->enc_part2; /* * We've already dealt with the AP_REQ authentication, so we can * use header_ticket freely. The encrypted part (if any) has been * decrypted with the session key. */ au_state->stage = SRVC_PRINC; /* XXX make sure server here has the proper realm...taken from AP_REQ header? */ setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK); if (isflagset(request->kdc_options, KDC_OPT_CANONICALIZE)) { setflag(c_flags, KRB5_KDB_FLAG_CANONICALIZE); setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE); } errcode = search_sprinc(kdc_active_realm, request, s_flags, &server, &status); if (errcode != 0) goto cleanup; sprinc = server->princ; /* If we got a cross-realm TGS which is not the requested server, we are * issuing a referral (or alternate TGT, which we treat similarly). 
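     * For example (added note with hypothetical names): if the client asked
     * for HTTP/www.dept.example with canonicalization and the lookup
     * returned krbtgt/DEPT.EXAMPLE@EXAMPLE.COM, the result is a cross-realm
     * TGS principal that differs from the requested server, so is_referral
     * is set below.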
*/ is_referral = is_cross_tgs_principal(server->princ) && !krb5_principal_compare(kdc_context, request->server, server->princ); au_state->stage = VALIDATE_POL; if ((errcode = krb5_timeofday(kdc_context, &kdc_time))) { status = "TIME_OF_DAY"; goto cleanup; } if ((retval = validate_tgs_request(kdc_active_realm, request, *server, header_ticket, kdc_time, &status, &e_data))) { if (!status) status = "UNKNOWN_REASON"; if (retval == KDC_ERR_POLICY || retval == KDC_ERR_BADOPTION) au_state->violation = PROT_CONSTRAINT; errcode = retval + ERROR_TABLE_BASE_krb5; goto cleanup; } if (!is_local_principal(kdc_active_realm, header_enc_tkt->client)) setflag(c_flags, KRB5_KDB_FLAG_CROSS_REALM); /* Check for protocol transition */ errcode = kdc_process_s4u2self_req(kdc_active_realm, request, header_enc_tkt->client, server, subkey, header_enc_tkt->session, kdc_time, &s4u_x509_user, &client, &status); if (s4u_x509_user != NULL || errcode != 0) { if (s4u_x509_user != NULL) au_state->s4u2self_user = s4u_x509_user->user_id.user; if (errcode == KDC_ERR_POLICY || errcode == KDC_ERR_BADOPTION) au_state->violation = PROT_CONSTRAINT; au_state->status = status; kau_s4u2self(kdc_context, errcode ? FALSE : TRUE, au_state); au_state->s4u2self_user = NULL; } if (errcode) goto cleanup; if (s4u_x509_user != NULL) { setflag(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION); if (is_referral) { /* The requesting server appears to no longer exist, and we found * a referral instead. Treat this as a server lookup failure. */ errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; status = "LOOKING_UP_SERVER"; goto cleanup; } } /* Deal with user-to-user and constrained delegation */ errcode = decrypt_2ndtkt(kdc_active_realm, request, c_flags, &stkt_server, &status); if (errcode) goto cleanup; if (isflagset(request->kdc_options, KDC_OPT_CNAME_IN_ADDL_TKT)) { /* Do constrained delegation protocol and authorization checks */ errcode = kdc_process_s4u2proxy_req(kdc_active_realm, request, request->second_ticket[st_idx]->enc_part2, stkt_server, header_ticket->enc_part2->client, request->server, &status); if (errcode == KDC_ERR_POLICY || errcode == KDC_ERR_BADOPTION) au_state->violation = PROT_CONSTRAINT; else if (errcode) au_state->violation = LOCAL_POLICY; au_state->status = status; retval = kau_make_tkt_id(kdc_context, request->second_ticket[st_idx], &au_state->evid_tkt_id); if (retval) { status = "GENERATE_TICKET_ID"; errcode = retval; goto cleanup; } kau_s4u2proxy(kdc_context, errcode ? FALSE : TRUE, au_state); if (errcode) goto cleanup; setflag(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION); assert(krb5_is_tgs_principal(header_ticket->server)); assert(client == NULL); /* assured by kdc_process_s4u2self_req() */ client = stkt_server; stkt_server = NULL; } else if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) { krb5_db_free_principal(kdc_context, stkt_server); stkt_server = NULL; } else assert(stkt_server == NULL); au_state->stage = ISSUE_TKT; errcode = gen_session_key(kdc_active_realm, request, server, &session_key, &status); if (errcode) goto cleanup; /* * subject_tkt will refer to the evidence ticket (for constrained * delegation) or the TGT. The distinction from header_enc_tkt is * necessary because the TGS signature only protects some fields: * the others could be forged by a malicious server. 
*/ if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) subject_tkt = request->second_ticket[st_idx]->enc_part2; else subject_tkt = header_enc_tkt; authtime = subject_tkt->times.authtime; /* Extract auth indicators from the subject ticket, except for S4U2Proxy * requests (where the client didn't authenticate). */ if (s4u_x509_user == NULL) { errcode = get_auth_indicators(kdc_context, subject_tkt, local_tgt, &auth_indicators); if (errcode) { status = "GET_AUTH_INDICATORS"; goto cleanup; } } errcode = check_indicators(kdc_context, server, auth_indicators); if (errcode) { status = "HIGHER_AUTHENTICATION_REQUIRED"; goto cleanup; } if (is_referral) ticket_reply.server = server->princ; else ticket_reply.server = request->server; /* XXX careful for realm... */ enc_tkt_reply.flags = OPTS2FLAGS(request->kdc_options); enc_tkt_reply.flags |= COPY_TKT_FLAGS(header_enc_tkt->flags); enc_tkt_reply.times.starttime = 0; if (isflagset(server->attributes, KRB5_KDB_OK_AS_DELEGATE)) setflag(enc_tkt_reply.flags, TKT_FLG_OK_AS_DELEGATE); /* Indicate support for encrypted padata (RFC 6806). */ setflag(enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP); /* don't use new addresses unless forwarded, see below */ enc_tkt_reply.caddrs = header_enc_tkt->caddrs; /* noaddrarray[0] = 0; */ reply_encpart.caddrs = 0;/* optional...don't put it in */ reply_encpart.enc_padata = NULL; /* * It should be noted that local policy may affect the * processing of any of these flags. For example, some * realms may refuse to issue renewable tickets */ if (isflagset(request->kdc_options, KDC_OPT_FORWARDABLE)) { if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) { /* * If S4U2Self principal is not forwardable, then mark ticket as * unforwardable. This behaviour matches Windows, but it is * different to the MIT AS-REQ path, which returns an error * (KDC_ERR_POLICY) if forwardable tickets cannot be issued. * * Consider this block the S4U2Self equivalent to * validate_forwardable(). */ if (client != NULL && isflagset(client->attributes, KRB5_KDB_DISALLOW_FORWARDABLE)) clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); /* * Forwardable flag is propagated along referral path. */ else if (!isflagset(header_enc_tkt->flags, TKT_FLG_FORWARDABLE)) clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); /* * OK_TO_AUTH_AS_DELEGATE must be set on the service requesting * S4U2Self in order for forwardable tickets to be returned. */ else if (!is_referral && !isflagset(server->attributes, KRB5_KDB_OK_TO_AUTH_AS_DELEGATE)) clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); } } if (isflagset(request->kdc_options, KDC_OPT_FORWARDED) || isflagset(request->kdc_options, KDC_OPT_PROXY)) { /* include new addresses in ticket & reply */ enc_tkt_reply.caddrs = request->addresses; reply_encpart.caddrs = request->addresses; } /* We don't currently handle issuing anonymous tickets based on * non-anonymous ones, so just ignore the option. 
*/ if (isflagset(request->kdc_options, KDC_OPT_REQUEST_ANONYMOUS) && !isflagset(header_enc_tkt->flags, TKT_FLG_ANONYMOUS)) clear(enc_tkt_reply.flags, TKT_FLG_ANONYMOUS); if (isflagset(request->kdc_options, KDC_OPT_POSTDATED)) { setflag(enc_tkt_reply.flags, TKT_FLG_INVALID); enc_tkt_reply.times.starttime = request->from; } else enc_tkt_reply.times.starttime = kdc_time; if (isflagset(request->kdc_options, KDC_OPT_VALIDATE)) { assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0); /* BEWARE of allocation hanging off of ticket & enc_part2, it belongs to the caller */ ticket_reply = *(header_ticket); enc_tkt_reply = *(header_ticket->enc_part2); enc_tkt_reply.authorization_data = NULL; clear(enc_tkt_reply.flags, TKT_FLG_INVALID); } if (isflagset(request->kdc_options, KDC_OPT_RENEW)) { krb5_timestamp old_starttime; krb5_deltat old_life; assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0); /* BEWARE of allocation hanging off of ticket & enc_part2, it belongs to the caller */ ticket_reply = *(header_ticket); enc_tkt_reply = *(header_ticket->enc_part2); enc_tkt_reply.authorization_data = NULL; old_starttime = enc_tkt_reply.times.starttime ? enc_tkt_reply.times.starttime : enc_tkt_reply.times.authtime; old_life = ts_delta(enc_tkt_reply.times.endtime, old_starttime); enc_tkt_reply.times.starttime = kdc_time; enc_tkt_reply.times.endtime = ts_min(header_ticket->enc_part2->times.renew_till, ts_incr(kdc_time, old_life)); } else { /* not a renew request */ enc_tkt_reply.times.starttime = kdc_time; kdc_get_ticket_endtime(kdc_active_realm, enc_tkt_reply.times.starttime, header_enc_tkt->times.endtime, request->till, client, server, &enc_tkt_reply.times.endtime); } kdc_get_ticket_renewtime(kdc_active_realm, request, header_enc_tkt, client, server, &enc_tkt_reply); /* * Set authtime to be the same as header or evidence ticket's */ enc_tkt_reply.times.authtime = authtime; /* starttime is optional, and treated as authtime if not present. so we can nuke it if it matches */ if (enc_tkt_reply.times.starttime == enc_tkt_reply.times.authtime) enc_tkt_reply.times.starttime = 0; if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) { altcprinc = s4u_x509_user->user_id.user; } else if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) { altcprinc = subject_tkt->client; } else { altcprinc = NULL; } if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) { krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2; encrypting_key = *(t2enc->session); } else { /* * Find the server key */ if ((errcode = krb5_dbe_find_enctype(kdc_context, server, -1, /* ignore keytype */ -1, /* Ignore salttype */ 0, /* Get highest kvno */ &server_key))) { status = "FINDING_SERVER_KEY"; goto cleanup; } /* * Convert server.key into a real key * (it may be encrypted in the database) */ if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL, server_key, &encrypting_key, NULL))) { status = "DECRYPT_SERVER_KEY"; goto cleanup; } } if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) { /* * Don't allow authorization data to be disabled if constrained * delegation is requested. We don't want to deny the server * the ability to validate that delegation was used. */ clear(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED); } if (isflagset(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED) == 0) { /* * If we are not doing protocol transition/constrained delegation * try to lookup the client principal so plugins can add additional * authorization information. 
* * Always validate authorization data for constrained delegation * because we must validate the KDC signatures. */ if (!isflagset(c_flags, KRB5_KDB_FLAGS_S4U)) { /* Generate authorization data so we can include it in ticket */ setflag(c_flags, KRB5_KDB_FLAG_INCLUDE_PAC); /* Map principals from foreign (possibly non-AD) realms */ setflag(c_flags, KRB5_KDB_FLAG_MAP_PRINCIPALS); assert(client == NULL); /* should not have been set already */ errcode = krb5_db_get_principal(kdc_context, subject_tkt->client, c_flags, &client); } } if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) && !isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM)) enc_tkt_reply.client = s4u_x509_user->user_id.user; else enc_tkt_reply.client = subject_tkt->client; enc_tkt_reply.session = &session_key; enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS; enc_tkt_reply.transited.tr_contents = empty_string; /* equivalent of "" */ /* * Only add the realm of the presented tgt to the transited list if * it is different than the local realm (cross-realm) and it is different * than the realm of the client (since the realm of the client is already * implicitly part of the transited list and should not be explicitly * listed). */ /* realm compare is like strcmp, but knows how to deal with these args */ if (krb5_realm_compare(kdc_context, header_ticket->server, tgs_server) || krb5_realm_compare(kdc_context, header_ticket->server, enc_tkt_reply.client)) { /* tgt issued by local realm or issued by realm of client */ enc_tkt_reply.transited = header_enc_tkt->transited; } else { /* tgt issued by some other realm and not the realm of the client */ /* assemble new transited field into allocated storage */ if (header_enc_tkt->transited.tr_type != KRB5_DOMAIN_X500_COMPRESS) { status = "VALIDATE_TRANSIT_TYPE"; errcode = KRB5KDC_ERR_TRTYPE_NOSUPP; goto cleanup; } memset(&enc_tkt_reply.transited, 0, sizeof(enc_tkt_reply.transited)); enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS; if ((errcode = add_to_transited(&header_enc_tkt->transited.tr_contents, &enc_tkt_reply.transited.tr_contents, header_ticket->server, enc_tkt_reply.client, request->server))) { status = "ADD_TO_TRANSITED_LIST"; goto cleanup; } newtransited = 1; } if (isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM)) { errcode = validate_transit_path(kdc_context, header_enc_tkt->client, server, header_server); if (errcode) { status = "NON_TRANSITIVE"; goto cleanup; } } if (!isflagset (request->kdc_options, KDC_OPT_DISABLE_TRANSITED_CHECK)) { errcode = kdc_check_transited_list (kdc_active_realm, &enc_tkt_reply.transited.tr_contents, krb5_princ_realm (kdc_context, header_enc_tkt->client), krb5_princ_realm (kdc_context, request->server)); if (errcode == 0) { setflag (enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED); } else { log_tgs_badtrans(kdc_context, cprinc, sprinc, &enc_tkt_reply.transited.tr_contents, errcode); } } else krb5_klog_syslog(LOG_INFO, _("not checking transit path")); if (kdc_active_realm->realm_reject_bad_transit && !isflagset(enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED)) { errcode = KRB5KDC_ERR_POLICY; status = "BAD_TRANSIT"; au_state->violation = LOCAL_POLICY; goto cleanup; } errcode = handle_authdata(kdc_context, c_flags, client, server, header_server, local_tgt, subkey != NULL ? subkey : header_ticket->enc_part2->session, &encrypting_key, /* U2U or server key */ header_key, pkt, request, s4u_x509_user ? 
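                               /* S4U2Self: the client named in the request */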
s4u_x509_user->user_id.user : NULL, subject_tkt, auth_indicators, &enc_tkt_reply); if (errcode) { krb5_klog_syslog(LOG_INFO, _("TGS_REQ : handle_authdata (%d)"), errcode); status = "HANDLE_AUTHDATA"; goto cleanup; } ticket_reply.enc_part2 = &enc_tkt_reply; /* * If we are doing user-to-user authentication, then make sure * that the client for the second ticket matches the request * server, and then encrypt the ticket using the session key of * the second ticket. */ if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) { /* * Make sure the client for the second ticket matches * requested server. */ krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2; krb5_principal client2 = t2enc->client; if (!krb5_principal_compare(kdc_context, request->server, client2)) { altcprinc = client2; errcode = KRB5KDC_ERR_SERVER_NOMATCH; status = "2ND_TKT_MISMATCH"; au_state->status = status; kau_u2u(kdc_context, FALSE, au_state); goto cleanup; } ticket_kvno = 0; ticket_reply.enc_part.enctype = t2enc->session->enctype; kau_u2u(kdc_context, TRUE, au_state); st_idx++; } else { ticket_kvno = server_key->key_data_kvno; } errcode = krb5_encrypt_tkt_part(kdc_context, &encrypting_key, &ticket_reply); if (!isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) krb5_free_keyblock_contents(kdc_context, &encrypting_key); if (errcode) { status = "ENCRYPT_TICKET"; goto cleanup; } ticket_reply.enc_part.kvno = ticket_kvno; /* Start assembling the response */ au_state->stage = ENCR_REP; reply.msg_type = KRB5_TGS_REP; if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) && krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_S4U_X509_USER) != NULL) { errcode = kdc_make_s4u2self_rep(kdc_context, subkey, header_ticket->enc_part2->session, s4u_x509_user, &reply, &reply_encpart); if (errcode) { status = "MAKE_S4U2SELF_PADATA"; au_state->status = status; } kau_s4u2self(kdc_context, errcode ? FALSE : TRUE, au_state); if (errcode) goto cleanup; } reply.client = enc_tkt_reply.client; reply.enc_part.kvno = 0;/* We are using the session key */ reply.ticket = &ticket_reply; reply_encpart.session = &session_key; reply_encpart.nonce = request->nonce; /* copy the time fields */ reply_encpart.times = enc_tkt_reply.times; nolrentry.lr_type = KRB5_LRQ_NONE; nolrentry.value = 0; nolrentry.magic = 0; nolrarray[0] = &nolrentry; nolrarray[1] = 0; reply_encpart.last_req = nolrarray; /* not available for TGS reqs */ reply_encpart.key_exp = 0;/* ditto */ reply_encpart.flags = enc_tkt_reply.flags; reply_encpart.server = ticket_reply.server; /* use the session key in the ticket, unless there's a subsession key in the AP_REQ */ reply.enc_part.enctype = subkey ? subkey->enctype : header_ticket->enc_part2->session->enctype; errcode = kdc_fast_response_handle_padata(state, request, &reply, subkey ? 
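                                          /* same enctype chosen for reply.enc_part above */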
subkey->enctype : header_ticket->enc_part2->session->enctype); if (errcode != 0) { status = "MAKE_FAST_RESPONSE"; goto cleanup; } errcode = kdc_fast_handle_reply_key(state, subkey ? subkey : header_ticket->enc_part2->session, &reply_key); if (errcode) { status = "MAKE_FAST_REPLY_KEY"; goto cleanup; } errcode = return_enc_padata(kdc_context, pkt, request, reply_key, server, &reply_encpart, is_referral && isflagset(s_flags, KRB5_KDB_FLAG_CANONICALIZE)); if (errcode) { status = "KDC_RETURN_ENC_PADATA"; goto cleanup; } errcode = kau_make_tkt_id(kdc_context, &ticket_reply, &au_state->tkt_out_id); if (errcode) { status = "GENERATE_TICKET_ID"; goto cleanup; } if (kdc_fast_hide_client(state)) reply.client = (krb5_principal)krb5_anonymous_principal(); errcode = krb5_encode_kdc_rep(kdc_context, KRB5_TGS_REP, &reply_encpart, subkey ? 1 : 0, reply_key, &reply, response); if (errcode) { status = "ENCODE_KDC_REP"; } else { status = "ISSUE"; } memset(ticket_reply.enc_part.ciphertext.data, 0, ticket_reply.enc_part.ciphertext.length); free(ticket_reply.enc_part.ciphertext.data); /* these parts are left on as a courtesy from krb5_encode_kdc_rep so we can use them in raw form if needed. But, we don't... */ memset(reply.enc_part.ciphertext.data, 0, reply.enc_part.ciphertext.length); free(reply.enc_part.ciphertext.data); cleanup: assert(status != NULL); if (reply_key) krb5_free_keyblock(kdc_context, reply_key); if (errcode) emsg = krb5_get_error_message(kdc_context, errcode); au_state->status = status; if (!errcode) au_state->reply = &reply; kau_tgs_req(kdc_context, errcode ? FALSE : TRUE, au_state); kau_free_kdc_req(au_state); log_tgs_req(kdc_context, from, request, &reply, cprinc, sprinc, altcprinc, authtime, c_flags, status, errcode, emsg); if (errcode) { krb5_free_error_message(kdc_context, emsg); emsg = NULL; } if (errcode) { int got_err = 0; if (status == 0) { status = krb5_get_error_message(kdc_context, errcode); got_err = 1; } errcode -= ERROR_TABLE_BASE_krb5; if (errcode < 0 || errcode > KRB_ERR_MAX) errcode = KRB_ERR_GENERIC; retval = prepare_error_tgs(state, request, header_ticket, errcode, (server != NULL) ?
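                               /* canonical server principal, when known */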
server->princ : NULL, response, status, e_data); if (got_err) { krb5_free_error_message (kdc_context, status); status = 0; } } if (header_ticket != NULL) krb5_free_ticket(kdc_context, header_ticket); if (request != NULL) krb5_free_kdc_req(kdc_context, request); if (state) kdc_free_rstate(state); krb5_db_free_principal(kdc_context, server); krb5_db_free_principal(kdc_context, stkt_server); krb5_db_free_principal(kdc_context, header_server); krb5_db_free_principal(kdc_context, client); krb5_db_free_principal(kdc_context, local_tgt_storage); if (session_key.contents != NULL) krb5_free_keyblock_contents(kdc_context, &session_key); if (newtransited) free(enc_tkt_reply.transited.tr_contents.data); if (s4u_x509_user != NULL) krb5_free_pa_s4u_x509_user(kdc_context, s4u_x509_user); if (kdc_issued_auth_data != NULL) krb5_free_authdata(kdc_context, kdc_issued_auth_data); if (subkey != NULL) krb5_free_keyblock(kdc_context, subkey); if (header_key != NULL) krb5_free_keyblock(kdc_context, header_key); if (reply.padata) krb5_free_pa_data(kdc_context, reply.padata); if (reply_encpart.enc_padata) krb5_free_pa_data(kdc_context, reply_encpart.enc_padata); if (enc_tkt_reply.authorization_data != NULL) krb5_free_authdata(kdc_context, enc_tkt_reply.authorization_data); krb5_free_pa_data(kdc_context, e_data); k5_free_data_ptr_list(auth_indicators); return retval; } static krb5_error_code prepare_error_tgs (struct kdc_request_state *state, krb5_kdc_req *request, krb5_ticket *ticket, int error, krb5_principal canon_server, krb5_data **response, const char *status, krb5_pa_data **e_data) { krb5_error errpkt; krb5_error_code retval = 0; krb5_data *scratch, *e_data_asn1 = NULL, *fast_edata = NULL; kdc_realm_t *kdc_active_realm = state->realm_data; errpkt.magic = KV5M_ERROR; errpkt.ctime = request->nonce; errpkt.cusec = 0; if ((retval = krb5_us_timeofday(kdc_context, &errpkt.stime, &errpkt.susec))) return(retval); errpkt.error = error; errpkt.server = request->server; if (ticket && ticket->enc_part2) errpkt.client = ticket->enc_part2->client; else errpkt.client = NULL; errpkt.text.length = strlen(status); if (!(errpkt.text.data = strdup(status))) return ENOMEM; if (!(scratch = (krb5_data *)malloc(sizeof(*scratch)))) { free(errpkt.text.data); return ENOMEM; } if (e_data != NULL) { retval = encode_krb5_padata_sequence(e_data, &e_data_asn1); if (retval) { free(scratch); free(errpkt.text.data); return retval; } errpkt.e_data = *e_data_asn1; } else errpkt.e_data = empty_data(); retval = kdc_fast_handle_error(kdc_context, state, request, e_data, &errpkt, &fast_edata); if (retval) { free(scratch); free(errpkt.text.data); krb5_free_data(kdc_context, e_data_asn1); return retval; } if (fast_edata) errpkt.e_data = *fast_edata; if (kdc_fast_hide_client(state) && errpkt.client != NULL) errpkt.client = (krb5_principal)krb5_anonymous_principal(); retval = krb5_mk_error(kdc_context, &errpkt, scratch); free(errpkt.text.data); krb5_free_data(kdc_context, e_data_asn1); krb5_free_data(kdc_context, fast_edata); if (retval) free(scratch); else *response = scratch; return retval; } /* KDC options that require a second ticket */ #define STKT_OPTIONS (KDC_OPT_CNAME_IN_ADDL_TKT | KDC_OPT_ENC_TKT_IN_SKEY) /* * Get the key for the second ticket, if any, and decrypt it. 
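 * A second ticket accompanies user-to-user (ENC_TKT_IN_SKEY) and
 * S4U2Proxy (CNAME_IN_ADDL_TKT) requests; see STKT_OPTIONS above.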
*/ static krb5_error_code decrypt_2ndtkt(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req, krb5_flags flags, krb5_db_entry **server_out, const char **status) { krb5_error_code retval; krb5_db_entry *server = NULL; krb5_keyblock *key; krb5_kvno kvno; krb5_ticket *stkt; if (!(req->kdc_options & STKT_OPTIONS)) return 0; stkt = req->second_ticket[0]; retval = kdc_get_server_key(kdc_context, stkt, flags, TRUE, /* match_enctype */ &server, &key, &kvno); if (retval != 0) { *status = "2ND_TKT_SERVER"; goto cleanup; } retval = krb5_decrypt_tkt_part(kdc_context, key, req->second_ticket[0]); krb5_free_keyblock(kdc_context, key); if (retval != 0) { *status = "2ND_TKT_DECRYPT"; goto cleanup; } *server_out = server; server = NULL; cleanup: krb5_db_free_principal(kdc_context, server); return retval; } static krb5_error_code get_2ndtkt_enctype(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req, krb5_enctype *useenctype, const char **status) { krb5_enctype etype; krb5_ticket *stkt = req->second_ticket[0]; int i; etype = stkt->enc_part2->session->enctype; if (!krb5_c_valid_enctype(etype)) { *status = "BAD_ETYPE_IN_2ND_TKT"; return KRB5KDC_ERR_ETYPE_NOSUPP; } for (i = 0; i < req->nktypes; i++) { if (req->ktype[i] == etype) { *useenctype = etype; break; } } return 0; } static krb5_error_code gen_session_key(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req, krb5_db_entry *server, krb5_keyblock *skey, const char **status) { krb5_error_code retval; krb5_enctype useenctype = 0; /* * Some special care needs to be taken in the user-to-user * case, since we don't know what keytypes the application server * which is doing user-to-user authentication can support. We * know that it at least must be able to support the encryption * type of the session key in the TGT, since otherwise it won't be * able to decrypt the U2U ticket! So we use that in preference * to anything else. */ if (req->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) { retval = get_2ndtkt_enctype(kdc_active_realm, req, &useenctype, status); if (retval != 0) goto cleanup; } if (useenctype == 0) { useenctype = select_session_keytype(kdc_active_realm, server, req->nktypes, req->ktype); } if (useenctype == 0) { /* unsupported ktype */ *status = "BAD_ENCRYPTION_TYPE"; retval = KRB5KDC_ERR_ETYPE_NOSUPP; goto cleanup; } retval = krb5_c_make_random_key(kdc_context, useenctype, skey); if (retval != 0) { /* random key failed */ *status = "MAKE_RANDOM_KEY"; goto cleanup; } cleanup: return retval; } /* * The request seems to be for a ticket-granting service somewhere else, * but we don't have a ticket for the final TGS. Try to give the requestor * some intermediate realm. 
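 * We walk the realm tree and, starting from the realm nearest the
 * destination and working back toward our own, return the first
 * cross-realm TGS entry found in the local database.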
*/ static krb5_error_code find_alternate_tgs(kdc_realm_t *kdc_active_realm, krb5_principal princ, krb5_db_entry **server_ptr, const char **status) { krb5_error_code retval; krb5_principal *plist = NULL, *pl2; krb5_data tmp; krb5_db_entry *server = NULL; *server_ptr = NULL; assert(is_cross_tgs_principal(princ)); if ((retval = krb5_walk_realm_tree(kdc_context, krb5_princ_realm(kdc_context, princ), krb5_princ_component(kdc_context, princ, 1), &plist, KRB5_REALM_BRANCH_CHAR))) { goto cleanup; } /* move to the end */ for (pl2 = plist; *pl2; pl2++); /* the first entry in this array is for krbtgt/local@local, so we ignore it */ while (--pl2 > plist) { tmp = *krb5_princ_realm(kdc_context, *pl2); krb5_princ_set_realm(kdc_context, *pl2, krb5_princ_realm(kdc_context, princ)); retval = db_get_svc_princ(kdc_context, *pl2, 0, &server, status); krb5_princ_set_realm(kdc_context, *pl2, &tmp); if (retval == KRB5_KDB_NOENTRY) continue; else if (retval) goto cleanup; log_tgs_alt_tgt(kdc_context, server->princ); *server_ptr = server; server = NULL; goto cleanup; } cleanup: if (retval == 0 && *server_ptr == NULL) retval = KRB5_KDB_NOENTRY; if (retval != 0) *status = "UNKNOWN_SERVER"; krb5_free_realm_tree(kdc_context, plist); krb5_db_free_principal(kdc_context, server); return retval; } /* Return true if item is an element of the space/comma-separated list. */ static krb5_boolean in_list(const char *list, const char *item) { const char *p; int len = strlen(item); if (list == NULL) return FALSE; for (p = strstr(list, item); p != NULL; p = strstr(p + 1, item)) { if ((p == list || isspace((unsigned char)p[-1]) || p[-1] == ',') && (p[len] == '\0' || isspace((unsigned char)p[len]) || p[len] == ',')) return TRUE; } return FALSE; } /* * Check whether the request satisfies the conditions for generating a referral * TGT. The caller checks whether the hostname component looks like a FQDN. */ static krb5_boolean is_referral_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request) { krb5_boolean ret = FALSE; char *stype = NULL; char *hostbased = kdc_active_realm->realm_hostbased; char *no_referral = kdc_active_realm->realm_no_referral; if (!(request->kdc_options & KDC_OPT_CANONICALIZE)) return FALSE; if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) return FALSE; if (krb5_princ_size(kdc_context, request->server) != 2) return FALSE; stype = data2string(krb5_princ_component(kdc_context, request->server, 0)); if (stype == NULL) return FALSE; switch (krb5_princ_type(kdc_context, request->server)) { case KRB5_NT_UNKNOWN: /* Allow referrals for NT-UNKNOWN principals, if configured. */ if (!in_list(hostbased, stype) && !in_list(hostbased, "*")) goto cleanup; /* FALLTHROUGH */ case KRB5_NT_SRV_HST: case KRB5_NT_SRV_INST: /* Deny referrals for specific service types, if configured. */ if (in_list(no_referral, stype) || in_list(no_referral, "*")) goto cleanup; ret = TRUE; break; default: goto cleanup; } cleanup: free(stype); return ret; } /* * Find a remote realm TGS principal for an unknown host-based service * principal. 
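 * The realm is derived from the hostname component via
 * krb5_get_host_realm(); referrals to the empty realm or back to the
 * requested server's own realm are refused.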
*/ static krb5_int32 find_referral_tgs(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, krb5_principal *krbtgt_princ) { krb5_error_code retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; char **realms = NULL, *hostname = NULL; krb5_data srealm = request->server->realm; if (!is_referral_req(kdc_active_realm, request)) goto cleanup; hostname = data2string(krb5_princ_component(kdc_context, request->server, 1)); if (hostname == NULL) { retval = ENOMEM; goto cleanup; } /* If the hostname doesn't contain a '.', it's not a FQDN. */ if (strchr(hostname, '.') == NULL) goto cleanup; retval = krb5_get_host_realm(kdc_context, hostname, &realms); if (retval) { /* no match found */ kdc_err(kdc_context, retval, "unable to find realm of host"); goto cleanup; } /* Don't return a referral to the empty realm or the service realm. */ if (realms == NULL || realms[0] == NULL || *realms[0] == '\0' || data_eq_string(srealm, realms[0])) { retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto cleanup; } retval = krb5_build_principal(kdc_context, krbtgt_princ, srealm.length, srealm.data, "krbtgt", realms[0], (char *)0); cleanup: krb5_free_host_realm(kdc_context, realms); free(hostname); return retval; } static krb5_error_code db_get_svc_princ(krb5_context ctx, krb5_principal princ, krb5_flags flags, krb5_db_entry **server, const char **status) { krb5_error_code ret; ret = krb5_db_get_principal(ctx, princ, flags, server); if (ret == KRB5_KDB_CANTLOCK_DB) ret = KRB5KDC_ERR_SVC_UNAVAILABLE; if (ret != 0) { *status = "LOOKING_UP_SERVER"; } return ret; } static krb5_error_code search_sprinc(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req, krb5_flags flags, krb5_db_entry **server, const char **status) { krb5_error_code ret; krb5_principal princ = req->server; krb5_principal reftgs = NULL; krb5_boolean allow_referral; /* Do not allow referrals for u2u or ticket modification requests, because * the server is supposed to match an already-issued ticket. */ allow_referral = !(req->kdc_options & NO_REFERRAL_OPTION); if (!allow_referral) flags &= ~KRB5_KDB_FLAG_CANONICALIZE; ret = db_get_svc_princ(kdc_context, princ, flags, server, status); if (ret == 0 || ret != KRB5_KDB_NOENTRY || !allow_referral) goto cleanup; if (!is_cross_tgs_principal(req->server)) { ret = find_referral_tgs(kdc_active_realm, req, &reftgs); if (ret != 0) goto cleanup; ret = db_get_svc_princ(kdc_context, reftgs, flags, server, status); if (ret == 0 || ret != KRB5_KDB_NOENTRY) goto cleanup; princ = reftgs; } ret = find_alternate_tgs(kdc_active_realm, princ, server, status); cleanup: if (ret != 0 && ret != KRB5KDC_ERR_SVC_UNAVAILABLE) { ret = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; if (*status == NULL) *status = "LOOKING_UP_SERVER"; } krb5_free_principal(kdc_context, reftgs); return ret; }
./CrossVul/dataset_final_sorted/CWE-617/c/bad_2571_1
crossvul-cpp_data_good_1771_2
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #ifdef ENABLE_EDP #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> #include <fnmatch.h> static int seq = 0; int edp_send(struct lldpd *global, struct lldpd_hardware *hardware) { const u_int8_t mcastaddr[] = EDP_MULTICAST_ADDR; const u_int8_t llcorg[] = LLC_ORG_EXTREME; struct lldpd_chassis *chassis; int length, i, v; u_int8_t *packet, *pos, *pos_llc, *pos_len_eh, *pos_len_edp, *pos_edp, *tlv, *end; u_int16_t checksum; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan; unsigned int state = 0; #endif u_int8_t edp_fakeversion[] = {7, 6, 4, 99}; /* Subsequent XXX can be replaced by other values. We place them here to ensure the position of "" to be a bit invariant with version changes. */ char *deviceslot[] = { "eth", "veth", "XXX", "XXX", "XXX", "XXX", "XXX", "XXX", "", NULL }; log_debug("edp", "send EDP frame on port %s", hardware->h_ifname); chassis = hardware->h_lport.p_chassis; #ifdef ENABLE_DOT1 while (state != 2) { #endif length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; v = 0; /* Ethernet header */ if (!( POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && POKE_SAVE(pos_len_eh) && /* We compute the len later */ POKE_UINT16(0))) goto toobig; /* LLC */ if (!( POKE_SAVE(pos_llc) && /* We need to save our current position to compute ethernet len */ /* SSAP and DSAP */ POKE_UINT8(0xaa) && POKE_UINT8(0xaa) && /* Control field */ POKE_UINT8(0x03) && /* ORG */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_UINT16(LLC_PID_EDP))) goto toobig; /* EDP header */ if ((chassis->c_id_len != ETHER_ADDR_LEN) || (chassis->c_id_subtype != LLDP_CHASSISID_SUBTYPE_LLADDR)) { log_warnx("edp", "local chassis does not use MAC address as chassis ID!?"); free(packet); return EINVAL; } if (!( POKE_SAVE(pos_edp) && /* Save the start of EDP frame */ POKE_UINT8(1) && POKE_UINT8(0) && POKE_SAVE(pos_len_edp) && /* We compute the len and the checksum later */ POKE_UINT32(0) && /* Len + Checksum */ POKE_UINT16(seq) && POKE_UINT16(0) && POKE_BYTES(chassis->c_id, ETHER_ADDR_LEN))) goto toobig; seq++; #ifdef ENABLE_DOT1 switch (state) { case 0: #endif /* Display TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_DISPLAY) && POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) && POKE_UINT8(0) && /* Add a NULL character for better compatibility */ POKE_END_EDP_TLV)) goto toobig; /* Info TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_INFO))) goto toobig; /* We try to emulate the slot thing */ for (i=0; deviceslot[i] != NULL; i++) { if (strncmp(hardware->h_ifname, deviceslot[i], strlen(deviceslot[i])) == 0) { if (!( POKE_UINT16(i) && POKE_UINT16(atoi(hardware->h_ifname + 
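					/* numeric suffix after the matched prefix, e.g. "eth1" -> port 1 */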
strlen(deviceslot[i]))))) goto toobig; break; } } /* If we don't find a "slot", we say that the interface is in slot 8 */ if (deviceslot[i] == NULL) { if (!( POKE_UINT16(8) && POKE_UINT16(hardware->h_ifindex))) goto toobig; } if (!( POKE_UINT16(0) && /* vchassis */ POKE_UINT32(0) && POKE_UINT16(0) && /* Reserved */ /* Version */ POKE_BYTES(edp_fakeversion, sizeof(edp_fakeversion)) && /* Connections, we say that we won't have more interfaces than this mask. */ POKE_UINT32(0xffffffff) && POKE_UINT32(0) && POKE_UINT32(0) && POKE_UINT32(0) && POKE_END_EDP_TLV)) goto toobig; #ifdef ENABLE_DOT1 break; case 1: TAILQ_FOREACH(vlan, &hardware->h_lport.p_vlans, v_entries) { v++; if (!( POKE_START_EDP_TLV(EDP_TLV_VLAN) && POKE_UINT8(0) && /* Flags: no IP address */ POKE_UINT8(0) && /* Reserved */ POKE_UINT16(vlan->v_vid) && POKE_UINT32(0) && /* Reserved */ POKE_UINT32(0) && /* IP address */ /* VLAN name */ POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) && POKE_UINT8(0) && POKE_END_EDP_TLV)) goto toobig; } break; } if ((state == 1) && (v == 0)) { /* No VLAN, no need to send another TLV */ free(packet); break; } #endif /* Null TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_NULL) && POKE_END_EDP_TLV && POKE_SAVE(end))) goto toobig; /* Compute len and checksum */ i = end - pos_llc; /* Ethernet length */ v = end - pos_edp; /* EDP length */ POKE_RESTORE(pos_len_eh); if (!(POKE_UINT16(i))) goto toobig; POKE_RESTORE(pos_len_edp); if (!(POKE_UINT16(v))) goto toobig; checksum = frame_checksum(pos_edp, v, 0); if (!(POKE_UINT16(checksum))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("edp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } free(packet); #ifdef ENABLE_DOT1 state++; } #endif hardware->h_tx_cnt++; return 0; toobig: free(packet); return E2BIG; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_len < (x)) { \ log_warnx("edp", name " EDP TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) int edp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; #ifdef ENABLE_DOT1 struct lldpd_mgmt *mgmt, *mgmt_next, *m; struct lldpd_vlan *lvlan = NULL, *lvlan_next; #endif const unsigned char edpaddr[] = EDP_MULTICAST_ADDR; int length, gotend = 0, gotvlans = 0, edp_len, tlv_len, tlv_type; int edp_port, edp_slot; u_int8_t *pos, *pos_edp, *tlv; u_int8_t version[4]; #ifdef ENABLE_DOT1 struct in_addr address; struct lldpd_port *oport; #endif log_debug("edp", "decode EDP frame on port %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("edp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("edp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) + 8 /* LLC */ + 10 + ETHER_ADDR_LEN /* EDP header */) { log_warnx("edp", "too short EDP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(edpaddr, sizeof(edpaddr)) != 0) { log_info("edp", "frame not targeted at EDP multicast address received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(ETHER_ADDR_LEN); PEEK_DISCARD_UINT16; PEEK_DISCARD(6); /* LLC: DSAP + 
SSAP + control + org */ if (PEEK_UINT16 != LLC_PID_EDP) { log_debug("edp", "incorrect LLC protocol ID received on %s", hardware->h_ifname); goto malformed; } (void)PEEK_SAVE(pos_edp); /* Save the start of EDP packet */ if (PEEK_UINT8 != 1) { log_warnx("edp", "incorrect EDP version for frame received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD_UINT8; /* Reserved */ edp_len = PEEK_UINT16; PEEK_DISCARD_UINT16; /* Checksum */ PEEK_DISCARD_UINT16; /* Sequence */ if (PEEK_UINT16 != 0) { /* ID Type = 0 = MAC */ log_warnx("edp", "incorrect device id type for frame received on %s", hardware->h_ifname); goto malformed; } if (edp_len > length + 10) { log_warnx("edp", "incorrect size for EDP frame received on %s", hardware->h_ifname); goto malformed; } chassis->c_ttl = cfg?cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold:0; chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LLADDR; chassis->c_id_len = ETHER_ADDR_LEN; if ((chassis->c_id = (char *)malloc(ETHER_ADDR_LEN)) == NULL) { log_warn("edp", "unable to allocate memory for chassis ID"); goto malformed; } PEEK_BYTES(chassis->c_id, ETHER_ADDR_LEN); /* Let's check checksum */ if (frame_checksum(pos_edp, edp_len, 0) != 0) { log_warnx("edp", "incorrect EDP checksum for frame received on %s", hardware->h_ifname); goto malformed; } while (length && !gotend) { if (length < 4) { log_warnx("edp", "EDP TLV header is too large for " "frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_UINT8 != EDP_TLV_MARKER) { log_warnx("edp", "incorrect marker starting EDP TLV header for frame " "received on %s", hardware->h_ifname); goto malformed; } tlv_type = PEEK_UINT8; tlv_len = PEEK_UINT16 - 4; (void)PEEK_SAVE(tlv); if ((tlv_len < 0) || (tlv_len > length)) { log_debug("edp", "incorrect size in EDP TLV header for frame " "received on %s", hardware->h_ifname); /* Some poor old Extreme Summit are quite bogus */ gotend = 1; break; } switch (tlv_type) { case EDP_TLV_INFO: CHECK_TLV_SIZE(32, "Info"); port->p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME; edp_slot = PEEK_UINT16; edp_port = PEEK_UINT16; if (asprintf(&port->p_id, "%d/%d", edp_slot + 1, edp_port + 1) == -1) { log_warn("edp", "unable to allocate memory for " "port ID"); goto malformed; } port->p_id_len = strlen(port->p_id); if (asprintf(&port->p_descr, "Slot %d / Port %d", edp_slot + 1, edp_port + 1) == -1) { log_warn("edp", "unable to allocate memory for " "port description"); goto malformed; } PEEK_DISCARD_UINT16; /* vchassis */ PEEK_DISCARD(6); /* Reserved */ PEEK_BYTES(version, 4); if (asprintf(&chassis->c_descr, "EDP enabled device, version %d.%d.%d.%d", version[0], version[1], version[2], version[3]) == -1) { log_warn("edp", "unable to allocate memory for " "chassis description"); goto malformed; } break; case EDP_TLV_DISPLAY: if ((chassis->c_name = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("edp", "unable to allocate memory for chassis " "name"); goto malformed; } /* TLV display contains a lot of garbage */ PEEK_BYTES(chassis->c_name, tlv_len); break; case EDP_TLV_NULL: if (tlv_len != 0) { log_warnx("edp", "null tlv with incorrect size in frame " "received on %s", hardware->h_ifname); goto malformed; } if (length) log_debug("edp", "extra data after edp frame on %s", hardware->h_ifname); gotend = 1; break; case EDP_TLV_VLAN: #ifdef ENABLE_DOT1 CHECK_TLV_SIZE(12, "VLAN"); if ((lvlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("edp", "unable to allocate vlan"); goto malformed; } PEEK_DISCARD_UINT16; /* Flags + reserved */ lvlan->v_vid 
= PEEK_UINT16; /* VID */ PEEK_DISCARD(4); /* Reserved */ PEEK_BYTES(&address, sizeof(address)); if (address.s_addr != INADDR_ANY) { mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &address, sizeof(struct in_addr), 0); if (mgmt == NULL) { log_warn("edp", "Out of memory"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); } if ((lvlan->v_name = (char *)calloc(1, tlv_len + 1 - 12)) == NULL) { log_warn("edp", "unable to allocate vlan name"); goto malformed; } PEEK_BYTES(lvlan->v_name, tlv_len - 12); TAILQ_INSERT_TAIL(&port->p_vlans, lvlan, v_entries); lvlan = NULL; #endif gotvlans = 1; break; default: log_debug("edp", "unknown EDP TLV type (%d) received on %s", tlv_type, hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } PEEK_DISCARD(tlv + tlv_len - pos); } if ((chassis->c_id == NULL) || (port->p_id == NULL) || (chassis->c_name == NULL) || (chassis->c_descr == NULL) || (port->p_descr == NULL) || (gotend == 0)) { #ifdef ENABLE_DOT1 if (gotvlans && gotend) { /* VLANs can be sent in separate frames. We need to add * those VLANs to an existing port */ TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if (!((oport->p_protocol == LLDPD_MODE_EDP) && (oport->p_chassis->c_id_subtype == chassis->c_id_subtype) && (oport->p_chassis->c_id_len == chassis->c_id_len) && (memcmp(oport->p_chassis->c_id, chassis->c_id, chassis->c_id_len) == 0))) continue; /* We attach the VLANs to the found port */ lldpd_vlan_cleanup(oport); for (lvlan = TAILQ_FIRST(&port->p_vlans); lvlan != NULL; lvlan = lvlan_next) { lvlan_next = TAILQ_NEXT(lvlan, v_entries); TAILQ_REMOVE(&port->p_vlans, lvlan, v_entries); TAILQ_INSERT_TAIL(&oport->p_vlans, lvlan, v_entries); } /* And the IP addresses */ for (mgmt = TAILQ_FIRST(&chassis->c_mgmt); mgmt != NULL; mgmt = mgmt_next) { mgmt_next = TAILQ_NEXT(mgmt, m_entries); TAILQ_REMOVE(&chassis->c_mgmt, mgmt, m_entries); /* Don't add an address that already exists! */ TAILQ_FOREACH(m, &chassis->c_mgmt, m_entries) if (m->m_family == mgmt->m_family && !memcmp(&m->m_addr, &mgmt->m_addr, sizeof(m->m_addr))) break; if (m == NULL) TAILQ_INSERT_TAIL(&oport->p_chassis->c_mgmt, mgmt, m_entries); } } /* We discard the remaining frame */ goto malformed; } #else if (gotvlans) goto malformed; #endif log_warnx("edp", "some mandatory tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: #ifdef ENABLE_DOT1 free(lvlan); #endif lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #endif /* ENABLE_EDP */
./CrossVul/dataset_final_sorted/CWE-617/c/good_1771_2
crossvul-cpp_data_bad_219_2
/* * MPEG-4 decoder * Copyright (c) 2000,2001 Fabrice Bellard * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #define UNCHECKED_BITSTREAM_READER 1 #include "libavutil/internal.h" #include "libavutil/opt.h" #include "error_resilience.h" #include "hwaccel.h" #include "idctdsp.h" #include "internal.h" #include "mpegutils.h" #include "mpegvideo.h" #include "mpegvideodata.h" #include "mpeg4video.h" #include "h263.h" #include "profiles.h" #include "thread.h" #include "xvididct.h" /* The defines below define the number of bits that are read at once for * reading vlc values. Changing these may improve speed and data cache needs * be aware though that decreasing them may need the number of stages that is * passed to get_vlc* to be increased. */ #define SPRITE_TRAJ_VLC_BITS 6 #define DC_VLC_BITS 9 #define MB_TYPE_B_VLC_BITS 4 #define STUDIO_INTRA_BITS 9 static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb); static VLC dc_lum, dc_chrom; static VLC sprite_trajectory; static VLC mb_type_b_vlc; static const int mb_type_b_map[4] = { MB_TYPE_DIRECT2 | MB_TYPE_L0L1, MB_TYPE_L0L1 | MB_TYPE_16x16, MB_TYPE_L1 | MB_TYPE_16x16, MB_TYPE_L0 | MB_TYPE_16x16, }; /** * Predict the ac. * @param n block index (0-3 are luma, 4-5 are chroma) * @param dir the ac prediction direction */ void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir) { int i; int16_t *ac_val, *ac_val1; int8_t *const qscale_table = s->current_picture.qscale_table; /* find prediction */ ac_val = &s->ac_val[0][0][0] + s->block_index[n] * 16; ac_val1 = ac_val; if (s->ac_pred) { if (dir == 0) { const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride; /* left prediction */ ac_val -= 16; if (s->mb_x == 0 || s->qscale == qscale_table[xy] || n == 1 || n == 3) { /* same qscale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i << 3]] += ac_val[i]; } else { /* different qscale, we must rescale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i << 3]] += ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale); } } else { const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride; /* top prediction */ ac_val -= 16 * s->block_wrap[n]; if (s->mb_y == 0 || s->qscale == qscale_table[xy] || n == 2 || n == 3) { /* same qscale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i]] += ac_val[i + 8]; } else { /* different qscale, we must rescale */ for (i = 1; i < 8; i++) block[s->idsp.idct_permutation[i]] += ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale); } } } /* left copy */ for (i = 1; i < 8; i++) ac_val1[i] = block[s->idsp.idct_permutation[i << 3]]; /* top copy */ for (i = 1; i < 8; i++) ac_val1[8 + i] = block[s->idsp.idct_permutation[i]]; } /** * check if the next stuff is a resync marker or the end. 
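 * A resync marker is a run of zero bits (whose length depends on the
 * picture type and f_code) followed by a 1 bit; zero padding up to the
 * end of the buffer is treated as the end.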
* @return 0 if not */ static inline int mpeg4_is_resync(Mpeg4DecContext *ctx) { MpegEncContext *s = &ctx->m; int bits_count = get_bits_count(&s->gb); int v = show_bits(&s->gb, 16); if (s->workaround_bugs & FF_BUG_NO_PADDING && !ctx->resync_marker) return 0; while (v <= 0xFF) { if (s->pict_type == AV_PICTURE_TYPE_B || (v >> (8 - s->pict_type) != 1) || s->partitioned_frame) break; skip_bits(&s->gb, 8 + s->pict_type); bits_count += 8 + s->pict_type; v = show_bits(&s->gb, 16); } if (bits_count + 8 >= s->gb.size_in_bits) { v >>= 8; v |= 0x7F >> (7 - (bits_count & 7)); if (v == 0x7F) return s->mb_num; } else { if (v == ff_mpeg4_resync_prefix[bits_count & 7]) { int len, mb_num; int mb_num_bits = av_log2(s->mb_num - 1) + 1; GetBitContext gb = s->gb; skip_bits(&s->gb, 1); align_get_bits(&s->gb); for (len = 0; len < 32; len++) if (get_bits1(&s->gb)) break; mb_num = get_bits(&s->gb, mb_num_bits); if (!mb_num || mb_num > s->mb_num || get_bits_count(&s->gb)+6 > s->gb.size_in_bits) mb_num= -1; s->gb = gb; if (len >= ff_mpeg4_get_video_packet_prefix_length(s)) return mb_num; } } return 0; } static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int a = 2 << s->sprite_warping_accuracy; int rho = 3 - s->sprite_warping_accuracy; int r = 16 / a; int alpha = 1; int beta = 0; int w = s->width; int h = s->height; int min_ab, i, w2, h2, w3, h3; int sprite_ref[4][2]; int virtual_ref[2][2]; int64_t sprite_offset[2][2]; int64_t sprite_delta[2][2]; // only true for rectangle shapes const int vop_ref[4][2] = { { 0, 0 }, { s->width, 0 }, { 0, s->height }, { s->width, s->height } }; int d[4][2] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }; if (w <= 0 || h <= 0) return AVERROR_INVALIDDATA; /* the decoder was not properly initialized and we cannot continue */ if (sprite_trajectory.table == NULL) return AVERROR_INVALIDDATA; for (i = 0; i < ctx->num_sprite_warping_points; i++) { int length; int x = 0, y = 0; length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3); if (length > 0) x = get_xbits(gb, length); if (!(ctx->divx_version == 500 && ctx->divx_build == 413)) check_marker(s->avctx, gb, "before sprite_trajectory"); length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3); if (length > 0) y = get_xbits(gb, length); check_marker(s->avctx, gb, "after sprite_trajectory"); ctx->sprite_traj[i][0] = d[i][0] = x; ctx->sprite_traj[i][1] = d[i][1] = y; } for (; i < 4; i++) ctx->sprite_traj[i][0] = ctx->sprite_traj[i][1] = 0; while ((1 << alpha) < w) alpha++; while ((1 << beta) < h) beta++; /* typo in the MPEG-4 std for the definition of w' and h' */ w2 = 1 << alpha; h2 = 1 << beta; // Note, the 4th point isn't used for GMC if (ctx->divx_version == 500 && ctx->divx_build == 413) { sprite_ref[0][0] = a * vop_ref[0][0] + d[0][0]; sprite_ref[0][1] = a * vop_ref[0][1] + d[0][1]; sprite_ref[1][0] = a * vop_ref[1][0] + d[0][0] + d[1][0]; sprite_ref[1][1] = a * vop_ref[1][1] + d[0][1] + d[1][1]; sprite_ref[2][0] = a * vop_ref[2][0] + d[0][0] + d[2][0]; sprite_ref[2][1] = a * vop_ref[2][1] + d[0][1] + d[2][1]; } else { sprite_ref[0][0] = (a >> 1) * (2 * vop_ref[0][0] + d[0][0]); sprite_ref[0][1] = (a >> 1) * (2 * vop_ref[0][1] + d[0][1]); sprite_ref[1][0] = (a >> 1) * (2 * vop_ref[1][0] + d[0][0] + d[1][0]); sprite_ref[1][1] = (a >> 1) * (2 * vop_ref[1][1] + d[0][1] + d[1][1]); sprite_ref[2][0] = (a >> 1) * (2 * vop_ref[2][0] + d[0][0] + d[2][0]); sprite_ref[2][1] = (a >> 1) * (2 * vop_ref[2][1] + d[0][1] + d[2][1]); } /* sprite_ref[3][0] = (a >> 1) 
* (2 * vop_ref[3][0] + d[0][0] + d[1][0] + d[2][0] + d[3][0]); * sprite_ref[3][1] = (a >> 1) * (2 * vop_ref[3][1] + d[0][1] + d[1][1] + d[2][1] + d[3][1]); */ /* This is mostly identical to the MPEG-4 std (and is totally unreadable * because of that...). Perhaps it should be reordered to be more readable. * The idea behind this virtual_ref mess is to be able to use shifts later * per pixel instead of divides so the distance between points is converted * from w&h based to w2&h2 based which are of the 2^x form. */ virtual_ref[0][0] = 16 * (vop_ref[0][0] + w2) + ROUNDED_DIV(((w - w2) * (r * sprite_ref[0][0] - 16LL * vop_ref[0][0]) + w2 * (r * sprite_ref[1][0] - 16LL * vop_ref[1][0])), w); virtual_ref[0][1] = 16 * vop_ref[0][1] + ROUNDED_DIV(((w - w2) * (r * sprite_ref[0][1] - 16LL * vop_ref[0][1]) + w2 * (r * sprite_ref[1][1] - 16LL * vop_ref[1][1])), w); virtual_ref[1][0] = 16 * vop_ref[0][0] + ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][0] - 16LL * vop_ref[0][0]) + h2 * (r * sprite_ref[2][0] - 16LL * vop_ref[2][0])), h); virtual_ref[1][1] = 16 * (vop_ref[0][1] + h2) + ROUNDED_DIV(((h - h2) * (r * sprite_ref[0][1] - 16LL * vop_ref[0][1]) + h2 * (r * sprite_ref[2][1] - 16LL * vop_ref[2][1])), h); switch (ctx->num_sprite_warping_points) { case 0: sprite_offset[0][0] = sprite_offset[0][1] = sprite_offset[1][0] = sprite_offset[1][1] = 0; sprite_delta[0][0] = a; sprite_delta[0][1] = sprite_delta[1][0] = 0; sprite_delta[1][1] = a; ctx->sprite_shift[0] = ctx->sprite_shift[1] = 0; break; case 1: // GMC only sprite_offset[0][0] = sprite_ref[0][0] - a * vop_ref[0][0]; sprite_offset[0][1] = sprite_ref[0][1] - a * vop_ref[0][1]; sprite_offset[1][0] = ((sprite_ref[0][0] >> 1) | (sprite_ref[0][0] & 1)) - a * (vop_ref[0][0] / 2); sprite_offset[1][1] = ((sprite_ref[0][1] >> 1) | (sprite_ref[0][1] & 1)) - a * (vop_ref[0][1] / 2); sprite_delta[0][0] = a; sprite_delta[0][1] = sprite_delta[1][0] = 0; sprite_delta[1][1] = a; ctx->sprite_shift[0] = ctx->sprite_shift[1] = 0; break; case 2: sprite_offset[0][0] = ((int64_t) sprite_ref[0][0] * (1 << alpha + rho)) + ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) * ((int64_t) -vop_ref[0][0]) + ((int64_t) r * sprite_ref[0][1] - virtual_ref[0][1]) * ((int64_t) -vop_ref[0][1]) + (1 << (alpha + rho - 1)); sprite_offset[0][1] = ((int64_t) sprite_ref[0][1] * (1 << alpha + rho)) + ((int64_t) -r * sprite_ref[0][1] + virtual_ref[0][1]) * ((int64_t) -vop_ref[0][0]) + ((int64_t) -r * sprite_ref[0][0] + virtual_ref[0][0]) * ((int64_t) -vop_ref[0][1]) + (1 << (alpha + rho - 1)); sprite_offset[1][0] = (((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * ((int64_t)-2 * vop_ref[0][0] + 1) + ((int64_t) r * sprite_ref[0][1] - virtual_ref[0][1]) * ((int64_t)-2 * vop_ref[0][1] + 1) + 2 * w2 * r * (int64_t) sprite_ref[0][0] - 16 * w2 + (1 << (alpha + rho + 1))); sprite_offset[1][1] = (((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * ((int64_t)-2 * vop_ref[0][0] + 1) + ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * ((int64_t)-2 * vop_ref[0][1] + 1) + 2 * w2 * r * (int64_t) sprite_ref[0][1] - 16 * w2 + (1 << (alpha + rho + 1))); sprite_delta[0][0] = (-r * sprite_ref[0][0] + virtual_ref[0][0]); sprite_delta[0][1] = (+r * sprite_ref[0][1] - virtual_ref[0][1]); sprite_delta[1][0] = (-r * sprite_ref[0][1] + virtual_ref[0][1]); sprite_delta[1][1] = (-r * sprite_ref[0][0] + virtual_ref[0][0]); ctx->sprite_shift[0] = alpha + rho; ctx->sprite_shift[1] = alpha + rho + 2; break; case 3: min_ab = FFMIN(alpha, beta); w3 = w2 >> min_ab; h3 = h2 >> min_ab; sprite_offset[0][0] = 
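                            /* 3-point (affine) case: the offsets below carry
                             * alpha+beta+rho-min_ab fractional bits, matching
                             * the sprite_shift[] values set for this case */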
((int64_t)sprite_ref[0][0] * (1 << (alpha + beta + rho - min_ab))) + ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-vop_ref[0][0]) + ((int64_t)-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-vop_ref[0][1]) + ((int64_t)1 << (alpha + beta + rho - min_ab - 1)); sprite_offset[0][1] = ((int64_t)sprite_ref[0][1] * (1 << (alpha + beta + rho - min_ab))) + ((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-vop_ref[0][0]) + ((int64_t)-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-vop_ref[0][1]) + ((int64_t)1 << (alpha + beta + rho - min_ab - 1)); sprite_offset[1][0] = ((int64_t)-r * sprite_ref[0][0] + virtual_ref[0][0]) * h3 * (-2 * vop_ref[0][0] + 1) + ((int64_t)-r * sprite_ref[0][0] + virtual_ref[1][0]) * w3 * (-2 * vop_ref[0][1] + 1) + (int64_t)2 * w2 * h3 * r * sprite_ref[0][0] - 16 * w2 * h3 + ((int64_t)1 << (alpha + beta + rho - min_ab + 1)); sprite_offset[1][1] = ((int64_t)-r * sprite_ref[0][1] + virtual_ref[0][1]) * h3 * (-2 * vop_ref[0][0] + 1) + ((int64_t)-r * sprite_ref[0][1] + virtual_ref[1][1]) * w3 * (-2 * vop_ref[0][1] + 1) + (int64_t)2 * w2 * h3 * r * sprite_ref[0][1] - 16 * w2 * h3 + ((int64_t)1 << (alpha + beta + rho - min_ab + 1)); sprite_delta[0][0] = (-r * (int64_t)sprite_ref[0][0] + virtual_ref[0][0]) * h3; sprite_delta[0][1] = (-r * (int64_t)sprite_ref[0][0] + virtual_ref[1][0]) * w3; sprite_delta[1][0] = (-r * (int64_t)sprite_ref[0][1] + virtual_ref[0][1]) * h3; sprite_delta[1][1] = (-r * (int64_t)sprite_ref[0][1] + virtual_ref[1][1]) * w3; ctx->sprite_shift[0] = alpha + beta + rho - min_ab; ctx->sprite_shift[1] = alpha + beta + rho - min_ab + 2; break; } /* try to simplify the situation */ if (sprite_delta[0][0] == a << ctx->sprite_shift[0] && sprite_delta[0][1] == 0 && sprite_delta[1][0] == 0 && sprite_delta[1][1] == a << ctx->sprite_shift[0]) { sprite_offset[0][0] >>= ctx->sprite_shift[0]; sprite_offset[0][1] >>= ctx->sprite_shift[0]; sprite_offset[1][0] >>= ctx->sprite_shift[1]; sprite_offset[1][1] >>= ctx->sprite_shift[1]; sprite_delta[0][0] = a; sprite_delta[0][1] = 0; sprite_delta[1][0] = 0; sprite_delta[1][1] = a; ctx->sprite_shift[0] = 0; ctx->sprite_shift[1] = 0; s->real_sprite_warping_points = 1; } else { int shift_y = 16 - ctx->sprite_shift[0]; int shift_c = 16 - ctx->sprite_shift[1]; for (i = 0; i < 2; i++) { if (shift_c < 0 || shift_y < 0 || FFABS( sprite_offset[0][i]) >= INT_MAX >> shift_y || FFABS( sprite_offset[1][i]) >= INT_MAX >> shift_c || FFABS( sprite_delta[0][i]) >= INT_MAX >> shift_y || FFABS( sprite_delta[1][i]) >= INT_MAX >> shift_y ) { avpriv_request_sample(s->avctx, "Too large sprite shift, delta or offset"); goto overflow; } } for (i = 0; i < 2; i++) { sprite_offset[0][i] *= 1 << shift_y; sprite_offset[1][i] *= 1 << shift_c; sprite_delta[0][i] *= 1 << shift_y; sprite_delta[1][i] *= 1 << shift_y; ctx->sprite_shift[i] = 16; } for (i = 0; i < 2; i++) { int64_t sd[2] = { sprite_delta[i][0] - a * (1LL<<16), sprite_delta[i][1] - a * (1LL<<16) }; if (llabs(sprite_offset[0][i] + sprite_delta[i][0] * (w+16LL)) >= INT_MAX || llabs(sprite_offset[0][i] + sprite_delta[i][1] * (h+16LL)) >= INT_MAX || llabs(sprite_offset[0][i] + sprite_delta[i][0] * (w+16LL) + sprite_delta[i][1] * (h+16LL)) >= INT_MAX || llabs(sprite_delta[i][0] * (w+16LL)) >= INT_MAX || llabs(sprite_delta[i][1] * (w+16LL)) >= INT_MAX || llabs(sd[0]) >= INT_MAX || llabs(sd[1]) >= INT_MAX || llabs(sprite_offset[0][i] + sd[0] * (w+16LL)) >= INT_MAX || llabs(sprite_offset[0][i] + sd[1] * (h+16LL)) >= INT_MAX || llabs(sprite_offset[0][i] + sd[0] * (w+16LL) + sd[1] 
* (h+16LL)) >= INT_MAX ) { avpriv_request_sample(s->avctx, "Overflow on sprite points"); goto overflow; } } s->real_sprite_warping_points = ctx->num_sprite_warping_points; } for (i = 0; i < 4; i++) { s->sprite_offset[i&1][i>>1] = sprite_offset[i&1][i>>1]; s->sprite_delta [i&1][i>>1] = sprite_delta [i&1][i>>1]; } return 0; overflow: memset(s->sprite_offset, 0, sizeof(s->sprite_offset)); memset(s->sprite_delta, 0, sizeof(s->sprite_delta)); return AVERROR_PATCHWELCOME; } static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int len = FFMIN(ctx->time_increment_bits + 3, 15); get_bits(gb, len); if (get_bits1(gb)) get_bits(gb, len); check_marker(s->avctx, gb, "after new_pred"); return 0; } /** * Decode the next video packet. * @return <0 if something went wrong */ int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx) { MpegEncContext *s = &ctx->m; int mb_num_bits = av_log2(s->mb_num - 1) + 1; int header_extension = 0, mb_num, len; /* is there enough space left for a video packet + header */ if (get_bits_count(&s->gb) > s->gb.size_in_bits - 20) return AVERROR_INVALIDDATA; for (len = 0; len < 32; len++) if (get_bits1(&s->gb)) break; if (len != ff_mpeg4_get_video_packet_prefix_length(s)) { av_log(s->avctx, AV_LOG_ERROR, "marker does not match f_code\n"); return AVERROR_INVALIDDATA; } if (ctx->shape != RECT_SHAPE) { header_extension = get_bits1(&s->gb); // FIXME more stuff here } mb_num = get_bits(&s->gb, mb_num_bits); if (mb_num >= s->mb_num || !mb_num) { av_log(s->avctx, AV_LOG_ERROR, "illegal mb_num in video packet (%d %d) \n", mb_num, s->mb_num); return AVERROR_INVALIDDATA; } s->mb_x = mb_num % s->mb_width; s->mb_y = mb_num / s->mb_width; if (ctx->shape != BIN_ONLY_SHAPE) { int qscale = get_bits(&s->gb, s->quant_precision); if (qscale) s->chroma_qscale = s->qscale = qscale; } if (ctx->shape == RECT_SHAPE) header_extension = get_bits1(&s->gb); if (header_extension) { int time_incr = 0; while (get_bits1(&s->gb) != 0) time_incr++; check_marker(s->avctx, &s->gb, "before time_increment in video packed header"); skip_bits(&s->gb, ctx->time_increment_bits); /* time_increment */ check_marker(s->avctx, &s->gb, "before vop_coding_type in video packed header"); skip_bits(&s->gb, 2); /* vop coding type */ // FIXME not rect stuff here if (ctx->shape != BIN_ONLY_SHAPE) { skip_bits(&s->gb, 3); /* intra dc vlc threshold */ // FIXME don't just ignore everything if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE) { if (mpeg4_decode_sprite_trajectory(ctx, &s->gb) < 0) return AVERROR_INVALIDDATA; av_log(s->avctx, AV_LOG_ERROR, "untested\n"); } // FIXME reduced res stuff here if (s->pict_type != AV_PICTURE_TYPE_I) { int f_code = get_bits(&s->gb, 3); /* fcode_for */ if (f_code == 0) av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (f_code=0)\n"); } if (s->pict_type == AV_PICTURE_TYPE_B) { int b_code = get_bits(&s->gb, 3); if (b_code == 0) av_log(s->avctx, AV_LOG_ERROR, "Error, video packet header damaged (b_code=0)\n"); } } } if (ctx->new_pred) decode_new_pred(ctx, &s->gb); return 0; } static void reset_studio_dc_predictors(MpegEncContext *s) { /* Reset DC Predictors */ s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1 << (s->avctx->bits_per_raw_sample + s->dct_precision + s->intra_dc_precision - 1); } /** * Decode the next video packet. 
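 * (The summary above is copied from the video packet parser; this
 * function actually parses a studio-profile slice header.)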
* @return <0 if something went wrong */ int ff_mpeg4_decode_studio_slice_header(Mpeg4DecContext *ctx) { MpegEncContext *s = &ctx->m; GetBitContext *gb = &s->gb; unsigned vlc_len; uint16_t mb_num; if (get_bits_left(gb) >= 32 && get_bits_long(gb, 32) == SLICE_START_CODE) { vlc_len = av_log2(s->mb_width * s->mb_height) + 1; mb_num = get_bits(gb, vlc_len); if (mb_num >= s->mb_num) return AVERROR_INVALIDDATA; s->mb_x = mb_num % s->mb_width; s->mb_y = mb_num / s->mb_width; if (ctx->shape != BIN_ONLY_SHAPE) s->qscale = mpeg_get_qscale(s); if (get_bits1(gb)) { /* slice_extension_flag */ skip_bits1(gb); /* intra_slice */ skip_bits1(gb); /* slice_VOP_id_enable */ skip_bits(gb, 6); /* slice_VOP_id */ while (get_bits1(gb)) /* extra_bit_slice */ skip_bits(gb, 8); /* extra_information_slice */ } reset_studio_dc_predictors(s); } else { return AVERROR_INVALIDDATA; } return 0; } /** * Get the average motion vector for a GMC MB. * @param n either 0 for the x component or 1 for y * @return the average MV for a GMC MB */ static inline int get_amv(Mpeg4DecContext *ctx, int n) { MpegEncContext *s = &ctx->m; int x, y, mb_v, sum, dx, dy, shift; int len = 1 << (s->f_code + 4); const int a = s->sprite_warping_accuracy; if (s->workaround_bugs & FF_BUG_AMV) len >>= s->quarter_sample; if (s->real_sprite_warping_points == 1) { if (ctx->divx_version == 500 && ctx->divx_build == 413) sum = s->sprite_offset[0][n] / (1 << (a - s->quarter_sample)); else sum = RSHIFT(s->sprite_offset[0][n] * (1 << s->quarter_sample), a); } else { dx = s->sprite_delta[n][0]; dy = s->sprite_delta[n][1]; shift = ctx->sprite_shift[0]; if (n) dy -= 1 << (shift + a + 1); else dx -= 1 << (shift + a + 1); mb_v = s->sprite_offset[0][n] + dx * s->mb_x * 16 + dy * s->mb_y * 16; sum = 0; for (y = 0; y < 16; y++) { int v; v = mb_v + dy * y; // FIXME optimize for (x = 0; x < 16; x++) { sum += v >> shift; v += dx; } } sum = RSHIFT(sum, a + 8 - s->quarter_sample); } if (sum < -len) sum = -len; else if (sum >= len) sum = len - 1; return sum; } /** * Decode the dc value. * @param n block index (0-3 are luma, 4-5 are chroma) * @param dir_ptr the prediction direction will be stored here * @return the quantized dc */ static inline int mpeg4_decode_dc(MpegEncContext *s, int n, int *dir_ptr) { int level, code; if (n < 4) code = get_vlc2(&s->gb, dc_lum.table, DC_VLC_BITS, 1); else code = get_vlc2(&s->gb, dc_chrom.table, DC_VLC_BITS, 1); if (code < 0 || code > 9 /* && s->nbit < 9 */) { av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n"); return AVERROR_INVALIDDATA; } if (code == 0) { level = 0; } else { if (IS_3IV1) { if (code == 1) level = 2 * get_bits1(&s->gb) - 1; else { if (get_bits1(&s->gb)) level = get_bits(&s->gb, code - 1) + (1 << (code - 1)); else level = -get_bits(&s->gb, code - 1) - (1 << (code - 1)); } } else { level = get_xbits(&s->gb, code); } if (code > 8) { if (get_bits1(&s->gb) == 0) { /* marker */ if (s->avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)) { av_log(s->avctx, AV_LOG_ERROR, "dc marker bit missing\n"); return AVERROR_INVALIDDATA; } } } } return ff_mpeg4_pred_dc(s, n, level, dir_ptr, 0); } /** * Decode first partition. 
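 * In a data-partitioned VOP the first partition carries the DC values
 * (I-VOPs) or the motion vectors (P/S-VOPs) for every macroblock up to
 * the DC/motion marker.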
* @return number of MBs decoded or <0 if an error occurred */ static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx) { MpegEncContext *s = &ctx->m; int mb_num = 0; static const int8_t quant_tab[4] = { -1, -2, 1, 2 }; /* decode first partition */ s->first_slice_line = 1; for (; s->mb_y < s->mb_height; s->mb_y++) { ff_init_block_index(s); for (; s->mb_x < s->mb_width; s->mb_x++) { const int xy = s->mb_x + s->mb_y * s->mb_stride; int cbpc; int dir = 0; mb_num++; ff_update_block_index(s); if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1) s->first_slice_line = 0; if (s->pict_type == AV_PICTURE_TYPE_I) { int i; do { if (show_bits_long(&s->gb, 19) == DC_MARKER) return mb_num - 1; cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); if (cbpc < 0) { av_log(s->avctx, AV_LOG_ERROR, "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } } while (cbpc == 8); s->cbp_table[xy] = cbpc & 3; s->current_picture.mb_type[xy] = MB_TYPE_INTRA; s->mb_intra = 1; if (cbpc & 4) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); s->current_picture.qscale_table[xy] = s->qscale; s->mbintra_table[xy] = 1; for (i = 0; i < 6; i++) { int dc_pred_dir; int dc = mpeg4_decode_dc(s, i, &dc_pred_dir); if (dc < 0) { av_log(s->avctx, AV_LOG_ERROR, "DC corrupted at %d %d\n", s->mb_x, s->mb_y); return dc; } dir <<= 1; if (dc_pred_dir) dir |= 1; } s->pred_dir_table[xy] = dir; } else { /* P/S_TYPE */ int mx, my, pred_x, pred_y, bits; int16_t *const mot_val = s->current_picture.motion_val[0][s->block_index[0]]; const int stride = s->b8_stride * 2; try_again: bits = show_bits(&s->gb, 17); if (bits == MOTION_MARKER) return mb_num - 1; skip_bits1(&s->gb); if (bits & 0x10000) { /* skip mb */ if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE) { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_GMC | MB_TYPE_L0; mx = get_amv(ctx, 0); my = get_amv(ctx, 1); } else { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; mx = my = 0; } mot_val[0] = mot_val[2] = mot_val[0 + stride] = mot_val[2 + stride] = mx; mot_val[1] = mot_val[3] = mot_val[1 + stride] = mot_val[3 + stride] = my; if (s->mbintra_table[xy]) ff_clean_intra_table_entries(s); continue; } cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); if (cbpc < 0) { av_log(s->avctx, AV_LOG_ERROR, "mcbpc corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } if (cbpc == 20) goto try_again; s->cbp_table[xy] = cbpc & (8 + 3); // 8 is dquant s->mb_intra = ((cbpc & 4) != 0); if (s->mb_intra) { s->current_picture.mb_type[xy] = MB_TYPE_INTRA; s->mbintra_table[xy] = 1; mot_val[0] = mot_val[2] = mot_val[0 + stride] = mot_val[2 + stride] = 0; mot_val[1] = mot_val[3] = mot_val[1 + stride] = mot_val[3 + stride] = 0; } else { if (s->mbintra_table[xy]) ff_clean_intra_table_entries(s); if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE && (cbpc & 16) == 0) s->mcsel = get_bits1(&s->gb); else s->mcsel = 0; if ((cbpc & 16) == 0) { /* 16x16 motion prediction */ ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); if (!s->mcsel) { mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return AVERROR_INVALIDDATA; my = ff_h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return AVERROR_INVALIDDATA; s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; } else { mx = get_amv(ctx, 0); my = get_amv(ctx, 1); s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_GMC | 
MB_TYPE_L0; } mot_val[0] = mot_val[2] = mot_val[0 + stride] = mot_val[2 + stride] = mx; mot_val[1] = mot_val[3] = mot_val[1 + stride] = mot_val[3 + stride] = my; } else { int i; s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; for (i = 0; i < 4; i++) { int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return AVERROR_INVALIDDATA; my = ff_h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return AVERROR_INVALIDDATA; mot_val[0] = mx; mot_val[1] = my; } } } } } s->mb_x = 0; } return mb_num; } /** * decode second partition. * @return <0 if an error occurred */ static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count) { int mb_num = 0; static const int8_t quant_tab[4] = { -1, -2, 1, 2 }; s->mb_x = s->resync_mb_x; s->first_slice_line = 1; for (s->mb_y = s->resync_mb_y; mb_num < mb_count; s->mb_y++) { ff_init_block_index(s); for (; mb_num < mb_count && s->mb_x < s->mb_width; s->mb_x++) { const int xy = s->mb_x + s->mb_y * s->mb_stride; mb_num++; ff_update_block_index(s); if (s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y + 1) s->first_slice_line = 0; if (s->pict_type == AV_PICTURE_TYPE_I) { int ac_pred = get_bits1(&s->gb); int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } s->cbp_table[xy] |= cbpy << 2; s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED; } else { /* P || S_TYPE */ if (IS_INTRA(s->current_picture.mb_type[xy])) { int i; int dir = 0; int ac_pred = get_bits1(&s->gb); int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "I cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } if (s->cbp_table[xy] & 8) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); s->current_picture.qscale_table[xy] = s->qscale; for (i = 0; i < 6; i++) { int dc_pred_dir; int dc = mpeg4_decode_dc(s, i, &dc_pred_dir); if (dc < 0) { av_log(s->avctx, AV_LOG_ERROR, "DC corrupted at %d %d\n", s->mb_x, s->mb_y); return dc; } dir <<= 1; if (dc_pred_dir) dir |= 1; } s->cbp_table[xy] &= 3; // remove dquant s->cbp_table[xy] |= cbpy << 2; s->current_picture.mb_type[xy] |= ac_pred * MB_TYPE_ACPRED; s->pred_dir_table[xy] = dir; } else if (IS_SKIP(s->current_picture.mb_type[xy])) { s->current_picture.qscale_table[xy] = s->qscale; s->cbp_table[xy] = 0; } else { int cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "P cbpy corrupted at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } if (s->cbp_table[xy] & 8) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); s->current_picture.qscale_table[xy] = s->qscale; s->cbp_table[xy] &= 3; // remove dquant s->cbp_table[xy] |= (cbpy ^ 0xf) << 2; } } } if (mb_num >= mb_count) return 0; s->mb_x = 0; } return 0; } /** * Decode the first and second partition. * @return <0 if error (and sets error type in the error_status_table) */ int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx) { MpegEncContext *s = &ctx->m; int mb_num; int ret; const int part_a_error = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_ERROR | ER_MV_ERROR) : ER_MV_ERROR; const int part_a_end = s->pict_type == AV_PICTURE_TYPE_I ? 
(ER_DC_END | ER_MV_END) : ER_MV_END; mb_num = mpeg4_decode_partition_a(ctx); if (mb_num <= 0) { ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, part_a_error); return mb_num ? mb_num : AVERROR_INVALIDDATA; } if (s->resync_mb_x + s->resync_mb_y * s->mb_width + mb_num > s->mb_num) { av_log(s->avctx, AV_LOG_ERROR, "slice below monitor ...\n"); ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, part_a_error); return AVERROR_INVALIDDATA; } s->mb_num_left = mb_num; if (s->pict_type == AV_PICTURE_TYPE_I) { while (show_bits(&s->gb, 9) == 1) skip_bits(&s->gb, 9); if (get_bits_long(&s->gb, 19) != DC_MARKER) { av_log(s->avctx, AV_LOG_ERROR, "marker missing after first I partition at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } } else { while (show_bits(&s->gb, 10) == 1) skip_bits(&s->gb, 10); if (get_bits(&s->gb, 17) != MOTION_MARKER) { av_log(s->avctx, AV_LOG_ERROR, "marker missing after first P partition at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } } ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1, s->mb_y, part_a_end); ret = mpeg4_decode_partition_b(s, mb_num); if (ret < 0) { if (s->pict_type == AV_PICTURE_TYPE_P) ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_DC_ERROR); return ret; } else { if (s->pict_type == AV_PICTURE_TYPE_P) ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1, s->mb_y, ER_DC_END); } return 0; } /** * Decode a block. * @return <0 if an error occurred */ static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block, int n, int coded, int intra, int rvlc) { MpegEncContext *s = &ctx->m; int level, i, last, run, qmul, qadd; int av_uninit(dc_pred_dir); RLTable *rl; RL_VLC_ELEM *rl_vlc; const uint8_t *scan_table; // Note intra & rvlc should be optimized away if this is inlined if (intra) { if (ctx->use_intra_dc_vlc) { /* DC coef */ if (s->partitioned_frame) { level = s->dc_val[0][s->block_index[n]]; if (n < 4) level = FASTDIV((level + (s->y_dc_scale >> 1)), s->y_dc_scale); else level = FASTDIV((level + (s->c_dc_scale >> 1)), s->c_dc_scale); dc_pred_dir = (s->pred_dir_table[s->mb_x + s->mb_y * s->mb_stride] << n) & 32; } else { level = mpeg4_decode_dc(s, n, &dc_pred_dir); if (level < 0) return level; } block[0] = level; i = 0; } else { i = -1; ff_mpeg4_pred_dc(s, n, 0, &dc_pred_dir, 0); } if (!coded) goto not_coded; if (rvlc) { rl = &ff_rvlc_rl_intra; rl_vlc = ff_rvlc_rl_intra.rl_vlc[0]; } else { rl = &ff_mpeg4_rl_intra; rl_vlc = ff_mpeg4_rl_intra.rl_vlc[0]; } if (s->ac_pred) { if (dc_pred_dir == 0) scan_table = s->intra_v_scantable.permutated; /* left */ else scan_table = s->intra_h_scantable.permutated; /* top */ } else { scan_table = s->intra_scantable.permutated; } qmul = 1; qadd = 0; } else { i = -1; if (!coded) { s->block_last_index[n] = i; return 0; } if (rvlc) rl = &ff_rvlc_rl_inter; else rl = &ff_h263_rl_inter; scan_table = s->intra_scantable.permutated; if (s->mpeg_quant) { qmul = 1; qadd = 0; if (rvlc) rl_vlc = ff_rvlc_rl_inter.rl_vlc[0]; else rl_vlc = ff_h263_rl_inter.rl_vlc[0]; } else { qmul = s->qscale << 1; qadd = (s->qscale - 1) | 1; if (rvlc) rl_vlc = ff_rvlc_rl_inter.rl_vlc[s->qscale]; else rl_vlc = ff_h263_rl_inter.rl_vlc[s->qscale]; } } { OPEN_READER(re, &s->gb); for (;;) { UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 0); if (level == 0) { /* escape */ if (rvlc) { if (SHOW_UBITS(re, &s->gb, 1) == 0) { av_log(s->avctx, AV_LOG_ERROR, "1. 
marker bit missing in rvlc esc\n"); return AVERROR_INVALIDDATA; } SKIP_CACHE(re, &s->gb, 1); last = SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1); run = SHOW_UBITS(re, &s->gb, 6); SKIP_COUNTER(re, &s->gb, 1 + 1 + 6); UPDATE_CACHE(re, &s->gb); if (SHOW_UBITS(re, &s->gb, 1) == 0) { av_log(s->avctx, AV_LOG_ERROR, "2. marker bit missing in rvlc esc\n"); return AVERROR_INVALIDDATA; } SKIP_CACHE(re, &s->gb, 1); level = SHOW_UBITS(re, &s->gb, 11); SKIP_CACHE(re, &s->gb, 11); if (SHOW_UBITS(re, &s->gb, 5) != 0x10) { av_log(s->avctx, AV_LOG_ERROR, "reverse esc missing\n"); return AVERROR_INVALIDDATA; } SKIP_CACHE(re, &s->gb, 5); level = level * qmul + qadd; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); SKIP_COUNTER(re, &s->gb, 1 + 11 + 5 + 1); i += run + 1; if (last) i += 192; } else { int cache; cache = GET_CACHE(re, &s->gb); if (IS_3IV1) cache ^= 0xC0000000; if (cache & 0x80000000) { if (cache & 0x40000000) { /* third escape */ SKIP_CACHE(re, &s->gb, 2); last = SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1); run = SHOW_UBITS(re, &s->gb, 6); SKIP_COUNTER(re, &s->gb, 2 + 1 + 6); UPDATE_CACHE(re, &s->gb); if (IS_3IV1) { level = SHOW_SBITS(re, &s->gb, 12); LAST_SKIP_BITS(re, &s->gb, 12); } else { if (SHOW_UBITS(re, &s->gb, 1) == 0) { av_log(s->avctx, AV_LOG_ERROR, "1. marker bit missing in 3. esc\n"); if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR)) return AVERROR_INVALIDDATA; } SKIP_CACHE(re, &s->gb, 1); level = SHOW_SBITS(re, &s->gb, 12); SKIP_CACHE(re, &s->gb, 12); if (SHOW_UBITS(re, &s->gb, 1) == 0) { av_log(s->avctx, AV_LOG_ERROR, "2. marker bit missing in 3. esc\n"); if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR)) return AVERROR_INVALIDDATA; } SKIP_COUNTER(re, &s->gb, 1 + 12 + 1); } #if 0 if (s->error_recognition >= FF_ER_COMPLIANT) { const int abs_level= FFABS(level); if (abs_level<=MAX_LEVEL && run<=MAX_RUN) { const int run1= run - rl->max_run[last][abs_level] - 1; if (abs_level <= rl->max_level[last][run]) { av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, vlc encoding possible\n"); return AVERROR_INVALIDDATA; } if (s->error_recognition > FF_ER_COMPLIANT) { if (abs_level <= rl->max_level[last][run]*2) { av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 1 encoding possible\n"); return AVERROR_INVALIDDATA; } if (run1 >= 0 && abs_level <= rl->max_level[last][run1]) { av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 2 encoding possible\n"); return AVERROR_INVALIDDATA; } } } } #endif if (level > 0) level = level * qmul + qadd; else level = level * qmul - qadd; if ((unsigned)(level + 2048) > 4095) { if (s->avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_AGGRESSIVE)) { if (level > 2560 || level < -2560) { av_log(s->avctx, AV_LOG_ERROR, "|level| overflow in 3. esc, qp=%d\n", s->qscale); return AVERROR_INVALIDDATA; } } level = level < 0 ? 
-2048 : 2047; } i += run + 1; if (last) i += 192; } else { /* second escape */ SKIP_BITS(re, &s->gb, 2); GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1); i += run + rl->max_run[run >> 7][level / qmul] + 1; // FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } } else { /* first escape */ SKIP_BITS(re, &s->gb, 1); GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1); i += run; level = level + rl->max_level[run >> 7][(run - 1) & 63] * qmul; // FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } } } else { i += run; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } ff_tlog(s->avctx, "dct[%d][%d] = %- 4d end?:%d\n", scan_table[i&63]&7, scan_table[i&63] >> 3, level, i>62); if (i > 62) { i -= 192; if (i & (~63)) { av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } block[scan_table[i]] = level; break; } block[scan_table[i]] = level; } CLOSE_READER(re, &s->gb); } not_coded: if (intra) { if (!ctx->use_intra_dc_vlc) { block[0] = ff_mpeg4_pred_dc(s, n, block[0], &dc_pred_dir, 0); i -= i >> 31; // if (i == -1) i = 0; } ff_mpeg4_pred_ac(s, block, n, dc_pred_dir); if (s->ac_pred) i = 63; // FIXME not optimal } s->block_last_index[n] = i; return 0; } /** * decode partition C of one MB. * @return <0 if an error occurred */ static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64]) { Mpeg4DecContext *ctx = s->avctx->priv_data; int cbp, mb_type; const int xy = s->mb_x + s->mb_y * s->mb_stride; av_assert2(s == (void*)ctx); mb_type = s->current_picture.mb_type[xy]; cbp = s->cbp_table[xy]; ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold; if (s->current_picture.qscale_table[xy] != s->qscale) ff_set_qscale(s, s->current_picture.qscale_table[xy]); if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_S) { int i; for (i = 0; i < 4; i++) { s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; } s->mb_intra = IS_INTRA(mb_type); if (IS_SKIP(mb_type)) { /* skip mb */ for (i = 0; i < 6; i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE) { s->mcsel = 1; s->mb_skipped = 0; } else { s->mcsel = 0; s->mb_skipped = 1; } } else if (s->mb_intra) { s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]); } else if (!s->mb_intra) { // s->mcsel = 0; // FIXME do we need to init that? s->mv_dir = MV_DIR_FORWARD; if (IS_8X8(mb_type)) { s->mv_type = MV_TYPE_8X8; } else { s->mv_type = MV_TYPE_16X16; } } } else { /* I-Frame */ s->mb_intra = 1; s->ac_pred = IS_ACPRED(s->current_picture.mb_type[xy]); } if (!IS_SKIP(mb_type)) { int i; s->bdsp.clear_blocks(s->block[0]); /* decode each block */ for (i = 0; i < 6; i++) { if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, s->mb_intra, ctx->rvlc) < 0) { av_log(s->avctx, AV_LOG_ERROR, "texture corrupted at %d %d %d\n", s->mb_x, s->mb_y, s->mb_intra); return AVERROR_INVALIDDATA; } cbp += cbp; } } /* per-MB end of slice check */ if (--s->mb_num_left <= 0) { if (mpeg4_is_resync(ctx)) return SLICE_END; else return SLICE_NOEND; } else { if (mpeg4_is_resync(ctx)) { const int delta = s->mb_x + 1 == s->mb_width ? 
2 : 1; if (s->cbp_table[xy + delta]) return SLICE_END; } return SLICE_OK; } } static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64]) { Mpeg4DecContext *ctx = s->avctx->priv_data; int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant; int16_t *mot_val; static const int8_t quant_tab[4] = { -1, -2, 1, 2 }; const int xy = s->mb_x + s->mb_y * s->mb_stride; av_assert2(s == (void*)ctx); av_assert2(s->h263_pred); if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_S) { do { if (get_bits1(&s->gb)) { /* skip mb */ s->mb_intra = 0; for (i = 0; i < 6; i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE) { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; s->mcsel = 1; s->mv[0][0][0] = get_amv(ctx, 0); s->mv[0][0][1] = get_amv(ctx, 1); s->mb_skipped = 0; } else { s->current_picture.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0; s->mcsel = 0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mb_skipped = 1; } goto end; } cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2); if (cbpc < 0) { av_log(s->avctx, AV_LOG_ERROR, "mcbpc damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } } while (cbpc == 20); s->bdsp.clear_blocks(s->block[0]); dquant = cbpc & 8; s->mb_intra = ((cbpc & 4) != 0); if (s->mb_intra) goto intra; if (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE && (cbpc & 16) == 0) s->mcsel = get_bits1(&s->gb); else s->mcsel = 0; cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1) ^ 0x0F; if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "P cbpy damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } cbp = (cbpc & 3) | (cbpy << 2); if (dquant) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); if ((!s->progressive_sequence) && (cbp || (s->workaround_bugs & FF_BUG_XVID_ILACE))) s->interlaced_dct = get_bits1(&s->gb); s->mv_dir = MV_DIR_FORWARD; if ((cbpc & 16) == 0) { if (s->mcsel) { s->current_picture.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 global motion prediction */ s->mv_type = MV_TYPE_16X16; mx = get_amv(ctx, 0); my = get_amv(ctx, 1); s->mv[0][0][0] = mx; s->mv[0][0][1] = my; } else if ((!s->progressive_sequence) && get_bits1(&s->gb)) { s->current_picture.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_L0 | MB_TYPE_INTERLACED; /* 16x8 field motion prediction */ s->mv_type = MV_TYPE_FIELD; s->field_select[0][0] = get_bits1(&s->gb); s->field_select[0][1] = get_bits1(&s->gb); ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); for (i = 0; i < 2; i++) { mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return AVERROR_INVALIDDATA; my = ff_h263_decode_motion(s, pred_y / 2, s->f_code); if (my >= 0xffff) return AVERROR_INVALIDDATA; s->mv[0][i][0] = mx; s->mv[0][i][1] = my; } } else { s->current_picture.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0; /* 16x16 motion prediction */ s->mv_type = MV_TYPE_16X16; ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); mx = ff_h263_decode_motion(s, pred_x, s->f_code); if (mx >= 0xffff) return AVERROR_INVALIDDATA; my = ff_h263_decode_motion(s, pred_y, s->f_code); if (my >= 0xffff) return AVERROR_INVALIDDATA; s->mv[0][0][0] = mx; s->mv[0][0][1] = my; } } else { s->current_picture.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0; s->mv_type = MV_TYPE_8X8; for (i = 0; i < 4; i++) { mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); mx = ff_h263_decode_motion(s, 
pred_x, s->f_code);
                    if (mx >= 0xffff)
                        return AVERROR_INVALIDDATA;

                    my = ff_h263_decode_motion(s, pred_y, s->f_code);
                    if (my >= 0xffff)
                        return AVERROR_INVALIDDATA;
                    s->mv[0][i][0] = mx;
                    s->mv[0][i][1] = my;
                    mot_val[0] = mx;
                    mot_val[1] = my;
                }
            }
        } else if (s->pict_type == AV_PICTURE_TYPE_B) {
            int modb1;   // first bit of modb
            int modb2;   // second bit of modb
            int mb_type;

            s->mb_intra = 0; // B-frames never contain intra blocks
            s->mcsel    = 0; //      ...               true gmc blocks

            if (s->mb_x == 0) {
                for (i = 0; i < 2; i++) {
                    s->last_mv[i][0][0] =
                    s->last_mv[i][0][1] =
                    s->last_mv[i][1][0] =
                    s->last_mv[i][1][1] = 0;
                }

                ff_thread_await_progress(&s->next_picture_ptr->tf, s->mb_y, 0);
            }

            /* if we skipped it in the future P-frame then skip it now too */
            s->mb_skipped = s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]; // Note, skiptab=0 if last was GMC

            if (s->mb_skipped) {
                /* skip mb */
                for (i = 0; i < 6; i++)
                    s->block_last_index[i] = -1;

                s->mv_dir      = MV_DIR_FORWARD;
                s->mv_type     = MV_TYPE_16X16;
                s->mv[0][0][0] =
                s->mv[0][0][1] =
                s->mv[1][0][0] =
                s->mv[1][0][1] = 0;
                s->current_picture.mb_type[xy] = MB_TYPE_SKIP |
                                                 MB_TYPE_16x16 |
                                                 MB_TYPE_L0;
                goto end;
            }

            modb1 = get_bits1(&s->gb);
            if (modb1) {
                // like MB_TYPE_B_DIRECT but no vectors coded
                mb_type = MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_L0L1;
                cbp     = 0;
            } else {
                modb2   = get_bits1(&s->gb);
                mb_type = get_vlc2(&s->gb, mb_type_b_vlc.table, MB_TYPE_B_VLC_BITS, 1);
                if (mb_type < 0) {
                    av_log(s->avctx, AV_LOG_ERROR, "illegal MB_type\n");
                    return AVERROR_INVALIDDATA;
                }
                mb_type = mb_type_b_map[mb_type];
                if (modb2) {
                    cbp = 0;
                } else {
                    s->bdsp.clear_blocks(s->block[0]);
                    cbp = get_bits(&s->gb, 6);
                }

                if ((!IS_DIRECT(mb_type)) && cbp) {
                    if (get_bits1(&s->gb))
                        ff_set_qscale(s, s->qscale + get_bits1(&s->gb) * 4 - 2);
                }

                if (!s->progressive_sequence) {
                    if (cbp)
                        s->interlaced_dct = get_bits1(&s->gb);

                    if (!IS_DIRECT(mb_type) && get_bits1(&s->gb)) {
                        mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
                        mb_type &= ~MB_TYPE_16x16;

                        if (USES_LIST(mb_type, 0)) {
                            s->field_select[0][0] = get_bits1(&s->gb);
                            s->field_select[0][1] = get_bits1(&s->gb);
                        }
                        if (USES_LIST(mb_type, 1)) {
                            s->field_select[1][0] = get_bits1(&s->gb);
                            s->field_select[1][1] = get_bits1(&s->gb);
                        }
                    }
                }

                s->mv_dir = 0;
                if ((mb_type & (MB_TYPE_DIRECT2 | MB_TYPE_INTERLACED)) == 0) {
                    s->mv_type = MV_TYPE_16X16;

                    if (USES_LIST(mb_type, 0)) {
                        s->mv_dir = MV_DIR_FORWARD;

                        mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
                        my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
                        s->last_mv[0][1][0] =
                        s->last_mv[0][0][0] =
                        s->mv[0][0][0]      = mx;
                        s->last_mv[0][1][1] =
                        s->last_mv[0][0][1] =
                        s->mv[0][0][1]      = my;
                    }

                    if (USES_LIST(mb_type, 1)) {
                        s->mv_dir |= MV_DIR_BACKWARD;

                        mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
                        my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
                        s->last_mv[1][1][0] =
                        s->last_mv[1][0][0] =
                        s->mv[1][0][0]      = mx;
                        s->last_mv[1][1][1] =
                        s->last_mv[1][0][1] =
                        s->mv[1][0][1]      = my;
                    }
                } else if (!IS_DIRECT(mb_type)) {
                    s->mv_type = MV_TYPE_FIELD;

                    if (USES_LIST(mb_type, 0)) {
                        s->mv_dir = MV_DIR_FORWARD;

                        for (i = 0; i < 2; i++) {
                            mx = ff_h263_decode_motion(s, s->last_mv[0][i][0], s->f_code);
                            my = ff_h263_decode_motion(s, s->last_mv[0][i][1] / 2, s->f_code);
                            s->last_mv[0][i][0] = s->mv[0][i][0] = mx;
                            s->last_mv[0][i][1] = (s->mv[0][i][1] = my) * 2;
                        }
                    }

                    if (USES_LIST(mb_type, 1)) {
                        s->mv_dir |= MV_DIR_BACKWARD;

                        for (i = 0; i < 2; i++) {
                            mx = ff_h263_decode_motion(s, s->last_mv[1][i][0], s->b_code);
                            my = ff_h263_decode_motion(s, s->last_mv[1][i][1] / 2, s->b_code);
                            s->last_mv[1][i][0] =
s->mv[1][i][0] = mx; s->last_mv[1][i][1] = (s->mv[1][i][1] = my) * 2; } } } } if (IS_DIRECT(mb_type)) { if (IS_SKIP(mb_type)) { mx = my = 0; } else { mx = ff_h263_decode_motion(s, 0, 1); my = ff_h263_decode_motion(s, 0, 1); } s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; mb_type |= ff_mpeg4_set_direct_mv(s, mx, my); } s->current_picture.mb_type[xy] = mb_type; } else { /* I-Frame */ do { cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); if (cbpc < 0) { av_log(s->avctx, AV_LOG_ERROR, "I cbpc damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } } while (cbpc == 8); dquant = cbpc & 4; s->mb_intra = 1; intra: s->ac_pred = get_bits1(&s->gb); if (s->ac_pred) s->current_picture.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED; else s->current_picture.mb_type[xy] = MB_TYPE_INTRA; cbpy = get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1); if (cbpy < 0) { av_log(s->avctx, AV_LOG_ERROR, "I cbpy damaged at %d %d\n", s->mb_x, s->mb_y); return AVERROR_INVALIDDATA; } cbp = (cbpc & 3) | (cbpy << 2); ctx->use_intra_dc_vlc = s->qscale < ctx->intra_dc_threshold; if (dquant) ff_set_qscale(s, s->qscale + quant_tab[get_bits(&s->gb, 2)]); if (!s->progressive_sequence) s->interlaced_dct = get_bits1(&s->gb); s->bdsp.clear_blocks(s->block[0]); /* decode each block */ for (i = 0; i < 6; i++) { if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 1, 0) < 0) return AVERROR_INVALIDDATA; cbp += cbp; } goto end; } /* decode each block */ for (i = 0; i < 6; i++) { if (mpeg4_decode_block(ctx, block[i], i, cbp & 32, 0, 0) < 0) return AVERROR_INVALIDDATA; cbp += cbp; } end: /* per-MB end of slice check */ if (s->codec_id == AV_CODEC_ID_MPEG4) { int next = mpeg4_is_resync(ctx); if (next) { if (s->mb_x + s->mb_y*s->mb_width + 1 > next && (s->avctx->err_recognition & AV_EF_AGGRESSIVE)) { return AVERROR_INVALIDDATA; } else if (s->mb_x + s->mb_y*s->mb_width + 1 >= next) return SLICE_END; if (s->pict_type == AV_PICTURE_TYPE_B) { const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1; ff_thread_await_progress(&s->next_picture_ptr->tf, (s->mb_x + delta >= s->mb_width) ? 
FFMIN(s->mb_y + 1, s->mb_height - 1) : s->mb_y, 0);
                if (s->next_picture.mbskip_table[xy + delta])
                    return SLICE_OK;
            }
            return SLICE_END;
        }
    }

    return SLICE_OK;
}

/* As per spec, the studio profile start code search is not the same as the
 * classic start code search. */
static void next_start_code_studio(GetBitContext *gb)
{
    align_get_bits(gb);

    while (get_bits_left(gb) >= 24 && show_bits_long(gb, 24) != 0x1) {
        get_bits(gb, 8);
    }
}

/* additional_code, vlc index */
static const uint8_t ac_state_tab[22][2] =
{
    {0, 0}, {0, 1}, {1, 1}, {2, 1}, {3, 1}, {4, 1}, {5, 1},
    {1, 2}, {2, 2}, {3, 2}, {4, 2}, {5, 2}, {6, 2},
    {1, 3}, {2, 4}, {3, 5}, {4, 6}, {5, 7}, {6, 8},
    {7, 9}, {8, 10}, {0, 11}
};

static int mpeg4_decode_studio_block(MpegEncContext *s, int32_t block[64], int n)
{
    Mpeg4DecContext *ctx = s->avctx->priv_data;

    int cc, dct_dc_size, dct_diff, code, j, idx = 1, group = 0, run = 0,
        additional_code_len, sign, mismatch;
    VLC *cur_vlc = &ctx->studio_intra_tab[0];
    uint8_t *const scantable = s->intra_scantable.permutated;
    const uint16_t *quant_matrix;
    uint32_t flc;
    const int min = -1 *  (1 << (s->avctx->bits_per_raw_sample + 6));
    const int max =      ((1 << (s->avctx->bits_per_raw_sample + 6)) - 1);

    mismatch = 1;
    memset(block, 0, 64 * sizeof(int32_t));

    if (n < 4) {
        cc = 0;
        dct_dc_size = get_vlc2(&s->gb, ctx->studio_luma_dc.table, STUDIO_INTRA_BITS, 2);
        quant_matrix = s->intra_matrix;
    } else {
        cc = (n & 1) + 1;
        if (ctx->rgb)
            dct_dc_size = get_vlc2(&s->gb, ctx->studio_luma_dc.table, STUDIO_INTRA_BITS, 2);
        else
            dct_dc_size = get_vlc2(&s->gb, ctx->studio_chroma_dc.table, STUDIO_INTRA_BITS, 2);
        quant_matrix = s->chroma_intra_matrix;
    }

    if (dct_dc_size < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "illegal dct_dc_size vlc\n");
        return AVERROR_INVALIDDATA;
    } else if (dct_dc_size == 0) {
        dct_diff = 0;
    } else {
        dct_diff = get_xbits(&s->gb, dct_dc_size);

        if (dct_dc_size > 8) {
            if (!check_marker(s->avctx, &s->gb, "dct_dc_size > 8"))
                return AVERROR_INVALIDDATA;
        }
    }

    s->last_dc[cc] += dct_diff;

    if (s->mpeg_quant)
        block[0] = s->last_dc[cc] * (8 >> s->intra_dc_precision);
    else
        block[0] = s->last_dc[cc] * (8 >> s->intra_dc_precision) * (8 >> s->dct_precision);
    /* TODO: support mpeg_quant for AC coefficients */

    block[0] = av_clip(block[0], min, max);
    mismatch ^= block[0];

    /* AC Coefficients */
    while (1) {
        group = get_vlc2(&s->gb, cur_vlc->table, STUDIO_INTRA_BITS, 2);

        if (group < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "illegal ac coefficient group vlc\n");
            return AVERROR_INVALIDDATA;
        }

        additional_code_len = ac_state_tab[group][0];
        cur_vlc = &ctx->studio_intra_tab[ac_state_tab[group][1]];

        if (group == 0) {
            /* End of Block */
            break;
        } else if (group >= 1 && group <= 6) {
            /* Zero run length (Table B.47) */
            run = 1 << additional_code_len;
            if (additional_code_len)
                run += get_bits(&s->gb, additional_code_len);
            idx += run;
            continue;
        } else if (group >= 7 && group <= 12) {
            /* Zero run length and +/-1 level (Table B.48) */
            code = get_bits(&s->gb, additional_code_len);
            sign = code & 1;
            code >>= 1;
            run = (1 << (additional_code_len - 1)) + code;
            idx += run;
            /* idx is driven by bitstream data; bound it before indexing the
             * 64-entry scantable, otherwise a crafted stream can read (and,
             * via j, write) out of bounds */
            if (idx > 63)
                return AVERROR_INVALIDDATA;
            j = scantable[idx++];
            block[j] = sign ? 1 : -1;
        } else if (group >= 13 && group <= 20) {
            /* Level value (Table B.49) */
            if (idx > 63)
                return AVERROR_INVALIDDATA;
            j = scantable[idx++];
            block[j] = get_xbits(&s->gb, additional_code_len);
        } else if (group == 21) {
            /* Escape */
            if (idx > 63)
                return AVERROR_INVALIDDATA;
            j = scantable[idx++];
            additional_code_len = s->avctx->bits_per_raw_sample + s->dct_precision + 4;
            flc = get_bits(&s->gb, additional_code_len);
            if (flc >> (additional_code_len - 1))
                block[j] = -1 * ((flc ^ ((1 << additional_code_len) - 1)) + 1);
            else
                block[j] = flc;
        }
        block[j] = ((8 * 2 * block[j] * quant_matrix[j] * s->qscale) >> s->dct_precision) / 32;
        block[j] = av_clip(block[j], min, max);
        mismatch ^= block[j];
    }

    block[63] ^= mismatch & 1;

    return 0;
}

static int mpeg4_decode_studio_mb(MpegEncContext *s, int16_t block_[12][64])
{
    int i;

    /* StudioMacroblock */
    /* Assumes I-VOP */
    s->mb_intra = 1;
    if (get_bits1(&s->gb)) { /* compression_mode */
        /* DCT */
        /* macroblock_type, 1 or 2-bit VLC */
        if (!get_bits1(&s->gb)) {
            skip_bits1(&s->gb);
            s->qscale = mpeg_get_qscale(s);
        }

        for (i = 0; i < mpeg4_block_count[s->chroma_format]; i++) {
            if (mpeg4_decode_studio_block(s, (*s->block32)[i], i) < 0)
                return AVERROR_INVALIDDATA;
        }
    } else {
        /* DPCM */
        check_marker(s->avctx, &s->gb, "DPCM block start");
        avpriv_request_sample(s->avctx, "DPCM encoded block");
        next_start_code_studio(&s->gb);
        return SLICE_ERROR;
    }

    if (get_bits_left(&s->gb) >= 24 && show_bits(&s->gb, 23) == 0) {
        next_start_code_studio(&s->gb);
        return SLICE_END;
    }

    return SLICE_OK;
}

static int mpeg4_decode_gop_header(MpegEncContext *s, GetBitContext *gb)
{
    int hours, minutes, seconds;

    if (!show_bits(gb, 23)) {
        av_log(s->avctx, AV_LOG_WARNING, "GOP header invalid\n");
        return AVERROR_INVALIDDATA;
    }

    hours   = get_bits(gb, 5);
    minutes = get_bits(gb, 6);
    check_marker(s->avctx, gb, "in gop_header");
    seconds = get_bits(gb, 6);

    s->time_base = seconds + 60 * (minutes + 60 * hours);

    skip_bits1(gb);
    skip_bits1(gb);

    return 0;
}

static int mpeg4_decode_profile_level(MpegEncContext *s, GetBitContext *gb, int *profile, int *level)
{
    *profile = get_bits(gb, 4);
    *level   = get_bits(gb, 4);

    // for Simple profile, level 0
    if (*profile == 0 && *level == 8) {
        *level = 0;
    }

    return 0;
}

static int mpeg4_decode_visual_object(MpegEncContext *s, GetBitContext *gb)
{
    int visual_object_type;
    int is_visual_object_identifier = get_bits1(gb);

    if (is_visual_object_identifier) {
        skip_bits(gb, 4 + 3);
    }
    visual_object_type = get_bits(gb, 4);
    if (visual_object_type == VOT_VIDEO_ID ||
        visual_object_type == VOT_STILL_TEXTURE_ID) {
        int video_signal_type = get_bits1(gb);
        if (video_signal_type) {
            int video_range, color_description;
            skip_bits(gb, 3); // video_format
            video_range = get_bits1(gb);
            color_description = get_bits1(gb);

            s->avctx->color_range = video_range ?
AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;

            if (color_description) {
                s->avctx->color_primaries = get_bits(gb, 8);
                s->avctx->color_trc       = get_bits(gb, 8);
                s->avctx->colorspace      = get_bits(gb, 8);
            }
        }
    }

    return 0;
}

static void mpeg4_load_default_matrices(MpegEncContext *s)
{
    int i, v;

    /* load default matrices */
    for (i = 0; i < 64; i++) {
        int j = s->idsp.idct_permutation[i];

        v = ff_mpeg4_default_intra_matrix[i];
        s->intra_matrix[j]        = v;
        s->chroma_intra_matrix[j] = v;

        v = ff_mpeg4_default_non_intra_matrix[i];
        s->inter_matrix[j]        = v;
        s->chroma_inter_matrix[j] = v;
    }
}

static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
{
    MpegEncContext *s = &ctx->m;
    int width, height, vo_ver_id;

    /* vol header */
    skip_bits(gb, 1);               /* random access */
    s->vo_type = get_bits(gb, 8);

    /* If we are in studio profile (per vo_type), check that it is all
     * consistent, and if so pass control to decode_studio_vol_header().
     * If something is inconsistent, error out;
     * else continue with (non-studio) VOL header decoding. */
    if (s->vo_type == CORE_STUDIO_VO_TYPE ||
        s->vo_type == SIMPLE_STUDIO_VO_TYPE) {
        if (s->avctx->profile != FF_PROFILE_UNKNOWN &&
            s->avctx->profile != FF_PROFILE_MPEG4_SIMPLE_STUDIO)
            return AVERROR_INVALIDDATA;
        s->studio_profile = 1;
        s->avctx->profile = FF_PROFILE_MPEG4_SIMPLE_STUDIO;
        return decode_studio_vol_header(ctx, gb);
    } else if (s->studio_profile) {
        return AVERROR_PATCHWELCOME;
    }

    if (get_bits1(gb) != 0) {           /* is_ol_id */
        vo_ver_id = get_bits(gb, 4);    /* vo_ver_id */
        skip_bits(gb, 3);               /* vo_priority */
    } else {
        vo_ver_id = 1;
    }
    s->aspect_ratio_info = get_bits(gb, 4);
    if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
        s->avctx->sample_aspect_ratio.num = get_bits(gb, 8);  // par_width
        s->avctx->sample_aspect_ratio.den = get_bits(gb, 8);  // par_height
    } else {
        s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info];
    }

    if ((ctx->vol_control_parameters = get_bits1(gb))) { /* vol control parameter */
        int chroma_format = get_bits(gb, 2);
        if (chroma_format != CHROMA_420)
            av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n");

        s->low_delay = get_bits1(gb);
        if (get_bits1(gb)) {    /* vbv parameters */
            get_bits(gb, 15);   /* first_half_bitrate */
            check_marker(s->avctx, gb, "after first_half_bitrate");
            get_bits(gb, 15);   /* latter_half_bitrate */
            check_marker(s->avctx, gb, "after latter_half_bitrate");
            get_bits(gb, 15);   /* first_half_vbv_buffer_size */
            check_marker(s->avctx, gb, "after first_half_vbv_buffer_size");
            get_bits(gb, 3);    /* latter_half_vbv_buffer_size */
            get_bits(gb, 11);   /* first_half_vbv_occupancy */
            check_marker(s->avctx, gb, "after first_half_vbv_occupancy");
            get_bits(gb, 15);   /* latter_half_vbv_occupancy */
            check_marker(s->avctx, gb, "after latter_half_vbv_occupancy");
        }
    } else {
        /* is setting low delay flag only once the smartest thing to do?
         * low delay detection will not be overridden.
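         * (As implemented, the guess made on the first picture is kept for
         * the whole sequence.)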
*/ if (s->picture_number == 0) { switch(s->vo_type) { case SIMPLE_VO_TYPE: case ADV_SIMPLE_VO_TYPE: s->low_delay = 1; break; default: s->low_delay = 0; } } } ctx->shape = get_bits(gb, 2); /* vol shape */ if (ctx->shape != RECT_SHAPE) av_log(s->avctx, AV_LOG_ERROR, "only rectangular vol supported\n"); if (ctx->shape == GRAY_SHAPE && vo_ver_id != 1) { av_log(s->avctx, AV_LOG_ERROR, "Gray shape not supported\n"); skip_bits(gb, 4); /* video_object_layer_shape_extension */ } check_marker(s->avctx, gb, "before time_increment_resolution"); s->avctx->framerate.num = get_bits(gb, 16); if (!s->avctx->framerate.num) { av_log(s->avctx, AV_LOG_ERROR, "framerate==0\n"); return AVERROR_INVALIDDATA; } ctx->time_increment_bits = av_log2(s->avctx->framerate.num - 1) + 1; if (ctx->time_increment_bits < 1) ctx->time_increment_bits = 1; check_marker(s->avctx, gb, "before fixed_vop_rate"); if (get_bits1(gb) != 0) /* fixed_vop_rate */ s->avctx->framerate.den = get_bits(gb, ctx->time_increment_bits); else s->avctx->framerate.den = 1; s->avctx->time_base = av_inv_q(av_mul_q(s->avctx->framerate, (AVRational){s->avctx->ticks_per_frame, 1})); ctx->t_frame = 0; if (ctx->shape != BIN_ONLY_SHAPE) { if (ctx->shape == RECT_SHAPE) { check_marker(s->avctx, gb, "before width"); width = get_bits(gb, 13); check_marker(s->avctx, gb, "before height"); height = get_bits(gb, 13); check_marker(s->avctx, gb, "after height"); if (width && height && /* they should be non zero but who knows */ !(s->width && s->codec_tag == AV_RL32("MP4S"))) { if (s->width && s->height && (s->width != width || s->height != height)) s->context_reinit = 1; s->width = width; s->height = height; } } s->progressive_sequence = s->progressive_frame = get_bits1(gb) ^ 1; s->interlaced_dct = 0; if (!get_bits1(gb) && (s->avctx->debug & FF_DEBUG_PICT_INFO)) av_log(s->avctx, AV_LOG_INFO, /* OBMC Disable */ "MPEG-4 OBMC not supported (very likely buggy encoder)\n"); if (vo_ver_id == 1) ctx->vol_sprite_usage = get_bits1(gb); /* vol_sprite_usage */ else ctx->vol_sprite_usage = get_bits(gb, 2); /* vol_sprite_usage */ if (ctx->vol_sprite_usage == STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "Static Sprites not supported\n"); if (ctx->vol_sprite_usage == STATIC_SPRITE || ctx->vol_sprite_usage == GMC_SPRITE) { if (ctx->vol_sprite_usage == STATIC_SPRITE) { skip_bits(gb, 13); // sprite_width check_marker(s->avctx, gb, "after sprite_width"); skip_bits(gb, 13); // sprite_height check_marker(s->avctx, gb, "after sprite_height"); skip_bits(gb, 13); // sprite_left check_marker(s->avctx, gb, "after sprite_left"); skip_bits(gb, 13); // sprite_top check_marker(s->avctx, gb, "after sprite_top"); } ctx->num_sprite_warping_points = get_bits(gb, 6); if (ctx->num_sprite_warping_points > 3) { av_log(s->avctx, AV_LOG_ERROR, "%d sprite_warping_points\n", ctx->num_sprite_warping_points); ctx->num_sprite_warping_points = 0; return AVERROR_INVALIDDATA; } s->sprite_warping_accuracy = get_bits(gb, 2); ctx->sprite_brightness_change = get_bits1(gb); if (ctx->vol_sprite_usage == STATIC_SPRITE) skip_bits1(gb); // low_latency_sprite } // FIXME sadct disable bit if verid!=1 && shape not rect if (get_bits1(gb) == 1) { /* not_8_bit */ s->quant_precision = get_bits(gb, 4); /* quant_precision */ if (get_bits(gb, 4) != 8) /* bits_per_pixel */ av_log(s->avctx, AV_LOG_ERROR, "N-bit not supported\n"); if (s->quant_precision != 5) av_log(s->avctx, AV_LOG_ERROR, "quant precision %d\n", s->quant_precision); if (s->quant_precision<3 || s->quant_precision>9) { s->quant_precision = 5; } } else { s->quant_precision 
= 5; } // FIXME a bunch of grayscale shape things if ((s->mpeg_quant = get_bits1(gb))) { /* vol_quant_type */ int i, v; mpeg4_load_default_matrices(s); /* load custom intra matrix */ if (get_bits1(gb)) { int last = 0; for (i = 0; i < 64; i++) { int j; if (get_bits_left(gb) < 8) { av_log(s->avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n"); return AVERROR_INVALIDDATA; } v = get_bits(gb, 8); if (v == 0) break; last = v; j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->intra_matrix[j] = last; s->chroma_intra_matrix[j] = last; } /* replicate last value */ for (; i < 64; i++) { int j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->intra_matrix[j] = last; s->chroma_intra_matrix[j] = last; } } /* load custom non intra matrix */ if (get_bits1(gb)) { int last = 0; for (i = 0; i < 64; i++) { int j; if (get_bits_left(gb) < 8) { av_log(s->avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n"); return AVERROR_INVALIDDATA; } v = get_bits(gb, 8); if (v == 0) break; last = v; j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->inter_matrix[j] = v; s->chroma_inter_matrix[j] = v; } /* replicate last value */ for (; i < 64; i++) { int j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->inter_matrix[j] = last; s->chroma_inter_matrix[j] = last; } } // FIXME a bunch of grayscale shape things } if (vo_ver_id != 1) s->quarter_sample = get_bits1(gb); else s->quarter_sample = 0; if (get_bits_left(gb) < 4) { av_log(s->avctx, AV_LOG_ERROR, "VOL Header truncated\n"); return AVERROR_INVALIDDATA; } if (!get_bits1(gb)) { int pos = get_bits_count(gb); int estimation_method = get_bits(gb, 2); if (estimation_method < 2) { if (!get_bits1(gb)) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* opaque */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* transparent */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_cae */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* inter_cae */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* no_update */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* upsampling */ } if (!get_bits1(gb)) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* intra_blocks */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter_blocks */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* inter4v_blocks */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* not coded blocks */ } if (!check_marker(s->avctx, gb, "in complexity estimation part 1")) { skip_bits_long(gb, pos - get_bits_count(gb)); goto no_cplx_est; } if (!get_bits1(gb)) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_coeffs */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* dct_lines */ ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* vlc_syms */ ctx->cplx_estimation_trash_i += 4 * get_bits1(gb); /* vlc_bits */ } if (!get_bits1(gb)) { ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* apm */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* npm */ ctx->cplx_estimation_trash_b += 8 * get_bits1(gb); /* interpolate_mc_q */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* forwback_mc_q */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel2 */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* halfpel4 */ } if (!check_marker(s->avctx, gb, "in complexity estimation part 2")) { skip_bits_long(gb, pos - get_bits_count(gb)); goto no_cplx_est; } if (estimation_method == 1) { ctx->cplx_estimation_trash_i += 8 * get_bits1(gb); /* sadct */ ctx->cplx_estimation_trash_p += 8 * get_bits1(gb); /* qpel */ } } else av_log(s->avctx, 
AV_LOG_ERROR, "Invalid Complexity estimation method %d\n", estimation_method); } else { no_cplx_est: ctx->cplx_estimation_trash_i = ctx->cplx_estimation_trash_p = ctx->cplx_estimation_trash_b = 0; } ctx->resync_marker = !get_bits1(gb); /* resync_marker_disabled */ s->data_partitioning = get_bits1(gb); if (s->data_partitioning) ctx->rvlc = get_bits1(gb); if (vo_ver_id != 1) { ctx->new_pred = get_bits1(gb); if (ctx->new_pred) { av_log(s->avctx, AV_LOG_ERROR, "new pred not supported\n"); skip_bits(gb, 2); /* requested upstream message type */ skip_bits1(gb); /* newpred segment type */ } if (get_bits1(gb)) // reduced_res_vop av_log(s->avctx, AV_LOG_ERROR, "reduced resolution VOP not supported\n"); } else { ctx->new_pred = 0; } ctx->scalability = get_bits1(gb); if (ctx->scalability) { GetBitContext bak = *gb; int h_sampling_factor_n; int h_sampling_factor_m; int v_sampling_factor_n; int v_sampling_factor_m; skip_bits1(gb); // hierarchy_type skip_bits(gb, 4); /* ref_layer_id */ skip_bits1(gb); /* ref_layer_sampling_dir */ h_sampling_factor_n = get_bits(gb, 5); h_sampling_factor_m = get_bits(gb, 5); v_sampling_factor_n = get_bits(gb, 5); v_sampling_factor_m = get_bits(gb, 5); ctx->enhancement_type = get_bits1(gb); if (h_sampling_factor_n == 0 || h_sampling_factor_m == 0 || v_sampling_factor_n == 0 || v_sampling_factor_m == 0) { /* illegal scalability header (VERY broken encoder), * trying to workaround */ ctx->scalability = 0; *gb = bak; } else av_log(s->avctx, AV_LOG_ERROR, "scalability not supported\n"); // bin shape stuff FIXME } } if (s->avctx->debug&FF_DEBUG_PICT_INFO) { av_log(s->avctx, AV_LOG_DEBUG, "tb %d/%d, tincrbits:%d, qp_prec:%d, ps:%d, low_delay:%d %s%s%s%s\n", s->avctx->framerate.den, s->avctx->framerate.num, ctx->time_increment_bits, s->quant_precision, s->progressive_sequence, s->low_delay, ctx->scalability ? "scalability " :"" , s->quarter_sample ? "qpel " : "", s->data_partitioning ? "partition " : "", ctx->rvlc ? "rvlc " : "" ); } return 0; } /** * Decode the user data stuff in the header. * Also initializes divx/xvid/lavc_version/build. 
*/ static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; char buf[256]; int i; int e; int ver = 0, build = 0, ver2 = 0, ver3 = 0; char last; for (i = 0; i < 255 && get_bits_count(gb) < gb->size_in_bits; i++) { if (show_bits(gb, 23) == 0) break; buf[i] = get_bits(gb, 8); } buf[i] = 0; /* divx detection */ e = sscanf(buf, "DivX%dBuild%d%c", &ver, &build, &last); if (e < 2) e = sscanf(buf, "DivX%db%d%c", &ver, &build, &last); if (e >= 2) { ctx->divx_version = ver; ctx->divx_build = build; s->divx_packed = e == 3 && last == 'p'; } /* libavcodec detection */ e = sscanf(buf, "FFmpe%*[^b]b%d", &build) + 3; if (e != 4) e = sscanf(buf, "FFmpeg v%d.%d.%d / libavcodec build: %d", &ver, &ver2, &ver3, &build); if (e != 4) { e = sscanf(buf, "Lavc%d.%d.%d", &ver, &ver2, &ver3) + 1; if (e > 1) { if (ver > 0xFFU || ver2 > 0xFFU || ver3 > 0xFFU) { av_log(s->avctx, AV_LOG_WARNING, "Unknown Lavc version string encountered, %d.%d.%d; " "clamping sub-version values to 8-bits.\n", ver, ver2, ver3); } build = ((ver & 0xFF) << 16) + ((ver2 & 0xFF) << 8) + (ver3 & 0xFF); } } if (e != 4) { if (strcmp(buf, "ffmpeg") == 0) ctx->lavc_build = 4600; } if (e == 4) ctx->lavc_build = build; /* Xvid detection */ e = sscanf(buf, "XviD%d", &build); if (e == 1) ctx->xvid_build = build; return 0; } int ff_mpeg4_workaround_bugs(AVCodecContext *avctx) { Mpeg4DecContext *ctx = avctx->priv_data; MpegEncContext *s = &ctx->m; if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) { if (s->codec_tag == AV_RL32("XVID") || s->codec_tag == AV_RL32("XVIX") || s->codec_tag == AV_RL32("RMP4") || s->codec_tag == AV_RL32("ZMP4") || s->codec_tag == AV_RL32("SIPP")) ctx->xvid_build = 0; } if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) if (s->codec_tag == AV_RL32("DIVX") && s->vo_type == 0 && ctx->vol_control_parameters == 0) ctx->divx_version = 400; // divx 4 if (ctx->xvid_build >= 0 && ctx->divx_version >= 0) { ctx->divx_version = ctx->divx_build = -1; } if (s->workaround_bugs & FF_BUG_AUTODETECT) { if (s->codec_tag == AV_RL32("XVIX")) s->workaround_bugs |= FF_BUG_XVID_ILACE; if (s->codec_tag == AV_RL32("UMP4")) s->workaround_bugs |= FF_BUG_UMP4; if (ctx->divx_version >= 500 && ctx->divx_build < 1814) s->workaround_bugs |= FF_BUG_QPEL_CHROMA; if (ctx->divx_version > 502 && ctx->divx_build < 1814) s->workaround_bugs |= FF_BUG_QPEL_CHROMA2; if (ctx->xvid_build <= 3U) s->padding_bug_score = 256 * 256 * 256 * 64; if (ctx->xvid_build <= 1U) s->workaround_bugs |= FF_BUG_QPEL_CHROMA; if (ctx->xvid_build <= 12U) s->workaround_bugs |= FF_BUG_EDGE; if (ctx->xvid_build <= 32U) s->workaround_bugs |= FF_BUG_DC_CLIP; #define SET_QPEL_FUNC(postfix1, postfix2) \ s->qdsp.put_ ## postfix1 = ff_put_ ## postfix2; \ s->qdsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2; \ s->qdsp.avg_ ## postfix1 = ff_avg_ ## postfix2; if (ctx->lavc_build < 4653U) s->workaround_bugs |= FF_BUG_STD_QPEL; if (ctx->lavc_build < 4655U) s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE; if (ctx->lavc_build < 4670U) s->workaround_bugs |= FF_BUG_EDGE; if (ctx->lavc_build <= 4712U) s->workaround_bugs |= FF_BUG_DC_CLIP; if ((ctx->lavc_build&0xFF) >= 100) { if (ctx->lavc_build > 3621476 && ctx->lavc_build < 3752552 && (ctx->lavc_build < 3752037 || ctx->lavc_build > 3752191) // 3.2.1+ ) s->workaround_bugs |= FF_BUG_IEDGE; } if (ctx->divx_version >= 0) s->workaround_bugs |= FF_BUG_DIRECT_BLOCKSIZE; if (ctx->divx_version == 501 && ctx->divx_build == 20020416) s->padding_bug_score = 256 * 256 
* 256 * 64; if (ctx->divx_version < 500U) s->workaround_bugs |= FF_BUG_EDGE; if (ctx->divx_version >= 0) s->workaround_bugs |= FF_BUG_HPEL_CHROMA; } if (s->workaround_bugs & FF_BUG_STD_QPEL) { SET_QPEL_FUNC(qpel_pixels_tab[0][5], qpel16_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][7], qpel16_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][9], qpel16_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][5], qpel8_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][7], qpel8_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][9], qpel8_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c) } if (avctx->debug & FF_DEBUG_BUGS) av_log(s->avctx, AV_LOG_DEBUG, "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n", s->workaround_bugs, ctx->lavc_build, ctx->xvid_build, ctx->divx_version, ctx->divx_build, s->divx_packed ? "p" : ""); if (CONFIG_MPEG4_DECODER && ctx->xvid_build >= 0 && s->codec_id == AV_CODEC_ID_MPEG4 && avctx->idct_algo == FF_IDCT_AUTO) { avctx->idct_algo = FF_IDCT_XVID; ff_mpv_idct_init(s); return 1; } return 0; } static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int time_incr, time_increment; int64_t pts; s->mcsel = 0; s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* pict type: I = 0 , P = 1 */ if (s->pict_type == AV_PICTURE_TYPE_B && s->low_delay && ctx->vol_control_parameters == 0 && !(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)) { av_log(s->avctx, AV_LOG_ERROR, "low_delay flag set incorrectly, clearing it\n"); s->low_delay = 0; } s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B; if (s->partitioned_frame) s->decode_mb = mpeg4_decode_partitioned_mb; else s->decode_mb = mpeg4_decode_mb; time_incr = 0; while (get_bits1(gb) != 0) time_incr++; check_marker(s->avctx, gb, "before time_increment"); if (ctx->time_increment_bits == 0 || !(show_bits(gb, ctx->time_increment_bits + 1) & 1)) { av_log(s->avctx, AV_LOG_WARNING, "time_increment_bits %d is invalid in relation to the current bitstream, this is likely caused by a missing VOL header\n", ctx->time_increment_bits); for (ctx->time_increment_bits = 1; ctx->time_increment_bits < 16; ctx->time_increment_bits++) { if (s->pict_type == AV_PICTURE_TYPE_P || (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE)) { if ((show_bits(gb, ctx->time_increment_bits + 6) & 0x37) == 0x30) break; } else if ((show_bits(gb, ctx->time_increment_bits + 5) & 0x1F) == 0x18) break; } av_log(s->avctx, AV_LOG_WARNING, "time_increment_bits set to %d bits, based on bitstream analysis\n", ctx->time_increment_bits); if (s->avctx->framerate.num && 4*s->avctx->framerate.num < 1<<ctx->time_increment_bits) { s->avctx->framerate.num = 1<<ctx->time_increment_bits; s->avctx->time_base = av_inv_q(av_mul_q(s->avctx->framerate, (AVRational){s->avctx->ticks_per_frame, 1})); } } if (IS_3IV1) time_increment = get_bits1(gb); // FIXME investigate further else time_increment = get_bits(gb, ctx->time_increment_bits); if (s->pict_type != AV_PICTURE_TYPE_B) { s->last_time_base = s->time_base; s->time_base += time_incr; s->time = s->time_base * (int64_t)s->avctx->framerate.num + time_increment; if (s->workaround_bugs & FF_BUG_UMP4) { if (s->time < s->last_non_b_time) { /* header is not 
mpeg-4-compatible, broken encoder, * trying to workaround */ s->time_base++; s->time += s->avctx->framerate.num; } } s->pp_time = s->time - s->last_non_b_time; s->last_non_b_time = s->time; } else { s->time = (s->last_time_base + time_incr) * (int64_t)s->avctx->framerate.num + time_increment; s->pb_time = s->pp_time - (s->last_non_b_time - s->time); if (s->pp_time <= s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time <= 0) { /* messed up order, maybe after seeking? skipping current B-frame */ return FRAME_SKIPPED; } ff_mpeg4_init_direct_mv(s); if (ctx->t_frame == 0) ctx->t_frame = s->pb_time; if (ctx->t_frame == 0) ctx->t_frame = 1; // 1/0 protection s->pp_field_time = (ROUNDED_DIV(s->last_non_b_time, ctx->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2; s->pb_field_time = (ROUNDED_DIV(s->time, ctx->t_frame) - ROUNDED_DIV(s->last_non_b_time - s->pp_time, ctx->t_frame)) * 2; if (s->pp_field_time <= s->pb_field_time || s->pb_field_time <= 1) { s->pb_field_time = 2; s->pp_field_time = 4; if (!s->progressive_sequence) return FRAME_SKIPPED; } } if (s->avctx->framerate.den) pts = ROUNDED_DIV(s->time, s->avctx->framerate.den); else pts = AV_NOPTS_VALUE; ff_dlog(s->avctx, "MPEG4 PTS: %"PRId64"\n", pts); check_marker(s->avctx, gb, "before vop_coded"); /* vop coded */ if (get_bits1(gb) != 1) { if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n"); return FRAME_SKIPPED; } if (ctx->new_pred) decode_new_pred(ctx, gb); if (ctx->shape != BIN_ONLY_SHAPE && (s->pict_type == AV_PICTURE_TYPE_P || (s->pict_type == AV_PICTURE_TYPE_S && ctx->vol_sprite_usage == GMC_SPRITE))) { /* rounding type for motion estimation */ s->no_rounding = get_bits1(gb); } else { s->no_rounding = 0; } // FIXME reduced res stuff if (ctx->shape != RECT_SHAPE) { if (ctx->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) { skip_bits(gb, 13); /* width */ check_marker(s->avctx, gb, "after width"); skip_bits(gb, 13); /* height */ check_marker(s->avctx, gb, "after height"); skip_bits(gb, 13); /* hor_spat_ref */ check_marker(s->avctx, gb, "after hor_spat_ref"); skip_bits(gb, 13); /* ver_spat_ref */ } skip_bits1(gb); /* change_CR_disable */ if (get_bits1(gb) != 0) skip_bits(gb, 8); /* constant_alpha_value */ } // FIXME complexity estimation stuff if (ctx->shape != BIN_ONLY_SHAPE) { skip_bits_long(gb, ctx->cplx_estimation_trash_i); if (s->pict_type != AV_PICTURE_TYPE_I) skip_bits_long(gb, ctx->cplx_estimation_trash_p); if (s->pict_type == AV_PICTURE_TYPE_B) skip_bits_long(gb, ctx->cplx_estimation_trash_b); if (get_bits_left(gb) < 3) { av_log(s->avctx, AV_LOG_ERROR, "Header truncated\n"); return AVERROR_INVALIDDATA; } ctx->intra_dc_threshold = ff_mpeg4_dc_threshold[get_bits(gb, 3)]; if (!s->progressive_sequence) { s->top_field_first = get_bits1(gb); s->alternate_scan = get_bits1(gb); } else s->alternate_scan = 0; } if (s->alternate_scan) { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } else { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, 
ff_alternate_horizontal_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } if (s->pict_type == AV_PICTURE_TYPE_S) { if((ctx->vol_sprite_usage == STATIC_SPRITE || ctx->vol_sprite_usage == GMC_SPRITE)) { if (mpeg4_decode_sprite_trajectory(ctx, gb) < 0) return AVERROR_INVALIDDATA; if (ctx->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, "sprite_brightness_change not supported\n"); if (ctx->vol_sprite_usage == STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n"); } else { memset(s->sprite_offset, 0, sizeof(s->sprite_offset)); memset(s->sprite_delta, 0, sizeof(s->sprite_delta)); } } if (ctx->shape != BIN_ONLY_SHAPE) { s->chroma_qscale = s->qscale = get_bits(gb, s->quant_precision); if (s->qscale == 0) { av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG-4 header (qscale=0)\n"); return AVERROR_INVALIDDATA; // makes no sense to continue, as there is nothing left from the image then } if (s->pict_type != AV_PICTURE_TYPE_I) { s->f_code = get_bits(gb, 3); /* fcode_for */ if (s->f_code == 0) { av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG-4 header (f_code=0)\n"); s->f_code = 1; return AVERROR_INVALIDDATA; // makes no sense to continue, as there is nothing left from the image then } } else s->f_code = 1; if (s->pict_type == AV_PICTURE_TYPE_B) { s->b_code = get_bits(gb, 3); if (s->b_code == 0) { av_log(s->avctx, AV_LOG_ERROR, "Error, header damaged or not MPEG4 header (b_code=0)\n"); s->b_code=1; return AVERROR_INVALIDDATA; // makes no sense to continue, as the MV decoding will break very quickly } } else s->b_code = 1; if (s->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%d,%d %s size:%d pro:%d alt:%d top:%d %spel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%"PRId64" tincr:%d\n", s->qscale, s->f_code, s->b_code, s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")), gb->size_in_bits,s->progressive_sequence, s->alternate_scan, s->top_field_first, s->quarter_sample ? "q" : "h", s->data_partitioning, ctx->resync_marker, ctx->num_sprite_warping_points, s->sprite_warping_accuracy, 1 - s->no_rounding, s->vo_type, ctx->vol_control_parameters ? " VOLC" : " ", ctx->intra_dc_threshold, ctx->cplx_estimation_trash_i, ctx->cplx_estimation_trash_p, ctx->cplx_estimation_trash_b, s->time, time_increment ); } if (!ctx->scalability) { if (ctx->shape != RECT_SHAPE && s->pict_type != AV_PICTURE_TYPE_I) skip_bits1(gb); // vop shape coding type } else { if (ctx->enhancement_type) { int load_backward_shape = get_bits1(gb); if (load_backward_shape) av_log(s->avctx, AV_LOG_ERROR, "load backward shape isn't supported\n"); } skip_bits(gb, 2); // ref_select_code } } /* detect buggy encoders which don't set the low_delay flag * (divx4/xvid/opendivx). 
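 * Without the flag the decoder would add a one-frame delay to allow for
 * B-frame reordering that never happens in such streams.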
Note we cannot detect divx5 without B-frames * easily (although it's buggy too) */ if (s->vo_type == 0 && ctx->vol_control_parameters == 0 && ctx->divx_version == -1 && s->picture_number == 0) { av_log(s->avctx, AV_LOG_WARNING, "looks like this file was encoded with (divx4/(old)xvid/opendivx) -> forcing low_delay flag\n"); s->low_delay = 1; } s->picture_number++; // better than pic number==0 always ;) // FIXME add short header support s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table; s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table; if (s->workaround_bugs & FF_BUG_EDGE) { s->h_edge_pos = s->width; s->v_edge_pos = s->height; } return 0; } static void read_quant_matrix_ext(MpegEncContext *s, GetBitContext *gb) { int i, j, v; if (get_bits1(gb)) { /* intra_quantiser_matrix */ for (i = 0; i < 64; i++) { v = get_bits(gb, 8); j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; } } if (get_bits1(gb)) { /* non_intra_quantiser_matrix */ for (i = 0; i < 64; i++) { get_bits(gb, 8); } } if (get_bits1(gb)) { /* chroma_intra_quantiser_matrix */ for (i = 0; i < 64; i++) { v = get_bits(gb, 8); j = s->idsp.idct_permutation[ff_zigzag_direct[i]]; s->chroma_intra_matrix[j] = v; } } if (get_bits1(gb)) { /* chroma_non_intra_quantiser_matrix */ for (i = 0; i < 64; i++) { get_bits(gb, 8); } } next_start_code_studio(gb); } static void extension_and_user_data(MpegEncContext *s, GetBitContext *gb, int id) { uint32_t startcode; uint8_t extension_type; startcode = show_bits_long(gb, 32); if (startcode == USER_DATA_STARTCODE || startcode == EXT_STARTCODE) { if ((id == 2 || id == 4) && startcode == EXT_STARTCODE) { skip_bits_long(gb, 32); extension_type = get_bits(gb, 4); if (extension_type == QUANT_MATRIX_EXT_ID) read_quant_matrix_ext(s, gb); } } } static void decode_smpte_tc(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; skip_bits(gb, 16); /* Time_code[63..48] */ check_marker(s->avctx, gb, "after Time_code[63..48]"); skip_bits(gb, 16); /* Time_code[47..32] */ check_marker(s->avctx, gb, "after Time_code[47..32]"); skip_bits(gb, 16); /* Time_code[31..16] */ check_marker(s->avctx, gb, "after Time_code[31..16]"); skip_bits(gb, 16); /* Time_code[15..0] */ check_marker(s->avctx, gb, "after Time_code[15..0]"); skip_bits(gb, 4); /* reserved_bits */ } /** * Decode the next studio vop header. 
* @return <0 if something went wrong */ static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; if (get_bits_left(gb) <= 32) return 0; s->decode_mb = mpeg4_decode_studio_mb; decode_smpte_tc(ctx, gb); skip_bits(gb, 10); /* temporal_reference */ skip_bits(gb, 2); /* vop_structure */ s->pict_type = get_bits(gb, 2) + AV_PICTURE_TYPE_I; /* vop_coding_type */ if (get_bits1(gb)) { /* vop_coded */ skip_bits1(gb); /* top_field_first */ skip_bits1(gb); /* repeat_first_field */ s->progressive_frame = get_bits1(gb) ^ 1; /* progressive_frame */ } if (s->pict_type == AV_PICTURE_TYPE_I) { if (get_bits1(gb)) reset_studio_dc_predictors(s); } if (ctx->shape != BIN_ONLY_SHAPE) { s->alternate_scan = get_bits1(gb); s->frame_pred_frame_dct = get_bits1(gb); s->dct_precision = get_bits(gb, 2); s->intra_dc_precision = get_bits(gb, 2); s->q_scale_type = get_bits1(gb); } if (s->alternate_scan) { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_vertical_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } else { ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct); ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } mpeg4_load_default_matrices(s); next_start_code_studio(gb); extension_and_user_data(s, gb, 4); return 0; } static int decode_studiovisualobject(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int visual_object_type; skip_bits(gb, 4); /* visual_object_verid */ visual_object_type = get_bits(gb, 4); if (visual_object_type != VOT_VIDEO_ID) { avpriv_request_sample(s->avctx, "VO type %u", visual_object_type); return AVERROR_PATCHWELCOME; } next_start_code_studio(gb); extension_and_user_data(s, gb, 1); return 0; } static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; int width, height; int bits_per_raw_sample; // random_accessible_vol and video_object_type_indication have already // been read by the caller decode_vol_header() skip_bits(gb, 4); /* video_object_layer_verid */ ctx->shape = get_bits(gb, 2); /* video_object_layer_shape */ skip_bits(gb, 4); /* video_object_layer_shape_extension */ skip_bits1(gb); /* progressive_sequence */ if (ctx->shape != BIN_ONLY_SHAPE) { ctx->rgb = get_bits1(gb); /* rgb_components */ s->chroma_format = get_bits(gb, 2); /* chroma_format */ if (!s->chroma_format) { av_log(s->avctx, AV_LOG_ERROR, "illegal chroma format\n"); return AVERROR_INVALIDDATA; } bits_per_raw_sample = get_bits(gb, 4); /* bit_depth */ if (bits_per_raw_sample == 10) { if (ctx->rgb) { s->avctx->pix_fmt = AV_PIX_FMT_GBRP10; } else { s->avctx->pix_fmt = s->chroma_format == CHROMA_422 ? 
AV_PIX_FMT_YUV422P10 : AV_PIX_FMT_YUV444P10; } } else { avpriv_request_sample(s->avctx, "MPEG-4 Studio profile bit-depth %u", bits_per_raw_sample); return AVERROR_PATCHWELCOME; } s->avctx->bits_per_raw_sample = bits_per_raw_sample; } if (ctx->shape == RECT_SHAPE) { check_marker(s->avctx, gb, "before video_object_layer_width"); width = get_bits(gb, 14); /* video_object_layer_width */ check_marker(s->avctx, gb, "before video_object_layer_height"); height = get_bits(gb, 14); /* video_object_layer_height */ check_marker(s->avctx, gb, "after video_object_layer_height"); /* Do the same check as non-studio profile */ if (width && height) { if (s->width && s->height && (s->width != width || s->height != height)) s->context_reinit = 1; s->width = width; s->height = height; } } s->aspect_ratio_info = get_bits(gb, 4); if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) { s->avctx->sample_aspect_ratio.num = get_bits(gb, 8); // par_width s->avctx->sample_aspect_ratio.den = get_bits(gb, 8); // par_height } else { s->avctx->sample_aspect_ratio = ff_h263_pixel_aspect[s->aspect_ratio_info]; } skip_bits(gb, 4); /* frame_rate_code */ skip_bits(gb, 15); /* first_half_bit_rate */ check_marker(s->avctx, gb, "after first_half_bit_rate"); skip_bits(gb, 15); /* latter_half_bit_rate */ check_marker(s->avctx, gb, "after latter_half_bit_rate"); skip_bits(gb, 15); /* first_half_vbv_buffer_size */ check_marker(s->avctx, gb, "after first_half_vbv_buffer_size"); skip_bits(gb, 3); /* latter_half_vbv_buffer_size */ skip_bits(gb, 11); /* first_half_vbv_occupancy */ check_marker(s->avctx, gb, "after first_half_vbv_occupancy"); skip_bits(gb, 15); /* latter_half_vbv_occupancy */ check_marker(s->avctx, gb, "after latter_half_vbv_occupancy"); s->low_delay = get_bits1(gb); s->mpeg_quant = get_bits1(gb); /* mpeg2_stream */ next_start_code_studio(gb); extension_and_user_data(s, gb, 2); return 0; } /** * Decode MPEG-4 headers. 
* @return <0 if no VOP found (or a damaged one) * FRAME_SKIPPED if a non-coded VOP is found * 0 if a VOP is found */ int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; unsigned startcode, v; int ret; int vol = 0; /* search next start code */ align_get_bits(gb); // If we have not switched to the studio profile, then we also did not switch bps; // that means something else (like a previous instance) set bps from outside, which // would be inconsistent with the current state, so reset it if (!s->studio_profile && s->avctx->bits_per_raw_sample != 8) s->avctx->bits_per_raw_sample = 0; if (s->codec_tag == AV_RL32("WV1F") && show_bits(gb, 24) == 0x575630) { skip_bits(gb, 24); if (get_bits(gb, 8) == 0xF0) goto end; } startcode = 0xff; for (;;) { if (get_bits_count(gb) >= gb->size_in_bits) { if (gb->size_in_bits == 8 && (ctx->divx_version >= 0 || ctx->xvid_build >= 0) || s->codec_tag == AV_RL32("QMP4")) { av_log(s->avctx, AV_LOG_VERBOSE, "frame skip %d\n", gb->size_in_bits); return FRAME_SKIPPED; // divx bug } else return AVERROR_INVALIDDATA; // end of stream } /* use the bits after the test */ v = get_bits(gb, 8); startcode = ((startcode << 8) | v) & 0xffffffff; if ((startcode & 0xFFFFFF00) != 0x100) continue; // no startcode if (s->avctx->debug & FF_DEBUG_STARTCODE) { av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode); if (startcode <= 0x11F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start"); else if (startcode <= 0x12F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start"); else if (startcode <= 0x13F) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if (startcode <= 0x15F) av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start"); else if (startcode <= 0x1AF) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if (startcode == 0x1B0) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start"); else if (startcode == 0x1B1) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End"); else if (startcode == 0x1B2) av_log(s->avctx, AV_LOG_DEBUG, "User Data"); else if (startcode == 0x1B3) av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start"); else if (startcode == 0x1B4) av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error"); else if (startcode == 0x1B5) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start"); else if (startcode == 0x1B6) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start"); else if (startcode == 0x1B7) av_log(s->avctx, AV_LOG_DEBUG, "slice start"); else if (startcode == 0x1B8) av_log(s->avctx, AV_LOG_DEBUG, "extension start"); else if (startcode == 0x1B9) av_log(s->avctx, AV_LOG_DEBUG, "fgs start"); else if (startcode == 0x1BA) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start"); else if (startcode == 0x1BB) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start"); else if (startcode == 0x1BC) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start"); else if (startcode == 0x1BD) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start"); else if (startcode == 0x1BE) av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start"); else if (startcode == 0x1BF) av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start"); else if (startcode == 0x1C0) av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start"); else if (startcode == 0x1C1) av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start"); else if (startcode == 0x1C2) av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start"); else if (startcode == 0x1C3) av_log(s->avctx, AV_LOG_DEBUG, "stuffing start"); else if (startcode <= 0x1C5) av_log(s->avctx, AV_LOG_DEBUG, "reserved"); else if (startcode <= 
0x1FF) av_log(s->avctx, AV_LOG_DEBUG, "System start"); av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb)); } if (startcode >= 0x120 && startcode <= 0x12F) { if (vol) { av_log(s->avctx, AV_LOG_WARNING, "Ignoring multiple VOL headers\n"); continue; } vol++; if ((ret = decode_vol_header(ctx, gb)) < 0) return ret; } else if (startcode == USER_DATA_STARTCODE) { decode_user_data(ctx, gb); } else if (startcode == GOP_STARTCODE) { mpeg4_decode_gop_header(s, gb); } else if (startcode == VOS_STARTCODE) { int profile, level; mpeg4_decode_profile_level(s, gb, &profile, &level); if (profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO && (level > 0 && level < 9)) { s->studio_profile = 1; next_start_code_studio(gb); extension_and_user_data(s, gb, 0); } else if (s->studio_profile) { avpriv_request_sample(s->avctx, "Mixes studio and non studio profile\n"); return AVERROR_PATCHWELCOME; } s->avctx->profile = profile; s->avctx->level = level; } else if (startcode == VISUAL_OBJ_STARTCODE) { if (s->studio_profile) { if ((ret = decode_studiovisualobject(ctx, gb)) < 0) return ret; } else mpeg4_decode_visual_object(s, gb); } else if (startcode == VOP_STARTCODE) { break; } align_get_bits(gb); startcode = 0xff; } end: if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) s->low_delay = 1; s->avctx->has_b_frames = !s->low_delay; if (s->studio_profile) { av_assert0(s->avctx->profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO); if (!s->avctx->bits_per_raw_sample) { av_log(s->avctx, AV_LOG_ERROR, "Missing VOL header\n"); return AVERROR_INVALIDDATA; } return decode_studio_vop_header(ctx, gb); } else return decode_vop_header(ctx, gb); } av_cold void ff_mpeg4videodec_static_init(void) { static int done = 0; if (!done) { ff_rl_init(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]); ff_rl_init(&ff_rvlc_rl_inter, ff_mpeg4_static_rl_table_store[1]); ff_rl_init(&ff_rvlc_rl_intra, ff_mpeg4_static_rl_table_store[2]); INIT_VLC_RL(ff_mpeg4_rl_intra, 554); INIT_VLC_RL(ff_rvlc_rl_inter, 1072); INIT_VLC_RL(ff_rvlc_rl_intra, 1072); INIT_VLC_STATIC(&dc_lum, DC_VLC_BITS, 10 /* 13 */, &ff_mpeg4_DCtab_lum[0][1], 2, 1, &ff_mpeg4_DCtab_lum[0][0], 2, 1, 512); INIT_VLC_STATIC(&dc_chrom, DC_VLC_BITS, 10 /* 13 */, &ff_mpeg4_DCtab_chrom[0][1], 2, 1, &ff_mpeg4_DCtab_chrom[0][0], 2, 1, 512); INIT_VLC_STATIC(&sprite_trajectory, SPRITE_TRAJ_VLC_BITS, 15, &ff_sprite_trajectory_tab[0][1], 4, 2, &ff_sprite_trajectory_tab[0][0], 4, 2, 128); INIT_VLC_STATIC(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4, &ff_mb_type_b_tab[0][1], 2, 1, &ff_mb_type_b_tab[0][0], 2, 1, 16); done = 1; } } int ff_mpeg4_frame_end(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { Mpeg4DecContext *ctx = avctx->priv_data; MpegEncContext *s = &ctx->m; /* divx 5.01+ bitstream reorder stuff */ /* Since this clobbers the input buffer and hwaccel codecs still need the * data during hwaccel->end_frame we should not do this any earlier */ if (s->divx_packed) { int current_pos = s->gb.buffer == s->bitstream_buffer ? 0 : (get_bits_count(&s->gb) >> 3); int startcode_found = 0; if (buf_size - current_pos > 7) { int i; for (i = current_pos; i < buf_size - 4; i++) if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1 && buf[i + 3] == 0xB6) { startcode_found = !(buf[i + 4] & 0x40); break; } } if (startcode_found) { if (!ctx->showed_packed_warning) { av_log(s->avctx, AV_LOG_INFO, "Video uses a non-standard and " "wasteful way to store B-frames ('packed B-frames'). 
" "Consider using the mpeg4_unpack_bframes bitstream filter without encoding but stream copy to fix it.\n"); ctx->showed_packed_warning = 1; } av_fast_padded_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, buf_size - current_pos); if (!s->bitstream_buffer) { s->bitstream_buffer_size = 0; return AVERROR(ENOMEM); } memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos); s->bitstream_buffer_size = buf_size - current_pos; } } return 0; } #if HAVE_THREADS static int mpeg4_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) { Mpeg4DecContext *s = dst->priv_data; const Mpeg4DecContext *s1 = src->priv_data; int init = s->m.context_initialized; int ret = ff_mpeg_update_thread_context(dst, src); if (ret < 0) return ret; memcpy(((uint8_t*)s) + sizeof(MpegEncContext), ((uint8_t*)s1) + sizeof(MpegEncContext), sizeof(Mpeg4DecContext) - sizeof(MpegEncContext)); if (CONFIG_MPEG4_DECODER && !init && s1->xvid_build >= 0) ff_xvid_idct_init(&s->m.idsp, dst); return 0; } #endif static av_cold int init_studio_vlcs(Mpeg4DecContext *ctx) { int i, ret; for (i = 0; i < 12; i++) { ret = init_vlc(&ctx->studio_intra_tab[i], STUDIO_INTRA_BITS, 22, &ff_mpeg4_studio_intra[i][0][1], 4, 2, &ff_mpeg4_studio_intra[i][0][0], 4, 2, 0); if (ret < 0) return ret; } ret = init_vlc(&ctx->studio_luma_dc, STUDIO_INTRA_BITS, 19, &ff_mpeg4_studio_dc_luma[0][1], 4, 2, &ff_mpeg4_studio_dc_luma[0][0], 4, 2, 0); if (ret < 0) return ret; ret = init_vlc(&ctx->studio_chroma_dc, STUDIO_INTRA_BITS, 19, &ff_mpeg4_studio_dc_chroma[0][1], 4, 2, &ff_mpeg4_studio_dc_chroma[0][0], 4, 2, 0); if (ret < 0) return ret; return 0; } static av_cold int decode_init(AVCodecContext *avctx) { Mpeg4DecContext *ctx = avctx->priv_data; MpegEncContext *s = &ctx->m; int ret; ctx->divx_version = ctx->divx_build = ctx->xvid_build = ctx->lavc_build = -1; if ((ret = ff_h263_decode_init(avctx)) < 0) return ret; ff_mpeg4videodec_static_init(); if ((ret = init_studio_vlcs(ctx)) < 0) return ret; s->h263_pred = 1; s->low_delay = 0; /* default, might be overridden in the vol header during header parsing */ s->decode_mb = mpeg4_decode_mb; ctx->time_increment_bits = 4; /* default value for broken headers */ avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; avctx->internal->allocate_progress = 1; return 0; } static av_cold int decode_end(AVCodecContext *avctx) { Mpeg4DecContext *ctx = avctx->priv_data; int i; if (!avctx->internal->is_copy) { for (i = 0; i < 12; i++) ff_free_vlc(&ctx->studio_intra_tab[i]); ff_free_vlc(&ctx->studio_luma_dc); ff_free_vlc(&ctx->studio_chroma_dc); } return ff_h263_decode_end(avctx); } static const AVOption mpeg4_options[] = { {"quarter_sample", "1/4 subpel MC", offsetof(MpegEncContext, quarter_sample), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0}, {"divx_packed", "divx style packed b frames", offsetof(MpegEncContext, divx_packed), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0}, {NULL} }; static const AVClass mpeg4_class = { .class_name = "MPEG4 Video Decoder", .item_name = av_default_item_name, .option = mpeg4_options, .version = LIBAVUTIL_VERSION_INT, }; AVCodec ff_mpeg4_decoder = { .name = "mpeg4", .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_MPEG4, .priv_data_size = sizeof(Mpeg4DecContext), .init = decode_init, .close = decode_end, .decode = ff_h263_decode_frame, .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_FRAME_THREADS, .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, 
.flush = ff_mpeg_flush, .max_lowres = 3, .pix_fmts = ff_h263_hwaccel_pixfmt_list_420, .profiles = NULL_IF_CONFIG_SMALL(ff_mpeg4_video_profiles), .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg4_update_thread_context), .priv_class = &mpeg4_class, .hw_configs = (const AVCodecHWConfigInternal*[]) { #if CONFIG_MPEG4_NVDEC_HWACCEL HWACCEL_NVDEC(mpeg4), #endif #if CONFIG_MPEG4_VAAPI_HWACCEL HWACCEL_VAAPI(mpeg4), #endif #if CONFIG_MPEG4_VDPAU_HWACCEL HWACCEL_VDPAU(mpeg4), #endif #if CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL HWACCEL_VIDEOTOOLBOX(mpeg4), #endif NULL }, };
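/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * ff_mpeg4_decode_picture_header() above locates MPEG-4 start codes by
 * shifting one byte at a time into a 32-bit register and testing whether
 * its top 24 bits equal 0x000001. The same scan, written as a standalone
 * helper and kept out of the build with "#if 0", would look like this:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

/* Return the offset of the first byte following a 00 00 01 xx start code
 * in buf[0..size), storing xx in *code_out, or -1 if none is found. */
static ptrdiff_t find_mpeg4_startcode(const uint8_t *buf, size_t size,
                                      uint8_t *code_out)
{
    uint32_t state = 0xffffffff;
    size_t i;

    for (i = 0; i < size; i++) {
        state = (state << 8) | buf[i];
        if ((state & 0xffffff00) == 0x100) {
            *code_out = (uint8_t)(state & 0xff); /* e.g. 0xb6 for a VOP */
            return (ptrdiff_t)(i + 1);
        }
    }
    return -1;
}
#endif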
./CrossVul/dataset_final_sorted/CWE-617/c/bad_219_2
crossvul-cpp_data_bad_1770_2
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #ifdef ENABLE_EDP #include <stdio.h> #include <unistd.h> #include <errno.h> #include <arpa/inet.h> #include <fnmatch.h> #include <assert.h> static int seq = 0; int edp_send(struct lldpd *global, struct lldpd_hardware *hardware) { const u_int8_t mcastaddr[] = EDP_MULTICAST_ADDR; const u_int8_t llcorg[] = LLC_ORG_EXTREME; struct lldpd_chassis *chassis; int length, i, v; u_int8_t *packet, *pos, *pos_llc, *pos_len_eh, *pos_len_edp, *pos_edp, *tlv, *end; u_int16_t checksum; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan; unsigned int state = 0; #endif u_int8_t edp_fakeversion[] = {7, 6, 4, 99}; /* Subsequent XXX entries can be replaced by other values. We place them here so that the position of "" stays roughly invariant across version changes. */ char *deviceslot[] = { "eth", "veth", "XXX", "XXX", "XXX", "XXX", "XXX", "XXX", "", NULL }; log_debug("edp", "send EDP frame on port %s", hardware->h_ifname); chassis = hardware->h_lport.p_chassis; #ifdef ENABLE_DOT1 while (state != 2) { #endif length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; v = 0; /* Ethernet header */ if (!( POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && POKE_SAVE(pos_len_eh) && /* We compute the len later */ POKE_UINT16(0))) goto toobig; /* LLC */ if (!( POKE_SAVE(pos_llc) && /* We need to save our current position to compute ethernet len */ /* SSAP and DSAP */ POKE_UINT8(0xaa) && POKE_UINT8(0xaa) && /* Control field */ POKE_UINT8(0x03) && /* ORG */ POKE_BYTES(llcorg, sizeof(llcorg)) && POKE_UINT16(LLC_PID_EDP))) goto toobig; /* EDP header */ if ((chassis->c_id_len != ETHER_ADDR_LEN) || (chassis->c_id_subtype != LLDP_CHASSISID_SUBTYPE_LLADDR)) { log_warnx("edp", "local chassis does not use MAC address as chassis ID!?"); free(packet); return EINVAL; } if (!( POKE_SAVE(pos_edp) && /* Save the start of EDP frame */ POKE_UINT8(1) && POKE_UINT8(0) && POKE_SAVE(pos_len_edp) && /* We compute the len and the checksum later */ POKE_UINT32(0) && /* Len + Checksum */ POKE_UINT16(seq) && POKE_UINT16(0) && POKE_BYTES(chassis->c_id, ETHER_ADDR_LEN))) goto toobig; seq++; #ifdef ENABLE_DOT1 switch (state) { case 0: #endif /* Display TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_DISPLAY) && POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) && POKE_UINT8(0) && /* Add a NULL character for better compatibility */ POKE_END_EDP_TLV)) goto toobig; /* Info TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_INFO))) goto toobig; /* We try to emulate the slot thing */ for (i=0; deviceslot[i] != NULL; i++) { if (strncmp(hardware->h_ifname, deviceslot[i], strlen(deviceslot[i])) == 0) { if (!( POKE_UINT16(i) && 
POKE_UINT16(atoi(hardware->h_ifname + strlen(deviceslot[i]))))) goto toobig; break; } } /* If we don't find a "slot", we say that the interface is in slot 8 */ if (deviceslot[i] == NULL) { if (!( POKE_UINT16(8) && POKE_UINT16(hardware->h_ifindex))) goto toobig; } if (!( POKE_UINT16(0) && /* vchassis */ POKE_UINT32(0) && POKE_UINT16(0) && /* Reserved */ /* Version */ POKE_BYTES(edp_fakeversion, sizeof(edp_fakeversion)) && /* Connections, we say that we won't have more interfaces than this mask. */ POKE_UINT32(0xffffffff) && POKE_UINT32(0) && POKE_UINT32(0) && POKE_UINT32(0) && POKE_END_EDP_TLV)) goto toobig; #ifdef ENABLE_DOT1 break; case 1: TAILQ_FOREACH(vlan, &hardware->h_lport.p_vlans, v_entries) { v++; if (!( POKE_START_EDP_TLV(EDP_TLV_VLAN) && POKE_UINT8(0) && /* Flags: no IP address */ POKE_UINT8(0) && /* Reserved */ POKE_UINT16(vlan->v_vid) && POKE_UINT32(0) && /* Reserved */ POKE_UINT32(0) && /* IP address */ /* VLAN name */ POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) && POKE_UINT8(0) && POKE_END_EDP_TLV)) goto toobig; } break; } if ((state == 1) && (v == 0)) { /* No VLAN, no need to send another TLV */ free(packet); break; } #endif /* Null TLV */ if (!( POKE_START_EDP_TLV(EDP_TLV_NULL) && POKE_END_EDP_TLV && POKE_SAVE(end))) goto toobig; /* Compute len and checksum */ i = end - pos_llc; /* Ethernet length */ v = end - pos_edp; /* EDP length */ POKE_RESTORE(pos_len_eh); if (!(POKE_UINT16(i))) goto toobig; POKE_RESTORE(pos_len_edp); if (!(POKE_UINT16(v))) goto toobig; checksum = frame_checksum(pos_edp, v, 0); if (!(POKE_UINT16(checksum))) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, end - packet) == -1) { log_warn("edp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } free(packet); #ifdef ENABLE_DOT1 state++; } #endif hardware->h_tx_cnt++; return 0; toobig: free(packet); return E2BIG; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_len < (x)) { \ log_warnx("edp", name " EDP TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) int edp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; #ifdef ENABLE_DOT1 struct lldpd_mgmt *mgmt, *mgmt_next, *m; struct lldpd_vlan *lvlan = NULL, *lvlan_next; #endif const unsigned char edpaddr[] = EDP_MULTICAST_ADDR; int length, gotend = 0, gotvlans = 0, edp_len, tlv_len, tlv_type; int edp_port, edp_slot; u_int8_t *pos, *pos_edp, *tlv; u_int8_t version[4]; #ifdef ENABLE_DOT1 struct in_addr address; struct lldpd_port *oport; #endif log_debug("edp", "decode EDP frame on port %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("edp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("edp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t) + 8 /* LLC */ + 10 + ETHER_ADDR_LEN /* EDP header */) { log_warnx("edp", "too short EDP frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(edpaddr, sizeof(edpaddr)) != 0) { log_info("edp", "frame not targeted at EDP multicast address received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(ETHER_ADDR_LEN); 
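	/* The destination (EDP multicast) and source MAC addresses have now
	 * been consumed; next come the 2-byte Ethernet length field and the
	 * 8-byte LLC/SNAP header (DSAP, SSAP, control, 3-byte OUI, 2-byte
	 * PID), whose PID must match LLC_PID_EDP. */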
PEEK_DISCARD_UINT16; PEEK_DISCARD(6); /* LLC: DSAP + SSAP + control + org */ if (PEEK_UINT16 != LLC_PID_EDP) { log_debug("edp", "incorrect LLC protocol ID received on %s", hardware->h_ifname); goto malformed; } (void)PEEK_SAVE(pos_edp); /* Save the start of EDP packet */ if (PEEK_UINT8 != 1) { log_warnx("edp", "incorrect EDP version for frame received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD_UINT8; /* Reserved */ edp_len = PEEK_UINT16; PEEK_DISCARD_UINT16; /* Checksum */ PEEK_DISCARD_UINT16; /* Sequence */ if (PEEK_UINT16 != 0) { /* ID Type = 0 = MAC */ log_warnx("edp", "incorrect device id type for frame received on %s", hardware->h_ifname); goto malformed; } if (edp_len > length + 10) { log_warnx("edp", "incorrect size for EDP frame received on %s", hardware->h_ifname); goto malformed; } chassis->c_ttl = cfg->g_config.c_tx_interval * cfg->g_config.c_tx_hold; chassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LLADDR; chassis->c_id_len = ETHER_ADDR_LEN; if ((chassis->c_id = (char *)malloc(ETHER_ADDR_LEN)) == NULL) { log_warn("edp", "unable to allocate memory for chassis ID"); goto malformed; } PEEK_BYTES(chassis->c_id, ETHER_ADDR_LEN); /* Let's check checksum */ if (frame_checksum(pos_edp, edp_len, 0) != 0) { log_warnx("edp", "incorrect EDP checksum for frame received on %s", hardware->h_ifname); goto malformed; } while (length && !gotend) { if (length < 4) { log_warnx("edp", "EDP TLV header is too large for " "frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_UINT8 != EDP_TLV_MARKER) { log_warnx("edp", "incorrect marker starting EDP TLV header for frame " "received on %s", hardware->h_ifname); goto malformed; } tlv_type = PEEK_UINT8; tlv_len = PEEK_UINT16 - 4; (void)PEEK_SAVE(tlv); if ((tlv_len < 0) || (tlv_len > length)) { log_debug("edp", "incorrect size in EDP TLV header for frame " "received on %s", hardware->h_ifname); /* Some poor old Extreme Summit are quite bogus */ gotend = 1; break; } switch (tlv_type) { case EDP_TLV_INFO: CHECK_TLV_SIZE(32, "Info"); port->p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME; edp_slot = PEEK_UINT16; edp_port = PEEK_UINT16; if (asprintf(&port->p_id, "%d/%d", edp_slot + 1, edp_port + 1) == -1) { log_warn("edp", "unable to allocate memory for " "port ID"); goto malformed; } port->p_id_len = strlen(port->p_id); if (asprintf(&port->p_descr, "Slot %d / Port %d", edp_slot + 1, edp_port + 1) == -1) { log_warn("edp", "unable to allocate memory for " "port description"); goto malformed; } PEEK_DISCARD_UINT16; /* vchassis */ PEEK_DISCARD(6); /* Reserved */ PEEK_BYTES(version, 4); if (asprintf(&chassis->c_descr, "EDP enabled device, version %d.%d.%d.%d", version[0], version[1], version[2], version[3]) == -1) { log_warn("edp", "unable to allocate memory for " "chassis description"); goto malformed; } break; case EDP_TLV_DISPLAY: if ((chassis->c_name = (char *)calloc(1, tlv_len + 1)) == NULL) { log_warn("edp", "unable to allocate memory for chassis " "name"); goto malformed; } /* TLV display contains a lot of garbage */ PEEK_BYTES(chassis->c_name, tlv_len); break; case EDP_TLV_NULL: if (tlv_len != 0) { log_warnx("edp", "null tlv with incorrect size in frame " "received on %s", hardware->h_ifname); goto malformed; } if (length) log_debug("edp", "extra data after edp frame on %s", hardware->h_ifname); gotend = 1; break; case EDP_TLV_VLAN: #ifdef ENABLE_DOT1 CHECK_TLV_SIZE(12, "VLAN"); if ((lvlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("edp", "unable to allocate vlan"); goto malformed; } 
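		/* Fixed part of the VLAN TLV body (12 bytes): 2 bytes of
		 * flags/reserved, a 2-byte VLAN ID, 4 reserved bytes and a
		 * 4-byte IPv4 address; the remaining tlv_len - 12 bytes
		 * carry the VLAN name. */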
PEEK_DISCARD_UINT16; /* Flags + reserved */ lvlan->v_vid = PEEK_UINT16; /* VID */ PEEK_DISCARD(4); /* Reserved */ PEEK_BYTES(&address, sizeof(address)); if (address.s_addr != INADDR_ANY) { mgmt = lldpd_alloc_mgmt(LLDPD_AF_IPV4, &address, sizeof(struct in_addr), 0); if (mgmt == NULL) { log_warn("edp", "Out of memory"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); } if ((lvlan->v_name = (char *)calloc(1, tlv_len + 1 - 12)) == NULL) { log_warn("edp", "unable to allocate vlan name"); goto malformed; } PEEK_BYTES(lvlan->v_name, tlv_len - 12); TAILQ_INSERT_TAIL(&port->p_vlans, lvlan, v_entries); lvlan = NULL; #endif gotvlans = 1; break; default: log_debug("edp", "unknown EDP TLV type (%d) received on %s", tlv_type, hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } PEEK_DISCARD(tlv + tlv_len - pos); } if ((chassis->c_id == NULL) || (port->p_id == NULL) || (chassis->c_name == NULL) || (chassis->c_descr == NULL) || (port->p_descr == NULL) || (gotend == 0)) { #ifdef ENABLE_DOT1 if (gotvlans && gotend) { /* VLAN can be sent in a separate frames. We need to add * those vlans to an existing port */ TAILQ_FOREACH(oport, &hardware->h_rports, p_entries) { if (!((oport->p_protocol == LLDPD_MODE_EDP) && (oport->p_chassis->c_id_subtype == chassis->c_id_subtype) && (oport->p_chassis->c_id_len == chassis->c_id_len) && (memcmp(oport->p_chassis->c_id, chassis->c_id, chassis->c_id_len) == 0))) continue; /* We attach the VLANs to the found port */ lldpd_vlan_cleanup(oport); for (lvlan = TAILQ_FIRST(&port->p_vlans); lvlan != NULL; lvlan = lvlan_next) { lvlan_next = TAILQ_NEXT(lvlan, v_entries); TAILQ_REMOVE(&port->p_vlans, lvlan, v_entries); TAILQ_INSERT_TAIL(&oport->p_vlans, lvlan, v_entries); } /* And the IP addresses */ for (mgmt = TAILQ_FIRST(&chassis->c_mgmt); mgmt != NULL; mgmt = mgmt_next) { mgmt_next = TAILQ_NEXT(mgmt, m_entries); TAILQ_REMOVE(&chassis->c_mgmt, mgmt, m_entries); /* Don't add an address that already exists! */ TAILQ_FOREACH(m, &chassis->c_mgmt, m_entries) if (m->m_family == mgmt->m_family && !memcmp(&m->m_addr, &mgmt->m_addr, sizeof(m->m_addr))) break; if (m == NULL) TAILQ_INSERT_TAIL(&oport->p_chassis->c_mgmt, mgmt, m_entries); } } /* We discard the remaining frame */ goto malformed; } #else if (gotvlans) goto malformed; #endif log_warnx("edp", "some mandatory tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: #ifdef ENABLE_DOT1 free(lvlan); #endif lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; } #endif /* ENABLE_EDP */
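/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * edp_send() fills the length/checksum word last, and edp_decode() rejects
 * frames whose EDP payload does not checksum to zero. Assuming that
 * frame_checksum() implements the usual 16-bit one's-complement (Internet)
 * checksum -- an assumption, not confirmed by this file -- a minimal
 * standalone version, kept out of the build with "#if 0", would be:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint16_t ones_complement_checksum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {           /* sum 16-bit big-endian words */
		sum += (uint32_t)((data[0] << 8) | data[1]);
		data += 2;
		len -= 2;
	}
	if (len)                    /* pad a trailing odd byte with zero */
		sum += (uint32_t)(data[0] << 8);
	while (sum >> 16)           /* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;      /* a valid frame verifies to zero */
}
#endif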
./CrossVul/dataset_final_sorted/CWE-617/c/bad_1770_2
crossvul-cpp_data_bad_3362_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/module.h" #include "magick/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(Image *, DDSInfo *, ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 
29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 
21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } 
}, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } 
}, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, 
{ { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *,DDSVector4 *,unsigned char *,size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(Image *,DDSInfo *,ExceptionInfo *), ReadDXT3(Image *,DDSInfo *,ExceptionInfo *), ReadDXT5(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *), WriteMipmaps(Image *,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteIndices(Image *,const DDSVector3,const DDSVector3, unsigned char *), WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *), WriteUncompressed(Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); value->w = MagickMin(1.0f,MagickMax(0.0f,value->w)); } static inline void VectorClamp3(DDSVector3 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); } static inline void VectorCopy43(const DDSVector4 source, DDSVector3 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; } static inline void VectorCopy44(const DDSVector4 source, DDSVector4 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; destination->w = source.w; } static inline void VectorNegativeMultiplySubtract(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = c.x - (a.x * b.x); destination->y = c.y - (a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline void VectorMultiply3(const DDSVector3 left, 
const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; } static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
floor(value->z) : ceil(value->z); } static void CalculateColors(unsigned short c0, unsigned short c1, DDSColors *c, MagickBooleanType ignoreAlpha) { c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0; c->r[0] = (unsigned char) C565_red(c0); c->g[0] = (unsigned char) C565_green(c0); c->b[0] = (unsigned char) C565_blue(c0); c->r[1] = (unsigned char) C565_red(c1); c->g[1] = (unsigned char) C565_green(c1); c->b[1] = (unsigned char) C565_blue(c1); if (ignoreAlpha != MagickFalse || c0 > c1) { c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3); c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3); c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3); c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3); c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3); c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3); } else { c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2); c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2); c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2); c->r[3] = c->g[3] = c->b[3] = 0; c->a[3] = 255; } } static size_t CompressAlpha(const size_t min, const size_t max, const size_t steps, const ssize_t *alphas, unsigned char* indices) { unsigned char codes[8]; register ssize_t i; size_t error, index, j, least, value; codes[0] = (unsigned char) min; codes[1] = (unsigned char) max; codes[6] = 0; codes[7] = 255; for (i=1; i < (ssize_t) steps; i++) codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps); error = 0; for (i=0; i<16; i++) { if (alphas[i] == -1) { indices[i] = 0; continue; } value = alphas[i]; least = SIZE_MAX; index = 0; for (j=0; j<8; j++) { size_t dist; dist = value - (size_t)codes[j]; dist *= dist; if (dist < least) { least = dist; index = j; } } indices[i] = (unsigned char)index; error += least; } return error; } static void CompressClusterFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); 
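              /* part0, part1 and part2 hold the weighted sums of the points
               * assigned to the start, 2/3 and 1/3 interpolants; the next two
               * subtractions leave part3 with the remainder, i.e. the points
               * assigned to the end colour. */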
VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for (i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; register ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); 
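/* The truncation and multiplication by gridrcp below finish snapping 'end' onto the RGB565 lattice (round-half-up via the +0.5 added above), mirroring the treatment already applied to 'start'. */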
VectorTruncate3(end); VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const DDSSingleColourLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { register ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z)); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, 
DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType status, cubemap = MagickFalse, volume = MagickFalse, matte; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; size_t n, num_images; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. 
*/ if (ReadDDSInfo(image, &dds_info) != MagickTrue) { ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { matte = MagickTrue; decoder = ReadUncompressedRGBA; } else { matte = MagickTrue; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { matte = MagickFalse; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { matte = MagickFalse; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { matte = MagickTrue; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { matte = MagickTrue; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; for (n = 0; n < num_images; n++) { if (n != 0) { if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); /* Start a new image */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->matte = matte; image->compression = compression; image->columns = dds_info.width; image->rows = dds_info.height; image->storage_class = DirectClass; image->endian = LSBEndian; image->depth = 8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((decoder)(image, &dds_info, exception) != MagickTrue) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t hdr_size, required; /* Seek to start of header */ (void) SeekBlob(image, 4, SEEK_SET); /* Check header field */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 124) return MagickFalse; /* Fill in DDS info struct */ dds_info->flags = ReadBlobLSBLong(image); /* Check required flags */ required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); if ((dds_info->flags & required) != required) return MagickFalse; dds_info->height 
= ReadBlobLSBLong(image); dds_info->width = ReadBlobLSBLong(image); dds_info->pitchOrLinearSize = ReadBlobLSBLong(image); dds_info->depth = ReadBlobLSBLong(image); dds_info->mipmapcount = ReadBlobLSBLong(image); (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */ /* Read pixel format structure */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 32) return MagickFalse; dds_info->pixelformat.flags = ReadBlobLSBLong(image); dds_info->pixelformat.fourcc = ReadBlobLSBLong(image); dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image); dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image); dds_info->ddscaps1 = ReadBlobLSBLong(image); dds_info->ddscaps2 = ReadBlobLSBLong(image); (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */ return MagickTrue; } static MagickBooleanType ReadDXT1(Image *image,DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; PixelPacket *q; register ssize_t i, x; size_t bits; ssize_t j, y; unsigned char code; unsigned short c0, c1; for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if (((x + i) < (ssize_t) image->columns) && ((y + j) < (ssize_t) image->rows)) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code])); if ((colors.a[code] != 0) && (image->matte == MagickFalse)) image->matte=MagickTrue; /* Correct matte */ q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; PixelPacket *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value: multiply 
0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; MagickSizeType alpha_bits; PixelPacket *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value */ alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t x, y; unsigned short color; if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image))); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(q,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else { SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); 
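/* 24- and 32-bit uncompressed DDS stores samples in B,G,R(,X) order, hence blue is read first. */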
SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); if (dds_info->pixelformat.rgb_bitcount == 32) (void) ReadBlobByte(image); } SetPixelAlpha(q,QuantumRange); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleMatteType); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else if (alphaBits == 2) { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (color >> 8))); SetPixelGray(q,ScaleCharToQuantum((unsigned char)color)); } else { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255))); } } else { SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = SetMagickInfo("DDS"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT1"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT5"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { register ssize_t i; MagickOffsetType offset; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w = DIV2(w); h = DIV2(h); } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { MagickOffsetType offset; register ssize_t i; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) w * h * pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w = DIV2(w); h = DIV2(h); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported formats. % % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteCompressed(Image *image, const size_t count, DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if (clusterFit == MagickFalse || count == 0) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. 
% */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (!image->matte) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } maxMipmaps=SIZE_MAX; mipmaps=0; if ((image->columns & (image->columns - 1)) == 0 && (image->rows & (image->rows - 1)) == 0) { option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, &image->exception); if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps, clusterFit,weightByAlpha,&image->exception) == MagickFalse) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MaxTextExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->matte) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ 
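/* i.e. dwPitchOrLinearSize = columns * bytes-per-pixel: 4 for the BGRA layout written by this coder, 3 for BGR. */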
if (image->matte != MagickFalse) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) ResetMagickMemory(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) // bitcount / masks (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->matte != MagickFalse) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) // ddscaps2 + reserved region (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t i, y, bx, by; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const PixelPacket *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f; point.w = weightByAlpha ? 
(float)(alpha + 1) / 256.0f : 1.0f; p++; match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { /* round to nearest and clamp into [0,limit]; a signed intermediate is required so that negative inputs clamp to 0 instead of wrapping to a huge unsigned value */ ssize_t result = (ssize_t) (value + 0.5f); if (result < 0) return(0); if (result > (ssize_t) limit) return(limit); return((size_t) result); } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char* indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { Image* resize_image; register ssize_t i; size_t columns, rows; columns = image->columns; rows = image->rows; for (i=0; i< (ssize_t) mipmaps; i++) { resize_image = ResizeImage(image,DIV2(columns),DIV2(rows),TriangleFilter,1.0, exception); if (resize_image == (Image *) NULL) return(MagickFalse); DestroyBlob(resize_image); resize_image->blob=ReferenceBlob(image->blob); WriteImageData(resize_image,pixelFormat,compression,clusterFit, weightByAlpha,exception); /* argument order matches WriteImageData(): clusterFit before weightByAlpha */ resize_image=DestroyImage(resize_image); columns = DIV2(columns); rows = DIV2(rows); } return(MagickTrue); } static void WriteSingleColorFit(Image *image, const DDSVector4* points, const ssize_t* map) { DDSVector3 start, end; register ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++)
indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p))); if (image->matte) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p))); p++; } } }
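/*
  A minimal hardening sketch, not part of the coder above. ReadDDSImage()
  derives num_images from the DDSCAPS2 cubemap face bits or from
  dds_info.depth, both of which come straight from the file, so a crafted
  header can make num_images zero (no pixels ever decoded) or absurdly
  large. A defensive check along these lines, using the blob size as a
  cheap upper bound, would reject such headers early; the exact message
  and placement are assumptions, not the coder's own code:
*/
#if 0
  if ((num_images == 0) || (num_images > (size_t) GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
#endif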
./CrossVul/dataset_final_sorted/CWE-617/c/bad_3362_0
crossvul-cpp_data_bad_219_1
/* * H.263 decoder * Copyright (c) 2001 Fabrice Bellard * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * H.263 decoder. */ #define UNCHECKED_BITSTREAM_READER 1 #include "libavutil/cpu.h" #include "avcodec.h" #include "error_resilience.h" #include "flv.h" #include "h263.h" #include "h263_parser.h" #include "hwaccel.h" #include "internal.h" #include "mpeg_er.h" #include "mpeg4video.h" #include "mpeg4video_parser.h" #include "mpegutils.h" #include "mpegvideo.h" #include "msmpeg4.h" #include "qpeldsp.h" #include "thread.h" #include "wmv2.h" static enum AVPixelFormat h263_get_format(AVCodecContext *avctx) { /* MPEG-4 Studio Profile only, not supported by hardware */ if (avctx->bits_per_raw_sample > 8) { av_assert1(avctx->profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO); return avctx->pix_fmt; } if (avctx->codec->id == AV_CODEC_ID_MSS2) return AV_PIX_FMT_YUV420P; if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY)) { if (avctx->color_range == AVCOL_RANGE_UNSPECIFIED) avctx->color_range = AVCOL_RANGE_MPEG; return AV_PIX_FMT_GRAY8; } return avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts); } av_cold int ff_h263_decode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int ret; s->out_format = FMT_H263; // set defaults ff_mpv_decode_defaults(s); ff_mpv_decode_init(s, avctx); s->quant_precision = 5; s->decode_mb = ff_h263_decode_mb; s->low_delay = 1; s->unrestricted_mv = 1; /* select sub codec */ switch (avctx->codec->id) { case AV_CODEC_ID_H263: case AV_CODEC_ID_H263P: s->unrestricted_mv = 0; avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; break; case AV_CODEC_ID_MPEG4: break; case AV_CODEC_ID_MSMPEG4V1: s->h263_pred = 1; s->msmpeg4_version = 1; break; case AV_CODEC_ID_MSMPEG4V2: s->h263_pred = 1; s->msmpeg4_version = 2; break; case AV_CODEC_ID_MSMPEG4V3: s->h263_pred = 1; s->msmpeg4_version = 3; break; case AV_CODEC_ID_WMV1: s->h263_pred = 1; s->msmpeg4_version = 4; break; case AV_CODEC_ID_WMV2: s->h263_pred = 1; s->msmpeg4_version = 5; break; case AV_CODEC_ID_VC1: case AV_CODEC_ID_WMV3: case AV_CODEC_ID_VC1IMAGE: case AV_CODEC_ID_WMV3IMAGE: case AV_CODEC_ID_MSS2: s->h263_pred = 1; s->msmpeg4_version = 6; avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break; case AV_CODEC_ID_H263I: break; case AV_CODEC_ID_FLV1: s->h263_flv = 1; break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported codec %d\n", avctx->codec->id); return AVERROR(ENOSYS); } s->codec_id = avctx->codec->id; if (avctx->codec_tag == AV_RL32("L263") || avctx->codec_tag == AV_RL32("S263")) if (avctx->extradata_size == 56 && avctx->extradata[0] == 1) s->ehc_mode = 1; /* for H.263, we allocate the images after having read the header */ if (avctx->codec->id != AV_CODEC_ID_H263 && avctx->codec->id != AV_CODEC_ID_H263P && avctx->codec->id != 
AV_CODEC_ID_MPEG4) { avctx->pix_fmt = h263_get_format(avctx); ff_mpv_idct_init(s); if ((ret = ff_mpv_common_init(s)) < 0) return ret; } ff_h263dsp_init(&s->h263dsp); ff_qpeldsp_init(&s->qdsp); ff_h263_decode_init_vlc(); return 0; } av_cold int ff_h263_decode_end(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; ff_mpv_common_end(s); return 0; } /** * Return the number of bytes consumed for building the current frame. */ static int get_consumed_bytes(MpegEncContext *s, int buf_size) { int pos = (get_bits_count(&s->gb) + 7) >> 3; if (s->divx_packed || s->avctx->hwaccel) { /* We would have to scan through the whole buf to handle the weird * reordering ... */ return buf_size; } else if (s->avctx->flags & AV_CODEC_FLAG_TRUNCATED) { pos -= s->parse_context.last_index; // padding is not really read so this might be -1 if (pos < 0) pos = 0; return pos; } else { // avoid infinite loops (maybe not needed...) if (pos == 0) pos = 1; // oops ;) if (pos + 10 > buf_size) pos = buf_size; return pos; } } static int decode_slice(MpegEncContext *s) { const int part_mask = s->partitioned_frame ? (ER_AC_END | ER_AC_ERROR) : 0x7F; const int mb_size = 16 >> s->avctx->lowres; int ret; s->last_resync_gb = s->gb; s->first_slice_line = 1; s->resync_mb_x = s->mb_x; s->resync_mb_y = s->mb_y; ff_set_qscale(s, s->qscale); if (s->studio_profile) { if ((ret = ff_mpeg4_decode_studio_slice_header(s->avctx->priv_data)) < 0) return ret; } if (s->avctx->hwaccel) { const uint8_t *start = s->gb.buffer + get_bits_count(&s->gb) / 8; ret = s->avctx->hwaccel->decode_slice(s->avctx, start, s->gb.buffer_end - start); // ensure we exit decode loop s->mb_y = s->mb_height; return ret; } if (s->partitioned_frame) { const int qscale = s->qscale; if (CONFIG_MPEG4_DECODER && s->codec_id == AV_CODEC_ID_MPEG4) if ((ret = ff_mpeg4_decode_partitions(s->avctx->priv_data)) < 0) return ret; /* restore variables which were modified */ s->first_slice_line = 1; s->mb_x = s->resync_mb_x; s->mb_y = s->resync_mb_y; ff_set_qscale(s, qscale); } for (; s->mb_y < s->mb_height; s->mb_y++) { /* per-row end of slice checks */ if (s->msmpeg4_version) { if (s->resync_mb_y + s->slice_height == s->mb_y) { ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1, s->mb_y, ER_MB_END); return 0; } } if (s->msmpeg4_version == 1) { s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128; } ff_init_block_index(s); for (; s->mb_x < s->mb_width; s->mb_x++) { int ret; ff_update_block_index(s); if (s->resync_mb_x == s->mb_x && s->resync_mb_y + 1 == s->mb_y) s->first_slice_line = 0; /* DCT & quantize */ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; ff_dlog(s, "%d %06X\n", get_bits_count(&s->gb), show_bits(&s->gb, 24)); ff_tlog(NULL, "Decoding MB at %dx%d\n", s->mb_x, s->mb_y); ret = s->decode_mb(s, s->block); if (s->pict_type != AV_PICTURE_TYPE_B) ff_h263_update_motion_val(s); if (ret < 0) { const int xy = s->mb_x + s->mb_y * s->mb_stride; if (ret == SLICE_END) { ff_mpv_reconstruct_mb(s, s->block); if (s->loop_filter) ff_h263_loop_filter(s); ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END & part_mask); s->padding_bug_score--; if (++s->mb_x >= s->mb_width) { s->mb_x = 0; ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size); ff_mpv_report_decode_progress(s); s->mb_y++; } return 0; } else if (ret == SLICE_NOEND) { av_log(s->avctx, AV_LOG_ERROR, "Slice mismatch at MB: %d\n", xy); ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x + 1, s->mb_y, ER_MB_END & part_mask); return AVERROR_INVALIDDATA; } 
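/* Any other negative return is a hard macroblock decode error: mark the slice damaged for the error-resilience layer and bail out unless the caller asked to ignore errors. */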
av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy); ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR & part_mask); if (s->avctx->err_recognition & AV_EF_IGNORE_ERR) continue; return AVERROR_INVALIDDATA; } ff_mpv_reconstruct_mb(s, s->block); if (s->loop_filter) ff_h263_loop_filter(s); } ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size); ff_mpv_report_decode_progress(s); s->mb_x = 0; } av_assert1(s->mb_x == 0 && s->mb_y == s->mb_height); // Detect incorrect padding with wrong stuffing codes used by NEC N-02B if (s->codec_id == AV_CODEC_ID_MPEG4 && (s->workaround_bugs & FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >= 48 && show_bits(&s->gb, 24) == 0x4010 && !s->data_partitioning) s->padding_bug_score += 32; /* try to detect the padding bug */ if (s->codec_id == AV_CODEC_ID_MPEG4 && (s->workaround_bugs & FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >= 0 && get_bits_left(&s->gb) < 137 && !s->data_partitioning) { const int bits_count = get_bits_count(&s->gb); const int bits_left = s->gb.size_in_bits - bits_count; if (bits_left == 0) { s->padding_bug_score += 16; } else if (bits_left != 1) { int v = show_bits(&s->gb, 8); v |= 0x7F >> (7 - (bits_count & 7)); if (v == 0x7F && bits_left <= 8) s->padding_bug_score--; else if (v == 0x7F && ((get_bits_count(&s->gb) + 8) & 8) && bits_left <= 16) s->padding_bug_score += 4; else s->padding_bug_score++; } } if (s->codec_id == AV_CODEC_ID_H263 && (s->workaround_bugs & FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >= 8 && get_bits_left(&s->gb) < 300 && s->pict_type == AV_PICTURE_TYPE_I && show_bits(&s->gb, 8) == 0 && !s->data_partitioning) { s->padding_bug_score += 32; } if (s->codec_id == AV_CODEC_ID_H263 && (s->workaround_bugs & FF_BUG_AUTODETECT) && get_bits_left(&s->gb) >= 64 && AV_RB64(s->gb.buffer_end - 8) == 0xCDCDCDCDFC7F0000) { s->padding_bug_score += 32; } if (s->workaround_bugs & FF_BUG_AUTODETECT) { if ( (s->padding_bug_score > -2 && !s->data_partitioning)) s->workaround_bugs |= FF_BUG_NO_PADDING; else s->workaround_bugs &= ~FF_BUG_NO_PADDING; } // handle formats which don't have unique end markers if (s->msmpeg4_version || (s->workaround_bugs & FF_BUG_NO_PADDING)) { // FIXME perhaps solve this more cleanly int left = get_bits_left(&s->gb); int max_extra = 7; /* no markers in M$ crap */ if (s->msmpeg4_version && s->pict_type == AV_PICTURE_TYPE_I) max_extra += 17; /* buggy padding but the frame should still end approximately at * the bitstream end */ if ((s->workaround_bugs & FF_BUG_NO_PADDING) && (s->avctx->err_recognition & (AV_EF_BUFFER|AV_EF_AGGRESSIVE))) max_extra += 48; else if ((s->workaround_bugs & FF_BUG_NO_PADDING)) max_extra += 256 * 256 * 256 * 64; if (left > max_extra) av_log(s->avctx, AV_LOG_ERROR, "discarding %d junk bits at end, next would be %X\n", left, show_bits(&s->gb, 24)); else if (left < 0) av_log(s->avctx, AV_LOG_ERROR, "overreading %d bits\n", -left); else ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x - 1, s->mb_y, ER_MB_END); return 0; } av_log(s->avctx, AV_LOG_ERROR, "slice end not reached but screenspace end (%d left %06X, score= %d)\n", get_bits_left(&s->gb), show_bits(&s->gb, 24), s->padding_bug_score); ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END & part_mask); return AVERROR_INVALIDDATA; } int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MpegEncContext *s = avctx->priv_data; int ret; int slice_ret = 0; AVFrame 
*pict = data; /* no supplementary picture */
    if (buf_size == 0) {
        /* special case for last picture */
        if (s->low_delay == 0 && s->next_picture_ptr) {
            if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
                return ret;
            s->next_picture_ptr = NULL;

            *got_frame = 1;
        }

        return 0;
    }

    if (s->avctx->flags & AV_CODEC_FLAG_TRUNCATED) {
        int next;

        if (CONFIG_MPEG4_DECODER && s->codec_id == AV_CODEC_ID_MPEG4) {
            next = ff_mpeg4_find_frame_end(&s->parse_context, buf, buf_size);
        } else if (CONFIG_H263_DECODER && s->codec_id == AV_CODEC_ID_H263) {
            next = ff_h263_find_frame_end(&s->parse_context, buf, buf_size);
        } else if (CONFIG_H263P_DECODER && s->codec_id == AV_CODEC_ID_H263P) {
            next = ff_h263_find_frame_end(&s->parse_context, buf, buf_size);
        } else {
            av_log(s->avctx, AV_LOG_ERROR,
                   "this codec does not support truncated bitstreams\n");
            return AVERROR(ENOSYS);
        }

        if (ff_combine_frame(&s->parse_context, next,
                             (const uint8_t **)&buf, &buf_size) < 0)
            return buf_size;
    }

retry:
    if (s->divx_packed && s->bitstream_buffer_size) {
        int i;
        for (i = 0; i < buf_size - 3; i++) {
            if (buf[i] == 0 && buf[i+1] == 0 && buf[i+2] == 1) {
                if (buf[i+3] == 0xB0) {
                    av_log(s->avctx, AV_LOG_WARNING,
                           "Discarding excessive bitstream in packed xvid\n");
                    s->bitstream_buffer_size = 0;
                }
                break;
            }
        }
    }

    if (s->bitstream_buffer_size &&
        (s->divx_packed || buf_size <= MAX_NVOP_SIZE)) // divx 5.01+/xvid frame reorder
        ret = init_get_bits8(&s->gb, s->bitstream_buffer,
                             s->bitstream_buffer_size);
    else
        ret = init_get_bits8(&s->gb, buf, buf_size);

    s->bitstream_buffer_size = 0;
    if (ret < 0)
        return ret;

    if (!s->context_initialized)
        // we need the idct permutation for reading a custom matrix
        ff_mpv_idct_init(s);

    /* let's go :-) */
    if (CONFIG_WMV2_DECODER && s->msmpeg4_version == 5) {
        ret = ff_wmv2_decode_picture_header(s);
    } else if (CONFIG_MSMPEG4_DECODER && s->msmpeg4_version) {
        ret = ff_msmpeg4_decode_picture_header(s);
    } else if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4) {
        if (s->avctx->extradata_size && s->picture_number == 0) {
            GetBitContext gb;

            if (init_get_bits8(&gb, s->avctx->extradata,
                               s->avctx->extradata_size) >= 0)
                ff_mpeg4_decode_picture_header(avctx->priv_data, &gb);
        }
        ret = ff_mpeg4_decode_picture_header(avctx->priv_data, &s->gb);
    } else if (CONFIG_H263I_DECODER && s->codec_id == AV_CODEC_ID_H263I) {
        ret = ff_intel_h263_decode_picture_header(s);
    } else if (CONFIG_FLV_DECODER && s->h263_flv) {
        ret = ff_flv_decode_picture_header(s);
    } else {
        ret = ff_h263_decode_picture_header(s);
    }

    if (ret < 0 || ret == FRAME_SKIPPED) {
        if (s->width  != avctx->coded_width ||
            s->height != avctx->coded_height) {
            av_log(s->avctx, AV_LOG_WARNING,
                   "Reverting picture dimensions change due to header decoding failure\n");
            s->width  = avctx->coded_width;
            s->height = avctx->coded_height;
        }
    }
    if (ret == FRAME_SKIPPED)
        return get_consumed_bytes(s, buf_size);

    /* skip if the header was thrashed */
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
        return ret;
    }

    if (!s->context_initialized) {
        avctx->pix_fmt = h263_get_format(avctx);
        if ((ret = ff_mpv_common_init(s)) < 0)
            return ret;
    }

    if (!s->current_picture_ptr || s->current_picture_ptr->f->data[0]) {
        int i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0)
            return i;
        s->current_picture_ptr = &s->picture[i];
    }

    avctx->has_b_frames = !s->low_delay;

    if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4) {
        if (ff_mpeg4_workaround_bugs(avctx) == 1)
            goto retry;
        if (s->studio_profile != (s->idsp.idct == NULL))
            ff_mpv_idct_init(s);
    }

    /* After the H.263 & MPEG-4 header decode we have the height, width,
     * and other parameters, so we can initialize the picture.
     * FIXME: as the H.263 decoder evolves, it should get an H263EncContext */
    if (s->width  != avctx->coded_width  ||
        s->height != avctx->coded_height ||
        s->context_reinit) {
        /* H.263 can change the picture size at any time */
        s->context_reinit = 0;

        ret = ff_set_dimensions(avctx, s->width, s->height);
        if (ret < 0)
            return ret;

        ff_set_sar(avctx, avctx->sample_aspect_ratio);

        if ((ret = ff_mpv_common_frame_size_change(s)))
            return ret;

        if (avctx->pix_fmt != h263_get_format(avctx)) {
            av_log(avctx, AV_LOG_ERROR, "format change not supported\n");
            avctx->pix_fmt = AV_PIX_FMT_NONE;
            return AVERROR_UNKNOWN;
        }
    }

    if (s->codec_id == AV_CODEC_ID_H263  ||
        s->codec_id == AV_CODEC_ID_H263P ||
        s->codec_id == AV_CODEC_ID_H263I)
        s->gob_index = H263_GOB_HEIGHT(s->height);

    // for skipping the frame
    s->current_picture.f->pict_type = s->pict_type;
    s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    /* skip B-frames if we don't have reference frames */
    if (!s->last_picture_ptr &&
        (s->pict_type == AV_PICTURE_TYPE_B || s->droppable))
        return get_consumed_bytes(s, buf_size);
    if ((avctx->skip_frame >= AVDISCARD_NONREF &&
         s->pict_type == AV_PICTURE_TYPE_B)    ||
        (avctx->skip_frame >= AVDISCARD_NONKEY &&
         s->pict_type != AV_PICTURE_TYPE_I)    ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return get_consumed_bytes(s, buf_size);

    if (s->next_p_frame_damaged) {
        if (s->pict_type == AV_PICTURE_TYPE_B)
            return get_consumed_bytes(s, buf_size);
        else
            s->next_p_frame_damaged = 0;
    }

    if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
        s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
        s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
    } else {
        s->me.qpel_put = s->qdsp.put_no_rnd_qpel_pixels_tab;
        s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
    }

    if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
        return ret;

    if (!s->divx_packed)
        ff_thread_finish_setup(avctx);

    if (avctx->hwaccel) {
        ret = avctx->hwaccel->start_frame(avctx, s->gb.buffer,
                                          s->gb.buffer_end - s->gb.buffer);
        if (ret < 0)
            return ret;
    }

    ff_mpeg_er_frame_start(s);

    /* the second part of the wmv2 header contains the MB skip bits which
     * are stored in current_picture->mb_type which is not available before
     * ff_mpv_frame_start() */
    if (CONFIG_WMV2_DECODER && s->msmpeg4_version == 5) {
        ret = ff_wmv2_decode_secondary_picture_header(s);
        if (ret < 0)
            return ret;
        if (ret == 1)
            goto frame_end;
    }

    /* decode each macroblock */
    s->mb_x = 0;
    s->mb_y = 0;

    slice_ret = decode_slice(s);
    while (s->mb_y < s->mb_height) {
        if (s->msmpeg4_version) {
            if (s->slice_height == 0 || s->mb_x != 0 || slice_ret < 0 ||
                (s->mb_y % s->slice_height) != 0 || get_bits_left(&s->gb) < 0)
                break;
        } else {
            int prev_x = s->mb_x, prev_y = s->mb_y;
            if (ff_h263_resync(s) < 0)
                break;
            if (prev_y * s->mb_width + prev_x <
                s->mb_y * s->mb_width + s->mb_x)
                s->er.error_occurred = 1;
        }

        if (s->msmpeg4_version < 4 && s->h263_pred)
            ff_mpeg4_clean_buffers(s);

        if (decode_slice(s) < 0)
            slice_ret = AVERROR_INVALIDDATA;
    }

    if (s->msmpeg4_version && s->msmpeg4_version < 4 &&
        s->pict_type == AV_PICTURE_TYPE_I)
        if (!CONFIG_MSMPEG4_DECODER ||
            ff_msmpeg4_decode_ext_header(s, buf_size) < 0)
            s->er.error_status_table[s->mb_num - 1] = ER_MB_ERROR;

    av_assert1(s->bitstream_buffer_size == 0);

frame_end:
    ff_er_frame_end(&s->er);

    if (avctx->hwaccel) {
        ret = avctx->hwaccel->end_frame(avctx);
        if (ret < 0)
            return ret;
    }

    ff_mpv_frame_end(s);

    if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4)
        ff_mpeg4_frame_end(avctx, buf, buf_size);

    if (!s->divx_packed && avctx->hwaccel)
        ff_thread_finish_setup(avctx);

    av_assert1(s->current_picture.f->pict_type ==
               s->current_picture_ptr->f->pict_type);
    av_assert1(s->current_picture.f->pict_type == s->pict_type);

    if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
        if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
            return ret;
        ff_print_debug_info(s, s->current_picture_ptr, pict);
        ff_mpv_export_qp_table(s, pict, s->current_picture_ptr,
                               FF_QSCALE_TYPE_MPEG1);
    } else if (s->last_picture_ptr) {
        if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
            return ret;
        ff_print_debug_info(s, s->last_picture_ptr, pict);
        ff_mpv_export_qp_table(s, pict, s->last_picture_ptr,
                               FF_QSCALE_TYPE_MPEG1);
    }

    if (s->last_picture_ptr || s->low_delay) {
        if (pict->format == AV_PIX_FMT_YUV420P &&
            (s->codec_tag == AV_RL32("GEOV") ||
             s->codec_tag == AV_RL32("GEOX"))) {
            int x, y, p;
            av_frame_make_writable(pict);
            for (p = 0; p < 3; p++) {
                int w = AV_CEIL_RSHIFT(pict->width,  !!p);
                int h = AV_CEIL_RSHIFT(pict->height, !!p);
                int linesize = pict->linesize[p];
                for (y = 0; y < (h >> 1); y++)
                    for (x = 0; x < w; x++)
                        FFSWAP(int,
                               pict->data[p][x + y * linesize],
                               pict->data[p][x + (h - 1 - y) * linesize]);
            }
        }
        *got_frame = 1;
    }

    if (slice_ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
        return slice_ret;
    else
        return get_consumed_bytes(s, buf_size);
}

const enum AVPixelFormat ff_h263_hwaccel_pixfmt_list_420[] = {
#if CONFIG_H263_VAAPI_HWACCEL || CONFIG_MPEG4_VAAPI_HWACCEL
    AV_PIX_FMT_VAAPI,
#endif
#if CONFIG_MPEG4_NVDEC_HWACCEL
    AV_PIX_FMT_CUDA,
#endif
#if CONFIG_MPEG4_VDPAU_HWACCEL
    AV_PIX_FMT_VDPAU,
#endif
#if CONFIG_H263_VIDEOTOOLBOX_HWACCEL || CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL
    AV_PIX_FMT_VIDEOTOOLBOX,
#endif
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};

AVCodec ff_h263_decoder = {
    .name           = "h263",
    .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_h263_decode_init,
    .close          = ff_h263_decode_end,
    .decode         = ff_h263_decode_frame,
    .capabilities   = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
                      AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY,
    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
    .flush          = ff_mpeg_flush,
    .max_lowres     = 3,
    .pix_fmts       = ff_h263_hwaccel_pixfmt_list_420,
};

AVCodec ff_h263p_decoder = {
    .name           = "h263p",
    .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263P,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_h263_decode_init,
    .close          = ff_h263_decode_end,
    .decode         = ff_h263_decode_frame,
    .capabilities   = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
                      AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY,
    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
    .flush          = ff_mpeg_flush,
    .max_lowres     = 3,
    .pix_fmts       = ff_h263_hwaccel_pixfmt_list_420,
    .hw_configs     = (const AVCodecHWConfigInternal*[]) {
#if CONFIG_H263_VAAPI_HWACCEL
                        HWACCEL_VAAPI(h263),
#endif
#if CONFIG_MPEG4_VDPAU_HWACCEL
                        HWACCEL_VDPAU(mpeg4),
#endif
#if CONFIG_H263_VIDEOTOOLBOX_HWACCEL
                        HWACCEL_VIDEOTOOLBOX(h263),
#endif
                        NULL
                    },
};
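/*
 * Editor's note: the sketch below is illustrative and is not part of the
 * original h263dec.c.  It restates, in a self-contained form, the in-place
 * vertical flip that the GEOV/GEOX branch above performs with FFSWAP on each
 * plane of a YUV420 frame.  The names flip_plane_vertically and
 * flip_yuv420_vertically are hypothetical; the (x + 1) >> 1 rounding for the
 * chroma planes mirrors AV_CEIL_RSHIFT.
 */
#include <stddef.h>
#include <stdint.h>

static void flip_plane_vertically(uint8_t *data, int width, int height,
                                  ptrdiff_t linesize)
{
    /* Swap row y with row (height - 1 - y); stopping at the middle row
     * visits every pair of rows exactly once. */
    for (int y = 0; y < height / 2; y++) {
        uint8_t *top    = data + (ptrdiff_t) y * linesize;
        uint8_t *bottom = data + (ptrdiff_t) (height - 1 - y) * linesize;
        for (int x = 0; x < width; x++) {
            uint8_t tmp = top[x];
            top[x]      = bottom[x];
            bottom[x]   = tmp;
        }
    }
}

/* Flip all three planes of a YUV 4:2:0 image described by data[]/linesize[];
 * plane 0 is full-size luma, planes 1 and 2 are half-size chroma. */
static void flip_yuv420_vertically(uint8_t *data[3], ptrdiff_t linesize[3],
                                   int width, int height)
{
    for (int p = 0; p < 3; p++) {
        int w = p ? (width  + 1) >> 1 : width;
        int h = p ? (height + 1) >> 1 : height;
        flip_plane_vertically(data[p], w, h, linesize[p]);
    }
}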
./CrossVul/dataset_final_sorted/CWE-617/c/bad_219_1
crossvul-cpp_data_bad_3363_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP N N GGGG % % P P NN N G % % PPPP N N N G GG % % P N NN G G % % P N N GGG % % % % % % Read/Write Portable Network Graphics Image Format % % % % Software Design % % Cristy % % Glenn Randers-Pehrson % % November 1997 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/quantum-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/static.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/transform.h" #include "magick/utility.h" #if defined(MAGICKCORE_PNG_DELEGATE) /* Suppress libpng pedantic warnings that were added in * libpng-1.2.41 and libpng-1.4.0. If you are working on * migration to libpng-1.5, remove these defines and then * fix any code that generates warnings. */ /* #define PNG_DEPRECATED Use of this function is deprecated */ /* #define PNG_USE_RESULT The result of this function must be checked */ /* #define PNG_NORETURN This function does not return */ /* #define PNG_ALLOCATED The result of the function is new memory */ /* #define PNG_DEPSTRUCT Access to this struct member is deprecated */ /* PNG_PTR_NORETURN does not work on some platforms, in libpng-1.5.x */ #define PNG_PTR_NORETURN #include "png.h" #include "zlib.h" /* ImageMagick differences */ #define first_scene scene #if PNG_LIBPNG_VER > 10011 /* Optional declarations. Define or undefine them as you like. */ /* #define PNG_DEBUG -- turning this on breaks VisualC compiling */ /* Features under construction. Define these to work on them. */ #undef MNG_OBJECT_BUFFERS #undef MNG_BASI_SUPPORTED #define MNG_COALESCE_LAYERS /* In 5.4.4, this interfered with MMAP'ed files. 
*/ #define MNG_INSERT_LAYERS /* Troublesome, but seem to work as of 5.4.4 */ #if defined(MAGICKCORE_JPEG_DELEGATE) # define JNG_SUPPORTED /* Not finished as of 5.5.2. See "To do" comments. */ #endif #if !defined(RGBColorMatchExact) #define IsPNGColorEqual(color,target) \ (((color).red == (target).red) && \ ((color).green == (target).green) && \ ((color).blue == (target).blue)) #endif /* Table of recognized sRGB ICC profiles */ struct sRGB_info_struct { png_uint_32 len; png_uint_32 crc; png_byte intent; }; const struct sRGB_info_struct sRGB_info[] = { /* ICC v2 perceptual sRGB_IEC61966-2-1_black_scaled.icc */ { 3048, 0x3b8772b9UL, 0}, /* ICC v2 relative sRGB_IEC61966-2-1_no_black_scaling.icc */ { 3052, 0x427ebb21UL, 1}, /* ICC v4 perceptual sRGB_v4_ICC_preference_displayclass.icc */ {60988, 0x306fd8aeUL, 0}, /* ICC v4 perceptual sRGB_v4_ICC_preference.icc perceptual */ {60960, 0xbbef7812UL, 0}, /* HP? sRGB v2 media-relative sRGB_IEC61966-2-1_noBPC.icc */ { 3024, 0x5d5129ceUL, 1}, /* HP-Microsoft sRGB v2 perceptual */ { 3144, 0x182ea552UL, 0}, /* HP-Microsoft sRGB v2 media-relative */ { 3144, 0xf29e526dUL, 1}, /* Facebook's "2012/01/25 03:41:57", 524, "TINYsRGB.icc" */ { 524, 0xd4938c39UL, 0}, /* "2012/11/28 22:35:21", 3212, "Argyll_sRGB.icm") */ { 3212, 0x034af5a1UL, 0}, /* Not recognized */ { 0, 0x00000000UL, 0}, }; /* Macros for left-bit-replication to ensure that pixels * and PixelPackets all have the same image->depth, and for use * in PNG8 quantization. */ /* LBR01: Replicate top bit */ #define LBR01PacketRed(pixelpacket) \ (pixelpacket).red=(ScaleQuantumToChar((pixelpacket).red) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketGreen(pixelpacket) \ (pixelpacket).green=(ScaleQuantumToChar((pixelpacket).green) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketBlue(pixelpacket) \ (pixelpacket).blue=(ScaleQuantumToChar((pixelpacket).blue) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketOpacity(pixelpacket) \ (pixelpacket).opacity=(ScaleQuantumToChar((pixelpacket).opacity) < 0x10 ? \ 0 : QuantumRange); #define LBR01PacketRGB(pixelpacket) \ { \ LBR01PacketRed((pixelpacket)); \ LBR01PacketGreen((pixelpacket)); \ LBR01PacketBlue((pixelpacket)); \ } #define LBR01PacketRGBO(pixelpacket) \ { \ LBR01PacketRGB((pixelpacket)); \ LBR01PacketOpacity((pixelpacket)); \ } #define LBR01PixelRed(pixel) \ (SetPixelRed((pixel), \ ScaleQuantumToChar(GetPixelRed((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelGreen(pixel) \ (SetPixelGreen((pixel), \ ScaleQuantumToChar(GetPixelGreen((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelBlue(pixel) \ (SetPixelBlue((pixel), \ ScaleQuantumToChar(GetPixelBlue((pixel))) < 0x10 ? \ 0 : QuantumRange)); #define LBR01PixelOpacity(pixel) \ (SetPixelOpacity((pixel), \ ScaleQuantumToChar(GetPixelOpacity((pixel))) < 0x10 ? 
\ 0 : QuantumRange)); #define LBR01PixelRGB(pixel) \ { \ LBR01PixelRed((pixel)); \ LBR01PixelGreen((pixel)); \ LBR01PixelBlue((pixel)); \ } #define LBR01PixelRGBO(pixel) \ { \ LBR01PixelRGB((pixel)); \ LBR01PixelOpacity((pixel)); \ } /* LBR02: Replicate top 2 bits */ #define LBR02PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xc0; \ (pixelpacket).red=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xc0; \ (pixelpacket).green=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xc0; \ (pixelpacket).blue=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketOpacity(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).opacity) & 0xc0; \ (pixelpacket).opacity=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6))); \ } #define LBR02PacketRGB(pixelpacket) \ { \ LBR02PacketRed((pixelpacket)); \ LBR02PacketGreen((pixelpacket)); \ LBR02PacketBlue((pixelpacket)); \ } #define LBR02PacketRGBO(pixelpacket) \ { \ LBR02PacketRGB((pixelpacket)); \ LBR02PacketOpacity((pixelpacket)); \ } #define LBR02PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xc0; \ SetPixelRed((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xc0; \ SetPixelGreen((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelBlue(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelBlue((pixel))) & 0xc0; \ SetPixelBlue((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02Opacity(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelOpacity((pixel))) & 0xc0; \ SetPixelOpacity((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 2) | (lbr_bits >> 4) | (lbr_bits >> 6)))); \ } #define LBR02PixelRGB(pixel) \ { \ LBR02PixelRed((pixel)); \ LBR02PixelGreen((pixel)); \ LBR02PixelBlue((pixel)); \ } #define LBR02PixelRGBO(pixel) \ { \ LBR02PixelRGB((pixel)); \ LBR02Opacity((pixel)); \ } /* LBR03: Replicate top 3 bits (only used with opaque pixels during PNG8 quantization) */ #define LBR03PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xe0; \ (pixelpacket).red=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xe0; \ (pixelpacket).green=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xe0; \ (pixelpacket).blue=ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6))); \ } #define LBR03PacketRGB(pixelpacket) \ { \ LBR03PacketRed((pixelpacket)); \ LBR03PacketGreen((pixelpacket)); \ LBR03PacketBlue((pixelpacket)); \ } #define LBR03PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xe0; \ 
SetPixelRed((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xe0; \ SetPixelGreen((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelBlue(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelBlue((pixel))) \ & 0xe0; \ SetPixelBlue((pixel), ScaleCharToQuantum( \ (lbr_bits | (lbr_bits >> 3) | (lbr_bits >> 6)))); \ } #define LBR03PixelRGB(pixel) \ { \ LBR03PixelRed((pixel)); \ LBR03PixelGreen((pixel)); \ LBR03PixelBlue((pixel)); \ } /* LBR04: Replicate top 4 bits */ #define LBR04PacketRed(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).red) & 0xf0; \ (pixelpacket).red=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketGreen(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).green) & 0xf0; \ (pixelpacket).green=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketBlue(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).blue) & 0xf0; \ (pixelpacket).blue=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketOpacity(pixelpacket) \ { \ unsigned char lbr_bits=ScaleQuantumToChar((pixelpacket).opacity) & 0xf0; \ (pixelpacket).opacity=ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4))); \ } #define LBR04PacketRGB(pixelpacket) \ { \ LBR04PacketRed((pixelpacket)); \ LBR04PacketGreen((pixelpacket)); \ LBR04PacketBlue((pixelpacket)); \ } #define LBR04PacketRGBO(pixelpacket) \ { \ LBR04PacketRGB((pixelpacket)); \ LBR04PacketOpacity((pixelpacket)); \ } #define LBR04PixelRed(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelRed((pixel))) \ & 0xf0; \ SetPixelRed((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelGreen(pixel) \ { \ unsigned char lbr_bits=ScaleQuantumToChar(GetPixelGreen((pixel)))\ & 0xf0; \ SetPixelGreen((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelBlue(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelBlue((pixel))) & 0xf0; \ SetPixelBlue((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelOpacity(pixel) \ { \ unsigned char lbr_bits= \ ScaleQuantumToChar(GetPixelOpacity((pixel))) & 0xf0; \ SetPixelOpacity((pixel),\ ScaleCharToQuantum((lbr_bits | (lbr_bits >> 4)))); \ } #define LBR04PixelRGB(pixel) \ { \ LBR04PixelRed((pixel)); \ LBR04PixelGreen((pixel)); \ LBR04PixelBlue((pixel)); \ } #define LBR04PixelRGBO(pixel) \ { \ LBR04PixelRGB((pixel)); \ LBR04PixelOpacity((pixel)); \ } /* Establish thread safety. setjmp/longjmp is claimed to be safe on these platforms: setjmp/longjmp is alleged to be unsafe on these platforms: */ #ifdef PNG_SETJMP_SUPPORTED # ifndef IMPNG_SETJMP_IS_THREAD_SAFE # define IMPNG_SETJMP_NOT_THREAD_SAFE # endif # ifdef IMPNG_SETJMP_NOT_THREAD_SAFE static SemaphoreInfo *ping_semaphore = (SemaphoreInfo *) NULL; # endif #endif /* This temporary until I set up malloc'ed object attributes array. Recompile with MNG_MAX_OBJECTS=65536L to avoid this limit but waste more memory. */ #define MNG_MAX_OBJECTS 256 /* If this not defined, spec is interpreted strictly. If it is defined, an attempt will be made to recover from some errors, including o global PLTE too short */ #undef MNG_LOOSE /* Don't try to define PNG_MNG_FEATURES_SUPPORTED here. Make sure it's defined in libpng/pngconf.h, version 1.0.9 or later. 
It won't work with earlier versions of libpng. From libpng-1.0.3a to libpng-1.0.8, PNG_READ|WRITE_EMPTY_PLTE were used but those have been deprecated in libpng in favor of PNG_MNG_FEATURES_SUPPORTED, so we set them here. PNG_MNG_FEATURES_SUPPORTED is disabled by default in libpng-1.0.9 and will be enabled by default in libpng-1.2.0. */ #ifdef PNG_MNG_FEATURES_SUPPORTED # ifndef PNG_READ_EMPTY_PLTE_SUPPORTED # define PNG_READ_EMPTY_PLTE_SUPPORTED # endif # ifndef PNG_WRITE_EMPTY_PLTE_SUPPORTED # define PNG_WRITE_EMPTY_PLTE_SUPPORTED # endif #endif /* Maximum valid size_t in PNG/MNG chunks is (2^31)-1 This macro is only defined in libpng-1.0.3 and later. Previously it was PNG_MAX_UINT but that was deprecated in libpng-1.2.6 */ #ifndef PNG_UINT_31_MAX #define PNG_UINT_31_MAX (png_uint_32) 0x7fffffffL #endif /* Constant strings for known chunk types. If you need to add a chunk, add a string holding the name here. To make the code more portable, we use ASCII numbers like this, not characters. */ /* until registration of eXIf */ static const png_byte mng_exIf[5]={101, 120, 73, 102, (png_byte) '\0'}; /* after registration of eXIf */ static const png_byte mng_eXIf[5]={101, 88, 73, 102, (png_byte) '\0'}; static const png_byte mng_MHDR[5]={ 77, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_BACK[5]={ 66, 65, 67, 75, (png_byte) '\0'}; static const png_byte mng_BASI[5]={ 66, 65, 83, 73, (png_byte) '\0'}; static const png_byte mng_CLIP[5]={ 67, 76, 73, 80, (png_byte) '\0'}; static const png_byte mng_CLON[5]={ 67, 76, 79, 78, (png_byte) '\0'}; static const png_byte mng_DEFI[5]={ 68, 69, 70, 73, (png_byte) '\0'}; static const png_byte mng_DHDR[5]={ 68, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_DISC[5]={ 68, 73, 83, 67, (png_byte) '\0'}; static const png_byte mng_ENDL[5]={ 69, 78, 68, 76, (png_byte) '\0'}; static const png_byte mng_FRAM[5]={ 70, 82, 65, 77, (png_byte) '\0'}; static const png_byte mng_IEND[5]={ 73, 69, 78, 68, (png_byte) '\0'}; static const png_byte mng_IHDR[5]={ 73, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_JHDR[5]={ 74, 72, 68, 82, (png_byte) '\0'}; static const png_byte mng_LOOP[5]={ 76, 79, 79, 80, (png_byte) '\0'}; static const png_byte mng_MAGN[5]={ 77, 65, 71, 78, (png_byte) '\0'}; static const png_byte mng_MEND[5]={ 77, 69, 78, 68, (png_byte) '\0'}; static const png_byte mng_MOVE[5]={ 77, 79, 86, 69, (png_byte) '\0'}; static const png_byte mng_PAST[5]={ 80, 65, 83, 84, (png_byte) '\0'}; static const png_byte mng_PLTE[5]={ 80, 76, 84, 69, (png_byte) '\0'}; static const png_byte mng_SAVE[5]={ 83, 65, 86, 69, (png_byte) '\0'}; static const png_byte mng_SEEK[5]={ 83, 69, 69, 75, (png_byte) '\0'}; static const png_byte mng_SHOW[5]={ 83, 72, 79, 87, (png_byte) '\0'}; static const png_byte mng_TERM[5]={ 84, 69, 82, 77, (png_byte) '\0'}; static const png_byte mng_bKGD[5]={ 98, 75, 71, 68, (png_byte) '\0'}; static const png_byte mng_caNv[5]={ 99, 97, 78, 118, (png_byte) '\0'}; static const png_byte mng_cHRM[5]={ 99, 72, 82, 77, (png_byte) '\0'}; static const png_byte mng_gAMA[5]={103, 65, 77, 65, (png_byte) '\0'}; static const png_byte mng_iCCP[5]={105, 67, 67, 80, (png_byte) '\0'}; static const png_byte mng_nEED[5]={110, 69, 69, 68, (png_byte) '\0'}; static const png_byte mng_pHYg[5]={112, 72, 89, 103, (png_byte) '\0'}; static const png_byte mng_vpAg[5]={118, 112, 65, 103, (png_byte) '\0'}; static const png_byte mng_pHYs[5]={112, 72, 89, 115, (png_byte) '\0'}; static const png_byte mng_sBIT[5]={115, 66, 73, 84, (png_byte) '\0'}; static const png_byte 
mng_sRGB[5]={115, 82, 71, 66, (png_byte) '\0'}; static const png_byte mng_tRNS[5]={116, 82, 78, 83, (png_byte) '\0'}; #if defined(JNG_SUPPORTED) static const png_byte mng_IDAT[5]={ 73, 68, 65, 84, (png_byte) '\0'}; static const png_byte mng_JDAT[5]={ 74, 68, 65, 84, (png_byte) '\0'}; static const png_byte mng_JDAA[5]={ 74, 68, 65, 65, (png_byte) '\0'}; static const png_byte mng_JdAA[5]={ 74, 100, 65, 65, (png_byte) '\0'}; static const png_byte mng_JSEP[5]={ 74, 83, 69, 80, (png_byte) '\0'}; static const png_byte mng_oFFs[5]={111, 70, 70, 115, (png_byte) '\0'}; #endif #if 0 /* Other known chunks that are not yet supported by ImageMagick: */ static const png_byte mng_hIST[5]={104, 73, 83, 84, (png_byte) '\0'}; static const png_byte mng_iTXt[5]={105, 84, 88, 116, (png_byte) '\0'}; static const png_byte mng_sPLT[5]={115, 80, 76, 84, (png_byte) '\0'}; static const png_byte mng_sTER[5]={115, 84, 69, 82, (png_byte) '\0'}; static const png_byte mng_tEXt[5]={116, 69, 88, 116, (png_byte) '\0'}; static const png_byte mng_tIME[5]={116, 73, 77, 69, (png_byte) '\0'}; static const png_byte mng_zTXt[5]={122, 84, 88, 116, (png_byte) '\0'}; #endif typedef struct _MngBox { long left, right, top, bottom; } MngBox; typedef struct _MngPair { volatile long a, b; } MngPair; #ifdef MNG_OBJECT_BUFFERS typedef struct _MngBuffer { size_t height, width; Image *image; png_color plte[256]; int reference_count; unsigned char alpha_sample_depth, compression_method, color_type, concrete, filter_method, frozen, image_type, interlace_method, pixel_sample_depth, plte_length, sample_depth, viewable; } MngBuffer; #endif typedef struct _MngInfo { #ifdef MNG_OBJECT_BUFFERS MngBuffer *ob[MNG_MAX_OBJECTS]; #endif Image * image; RectangleInfo page; int adjoin, #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED bytes_in_read_buffer, found_empty_plte, #endif equal_backgrounds, equal_chrms, equal_gammas, #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) equal_palettes, #endif equal_physs, equal_srgbs, framing_mode, have_global_bkgd, have_global_chrm, have_global_gama, have_global_phys, have_global_sbit, have_global_srgb, have_saved_bkgd_index, have_write_global_chrm, have_write_global_gama, have_write_global_plte, have_write_global_srgb, need_fram, object_id, old_framing_mode, saved_bkgd_index; int new_number_colors; ssize_t image_found, loop_count[256], loop_iteration[256], scenes_found, x_off[MNG_MAX_OBJECTS], y_off[MNG_MAX_OBJECTS]; MngBox clip, frame, image_box, object_clip[MNG_MAX_OBJECTS]; unsigned char /* These flags could be combined into one byte */ exists[MNG_MAX_OBJECTS], frozen[MNG_MAX_OBJECTS], loop_active[256], invisible[MNG_MAX_OBJECTS], viewable[MNG_MAX_OBJECTS]; MagickOffsetType loop_jump[256]; png_colorp global_plte; png_color_8 global_sbit; png_byte #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED read_buffer[8], #endif global_trns[256]; float global_gamma; ChromaticityInfo global_chrm; RenderingIntent global_srgb_intent; unsigned int delay, global_plte_length, global_trns_length, global_x_pixels_per_unit, global_y_pixels_per_unit, mng_width, mng_height, ticks_per_second; MagickBooleanType need_blob; unsigned int IsPalette, global_phys_unit_type, basi_warning, clon_warning, dhdr_warning, jhdr_warning, magn_warning, past_warning, phyg_warning, phys_warning, sbit_warning, show_warning, mng_type, write_mng, write_png_colortype, write_png_depth, write_png_compression_level, write_png_compression_strategy, write_png_compression_filter, write_png8, write_png24, write_png32, write_png48, write_png64; #ifdef 
MNG_BASI_SUPPORTED size_t basi_width, basi_height; unsigned int basi_depth, basi_color_type, basi_compression_method, basi_filter_type, basi_interlace_method, basi_red, basi_green, basi_blue, basi_alpha, basi_viewable; #endif png_uint_16 magn_first, magn_last, magn_mb, magn_ml, magn_mr, magn_mt, magn_mx, magn_my, magn_methx, magn_methy; PixelPacket mng_global_bkgd; /* Added at version 6.6.6-7 */ MagickBooleanType ping_exclude_bKGD, ping_exclude_cHRM, ping_exclude_date, ping_exclude_eXIf, ping_exclude_EXIF, ping_exclude_gAMA, ping_exclude_iCCP, /* ping_exclude_iTXt, */ ping_exclude_oFFs, ping_exclude_pHYs, ping_exclude_sRGB, ping_exclude_tEXt, ping_exclude_tRNS, ping_exclude_vpAg, ping_exclude_caNv, ping_exclude_zCCP, /* hex-encoded iCCP */ ping_exclude_zTXt, ping_preserve_colormap, /* Added at version 6.8.5-7 */ ping_preserve_iCCP, /* Added at version 6.8.9-9 */ ping_exclude_tIME; } MngInfo; #endif /* VER */ /* Forward declarations. */ static MagickBooleanType WritePNGImage(const ImageInfo *,Image *); static MagickBooleanType WriteMNGImage(const ImageInfo *,Image *); #if defined(JNG_SUPPORTED) static MagickBooleanType WriteJNGImage(const ImageInfo *,Image *); #endif #if PNG_LIBPNG_VER > 10011 #if (MAGICKCORE_QUANTUM_DEPTH >= 16) static MagickBooleanType LosslessReduceDepthOK(Image *image) { /* Reduce bit depth if it can be reduced losslessly from 16+ to 8. * * This is true if the high byte and the next highest byte of * each sample of the image, the colormap, and the background color * are equal to each other. We check this by seeing if the samples * are unchanged when we scale them down to 8 and back up to Quantum. * * We don't use the method GetImageDepth() because it doesn't check * background and doesn't handle PseudoClass specially. */ #define QuantumToCharToQuantumEqQuantum(quantum) \ ((ScaleCharToQuantum((unsigned char) ScaleQuantumToChar(quantum))) == quantum) MagickBooleanType ok_to_reduce=MagickFalse; if (image->depth >= 16) { const PixelPacket *p; ok_to_reduce= QuantumToCharToQuantumEqQuantum(image->background_color.red) && QuantumToCharToQuantumEqQuantum(image->background_color.green) && QuantumToCharToQuantumEqQuantum(image->background_color.blue) ? MagickTrue : MagickFalse; if (ok_to_reduce != MagickFalse && image->storage_class == PseudoClass) { int indx; for (indx=0; indx < (ssize_t) image->colors; indx++) { ok_to_reduce=( QuantumToCharToQuantumEqQuantum( image->colormap[indx].red) && QuantumToCharToQuantumEqQuantum( image->colormap[indx].green) && QuantumToCharToQuantumEqQuantum( image->colormap[indx].blue)) ? MagickTrue : MagickFalse; if (ok_to_reduce == MagickFalse) break; } } if ((ok_to_reduce != MagickFalse) && (image->storage_class != PseudoClass)) { ssize_t y; register ssize_t x; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) { ok_to_reduce = MagickFalse; break; } for (x=(ssize_t) image->columns-1; x >= 0; x--) { ok_to_reduce= QuantumToCharToQuantumEqQuantum(GetPixelRed(p)) && QuantumToCharToQuantumEqQuantum(GetPixelGreen(p)) && QuantumToCharToQuantumEqQuantum(GetPixelBlue(p)) ? 
MagickTrue : MagickFalse; if (ok_to_reduce == MagickFalse) break; p++; } if (x >= 0) break; } } if (ok_to_reduce != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " OK to reduce PNG bit depth to 8 without loss of info"); } else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Not OK to reduce PNG bit depth to 8 without loss of info"); } } return ok_to_reduce; } #endif /* MAGICKCORE_QUANTUM_DEPTH >= 16 */ static const char* PngColorTypeToString(const unsigned int color_type) { const char *result = "Unknown"; switch (color_type) { case PNG_COLOR_TYPE_GRAY: result = "Gray"; break; case PNG_COLOR_TYPE_GRAY_ALPHA: result = "Gray+Alpha"; break; case PNG_COLOR_TYPE_PALETTE: result = "Palette"; break; case PNG_COLOR_TYPE_RGB: result = "RGB"; break; case PNG_COLOR_TYPE_RGB_ALPHA: result = "RGB+Alpha"; break; } return result; } static int Magick_RenderingIntent_to_PNG_RenderingIntent(const RenderingIntent intent) { switch (intent) { case PerceptualIntent: return 0; case RelativeIntent: return 1; case SaturationIntent: return 2; case AbsoluteIntent: return 3; default: return -1; } } static RenderingIntent Magick_RenderingIntent_from_PNG_RenderingIntent(const int ping_intent) { switch (ping_intent) { case 0: return PerceptualIntent; case 1: return RelativeIntent; case 2: return SaturationIntent; case 3: return AbsoluteIntent; default: return UndefinedIntent; } } static const char * Magick_RenderingIntentString_from_PNG_RenderingIntent(const int ping_intent) { switch (ping_intent) { case 0: return "Perceptual Intent"; case 1: return "Relative Intent"; case 2: return "Saturation Intent"; case 3: return "Absolute Intent"; default: return "Undefined Intent"; } } static const char * Magick_ColorType_from_PNG_ColorType(const int ping_colortype) { switch (ping_colortype) { case 0: return "Grayscale"; case 2: return "Truecolor"; case 3: return "Indexed"; case 4: return "GrayAlpha"; case 6: return "RGBA"; default: return "UndefinedColorType"; } } #endif /* PNG_LIBPNG_VER > 10011 */ #endif /* MAGICKCORE_PNG_DELEGATE */ /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMNG() returns MagickTrue if the image format type, identified by the % magick string, is MNG. % % The format of the IsMNG method is: % % MagickBooleanType IsMNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % % */ static MagickBooleanType IsMNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\212MNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s J N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsJNG() returns MagickTrue if the image format type, identified by the % magick string, is JNG. % % The format of the IsJNG method is: % % MagickBooleanType IsJNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
% % */ static MagickBooleanType IsJNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\213JNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P N G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPNG() returns MagickTrue if the image format type, identified by the % magick string, is PNG. % % The format of the IsPNG method is: % % MagickBooleanType IsPNG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPNG(const unsigned char *magick,const size_t length) { if (length < 8) return(MagickFalse); if (memcmp(magick,"\211PNG\r\n\032\n",8) == 0) return(MagickTrue); return(MagickFalse); } #if defined(MAGICKCORE_PNG_DELEGATE) #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #if (PNG_LIBPNG_VER > 10011) static size_t WriteBlobMSBULong(Image *image,const size_t value) { unsigned char buffer[4]; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; return((size_t) WriteBlob(image,4,buffer)); } static void PNGLong(png_bytep p,png_uint_32 value) { *p++=(png_byte) ((value >> 24) & 0xff); *p++=(png_byte) ((value >> 16) & 0xff); *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGsLong(png_bytep p,png_int_32 value) { *p++=(png_byte) ((value >> 24) & 0xff); *p++=(png_byte) ((value >> 16) & 0xff); *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGShort(png_bytep p,png_uint_16 value) { *p++=(png_byte) ((value >> 8) & 0xff); *p++=(png_byte) (value & 0xff); } static void PNGType(png_bytep p,const png_byte *type) { (void) CopyMagickMemory(p,type,4*sizeof(png_byte)); } static void LogPNGChunk(MagickBooleanType logging, const png_byte *type, size_t length) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing %c%c%c%c chunk, length: %.20g", type[0],type[1],type[2],type[3],(double) length); } #endif /* PNG_LIBPNG_VER > 10011 */ #if defined(__cplusplus) || defined(c_plusplus) } #endif #if PNG_LIBPNG_VER > 10011 /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPNGImage() reads a Portable Network Graphics (PNG) or % Multiple-image Network Graphics (MNG) image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image or set of images. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the ReadPNGImage method is: % % Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % % To do, more or less in chronological order (as of version 5.5.2, % November 26, 2002 -- glennrp -- see also "To do" under WriteMNGImage): % % Get 16-bit cheap transparency working. 
% % (At this point, PNG decoding is supposed to be in full MNG-LC compliance) % % Preserve all unknown and not-yet-handled known chunks found in input % PNG file and copy them into output PNG files according to the PNG % copying rules. % % (At this point, PNG encoding should be in full MNG compliance) % % Provide options for choice of background to use when the MNG BACK % chunk is not present or is not mandatory (i.e., leave transparent, % user specified, MNG BACK, PNG bKGD) % % Implement LOOP/ENDL [done, but could do discretionary loops more % efficiently by linking in the duplicate frames.]. % % Decode and act on the MHDR simplicity profile (offer option to reject % files or attempt to process them anyway when the profile isn't LC or VLC). % % Upgrade to full MNG without Delta-PNG. % % o BACK [done a while ago except for background image ID] % o MOVE [done 15 May 1999] % o CLIP [done 15 May 1999] % o DISC [done 19 May 1999] % o SAVE [partially done 19 May 1999 (marks objects frozen)] % o SEEK [partially done 19 May 1999 (discard function only)] % o SHOW % o PAST % o BASI % o MNG-level tEXt/iTXt/zTXt % o pHYg % o pHYs % o sBIT % o bKGD % o iTXt (wait for libpng implementation). % % Use the scene signature to discover when an identical scene is % being reused, and just point to the original image->exception instead % of storing another set of pixels. This not specific to MNG % but could be applied generally. % % Upgrade to full MNG with Delta-PNG. % % JNG tEXt/iTXt/zTXt % % We will not attempt to read files containing the CgBI chunk. % They are really Xcode files meant for display on the iPhone. % These are not valid PNG files and it is impossible to recover % the original PNG from files that have been converted to Xcode-PNG, % since irretrievable loss of color data has occurred due to the % use of premultiplied alpha. */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif /* This the function that does the actual reading of data. It is the same as the one supplied in libpng, except that it receives the datastream from the ReadBlob() function instead of standard input. */ static void png_get_data(png_structp png_ptr,png_bytep data,png_size_t length) { Image *image; image=(Image *) png_get_io_ptr(png_ptr); if (length != 0) { png_size_t check; check=(png_size_t) ReadBlob(image,(size_t) length,data); if (check != length) { char msg[MaxTextExtent]; (void) FormatLocaleString(msg,MaxTextExtent, "Expected %.20g bytes; found %.20g bytes",(double) length, (double) check); png_warning(png_ptr,msg); png_error(png_ptr,"Read Exception"); } } } #if !defined(PNG_READ_EMPTY_PLTE_SUPPORTED) && \ !defined(PNG_MNG_FEATURES_SUPPORTED) /* We use mng_get_data() instead of png_get_data() if we have a libpng * older than libpng-1.0.3a, which was the first to allow the empty * PLTE, or a newer libpng in which PNG_MNG_FEATURES_SUPPORTED was * ifdef'ed out. Earlier versions would crash if the bKGD chunk was * encountered after an empty PLTE, so we have to look ahead for bKGD * chunks and remove them from the datastream that is passed to libpng, * and store their contents for later use. 
*/ static void mng_get_data(png_structp png_ptr,png_bytep data,png_size_t length) { MngInfo *mng_info; Image *image; png_size_t check; register ssize_t i; i=0; mng_info=(MngInfo *) png_get_io_ptr(png_ptr); image=(Image *) mng_info->image; while (mng_info->bytes_in_read_buffer && length) { data[i]=mng_info->read_buffer[i]; mng_info->bytes_in_read_buffer--; length--; i++; } if (length != 0) { check=(png_size_t) ReadBlob(image,(size_t) length,(char *) data); if (check != length) png_error(png_ptr,"Read Exception"); if (length == 4) { if ((data[0] == 0) && (data[1] == 0) && (data[2] == 0) && (data[3] == 0)) { check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->read_buffer[4]=0; mng_info->bytes_in_read_buffer=4; if (memcmp(mng_info->read_buffer,mng_PLTE,4) == 0) mng_info->found_empty_plte=MagickTrue; if (memcmp(mng_info->read_buffer,mng_IEND,4) == 0) { mng_info->found_empty_plte=MagickFalse; mng_info->have_saved_bkgd_index=MagickFalse; } } if ((data[0] == 0) && (data[1] == 0) && (data[2] == 0) && (data[3] == 1)) { check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->read_buffer[4]=0; mng_info->bytes_in_read_buffer=4; if (memcmp(mng_info->read_buffer,mng_bKGD,4) == 0) if (mng_info->found_empty_plte) { /* Skip the bKGD data byte and CRC. */ check=(png_size_t) ReadBlob(image,5,(char *) mng_info->read_buffer); check=(png_size_t) ReadBlob(image,(size_t) length, (char *) mng_info->read_buffer); mng_info->saved_bkgd_index=mng_info->read_buffer[0]; mng_info->have_saved_bkgd_index=MagickTrue; mng_info->bytes_in_read_buffer=0; } } } } } #endif static void png_put_data(png_structp png_ptr,png_bytep data,png_size_t length) { Image *image; image=(Image *) png_get_io_ptr(png_ptr); if (length != 0) { png_size_t check; check=(png_size_t) WriteBlob(image,(size_t) length,data); if (check != length) png_error(png_ptr,"WriteBlob Failed"); } } static void png_flush_data(png_structp png_ptr) { (void) png_ptr; } #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED static int PalettesAreEqual(Image *a,Image *b) { ssize_t i; if ((a == (Image *) NULL) || (b == (Image *) NULL)) return((int) MagickFalse); if (a->storage_class != PseudoClass || b->storage_class != PseudoClass) return((int) MagickFalse); if (a->colors != b->colors) return((int) MagickFalse); for (i=0; i < (ssize_t) a->colors; i++) { if ((a->colormap[i].red != b->colormap[i].red) || (a->colormap[i].green != b->colormap[i].green) || (a->colormap[i].blue != b->colormap[i].blue)) return((int) MagickFalse); } return((int) MagickTrue); } #endif static void MngInfoDiscardObject(MngInfo *mng_info,int i) { if (i && (i < MNG_MAX_OBJECTS) && (mng_info != (MngInfo *) NULL) && mng_info->exists[i] && !mng_info->frozen[i]) { #ifdef MNG_OBJECT_BUFFERS if (mng_info->ob[i] != (MngBuffer *) NULL) { if (mng_info->ob[i]->reference_count > 0) mng_info->ob[i]->reference_count--; if (mng_info->ob[i]->reference_count == 0) { if (mng_info->ob[i]->image != (Image *) NULL) mng_info->ob[i]->image=DestroyImage(mng_info->ob[i]->image); mng_info->ob[i]=DestroyString(mng_info->ob[i]); } } mng_info->ob[i]=(MngBuffer *) NULL; #endif mng_info->exists[i]=MagickFalse; mng_info->invisible[i]=MagickFalse; mng_info->viewable[i]=MagickFalse; mng_info->frozen[i]=MagickFalse; mng_info->x_off[i]=0; mng_info->y_off[i]=0; mng_info->object_clip[i].left=0; mng_info->object_clip[i].right=(ssize_t) PNG_UINT_31_MAX; mng_info->object_clip[i].top=0; mng_info->object_clip[i].bottom=(ssize_t) PNG_UINT_31_MAX; } } static MngInfo 
*MngInfoFreeStruct(MngInfo *mng_info)
{
  register ssize_t
    i;

  if (mng_info == (MngInfo *) NULL)
    return((MngInfo *) NULL);

  for (i=1; i < MNG_MAX_OBJECTS; i++)
    MngInfoDiscardObject(mng_info,i);

  if (mng_info->global_plte != (png_colorp) NULL)
    mng_info->global_plte=(png_colorp)
      RelinquishMagickMemory(mng_info->global_plte);

  return((MngInfo *) RelinquishMagickMemory(mng_info));
}

static MngBox mng_minimum_box(MngBox box1,MngBox box2)
{
  MngBox
    box;

  box=box1;
  if (box.left < box2.left)
    box.left=box2.left;

  if (box.top < box2.top)
    box.top=box2.top;

  if (box.right > box2.right)
    box.right=box2.right;

  if (box.bottom > box2.bottom)
    box.bottom=box2.bottom;

  return box;
}

static MngBox mng_read_box(MngBox previous_box,char delta_type,
  unsigned char *p)
{
  MngBox
    box;

  /*
    Read clipping boundaries from DEFI, CLIP, FRAM, or PAST chunk.
  */
  box.left=(ssize_t) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
  box.right=(ssize_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]);
  box.top=(ssize_t) ((p[8] << 24) | (p[9] << 16) | (p[10] << 8) | p[11]);
  box.bottom=(ssize_t) ((p[12] << 24) | (p[13] << 16) | (p[14] << 8) | p[15]);

  if (delta_type != 0)
    {
      box.left+=previous_box.left;
      box.right+=previous_box.right;
      box.top+=previous_box.top;
      box.bottom+=previous_box.bottom;
    }

  return(box);
}

static MngPair mng_read_pair(MngPair previous_pair,int delta_type,
  unsigned char *p)
{
  MngPair
    pair;

  /*
    Read two ssize_ts from CLON, MOVE or PAST chunk
  */
  pair.a=(long) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]);
  pair.b=(long) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]);

  if (delta_type != 0)
    {
      pair.a+=previous_pair.a;
      pair.b+=previous_pair.b;
    }

  return(pair);
}

static long mng_get_long(unsigned char *p)
{
  return((long) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]));
}

typedef struct _PNGErrorInfo
{
  Image
    *image;

  ExceptionInfo
    *exception;
} PNGErrorInfo;

static void MagickPNGErrorHandler(png_struct *ping,png_const_charp message)
{
  Image
    *image;

  image=(Image *) png_get_error_ptr(ping);

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " libpng-%s error: %s", PNG_LIBPNG_VER_STRING,message);

  (void) ThrowMagickException(&image->exception,GetMagickModule(),CoderError,
    message,"`%s'",image->filename);

#if (PNG_LIBPNG_VER < 10500)
  /* A warning about deprecated use of jmpbuf here is unavoidable if you
   * are building with libpng-1.4.x and can be ignored.
   */
  longjmp(ping->jmpbuf,1);
#else
  png_longjmp(ping,1);
#endif
}

static void MagickPNGWarningHandler(png_struct *ping,png_const_charp message)
{
  Image
    *image;

  if (LocaleCompare(message, "Missing PLTE before tRNS") == 0)
    png_error(ping, message);

  image=(Image *) png_get_error_ptr(ping);

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " libpng-%s warning: %s", PNG_LIBPNG_VER_STRING,message);

  (void) ThrowMagickException(&image->exception,GetMagickModule(),
    CoderWarning,message,"`%s'",image->filename);
}

#ifdef PNG_USER_MEM_SUPPORTED
#if PNG_LIBPNG_VER >= 10400
static png_voidp Magick_png_malloc(png_structp png_ptr,png_alloc_size_t size)
#else
static png_voidp Magick_png_malloc(png_structp png_ptr,png_size_t size)
#endif
{
  (void) png_ptr;
  return((png_voidp) AcquireMagickMemory((size_t) size));
}

/*
  Free a pointer.  It is removed from the list at the same time.
*/
static png_free_ptr Magick_png_free(png_structp png_ptr,png_voidp ptr)
{
  (void) png_ptr;
  ptr=RelinquishMagickMemory(ptr);
  return((png_free_ptr) NULL);
}
#endif

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static int Magick_png_read_raw_profile(png_struct *ping,Image *image,
  const ImageInfo *image_info, png_textp text,int ii)
{
  register ssize_t
    i;

  register unsigned char
    *dp;

  register png_charp
    sp;

  png_uint_32
    length,
    nibbles;

  StringInfo
    *profile;

  const unsigned char
    unhex[103]={0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,1,
                2,3,4,5,6,7,8,9,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,0,0,0,
                0,0,0,0,0,0,0,10,11,12,
                13,14,15};

  sp=text[ii].text+1;

  /* look for newline */
  while (*sp != '\n')
    sp++;

  /* look for length */
  while (*sp == '\0' || *sp == ' ' || *sp == '\n')
    sp++;

  length=(png_uint_32) StringToLong(sp);

  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " length: %lu",(unsigned long) length);

  while (*sp != ' ' && *sp != '\n')
    sp++;

  /* allocate space */
  if (length == 0)
  {
    png_warning(ping,"invalid profile length");
    return(MagickFalse);
  }

  profile=BlobToStringInfo((const void *) NULL,length);

  if (profile == (StringInfo *) NULL)
  {
    png_warning(ping, "unable to copy profile");
    return(MagickFalse);
  }

  /* copy profile, skipping white space and column 1 "=" signs */
  dp=GetStringInfoDatum(profile);
  nibbles=length*2;

  for (i=0; i < (ssize_t) nibbles; i++)
  {
    while (*sp < '0' || (*sp > '9' && *sp < 'a') || *sp > 'f')
    {
      if (*sp == '\0')
        {
          png_warning(ping, "ran out of profile data");
          return(MagickFalse);
        }

      sp++;
    }

    if (i%2 == 0)
      *dp=(unsigned char) (16*unhex[(int) *sp++]);

    else
      (*dp++)+=unhex[(int) *sp++];
  }

  /* We have already read "Raw profile type " */
  (void) SetImageProfile(image,&text[ii].key[17],profile);
  profile=DestroyStringInfo(profile);

  if (image_info->verbose)
    (void) printf(" Found a generic profile, type %s\n",&text[ii].key[17]);

  return MagickTrue;
}

#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
static int read_user_chunk_callback(png_struct *ping,
  png_unknown_chunkp chunk)
{
  Image
    *image;

  /*
    The unknown chunk structure contains the chunk data:

      png_byte name[5];
      png_byte *data;
      png_size_t size;

    Note that libpng has already taken care of the CRC handling.
*/ LogMagickEvent(CoderEvent,GetMagickModule(), " read_user_chunk: found %c%c%c%c chunk", chunk->name[0],chunk->name[1],chunk->name[2],chunk->name[3]); if (chunk->name[0] == 101 && (chunk->name[1] == 88 || chunk->name[1] == 120 ) && chunk->name[2] == 73 && chunk-> name[3] == 102) { /* process eXIf or exIf chunk */ PNGErrorInfo *error_info; StringInfo *profile; unsigned char *p; png_byte *s; int i; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " recognized eXIf|exIf chunk"); image=(Image *) png_get_user_chunk_ptr(ping); error_info=(PNGErrorInfo *) png_get_error_ptr(ping); profile=BlobToStringInfo((const void *) NULL,chunk->size+6); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(error_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1); } p=GetStringInfoDatum(profile); /* Initialize profile with "Exif\0\0" */ *p++ ='E'; *p++ ='x'; *p++ ='i'; *p++ ='f'; *p++ ='\0'; *p++ ='\0'; /* copy chunk->data to profile */ s=chunk->data; for (i=0; i < (ssize_t) chunk->size; i++) *p++ = *s++; (void) SetImageProfile(image,"exif",profile); return(1); } /* vpAg (deprecated, replaced by caNv) */ if (chunk->name[0] == 118 && chunk->name[1] == 112 && chunk->name[2] == 65 && chunk->name[3] == 103) { /* recognized vpAg */ if (chunk->size != 9) return(-1); /* Error return */ if (chunk->data[8] != 0) return(0); /* ImageMagick requires pixel units */ image=(Image *) png_get_user_chunk_ptr(ping); image->page.width=(size_t) ((chunk->data[0] << 24) | (chunk->data[1] << 16) | (chunk->data[2] << 8) | chunk->data[3]); image->page.height=(size_t) ((chunk->data[4] << 24) | (chunk->data[5] << 16) | (chunk->data[6] << 8) | chunk->data[7]); return(1); } /* caNv */ if (chunk->name[0] == 99 && chunk->name[1] == 97 && chunk->name[2] == 78 && chunk->name[3] == 118) { /* recognized caNv */ if (chunk->size != 16) return(-1); /* Error return */ image=(Image *) png_get_user_chunk_ptr(ping); image->page.width=(size_t) ((chunk->data[0] << 24) | (chunk->data[1] << 16) | (chunk->data[2] << 8) | chunk->data[3]); image->page.height=(size_t) ((chunk->data[4] << 24) | (chunk->data[5] << 16) | (chunk->data[6] << 8) | chunk->data[7]); image->page.x=(size_t) ((chunk->data[8] << 24) | (chunk->data[9] << 16) | (chunk->data[10] << 8) | chunk->data[11]); image->page.y=(size_t) ((chunk->data[12] << 24) | (chunk->data[13] << 16) | (chunk->data[14] << 8) | chunk->data[15]); /* Return one of the following: */ /* return(-n); chunk had an error */ /* return(0); did not recognize */ /* return(n); success */ return(1); } return(0); /* Did not recognize */ } #endif #if defined(PNG_tIME_SUPPORTED) static void read_tIME_chunk(Image *image,png_struct *ping,png_info *info) { png_timep time; if (png_get_tIME(ping,info,&time)) { char timestamp[21]; FormatLocaleString(timestamp,21,"%04d-%02d-%02dT%02d:%02d:%02dZ", time->year,time->month,time->day,time->hour,time->minute,time->second); SetImageProperty(image,"png:tIME",timestamp); } } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d O n e P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadOnePNGImage() reads a Portable Network Graphics (PNG) image file % (minus the 8-byte signature) and returns it. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. 
% % The format of the ReadOnePNGImage method is: % % Image *ReadOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o mng_info: Specifies a pointer to a MngInfo structure. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { /* Read one PNG image */ /* To do: Read the tEXt/Creation Time chunk into the date:create property */ Image *image; char im_vers[32], libpng_runv[32], libpng_vers[32], zlib_runv[32], zlib_vers[32]; int intent, /* "PNG Rendering intent", which is ICC intent + 1 */ num_raw_profiles, num_text, num_text_total, num_passes, number_colors, pass, ping_bit_depth, ping_color_type, ping_file_depth, ping_interlace_method, ping_compression_method, ping_filter_method, ping_num_trans, unit_type; double file_gamma; LongPixelPacket transparent_color; MagickBooleanType logging, ping_found_cHRM, ping_found_gAMA, ping_found_iCCP, ping_found_sRGB, ping_found_sRGB_cHRM, ping_preserve_iCCP, status; MemoryInfo *volatile pixel_info; png_bytep ping_trans_alpha; png_color_16p ping_background, ping_trans_color; png_info *end_info, *ping_info; png_struct *ping; png_textp text; png_uint_32 ping_height, ping_width, x_resolution, y_resolution; ssize_t ping_rowbytes, y; register unsigned char *p; register IndexPacket *indexes; register ssize_t i, x; register PixelPacket *q; size_t length, row_offset; ssize_t j; unsigned char *ping_pixels; #ifdef PNG_UNKNOWN_CHUNKS_SUPPORTED png_byte unused_chunks[]= { 104, 73, 83, 84, (png_byte) '\0', /* hIST */ 105, 84, 88, 116, (png_byte) '\0', /* iTXt */ 112, 67, 65, 76, (png_byte) '\0', /* pCAL */ 115, 67, 65, 76, (png_byte) '\0', /* sCAL */ 115, 80, 76, 84, (png_byte) '\0', /* sPLT */ #if !defined(PNG_tIME_SUPPORTED) 116, 73, 77, 69, (png_byte) '\0', /* tIME */ #endif #ifdef PNG_APNG_SUPPORTED /* libpng was built with APNG patch; */ /* ignore the APNG chunks */ 97, 99, 84, 76, (png_byte) '\0', /* acTL */ 102, 99, 84, 76, (png_byte) '\0', /* fcTL */ 102, 100, 65, 84, (png_byte) '\0', /* fdAT */ #endif }; #endif /* Define these outside of the following "if logging()" block so they will * show in debuggers. 
*/ *im_vers='\0'; (void) ConcatenateMagickString(im_vers, MagickLibVersionText,32); (void) ConcatenateMagickString(im_vers, MagickLibAddendum,32); *libpng_vers='\0'; (void) ConcatenateMagickString(libpng_vers, PNG_LIBPNG_VER_STRING,32); *libpng_runv='\0'; (void) ConcatenateMagickString(libpng_runv, png_get_libpng_ver(NULL),32); *zlib_vers='\0'; (void) ConcatenateMagickString(zlib_vers, ZLIB_VERSION,32); *zlib_runv='\0'; (void) ConcatenateMagickString(zlib_runv, zlib_version,32); logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOnePNGImage()\n" " IM version = %s\n" " Libpng version = %s", im_vers, libpng_vers); if (logging != MagickFalse) { if (LocaleCompare(libpng_vers,libpng_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," running with %s", libpng_runv); } (void) LogMagickEvent(CoderEvent,GetMagickModule()," Zlib version = %s", zlib_vers); if (LocaleCompare(zlib_vers,zlib_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," running with %s", zlib_runv); } } #if (PNG_LIBPNG_VER < 10200) if (image_info->verbose) printf("Your PNG library (libpng-%s) is rather old.\n", PNG_LIBPNG_VER_STRING); #endif #if (PNG_LIBPNG_VER >= 10400) # ifndef PNG_TRANSFORM_GRAY_TO_RGB /* Added at libpng-1.4.0beta67 */ if (image_info->verbose) { printf("Your PNG library (libpng-%s) is an old beta version.\n", PNG_LIBPNG_VER_STRING); printf("Please update it.\n"); } # endif #endif image=mng_info->image; if (logging != MagickFalse) { (void)LogMagickEvent(CoderEvent,GetMagickModule(), " Before reading:\n" " image->matte=%d\n" " image->rendering_intent=%d\n" " image->colorspace=%d\n" " image->gamma=%f", (int) image->matte, (int) image->rendering_intent, (int) image->colorspace, image->gamma); } intent=Magick_RenderingIntent_to_PNG_RenderingIntent(image->rendering_intent); /* Set to an out-of-range color unless tRNS chunk is present */ transparent_color.red=65537; transparent_color.green=65537; transparent_color.blue=65537; transparent_color.opacity=65537; number_colors=0; num_text = 0; num_text_total = 0; num_raw_profiles = 0; ping_found_cHRM = MagickFalse; ping_found_gAMA = MagickFalse; ping_found_iCCP = MagickFalse; ping_found_sRGB = MagickFalse; ping_found_sRGB_cHRM = MagickFalse; ping_preserve_iCCP = MagickFalse; /* Allocate the PNG structures */ #ifdef PNG_USER_MEM_SUPPORTED ping=png_create_read_struct_2(PNG_LIBPNG_VER_STRING, image, MagickPNGErrorHandler,MagickPNGWarningHandler, NULL, (png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free); #else ping=png_create_read_struct(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler); #endif if (ping == (png_struct *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); ping_info=png_create_info_struct(ping); if (ping_info == (png_info *) NULL) { png_destroy_read_struct(&ping,(png_info **) NULL,(png_info **) NULL); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } end_info=png_create_info_struct(ping); if (end_info == (png_info *) NULL) { png_destroy_read_struct(&ping,&ping_info,(png_info **) NULL); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } pixel_info=(MemoryInfo *) NULL; if (setjmp(png_jmpbuf(ping))) { /* PNG image is corrupt. 
*/ png_destroy_read_struct(&ping,&ping_info,&end_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage() with error."); if (image != (Image *) NULL) InheritException(exception,&image->exception); return(GetFirstImageInList(image)); } /* { For navigation to end of SETJMP-protected block. Within this * block, use png_error() instead of Throwing an Exception, to ensure * that libpng is able to clean up, and that the semaphore is unlocked. */ #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE LockSemaphoreInfo(ping_semaphore); #endif #ifdef PNG_BENIGN_ERRORS_SUPPORTED /* Allow benign errors */ png_set_benign_errors(ping, 1); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED /* Reject images with too many rows or columns */ png_set_user_limits(ping, (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(WidthResource)), (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(HeightResource))); #endif /* PNG_SET_USER_LIMITS_SUPPORTED */ /* Prepare PNG for reading. */ mng_info->image_found++; png_set_sig_bytes(ping,8); if (LocaleCompare(image_info->magick,"MNG") == 0) { #if defined(PNG_MNG_FEATURES_SUPPORTED) (void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES); png_set_read_fn(ping,image,png_get_data); #else #if defined(PNG_READ_EMPTY_PLTE_SUPPORTED) png_permit_empty_plte(ping,MagickTrue); png_set_read_fn(ping,image,png_get_data); #else mng_info->image=image; mng_info->bytes_in_read_buffer=0; mng_info->found_empty_plte=MagickFalse; mng_info->have_saved_bkgd_index=MagickFalse; png_set_read_fn(ping,mng_info,mng_get_data); #endif #endif } else png_set_read_fn(ping,image,png_get_data); { const char *value; value=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",value) == MagickFalse) { value=GetImageOption(image_info,"png:preserve-iCCP"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-iCCP"); if (value != NULL) ping_preserve_iCCP=MagickTrue; #if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED) /* Don't let libpng check for ICC/sRGB profile because we're going * to do that anyway. This feature was added at libpng-1.6.12. * If logging, go ahead and check and issue a warning as appropriate. */ if (logging == MagickFalse) png_set_option(ping, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON); #endif } #if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) else { /* Ignore the iCCP chunk */ png_set_keep_unknown_chunks(ping, 1, mng_iCCP, 1); } #endif } #if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) /* Ignore unused chunks and all unknown chunks except for exIf, caNv, and vpAg */ # if PNG_LIBPNG_VER < 10700 /* Avoid libpng16 warning */ png_set_keep_unknown_chunks(ping, 2, NULL, 0); # else png_set_keep_unknown_chunks(ping, 1, NULL, 0); # endif png_set_keep_unknown_chunks(ping, 2, mng_exIf, 1); png_set_keep_unknown_chunks(ping, 2, mng_caNv, 1); png_set_keep_unknown_chunks(ping, 2, mng_vpAg, 1); png_set_keep_unknown_chunks(ping, 1, unused_chunks, (int)sizeof(unused_chunks)/5); /* Callback for other unknown chunks */ png_set_read_user_chunk_fn(ping, image, read_user_chunk_callback); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED #if (PNG_LIBPNG_VER >= 10400) /* Limit the size of the chunk storage cache used for sPLT, text, * and unknown chunks. 
*/ png_set_chunk_cache_max(ping, 32767); #endif #endif #ifdef PNG_READ_CHECK_FOR_INVALID_INDEX_SUPPORTED /* Disable new libpng-1.5.10 feature */ png_set_check_for_invalid_index (ping, 0); #endif #if (PNG_LIBPNG_VER < 10400) # if defined(PNG_USE_PNGGCCRD) && defined(PNG_ASSEMBLER_CODE_SUPPORTED) && \ (PNG_LIBPNG_VER >= 10200) && (PNG_LIBPNG_VER < 10220) && defined(__i386__) /* Disable thread-unsafe features of pnggccrd */ if (png_access_version_number() >= 10200) { png_uint_32 mmx_disable_mask=0; png_uint_32 asm_flags; mmx_disable_mask |= ( PNG_ASM_FLAG_MMX_READ_COMBINE_ROW \ | PNG_ASM_FLAG_MMX_READ_FILTER_SUB \ | PNG_ASM_FLAG_MMX_READ_FILTER_AVG \ | PNG_ASM_FLAG_MMX_READ_FILTER_PAETH ); asm_flags=png_get_asm_flags(ping); png_set_asm_flags(ping, asm_flags & ~mmx_disable_mask); } # endif #endif png_read_info(ping,ping_info); /* Read and check IHDR chunk data */ png_get_IHDR(ping,ping_info,&ping_width,&ping_height, &ping_bit_depth,&ping_color_type, &ping_interlace_method,&ping_compression_method, &ping_filter_method); ping_file_depth = ping_bit_depth; /* Swap bytes if requested */ if (ping_file_depth == 16) { const char *value; value=GetImageOption(image_info,"png:swap-bytes"); if (value == NULL) value=GetImageArtifact(image,"png:swap-bytes"); if (value != NULL) png_set_swap(ping); } /* Save bit-depth and color-type in case we later want to write a PNG00 */ { char msg[MaxTextExtent]; (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_color_type); (void) SetImageProperty(image,"png:IHDR.color-type-orig",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_bit_depth); (void) SetImageProperty(image,"png:IHDR.bit-depth-orig",msg); } (void) png_get_tRNS(ping, ping_info, &ping_trans_alpha, &ping_num_trans, &ping_trans_color); (void) png_get_bKGD(ping, ping_info, &ping_background); if (ping_bit_depth < 8) { png_set_packing(ping); ping_bit_depth = 8; } image->depth=ping_bit_depth; image->depth=GetImageQuantumDepth(image,MagickFalse); image->interlace=ping_interlace_method != 0 ? 
PNGInterlace : NoInterlace; if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)) { image->rendering_intent=UndefinedIntent; intent=Magick_RenderingIntent_to_PNG_RenderingIntent(UndefinedIntent); (void) ResetMagickMemory(&image->chromaticity,0, sizeof(image->chromaticity)); } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG width: %.20g, height: %.20g\n" " PNG color_type: %d, bit_depth: %d\n" " PNG compression_method: %d\n" " PNG interlace_method: %d, filter_method: %d", (double) ping_width, (double) ping_height, ping_color_type, ping_bit_depth, ping_compression_method, ping_interlace_method,ping_filter_method); } if (png_get_valid(ping,ping_info, PNG_INFO_iCCP)) { ping_found_iCCP=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG iCCP chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_gAMA)) { ping_found_gAMA=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG gAMA chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { ping_found_cHRM=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG cHRM chunk."); } if (ping_found_iCCP != MagickTrue && png_get_valid(ping,ping_info, PNG_INFO_sRGB)) { ping_found_sRGB=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG sRGB chunk."); } #ifdef PNG_READ_iCCP_SUPPORTED if (ping_found_iCCP !=MagickTrue && ping_found_sRGB != MagickTrue && png_get_valid(ping,ping_info, PNG_INFO_iCCP)) { ping_found_iCCP=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found PNG iCCP chunk."); } if (png_get_valid(ping,ping_info,PNG_INFO_iCCP)) { int compression; #if (PNG_LIBPNG_VER < 10500) png_charp info; #else png_bytep info; #endif png_charp name; png_uint_32 profile_length; (void) png_get_iCCP(ping,ping_info,&name,(int *) &compression,&info, &profile_length); if (profile_length != 0) { StringInfo *profile; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG iCCP chunk."); profile=BlobToStringInfo(info,profile_length); if (profile == (StringInfo *) NULL) { png_warning(ping, "ICC profile is NULL"); profile=DestroyStringInfo(profile); } else { if (ping_preserve_iCCP == MagickFalse) { int icheck, got_crc=0; png_uint_32 length, profile_crc=0; unsigned char *data; length=(png_uint_32) GetStringInfoLength(profile); for (icheck=0; sRGB_info[icheck].len > 0; icheck++) { if (length == sRGB_info[icheck].len) { if (got_crc == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile (potentially sRGB)", (unsigned long) length); data=GetStringInfoDatum(profile); profile_crc=crc32(0,data,length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " with crc=%8x",(unsigned int) profile_crc); got_crc++; } if (profile_crc == sRGB_info[icheck].crc) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " It is sRGB with rendering intent = %s", Magick_RenderingIntentString_from_PNG_RenderingIntent( sRGB_info[icheck].intent)); if (image->rendering_intent==UndefinedIntent) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent( sRGB_info[icheck].intent); } break; } } } if (sRGB_info[icheck].len == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile not recognized as sRGB", (unsigned long) length); (void) 
SetImageProfile(image,"icc",profile); } } else /* Preserve-iCCP */ { (void) SetImageProfile(image,"icc",profile); } profile=DestroyStringInfo(profile); } } } #endif #if defined(PNG_READ_sRGB_SUPPORTED) { if (ping_found_iCCP==MagickFalse && png_get_valid(ping,ping_info, PNG_INFO_sRGB)) { if (png_get_sRGB(ping,ping_info,&intent)) { if (image->rendering_intent == UndefinedIntent) image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent (intent); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG sRGB chunk: rendering_intent: %d",intent); } } else if (mng_info->have_global_srgb) { if (image->rendering_intent == UndefinedIntent) image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent (mng_info->global_srgb_intent); } } #endif { if (!png_get_gAMA(ping,ping_info,&file_gamma)) if (mng_info->have_global_gama) png_set_gAMA(ping,ping_info,mng_info->global_gamma); if (png_get_gAMA(ping,ping_info,&file_gamma)) { image->gamma=(float) file_gamma; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG gAMA chunk: gamma: %f",file_gamma); } } if (!png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { if (mng_info->have_global_chrm != MagickFalse) { (void) png_set_cHRM(ping,ping_info, mng_info->global_chrm.white_point.x, mng_info->global_chrm.white_point.y, mng_info->global_chrm.red_primary.x, mng_info->global_chrm.red_primary.y, mng_info->global_chrm.green_primary.x, mng_info->global_chrm.green_primary.y, mng_info->global_chrm.blue_primary.x, mng_info->global_chrm.blue_primary.y); } } if (png_get_valid(ping,ping_info,PNG_INFO_cHRM)) { (void) png_get_cHRM(ping,ping_info, &image->chromaticity.white_point.x, &image->chromaticity.white_point.y, &image->chromaticity.red_primary.x, &image->chromaticity.red_primary.y, &image->chromaticity.green_primary.x, &image->chromaticity.green_primary.y, &image->chromaticity.blue_primary.x, &image->chromaticity.blue_primary.y); ping_found_cHRM=MagickTrue; if (image->chromaticity.red_primary.x>0.6399f && image->chromaticity.red_primary.x<0.6401f && image->chromaticity.red_primary.y>0.3299f && image->chromaticity.red_primary.y<0.3301f && image->chromaticity.green_primary.x>0.2999f && image->chromaticity.green_primary.x<0.3001f && image->chromaticity.green_primary.y>0.5999f && image->chromaticity.green_primary.y<0.6001f && image->chromaticity.blue_primary.x>0.1499f && image->chromaticity.blue_primary.x<0.1501f && image->chromaticity.blue_primary.y>0.0599f && image->chromaticity.blue_primary.y<0.0601f && image->chromaticity.white_point.x>0.3126f && image->chromaticity.white_point.x<0.3128f && image->chromaticity.white_point.y>0.3289f && image->chromaticity.white_point.y<0.3291f) ping_found_sRGB_cHRM=MagickTrue; } if (image->rendering_intent != UndefinedIntent) { if (ping_found_sRGB != MagickTrue && (ping_found_gAMA != MagickTrue || (image->gamma > .45 && image->gamma < .46)) && (ping_found_cHRM != MagickTrue || ping_found_sRGB_cHRM != MagickFalse) && ping_found_iCCP != MagickTrue) { png_set_sRGB(ping,ping_info, Magick_RenderingIntent_to_PNG_RenderingIntent (image->rendering_intent)); file_gamma=1.000f/2.200f; ping_found_sRGB=MagickTrue; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting sRGB as if in input"); } } #if defined(PNG_oFFs_SUPPORTED) if (png_get_valid(ping,ping_info,PNG_INFO_oFFs)) { image->page.x=(ssize_t) png_get_x_offset_pixels(ping, ping_info); image->page.y=(ssize_t) png_get_y_offset_pixels(ping, ping_info); if (logging != MagickFalse) if 
(image->page.x || image->page.y) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG oFFs chunk: x: %.20g, y: %.20g.",(double) image->page.x,(double) image->page.y); } #endif #if defined(PNG_pHYs_SUPPORTED) if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { if (mng_info->have_global_phys) { png_set_pHYs(ping,ping_info, mng_info->global_x_pixels_per_unit, mng_info->global_y_pixels_per_unit, mng_info->global_phys_unit_type); } } x_resolution=0; y_resolution=0; unit_type=0; if (png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { /* Set image resolution. */ (void) png_get_pHYs(ping,ping_info,&x_resolution,&y_resolution, &unit_type); image->x_resolution=(double) x_resolution; image->y_resolution=(double) y_resolution; if (unit_type == PNG_RESOLUTION_METER) { image->units=PixelsPerCentimeterResolution; image->x_resolution=(double) x_resolution/100.0; image->y_resolution=(double) y_resolution/100.0; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.", (double) x_resolution,(double) y_resolution,unit_type); } #endif if (png_get_valid(ping,ping_info,PNG_INFO_PLTE)) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); if ((number_colors == 0) && ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE)) { if (mng_info->global_plte_length) { png_set_PLTE(ping,ping_info,mng_info->global_plte, (int) mng_info->global_plte_length); if (!png_get_valid(ping,ping_info,PNG_INFO_tRNS)) if (mng_info->global_trns_length) { if (mng_info->global_trns_length > mng_info->global_plte_length) { png_warning(ping, "global tRNS has more entries than global PLTE"); } else { png_set_tRNS(ping,ping_info,mng_info->global_trns, (int) mng_info->global_trns_length,NULL); } } #ifdef PNG_READ_bKGD_SUPPORTED if ( #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED mng_info->have_saved_bkgd_index || #endif png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { png_color_16 background; #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED if (mng_info->have_saved_bkgd_index) background.index=mng_info->saved_bkgd_index; #endif if (png_get_valid(ping, ping_info, PNG_INFO_bKGD)) background.index=ping_background->index; background.red=(png_uint_16) mng_info->global_plte[background.index].red; background.green=(png_uint_16) mng_info->global_plte[background.index].green; background.blue=(png_uint_16) mng_info->global_plte[background.index].blue; background.gray=(png_uint_16) mng_info->global_plte[background.index].green; png_set_bKGD(ping,ping_info,&background); } #endif } else png_error(ping,"No global PLTE in file"); } } #ifdef PNG_READ_bKGD_SUPPORTED if (mng_info->have_global_bkgd && (!png_get_valid(ping,ping_info,PNG_INFO_bKGD))) image->background_color=mng_info->mng_global_bkgd; if (png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { unsigned int bkgd_scale; /* Set image background color. * Scale background components to 16-bit, then scale * to quantum depth */ bkgd_scale = 1; if (ping_file_depth == 1) bkgd_scale = 255; else if (ping_file_depth == 2) bkgd_scale = 85; else if (ping_file_depth == 4) bkgd_scale = 17; if (ping_file_depth <= 8) bkgd_scale *= 257; ping_background->red *= bkgd_scale; ping_background->green *= bkgd_scale; ping_background->blue *= bkgd_scale; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG bKGD chunk, raw ping_background=(%d,%d,%d).\n" " bkgd_scale=%d. 
ping_background=(%d,%d,%d).", ping_background->red,ping_background->green, ping_background->blue, bkgd_scale,ping_background->red, ping_background->green,ping_background->blue); } image->background_color.red= ScaleShortToQuantum(ping_background->red); image->background_color.green= ScaleShortToQuantum(ping_background->green); image->background_color.blue= ScaleShortToQuantum(ping_background->blue); image->background_color.opacity=OpaqueOpacity; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->background_color=(%.20g,%.20g,%.20g).", (double) image->background_color.red, (double) image->background_color.green, (double) image->background_color.blue); } #endif /* PNG_READ_bKGD_SUPPORTED */ if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) { /* Image has a tRNS chunk. */ int max_sample; size_t one=1; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG tRNS chunk."); max_sample = (int) ((one << ping_file_depth) - 1); if ((ping_color_type == PNG_COLOR_TYPE_GRAY && (int)ping_trans_color->gray > max_sample) || (ping_color_type == PNG_COLOR_TYPE_RGB && ((int)ping_trans_color->red > max_sample || (int)ping_trans_color->green > max_sample || (int)ping_trans_color->blue > max_sample))) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Ignoring PNG tRNS chunk with out-of-range sample."); png_free_data(ping, ping_info, PNG_FREE_TRNS, 0); png_set_invalid(ping,ping_info,PNG_INFO_tRNS); image->matte=MagickFalse; } else { int scale_to_short; scale_to_short = 65535L/((1UL << ping_file_depth)-1); /* Scale transparent_color to short */ transparent_color.red= scale_to_short*ping_trans_color->red; transparent_color.green= scale_to_short*ping_trans_color->green; transparent_color.blue= scale_to_short*ping_trans_color->blue; transparent_color.opacity= scale_to_short*ping_trans_color->gray; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Raw tRNS graylevel = %d, scaled graylevel = %d.", ping_trans_color->gray,transparent_color.opacity); } transparent_color.red=transparent_color.opacity; transparent_color.green=transparent_color.opacity; transparent_color.blue=transparent_color.opacity; } } } #if defined(PNG_READ_sBIT_SUPPORTED) if (mng_info->have_global_sbit) { if (!png_get_valid(ping,ping_info,PNG_INFO_sBIT)) png_set_sBIT(ping,ping_info,&mng_info->global_sbit); } #endif num_passes=png_set_interlace_handling(ping); png_read_update_info(ping,ping_info); ping_rowbytes=png_get_rowbytes(ping,ping_info); /* Initialize image structure. 
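* Record the image box used for MNG composition, copy the IHDR
* geometry into image->columns/rows, and choose PseudoClass for
* palette files and for grayscale files below 16 bits. The colormap
* comes either from the PLTE chunk or, for gray files, from an evenly
* spaced ramp: scale = 65535/((1UL << depth)-1), so a 4-bit file uses
* 65535/15 = 4369 and colormap entry i holds i*4369 in each channel.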
*/ mng_info->image_box.left=0; mng_info->image_box.right=(ssize_t) ping_width; mng_info->image_box.top=0; mng_info->image_box.bottom=(ssize_t) ping_height; if (mng_info->mng_type == 0) { mng_info->mng_width=ping_width; mng_info->mng_height=ping_height; mng_info->frame=mng_info->image_box; mng_info->clip=mng_info->image_box; } else { image->page.y=mng_info->y_off[mng_info->object_id]; } image->compression=ZipCompression; image->columns=ping_width; image->rows=ping_height; if (((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) || ((int) ping_bit_depth < 16 && (int) ping_color_type == PNG_COLOR_TYPE_GRAY)) { size_t one; image->storage_class=PseudoClass; one=1; image->colors=one << ping_file_depth; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (image->colors > 256) image->colors=256; #else if (image->colors > 65536L) image->colors=65536L; #endif if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); image->colors=(size_t) number_colors; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG PLTE chunk: number_colors: %d.",number_colors); } } if (image->storage_class == PseudoClass) { /* Initialize image colormap. */ if (AcquireImageColormap(image,image->colors) == MagickFalse) png_error(ping,"Memory allocation failed"); if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { png_colorp palette; (void) png_get_PLTE(ping,ping_info,&palette,&number_colors); for (i=0; i < (ssize_t) number_colors; i++) { image->colormap[i].red=ScaleCharToQuantum(palette[i].red); image->colormap[i].green=ScaleCharToQuantum(palette[i].green); image->colormap[i].blue=ScaleCharToQuantum(palette[i].blue); } for ( ; i < (ssize_t) image->colors; i++) { image->colormap[i].red=0; image->colormap[i].green=0; image->colormap[i].blue=0; } } else { Quantum scale; scale = 65535/((1UL << ping_file_depth)-1); #if (MAGICKCORE_QUANTUM_DEPTH > 16) scale = ScaleShortToQuantum(scale); #endif for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=(Quantum) (i*scale); image->colormap[i].green=(Quantum) (i*scale); image->colormap[i].blue=(Quantum) (i*scale); } } } /* Set some properties for reporting by "identify" */ { char msg[MaxTextExtent]; /* encode ping_width, ping_height, ping_file_depth, ping_color_type, ping_interlace_method in value */ (void) FormatLocaleString(msg,MaxTextExtent, "%d, %d",(int) ping_width, (int) ping_height); (void) SetImageProperty(image,"png:IHDR.width,height",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d",(int) ping_file_depth); (void) SetImageProperty(image,"png:IHDR.bit_depth",msg); (void) FormatLocaleString(msg,MaxTextExtent,"%d (%s)", (int) ping_color_type, Magick_ColorType_from_PNG_ColorType((int)ping_color_type)); (void) SetImageProperty(image,"png:IHDR.color_type",msg); if (ping_interlace_method == 0) { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Not interlaced)", (int) ping_interlace_method); } else if (ping_interlace_method == 1) { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Adam7 method)", (int) ping_interlace_method); } else { (void) FormatLocaleString(msg,MaxTextExtent,"%d (Unknown method)", (int) ping_interlace_method); } (void) SetImageProperty(image,"png:IHDR.interlace_method",msg); if (number_colors != 0) { (void) FormatLocaleString(msg,MaxTextExtent,"%d", (int) number_colors); (void) SetImageProperty(image,"png:PLTE.number_colors",msg); } } #if defined(PNG_tIME_SUPPORTED) read_tIME_chunk(image,ping,ping_info); #endif /* Read image scanlines. 
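* A non-interlaced file needs only a single row buffer of
* ping_rowbytes; an Adam7 file (num_passes > 1) keeps the whole
* framebuffer because every pass revisits a subset of each row. In
* ping mode, or when the scene is outside the requested range, the
* IDAT data is skipped entirely and only the matte and storage-class
* bookkeeping is recorded.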
*/ if (image->delay != 0) mng_info->scenes_found++; if ((mng_info->mng_type == 0 && (image->ping != MagickFalse)) || ( (image_info->number_scenes != 0) && (mng_info->scenes_found > (ssize_t) (image_info->first_scene+image_info->number_scenes)))) { /* This happens later in non-ping decodes */ if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) image->storage_class=DirectClass; image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? MagickTrue : MagickFalse; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping PNG image data for scene %.20g",(double) mng_info->scenes_found-1); png_destroy_read_struct(&ping,&ping_info,&end_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage()."); return(image); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG IDAT chunk(s)"); if (num_passes > 1) pixel_info=AcquireVirtualMemory(image->rows,ping_rowbytes* sizeof(*ping_pixels)); else pixel_info=AcquireVirtualMemory(ping_rowbytes,sizeof(*ping_pixels)); if (pixel_info == (MemoryInfo *) NULL) png_error(ping,"Memory allocation failed"); ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Converting PNG pixels to pixel packets"); /* Convert PNG pixels to pixel packets. */ { MagickBooleanType found_transparent_pixel; found_transparent_pixel=MagickFalse; if (image->storage_class == DirectClass) { QuantumInfo *quantum_info; quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) png_error(ping,"Failed to allocate quantum_info"); (void) SetQuantumEndian(image,quantum_info,MSBEndian); for (pass=0; pass < num_passes; pass++) { /* Convert image to DirectClass pixel packets. */ image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? 
MagickTrue : MagickFalse; for (y=0; y < (ssize_t) image->rows; y++) { if (num_passes > 1) row_offset=ping_rowbytes*y; else row_offset=0; png_read_row(ping,ping_pixels+row_offset,NULL); if (pass < num_passes-1) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; else { if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayAlphaQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RGBAQuantum,ping_pixels+row_offset,exception); else if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, IndexQuantum,ping_pixels+row_offset,exception); else /* ping_color_type == PNG_COLOR_TYPE_RGB */ (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RGBQuantum,ping_pixels+row_offset,exception); } if (found_transparent_pixel == MagickFalse) { /* Is there a transparent pixel in the row? */ if (y== 0 && logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Looking for cheap transparent pixel"); for (x=(ssize_t) image->columns-1; x >= 0; x--) { if ((ping_color_type == PNG_COLOR_TYPE_RGBA || ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) && (GetPixelOpacity(q) != OpaqueOpacity)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ...got one."); found_transparent_pixel = MagickTrue; break; } if ((ping_color_type == PNG_COLOR_TYPE_RGB || ping_color_type == PNG_COLOR_TYPE_GRAY) && (ScaleQuantumToShort(GetPixelRed(q)) == transparent_color.red && ScaleQuantumToShort(GetPixelGreen(q)) == transparent_color.green && ScaleQuantumToShort(GetPixelBlue(q)) == transparent_color.blue)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ...got one."); found_transparent_pixel = MagickTrue; break; } q++; } } if (num_passes == 1) { status=SetImageProgress(image,LoadImageTag, (MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (num_passes != 1) { status=SetImageProgress(image,LoadImageTag,pass,num_passes); if (status == MagickFalse) break; } } quantum_info=DestroyQuantumInfo(quantum_info); } else /* image->storage_class != DirectClass */ for (pass=0; pass < num_passes; pass++) { Quantum *quantum_scanline; register Quantum *r; /* Convert grayscale image to PseudoClass pixel packets. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Converting grayscale pixels to pixel packets"); image->matte=ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA ? MagickTrue : MagickFalse; quantum_scanline=(Quantum *) AcquireQuantumMemory(image->columns, (image->matte ? 
2 : 1)*sizeof(*quantum_scanline)); if (quantum_scanline == (Quantum *) NULL) png_error(ping,"Memory allocation failed"); for (y=0; y < (ssize_t) image->rows; y++) { Quantum alpha; if (num_passes > 1) row_offset=ping_rowbytes*y; else row_offset=0; png_read_row(ping,ping_pixels+row_offset,NULL); if (pass < num_passes-1) continue; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); p=ping_pixels+row_offset; r=quantum_scanline; switch (ping_bit_depth) { case 8: { if (ping_color_type == 4) for (x=(ssize_t) image->columns-1; x >= 0; x--) { *r++=*p++; /* In image.h, OpaqueOpacity is 0 * TransparentOpacity is QuantumRange * In a PNG datastream, Opaque is QuantumRange * and Transparent is 0. */ alpha=ScaleCharToQuantum((unsigned char)*p++); SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; q++; } else for (x=(ssize_t) image->columns-1; x >= 0; x--) *r++=*p++; break; } case 16: { for (x=(ssize_t) image->columns-1; x >= 0; x--) { #if (MAGICKCORE_QUANTUM_DEPTH >= 16) size_t quantum; if (image->colors > 256) quantum=((*p++) << 8); else quantum=0; quantum|=(*p++); *r=ScaleShortToQuantum(quantum); r++; if (ping_color_type == 4) { if (image->colors > 256) quantum=((*p++) << 8); else quantum=0; quantum|=(*p++); alpha=ScaleShortToQuantum(quantum); SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; q++; } #else /* MAGICKCORE_QUANTUM_DEPTH == 8 */ *r++=(*p++); p++; /* strip low byte */ if (ping_color_type == 4) { alpha=*p++; SetPixelAlpha(q,alpha); if (alpha != QuantumRange-OpaqueOpacity) found_transparent_pixel = MagickTrue; p++; q++; } #endif } break; } default: break; } /* Transfer image scanline. */ r=quantum_scanline; for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,*r++); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (num_passes == 1) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } if (num_passes != 1) { status=SetImageProgress(image,LoadImageTag,pass,num_passes); if (status == MagickFalse) break; } quantum_scanline=(Quantum *) RelinquishMagickMemory(quantum_scanline); } image->matte=found_transparent_pixel; if (logging != MagickFalse) { if (found_transparent_pixel != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found transparent pixel"); else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No transparent pixel was found"); ping_color_type&=0x03; } } } if (image->storage_class == PseudoClass) { MagickBooleanType matte; matte=image->matte; image->matte=MagickFalse; (void) SyncImage(image); image->matte=matte; } png_read_end(ping,end_info); if (image_info->number_scenes != 0 && mng_info->scenes_found-1 < (ssize_t) image_info->first_scene && image->delay != 0) { png_destroy_read_struct(&ping,&ping_info,&end_info); pixel_info=RelinquishVirtualMemory(pixel_info); image->colors=2; (void) SetImageBackgroundColor(image); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage() early."); return(image); } if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) { ClassType storage_class; /* Image has a transparent background. 
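* For PseudoClass images the tRNS data maps directly onto colormap
* opacities: palette entries receive 255 minus the stored alpha, and
* gray files match each colormap level against the scaled tRNS gray
* value. DirectClass images are swept pixel by pixel, comparing the
* 16-bit-scaled samples against transparent_color.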
*/ storage_class=image->storage_class; image->matte=MagickTrue; /* Balfour fix from imagemagick discourse server, 5 Feb 2010 */ if (storage_class == PseudoClass) { if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { for (x=0; x < ping_num_trans; x++) { image->colormap[x].opacity = ScaleCharToQuantum((unsigned char)(255-ping_trans_alpha[x])); } } else if (ping_color_type == PNG_COLOR_TYPE_GRAY) { for (x=0; x < (int) image->colors; x++) { if (ScaleQuantumToShort(image->colormap[x].red) == transparent_color.opacity) { image->colormap[x].opacity = (Quantum) TransparentOpacity; } } } (void) SyncImage(image); } #if 1 /* Should have already been done above, but glennrp problem P10 * needs this. */ else { for (y=0; y < (ssize_t) image->rows; y++) { image->storage_class=storage_class; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); /* Caution: on a Q8 build, this does not distinguish between * 16-bit colors that differ only in the low byte */ for (x=(ssize_t) image->columns-1; x >= 0; x--) { if (ScaleQuantumToShort(GetPixelRed(q)) == transparent_color.red && ScaleQuantumToShort(GetPixelGreen(q)) == transparent_color.green && ScaleQuantumToShort(GetPixelBlue(q)) == transparent_color.blue) { SetPixelOpacity(q,TransparentOpacity); } #if 0 /* I have not found a case where this is needed. */ else { SetPixelOpacity(q)=(Quantum) OpaqueOpacity; } #endif q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #endif image->storage_class=DirectClass; } if ((ping_color_type == PNG_COLOR_TYPE_GRAY) || (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA)) { double image_gamma = image->gamma; (void)LogMagickEvent(CoderEvent,GetMagickModule(), " image->gamma=%f",(float) image_gamma); if (image_gamma > 0.75) { /* Set image->rendering_intent to Undefined, * image->colorspace to GRAY, and reset image->chromaticity. */ image->intensity = Rec709LuminancePixelIntensityMethod; SetImageColorspace(image,GRAYColorspace); } else { RenderingIntent save_rendering_intent = image->rendering_intent; ChromaticityInfo save_chromaticity = image->chromaticity; SetImageColorspace(image,GRAYColorspace); image->rendering_intent = save_rendering_intent; image->chromaticity = save_chromaticity; } image->gamma = image_gamma; } (void)LogMagickEvent(CoderEvent,GetMagickModule(), " image->colorspace=%d",(int) image->colorspace); for (j = 0; j < 2; j++) { if (j == 0) status = png_get_text(ping,ping_info,&text,&num_text) != 0 ? MagickTrue : MagickFalse; else status = png_get_text(ping,end_info,&text,&num_text) != 0 ? 
MagickTrue : MagickFalse; if (status != MagickFalse) for (i=0; i < (ssize_t) num_text; i++) { /* Check for a profile */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading PNG text chunk"); if (strlen(text[i].key) > 16 && memcmp(text[i].key, "Raw profile type ",17) == 0) { const char *value; value=GetImageOption(image_info,"profile:skip"); if (IsOptionMember(text[i].key+17,value) == MagickFalse) { (void) Magick_png_read_raw_profile(ping,image,image_info,text, (int) i); num_raw_profiles++; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Read raw profile %s",text[i].key+17); } else { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping raw profile %s",text[i].key+17); } } else { char *value; length=text[i].text_length; value=(char *) AcquireQuantumMemory(length+MaxTextExtent, sizeof(*value)); if (value == (char *) NULL) png_error(ping,"Memory allocation failed"); *value='\0'; (void) ConcatenateMagickString(value,text[i].text,length+2); /* Don't save "density" or "units" property if we have a pHYs * chunk */ if (!png_get_valid(ping,ping_info,PNG_INFO_pHYs) || (LocaleCompare(text[i].key,"density") != 0 && LocaleCompare(text[i].key,"units") != 0)) (void) SetImageProperty(image,text[i].key,value); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " length: %lu\n" " Keyword: %s", (unsigned long) length, text[i].key); } value=DestroyString(value); } } num_text_total += num_text; } #ifdef MNG_OBJECT_BUFFERS /* Store the object if necessary. */ if (object_id && !mng_info->frozen[object_id]) { if (mng_info->ob[object_id] == (MngBuffer *) NULL) { /* create a new object buffer. */ mng_info->ob[object_id]=(MngBuffer *) AcquireMagickMemory(sizeof(MngBuffer)); if (mng_info->ob[object_id] != (MngBuffer *) NULL) { mng_info->ob[object_id]->image=(Image *) NULL; mng_info->ob[object_id]->reference_count=1; } } if ((mng_info->ob[object_id] == (MngBuffer *) NULL) || mng_info->ob[object_id]->frozen) { if (mng_info->ob[object_id] == (MngBuffer *) NULL) png_error(ping,"Memory allocation failed"); if (mng_info->ob[object_id]->frozen) png_error(ping,"Cannot overwrite frozen MNG object buffer"); } else { if (mng_info->ob[object_id]->image != (Image *) NULL) mng_info->ob[object_id]->image=DestroyImage (mng_info->ob[object_id]->image); mng_info->ob[object_id]->image=CloneImage(image,0,0,MagickTrue, &image->exception); if (mng_info->ob[object_id]->image != (Image *) NULL) mng_info->ob[object_id]->image->file=(FILE *) NULL; else png_error(ping, "Cloning image for object buffer failed"); if (ping_width > 250000L || ping_height > 250000L) png_error(ping,"PNG Image dimensions are too large."); mng_info->ob[object_id]->width=ping_width; mng_info->ob[object_id]->height=ping_height; mng_info->ob[object_id]->color_type=ping_color_type; mng_info->ob[object_id]->sample_depth=ping_bit_depth; mng_info->ob[object_id]->interlace_method=ping_interlace_method; mng_info->ob[object_id]->compression_method= ping_compression_method; mng_info->ob[object_id]->filter_method=ping_filter_method; if (png_get_valid(ping,ping_info,PNG_INFO_PLTE)) { png_colorp plte; /* Copy the PLTE to the object buffer. 
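* The entries are copied rather than referenced because the palette
* returned by png_get_PLTE() is owned by libpng and is freed when the
* read structs are destroyed below.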
*/ png_get_PLTE(ping,ping_info,&plte,&number_colors); mng_info->ob[object_id]->plte_length=number_colors; for (i=0; i < number_colors; i++) { mng_info->ob[object_id]->plte[i]=plte[i]; } } else mng_info->ob[object_id]->plte_length=0; } } #endif /* Set image->matte to MagickTrue if the input colortype supports * alpha or if a valid tRNS chunk is present, no matter whether there * is actual transparency present. */ image->matte=(((int) ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) || ((int) ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) || (png_get_valid(ping,ping_info,PNG_INFO_tRNS))) ? MagickTrue : MagickFalse; #if 0 /* I'm not sure what's wrong here but it does not work. */ if (image->matte != MagickFalse) { if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) (void) SetImageType(image,GrayscaleMatteType); else if (ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) SetImageType(image,PaletteMatteType); else (void) SetImageType(image,TrueColorMatteType); } else { if (ping_color_type == PNG_COLOR_TYPE_GRAY) (void) SetImageType(image,GrayscaleType); else if (ping_color_type == PNG_COLOR_TYPE_PALETTE) (void) SetImageType(image,PaletteType); else (void) SetImageType(image,TrueColorType); } #endif /* Set more properties for identify to retrieve */ { char msg[MaxTextExtent]; if (num_text_total != 0) { /* libpng doesn't tell us whether they were tEXt, zTXt, or iTXt */ (void) FormatLocaleString(msg,MaxTextExtent, "%d tEXt/zTXt/iTXt chunks were found", num_text_total); (void) SetImageProperty(image,"png:text",msg); } if (num_raw_profiles != 0) { (void) FormatLocaleString(msg,MaxTextExtent, "%d were found", num_raw_profiles); (void) SetImageProperty(image,"png:text-encoded profiles",msg); } /* cHRM chunk: */ if (ping_found_cHRM != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found (see Chromaticity, above)"); (void) SetImageProperty(image,"png:cHRM",msg); } /* bKGD chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_bKGD)) { (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found (see Background color, above)"); (void) SetImageProperty(image,"png:bKGD",msg); } (void) FormatLocaleString(msg,MaxTextExtent,"%s", "chunk was found"); /* iCCP chunk: */ if (ping_found_iCCP != MagickFalse) (void) SetImageProperty(image,"png:iCCP",msg); if (png_get_valid(ping,ping_info,PNG_INFO_tRNS)) (void) SetImageProperty(image,"png:tRNS",msg); #if defined(PNG_sRGB_SUPPORTED) /* sRGB chunk: */ if (ping_found_sRGB != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent, "intent=%d (%s)", (int) intent, Magick_RenderingIntentString_from_PNG_RenderingIntent(intent)); (void) SetImageProperty(image,"png:sRGB",msg); } #endif /* gAMA chunk: */ if (ping_found_gAMA != MagickFalse) { (void) FormatLocaleString(msg,MaxTextExtent, "gamma=%.8g (See Gamma, above)", file_gamma); (void) SetImageProperty(image,"png:gAMA",msg); } #if defined(PNG_pHYs_SUPPORTED) /* pHYs chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_pHYs)) { (void) FormatLocaleString(msg,MaxTextExtent, "x_res=%.10g, y_res=%.10g, units=%d", (double) x_resolution,(double) y_resolution, unit_type); (void) SetImageProperty(image,"png:pHYs",msg); } #endif #if defined(PNG_oFFs_SUPPORTED) /* oFFs chunk: */ if (png_get_valid(ping,ping_info,PNG_INFO_oFFs)) { (void) FormatLocaleString(msg,MaxTextExtent,"x_off=%.20g, y_off=%.20g", (double) image->page.x,(double) image->page.y); (void) SetImageProperty(image,"png:oFFs",msg); } #endif #if defined(PNG_tIME_SUPPORTED) read_tIME_chunk(image,ping,end_info); #endif /* caNv chunk: */ if 
((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows) || (image->page.x != 0 || image->page.y != 0)) { (void) FormatLocaleString(msg,MaxTextExtent, "width=%.20g, height=%.20g, x_offset=%.20g, y_offset=%.20g", (double) image->page.width,(double) image->page.height, (double) image->page.x,(double) image->page.y); (void) SetImageProperty(image,"png:caNv",msg); } /* vpAg chunk: */ if ((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows)) { (void) FormatLocaleString(msg,MaxTextExtent, "width=%.20g, height=%.20g", (double) image->page.width,(double) image->page.height); (void) SetImageProperty(image,"png:vpAg",msg); } } /* Relinquish resources. */ png_destroy_read_struct(&ping,&ping_info,&end_info); pixel_info=RelinquishVirtualMemory(pixel_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOnePNGImage()"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif /* } for navigation to beginning of SETJMP-protected block, revert to * Throwing an Exception when an error occurs. */ return(image); /* end of reading one PNG image */ } static Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType logging, status; MngInfo *mng_info; char magic_number[MaxTextExtent]; ssize_t count; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadPNGImage()"); image=AcquireImage(image_info); mng_info=(MngInfo *) NULL; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) ThrowReaderException(FileOpenError,"UnableToOpenFile"); /* Verify PNG signature. */ count=ReadBlob(image,8,(unsigned char *) magic_number); if (count < 8 || memcmp(magic_number,"\211PNG\r\n\032\n",8) != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. 
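* Zero the whole structure so every counter, warning flag, and offset
* starts in a known state, then attach the freshly opened image before
* handing off to ReadOnePNGImage().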
*/ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOnePNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadPNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if ((image->columns == 0) || (image->rows == 0)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadPNGImage() with error."); ThrowReaderException(CorruptImageError,"CorruptImage"); } if ((IssRGBColorspace(image->colorspace) != MagickFalse) && ((image->gamma < .45) || (image->gamma > .46)) && !(image->chromaticity.red_primary.x>0.6399f && image->chromaticity.red_primary.x<0.6401f && image->chromaticity.red_primary.y>0.3299f && image->chromaticity.red_primary.y<0.3301f && image->chromaticity.green_primary.x>0.2999f && image->chromaticity.green_primary.x<0.3001f && image->chromaticity.green_primary.y>0.5999f && image->chromaticity.green_primary.y<0.6001f && image->chromaticity.blue_primary.x>0.1499f && image->chromaticity.blue_primary.x<0.1501f && image->chromaticity.blue_primary.y>0.0599f && image->chromaticity.blue_primary.y<0.0601f && image->chromaticity.white_point.x>0.3126f && image->chromaticity.white_point.x<0.3128f && image->chromaticity.white_point.y>0.3289f && image->chromaticity.white_point.y<0.3291f)) SetImageColorspace(image,RGBColorspace); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " page.w: %.20g, page.h: %.20g,page.x: %.20g, page.y: %.20g.", (double) image->page.width,(double) image->page.height, (double) image->page.x,(double) image->page.y); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadPNGImage()"); return(image); } #if defined(JNG_SUPPORTED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d O n e J N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadOneJNGImage() reads a JPEG Network Graphics (JNG) image file % (minus the 8-byte signature) and returns it. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % JNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the ReadOneJNGImage method is: % % Image *ReadOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o mng_info: Specifies a pointer to a MngInfo structure. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
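%
% The JNG datastream is demultiplexed into temporary blobs: JDAT
% chunks are spooled to a color blob that is decoded with the JPEG
% reader, while IDAT or JDAA chunks are spooled to an alpha blob that
% is decoded with the PNG or JPEG reader. The intensity of the alpha
% image is then folded into the opacity channel of the main image.
%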
% */ static Image *ReadOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { Image *alpha_image, *color_image, *image, *jng_image; ImageInfo *alpha_image_info, *color_image_info; MagickBooleanType logging; int unique_filenames; ssize_t y; MagickBooleanType status; png_uint_32 jng_height, jng_width; png_byte jng_color_type, jng_image_sample_depth, jng_image_compression_method, jng_image_interlace_method, jng_alpha_sample_depth, jng_alpha_compression_method, jng_alpha_filter_method, jng_alpha_interlace_method; register const PixelPacket *s; register ssize_t i, x; register PixelPacket *q; register unsigned char *p; unsigned int read_JSEP, reading_idat; size_t length; jng_alpha_compression_method=0; jng_alpha_sample_depth=8; jng_color_type=0; jng_height=0; jng_width=0; alpha_image=(Image *) NULL; color_image=(Image *) NULL; alpha_image_info=(ImageInfo *) NULL; color_image_info=(ImageInfo *) NULL; unique_filenames=0; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOneJNGImage()"); image=mng_info->image; if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " AcquireNextImage()"); AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; /* Signature bytes have already been read. */ read_JSEP=MagickFalse; reading_idat=MagickFalse; for (;;) { char type[MaxTextExtent]; unsigned char *chunk; unsigned int count; /* Read a new JNG chunk. */ status=SetImageProgress(image,LoadImagesTag,TellBlob(image), 2*GetBlobSize(image)); if (status == MagickFalse) break; type[0]='\0'; (void) ConcatenateMagickString(type,"errr",MaxTextExtent); length=ReadBlobMSBLong(image); count=(unsigned int) ReadBlob(image,4,(unsigned char *) type); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading JNG chunk type %c%c%c%c, length: %.20g", type[0],type[1],type[2],type[3],(double) length); if (length > PNG_UINT_31_MAX || count == 0) ThrowReaderException(CorruptImageError,"CorruptImage"); p=NULL; chunk=(unsigned char *) NULL; if (length != 0) { chunk=(unsigned char *) AcquireQuantumMemory(length,sizeof(*chunk)); if (chunk == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) length; i++) chunk[i]=(unsigned char) ReadBlobByte(image); p=chunk; } (void) ReadBlobMSBLong(image); /* read crc word */ if (memcmp(type,mng_JHDR,4) == 0) { if (length == 16) { jng_width=(size_t) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]); jng_height=(size_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); if ((jng_width == 0) || (jng_height == 0)) ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize"); jng_color_type=p[8]; jng_image_sample_depth=p[9]; jng_image_compression_method=p[10]; jng_image_interlace_method=p[11]; image->interlace=jng_image_interlace_method != 0 ? 
PNGInterlace : NoInterlace; jng_alpha_sample_depth=p[12]; jng_alpha_compression_method=p[13]; jng_alpha_filter_method=p[14]; jng_alpha_interlace_method=p[15]; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " jng_width: %16lu, jng_height: %16lu\n" " jng_color_type: %16d, jng_image_sample_depth: %3d\n" " jng_image_compression_method:%3d", (unsigned long) jng_width, (unsigned long) jng_height, jng_color_type, jng_image_sample_depth, jng_image_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " jng_image_interlace_method: %3d" " jng_alpha_sample_depth: %3d", jng_image_interlace_method, jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " jng_alpha_compression_method:%3d\n" " jng_alpha_filter_method: %3d\n" " jng_alpha_interlace_method: %3d", jng_alpha_compression_method, jng_alpha_filter_method, jng_alpha_interlace_method); } } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if ((reading_idat == MagickFalse) && (read_JSEP == MagickFalse) && ((memcmp(type,mng_JDAT,4) == 0) || (memcmp(type,mng_JdAA,4) == 0) || (memcmp(type,mng_IDAT,4) == 0) || (memcmp(type,mng_JDAA,4) == 0))) { /* o create color_image o open color_blob, attached to color_image o if (color type has alpha) open alpha_blob, attached to alpha_image */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating color_blob."); color_image_info=(ImageInfo *)AcquireMagickMemory(sizeof(ImageInfo)); if (color_image_info == (ImageInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); GetImageInfo(color_image_info); color_image=AcquireImage(color_image_info); if (color_image == (Image *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) AcquireUniqueFilename(color_image->filename); unique_filenames++; status=OpenBlob(color_image_info,color_image,WriteBinaryBlobMode, exception); if (status == MagickFalse) { color_image=DestroyImage(color_image); return(DestroyImageList(image)); } if ((image_info->ping == MagickFalse) && (jng_color_type >= 12)) { alpha_image_info=(ImageInfo *) AcquireMagickMemory(sizeof(ImageInfo)); if (alpha_image_info == (ImageInfo *) NULL) { color_image=DestroyImage(color_image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } GetImageInfo(alpha_image_info); alpha_image=AcquireImage(alpha_image_info); if (alpha_image == (Image *) NULL) { alpha_image_info=DestroyImageInfo(alpha_image_info); color_image=DestroyImage(color_image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating alpha_blob."); (void) AcquireUniqueFilename(alpha_image->filename); unique_filenames++; status=OpenBlob(alpha_image_info,alpha_image,WriteBinaryBlobMode, exception); if (status == MagickFalse) { alpha_image=DestroyImage(alpha_image); alpha_image_info=DestroyImageInfo(alpha_image_info); color_image=DestroyImage(color_image); return(DestroyImageList(image)); } if (jng_alpha_compression_method == 0) { unsigned char data[18]; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing IHDR chunk to alpha_blob."); (void) WriteBlob(alpha_image,8,(const unsigned char *) "\211PNG\r\n\032\n"); (void) WriteBlobMSBULong(alpha_image,13L); PNGType(data,mng_IHDR); LogPNGChunk(logging,mng_IHDR,13L); PNGLong(data+4,jng_width); PNGLong(data+8,jng_height); data[12]=jng_alpha_sample_depth; data[13]=0; /* 
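the raw alpha IDATs are wrapped in a minimal standalone grayscale PNG (signature and IHDR here, IEND after the IDAT copy) so the ordinary PNG reader can decode them. Byte 13 selects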
color_type gray */ data[14]=0; /* compression method 0 */ data[15]=0; /* filter_method 0 */ data[16]=0; /* interlace_method 0 */ (void) WriteBlob(alpha_image,17,data); (void) WriteBlobMSBULong(alpha_image,crc32(0,data,17)); } } reading_idat=MagickTrue; } if (memcmp(type,mng_JDAT,4) == 0) { /* Copy chunk to color_image->blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying JDAT chunk data to color_blob."); (void) WriteBlob(color_image,length,chunk); if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_IDAT,4) == 0) { png_byte data[5]; /* Copy IDAT header and chunk data to alpha_image->blob */ if (alpha_image != NULL && image_info->ping == MagickFalse) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying IDAT chunk data to alpha_blob."); (void) WriteBlobMSBULong(alpha_image,(size_t) length); PNGType(data,mng_IDAT); LogPNGChunk(logging,mng_IDAT,length); (void) WriteBlob(alpha_image,4,data); (void) WriteBlob(alpha_image,length,chunk); (void) WriteBlobMSBULong(alpha_image, crc32(crc32(0,data,4),chunk,(uInt) length)); } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if ((memcmp(type,mng_JDAA,4) == 0) || (memcmp(type,mng_JdAA,4) == 0)) { /* Copy chunk data to alpha_image->blob */ if (alpha_image != NULL && image_info->ping == MagickFalse) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying JDAA chunk data to alpha_blob."); (void) WriteBlob(alpha_image,length,chunk); } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_JSEP,4) == 0) { read_JSEP=MagickTrue; if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_bKGD,4) == 0) { if (length == 2) { image->background_color.red=ScaleCharToQuantum(p[1]); image->background_color.green=image->background_color.red; image->background_color.blue=image->background_color.red; } if (length == 6) { image->background_color.red=ScaleCharToQuantum(p[1]); image->background_color.green=ScaleCharToQuantum(p[3]); image->background_color.blue=ScaleCharToQuantum(p[5]); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_gAMA,4) == 0) { if (length == 4) image->gamma=((float) mng_get_long(p))*0.00001; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_cHRM,4) == 0) { if (length == 32) { image->chromaticity.white_point.x=0.00001*mng_get_long(p); image->chromaticity.white_point.y=0.00001*mng_get_long(&p[4]); image->chromaticity.red_primary.x=0.00001*mng_get_long(&p[8]); image->chromaticity.red_primary.y=0.00001*mng_get_long(&p[12]); image->chromaticity.green_primary.x=0.00001*mng_get_long(&p[16]); image->chromaticity.green_primary.y=0.00001*mng_get_long(&p[20]); image->chromaticity.blue_primary.x=0.00001*mng_get_long(&p[24]); image->chromaticity.blue_primary.y=0.00001*mng_get_long(&p[28]); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_sRGB,4) == 0) { if (length == 1) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent(p[0]); image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; 
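/* The sRGB chunk implies the Rec.709 primaries (set above) and the D65 white point (set below). */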
image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_oFFs,4) == 0) { if (length > 8) { image->page.x=(ssize_t) mng_get_long(p); image->page.y=(ssize_t) mng_get_long(&p[4]); if ((int) p[8] != 0) { image->page.x/=10000; image->page.y/=10000; } } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_pHYs,4) == 0) { if (length > 8) { image->x_resolution=(double) mng_get_long(p); image->y_resolution=(double) mng_get_long(&p[4]); if ((int) p[8] == PNG_RESOLUTION_METER) { image->units=PixelsPerCentimeterResolution; image->x_resolution=image->x_resolution/100.0f; image->y_resolution=image->y_resolution/100.0f; } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #if 0 if (memcmp(type,mng_iCCP,4) == 0) { /* To do: */ if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #endif if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); if (memcmp(type,mng_IEND,4)) continue; break; } /* IEND found */ /* Finish up reading image data: o read main image from color_blob. o close color_blob. o if (color_type has alpha) if alpha_encoding is PNG read secondary image from alpha_blob via ReadPNG if alpha_encoding is JPEG read secondary image from alpha_blob via ReadJPEG o close alpha_blob. o copy intensity of secondary image into opacity samples of main image. o destroy the secondary image. */ if (color_image_info == (ImageInfo *) NULL) { assert(color_image == (Image *) NULL); assert(alpha_image == (Image *) NULL); return(DestroyImageList(image)); } if (color_image == (Image *) NULL) { assert(alpha_image == (Image *) NULL); return(DestroyImageList(image)); } (void) SeekBlob(color_image,0,SEEK_SET); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading jng_image from color_blob."); assert(color_image_info != (ImageInfo *) NULL); (void) FormatLocaleString(color_image_info->filename,MaxTextExtent,"%s", color_image->filename); color_image_info->ping=MagickFalse; /* To do: avoid this */ jng_image=ReadImage(color_image_info,exception); (void) RelinquishUniqueFileResource(color_image->filename); unique_filenames--; color_image=DestroyImage(color_image); color_image_info=DestroyImageInfo(color_image_info); if (jng_image == (Image *) NULL) return(DestroyImageList(image)); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Copying jng_image pixels to main image."); image->columns=jng_width; image->rows=jng_height; length=image->columns*sizeof(PixelPacket); status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } for (y=0; y < (ssize_t) image->rows; y++) { s=GetVirtualPixels(jng_image,0,y,image->columns,1,&image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,exception); (void) CopyMagickMemory(q,s,length); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } jng_image=DestroyImage(jng_image); if (image_info->ping == MagickFalse) { if (jng_color_type >= 12) { if (jng_alpha_compression_method == 0) { png_byte data[5]; (void) WriteBlobMSBULong(alpha_image,0x00000000L); PNGType(data,mng_IEND); LogPNGChunk(logging,mng_IEND,0L); (void) WriteBlob(alpha_image,4,data); (void) WriteBlobMSBULong(alpha_image,crc32(0,data,4)); } (void) SeekBlob(alpha_image,0,SEEK_SET); if (logging != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " Reading opacity from alpha_blob."); (void) FormatLocaleString(alpha_image_info->filename,MaxTextExtent, "%s",alpha_image->filename); jng_image=ReadImage(alpha_image_info,exception); if (jng_image != (Image *) NULL) for (y=0; y < (ssize_t) image->rows; y++) { s=GetVirtualPixels(jng_image,0,y,image->columns,1, &image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (image->matte != MagickFalse) for (x=(ssize_t) image->columns; x != 0; x--,q++,s++) SetPixelOpacity(q,QuantumRange- GetPixelRed(s)); else for (x=(ssize_t) image->columns; x != 0; x--,q++,s++) { SetPixelAlpha(q,GetPixelRed(s)); if (GetPixelOpacity(q) != OpaqueOpacity) image->matte=MagickTrue; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } (void) RelinquishUniqueFileResource(alpha_image->filename); unique_filenames--; alpha_image=DestroyImage(alpha_image); alpha_image_info=DestroyImageInfo(alpha_image_info); if (jng_image != (Image *) NULL) jng_image=DestroyImage(jng_image); } } /* Read the JNG image. */ if (mng_info->mng_type == 0) { mng_info->mng_width=jng_width; mng_info->mng_height=jng_height; } if (image->page.width == 0 && image->page.height == 0) { image->page.width=jng_width; image->page.height=jng_height; } if (image->page.x == 0 && image->page.y == 0) { image->page.x=mng_info->x_off[mng_info->object_id]; image->page.y=mng_info->y_off[mng_info->object_id]; } else { image->page.y=mng_info->y_off[mng_info->object_id]; } mng_info->image_found++; status=SetImageProgress(image,LoadImagesTag,2*TellBlob(image), 2*GetBlobSize(image)); if (status == MagickFalse) return(DestroyImageList(image)); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit ReadOneJNGImage(); unique_filenames=%d",unique_filenames); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d J N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadJNGImage() reads a JPEG Network Graphics (JNG) image file % (including the 8-byte signature) and returns it. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % JNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the ReadJNGImage method is: % % Image *ReadJNGImage(const ImageInfo *image_info, ExceptionInfo % *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadJNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType logging, status; MngInfo *mng_info; char magic_number[MaxTextExtent]; size_t count; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadJNGImage()"); image=AcquireImage(image_info); mng_info=(MngInfo *) NULL; status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) return((Image *) NULL); if (LocaleCompare(image_info->magick,"JNG") != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Verify JNG signature. 
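* The 8-byte magic is \213JNG\r\n\032\n: the same anti-corruption
* pattern as the PNG signature, with 0x8B and the letters "JNG".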
*/ count=(size_t) ReadBlob(image,8,(unsigned char *) magic_number); if (count < 8 || memcmp(magic_number,"\213JNG\r\n\032\n",8) != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(*mng_info)); if (mng_info == (MngInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOneJNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if (image->columns == 0 || image->rows == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); ThrowReaderException(CorruptImageError,"CorruptImage"); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadJNGImage()"); return(image); } #endif static Image *ReadOneMNGImage(MngInfo* mng_info, const ImageInfo *image_info, ExceptionInfo *exception) { char page_geometry[MaxTextExtent]; Image *image; MagickBooleanType logging; volatile int first_mng_object, object_id, term_chunk_found, skip_to_iend; volatile ssize_t image_count=0; MagickBooleanType status; MagickOffsetType offset; MngBox default_fb, fb, previous_fb; #if defined(MNG_INSERT_LAYERS) PixelPacket mng_background_color; #endif register unsigned char *p; register ssize_t i; size_t count; ssize_t loop_level; volatile short skipping_loop; #if defined(MNG_INSERT_LAYERS) unsigned int mandatory_back=0; #endif volatile unsigned int #ifdef MNG_OBJECT_BUFFERS mng_background_object=0, #endif mng_type=0; /* 0: PNG or JNG; 1: MNG; 2: MNG-LC; 3: MNG-VLC */ size_t default_frame_timeout, frame_timeout, #if defined(MNG_INSERT_LAYERS) image_height, image_width, #endif length; /* These delays are all measured in image ticks_per_second, * not in MNG ticks_per_second */ volatile size_t default_frame_delay, final_delay, final_image_delay, frame_delay, #if defined(MNG_INSERT_LAYERS) insert_layers, #endif mng_iterations=1, simplicity=0, subframe_height=0, subframe_width=0; previous_fb.top=0; previous_fb.bottom=0; previous_fb.left=0; previous_fb.right=0; default_fb.top=0; default_fb.bottom=0; default_fb.left=0; default_fb.right=0; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter ReadOneMNGImage()"); image=mng_info->image; if (LocaleCompare(image_info->magick,"MNG") == 0) { char magic_number[MaxTextExtent]; /* Verify MNG signature. */ count=(size_t) ReadBlob(image,8,(unsigned char *) magic_number); if (memcmp(magic_number,"\212MNG\r\n\032\n",8) != 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize some nonzero members of the MngInfo structure. 
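     Object clipping boxes default to the full 31-bit coordinate range
     (PNG_UINT_31_MAX is 0x7fffffff; MNG, like PNG, limits chunk
     integers to 31 bits), so every object starts out effectively
     unclipped until a DEFI or CLIP chunk narrows its box.  Object 0
     is marked as existing up front because MNG defines it as the
     ever-present default object that bare IHDR/JHDR datastreams
     modify.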
*/ for (i=0; i < MNG_MAX_OBJECTS; i++) { mng_info->object_clip[i].right=(ssize_t) PNG_UINT_31_MAX; mng_info->object_clip[i].bottom=(ssize_t) PNG_UINT_31_MAX; } mng_info->exists[0]=MagickTrue; } skipping_loop=(-1); first_mng_object=MagickTrue; mng_type=0; #if defined(MNG_INSERT_LAYERS) insert_layers=MagickFalse; /* should be False when converting or mogrifying */ #endif default_frame_delay=0; default_frame_timeout=0; frame_delay=0; final_delay=1; mng_info->ticks_per_second=1UL*image->ticks_per_second; object_id=0; skip_to_iend=MagickFalse; term_chunk_found=MagickFalse; mng_info->framing_mode=1; #if defined(MNG_INSERT_LAYERS) mandatory_back=MagickFalse; #endif #if defined(MNG_INSERT_LAYERS) mng_background_color=image->background_color; #endif default_fb=mng_info->frame; previous_fb=mng_info->frame; do { char type[MaxTextExtent]; if (LocaleCompare(image_info->magick,"MNG") == 0) { unsigned char *chunk; /* Read a new chunk. */ type[0]='\0'; (void) ConcatenateMagickString(type,"errr",MaxTextExtent); length=ReadBlobMSBLong(image); count=(size_t) ReadBlob(image,4,(unsigned char *) type); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reading MNG chunk type %c%c%c%c, length: %.20g", type[0],type[1],type[2],type[3],(double) length); if (length > PNG_UINT_31_MAX) { status=MagickFalse; break; } if (count == 0) ThrowReaderException(CorruptImageError,"CorruptImage"); p=NULL; chunk=(unsigned char *) NULL; if (length != 0) { chunk=(unsigned char *) AcquireQuantumMemory(length,sizeof(*chunk)); if (chunk == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) length; i++) chunk[i]=(unsigned char) ReadBlobByte(image); p=chunk; } (void) ReadBlobMSBLong(image); /* read crc word */ #if !defined(JNG_SUPPORTED) if (memcmp(type,mng_JHDR,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->jhdr_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"JNGCompressNotSupported","`%s'",image->filename); mng_info->jhdr_warning++; } #endif if (memcmp(type,mng_DHDR,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->dhdr_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"DeltaPNGNotSupported","`%s'",image->filename); mng_info->dhdr_warning++; } if (memcmp(type,mng_MEND,4) == 0) break; if (skip_to_iend) { if (memcmp(type,mng_IEND,4) == 0) skip_to_iend=MagickFalse; if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skip to IEND."); continue; } if (memcmp(type,mng_MHDR,4) == 0) { if (length != 28) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError,"CorruptImage"); } mng_info->mng_width=(size_t) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]); mng_info->mng_height=(size_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " MNG width: %.20g",(double) mng_info->mng_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " MNG height: %.20g",(double) mng_info->mng_height); } p+=8; mng_info->ticks_per_second=(size_t) mng_get_long(p); if (mng_info->ticks_per_second == 0) default_frame_delay=0; else default_frame_delay=1UL*image->ticks_per_second/ mng_info->ticks_per_second; frame_delay=default_frame_delay; simplicity=0; /* Skip nominal layer count, frame count, and play time */ p+=16; simplicity=(size_t) mng_get_long(p); 
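          /*
            Classify the datastream from the simplicity profile.  In
            the profile word, bit 0 flags the word as valid, bit 1
            permits "simple" MNG features, and bit 3 permits internal
            transparency, so (simplicity | 11) == 11 (mask 1011b)
            holds only when no feature bits outside {0,1,3} are set,
            which is the MNG-LC subset, while (simplicity | 9) == 9
            (mask 1001b) additionally excludes bit 1, the MNG-VLC
            subset.  For example, simplicity == 0x0B classifies as LC,
            but 0x1B (bit 4, JNG, also set) stays a full MNG.  The bit
            names here follow the MNG 1.0 specification; the code
            below relies only on the masks themselves.
          */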
mng_type=1; /* Full MNG */ if ((simplicity != 0) && ((simplicity | 11) == 11)) mng_type=2; /* LC */ if ((simplicity != 0) && ((simplicity | 9) == 9)) mng_type=3; /* VLC */ #if defined(MNG_INSERT_LAYERS) if (mng_type != 3) insert_layers=MagickTrue; #endif if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); mng_info->image=image; } if ((mng_info->mng_width > 65535L) || (mng_info->mng_height > 65535L)) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit"); } (void) FormatLocaleString(page_geometry,MaxTextExtent, "%.20gx%.20g+0+0",(double) mng_info->mng_width,(double) mng_info->mng_height); mng_info->frame.left=0; mng_info->frame.right=(ssize_t) mng_info->mng_width; mng_info->frame.top=0; mng_info->frame.bottom=(ssize_t) mng_info->mng_height; mng_info->clip=default_fb=previous_fb=mng_info->frame; for (i=0; i < MNG_MAX_OBJECTS; i++) mng_info->object_clip[i]=mng_info->frame; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_TERM,4) == 0) { int repeat=0; if (length != 0) repeat=p[0]; if (repeat == 3 && length > 8) { final_delay=(png_uint_32) mng_get_long(&p[2]); mng_iterations=(png_uint_32) mng_get_long(&p[6]); if (mng_iterations == PNG_UINT_31_MAX) mng_iterations=0; image->iterations=mng_iterations; term_chunk_found=MagickTrue; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " repeat=%d, final_delay=%.20g, iterations=%.20g", repeat,(double) final_delay, (double) image->iterations); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_DEFI,4) == 0) { if (mng_type == 3) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"DEFI chunk found in MNG-VLC datastream","`%s'", image->filename); if (length > 1) { object_id=(p[0] << 8) | p[1]; if (mng_type == 2 && object_id != 0) (void) ThrowMagickException(&image->exception, GetMagickModule(), CoderError,"Nonzero object_id in MNG-LC datastream", "`%s'", image->filename); if (object_id > MNG_MAX_OBJECTS) { /* Instead of using a warning we should allocate a larger MngInfo structure and continue. */ (void) ThrowMagickException(&image->exception, GetMagickModule(), CoderError, "object id too large","`%s'",image->filename); object_id=MNG_MAX_OBJECTS; } if (mng_info->exists[object_id]) if (mng_info->frozen[object_id]) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "DEFI cannot redefine a frozen MNG object","`%s'", image->filename); continue; } mng_info->exists[object_id]=MagickTrue; if (length > 2) mng_info->invisible[object_id]=p[2]; /* Extract object offset info. */ if (length > 11) { mng_info->x_off[object_id]=(ssize_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); mng_info->y_off[object_id]=(ssize_t) ((p[8] << 24) | (p[9] << 16) | (p[10] << 8) | p[11]); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " x_off[%d]: %.20g, y_off[%d]: %.20g", object_id,(double) mng_info->x_off[object_id], object_id,(double) mng_info->y_off[object_id]); } } /* Extract object clipping info. 
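             In a DEFI chunk the optional fields appear in a fixed
             order:

               bytes  0-1   object id
               byte   2     do-not-show flag
               byte   3     concrete flag
               bytes  4-11  x and y offsets (signed 32-bit each)
               bytes 12-27  clip box: left, right, top, bottom
                            (signed 32-bit each), decoded below by
                            mng_read_box()

             so a clip box is present only when the chunk carries at
             least 28 data bytes, which the length > 27 test enforces.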
*/ if (length > 27) mng_info->object_clip[object_id]= mng_read_box(mng_info->frame,0, &p[12]); } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_bKGD,4) == 0) { mng_info->have_global_bkgd=MagickFalse; if (length > 5) { mng_info->mng_global_bkgd.red= ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1])); mng_info->mng_global_bkgd.green= ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3])); mng_info->mng_global_bkgd.blue= ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5])); mng_info->have_global_bkgd=MagickTrue; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_BACK,4) == 0) { #if defined(MNG_INSERT_LAYERS) if (length > 6) mandatory_back=p[6]; else mandatory_back=0; if (mandatory_back && length > 5) { mng_background_color.red= ScaleShortToQuantum((unsigned short) ((p[0] << 8) | p[1])); mng_background_color.green= ScaleShortToQuantum((unsigned short) ((p[2] << 8) | p[3])); mng_background_color.blue= ScaleShortToQuantum((unsigned short) ((p[4] << 8) | p[5])); mng_background_color.opacity=OpaqueOpacity; } #ifdef MNG_OBJECT_BUFFERS if (length > 8) mng_background_object=(p[7] << 8) | p[8]; #endif #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_PLTE,4) == 0) { /* Read global PLTE. */ if (length && (length < 769)) { if (mng_info->global_plte == (png_colorp) NULL) mng_info->global_plte=(png_colorp) AcquireQuantumMemory(256, sizeof(*mng_info->global_plte)); for (i=0; i < (ssize_t) (length/3); i++) { mng_info->global_plte[i].red=p[3*i]; mng_info->global_plte[i].green=p[3*i+1]; mng_info->global_plte[i].blue=p[3*i+2]; } mng_info->global_plte_length=(unsigned int) (length/3); } #ifdef MNG_LOOSE for ( ; i < 256; i++) { mng_info->global_plte[i].red=i; mng_info->global_plte[i].green=i; mng_info->global_plte[i].blue=i; } if (length != 0) mng_info->global_plte_length=256; #endif else mng_info->global_plte_length=0; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_tRNS,4) == 0) { /* read global tRNS */ if (length > 0 && length < 257) for (i=0; i < (ssize_t) length; i++) mng_info->global_trns[i]=p[i]; #ifdef MNG_LOOSE for ( ; i < 256; i++) mng_info->global_trns[i]=255; #endif mng_info->global_trns_length=(unsigned int) length; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_gAMA,4) == 0) { if (length == 4) { ssize_t igamma; igamma=mng_get_long(p); mng_info->global_gamma=((float) igamma)*0.00001; mng_info->have_global_gama=MagickTrue; } else mng_info->have_global_gama=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_cHRM,4) == 0) { /* Read global cHRM */ if (length == 32) { mng_info->global_chrm.white_point.x=0.00001*mng_get_long(p); mng_info->global_chrm.white_point.y=0.00001*mng_get_long(&p[4]); mng_info->global_chrm.red_primary.x=0.00001*mng_get_long(&p[8]); mng_info->global_chrm.red_primary.y=0.00001* mng_get_long(&p[12]); mng_info->global_chrm.green_primary.x=0.00001* mng_get_long(&p[16]); mng_info->global_chrm.green_primary.y=0.00001* mng_get_long(&p[20]); mng_info->global_chrm.blue_primary.x=0.00001* mng_get_long(&p[24]); mng_info->global_chrm.blue_primary.y=0.00001* mng_get_long(&p[28]); mng_info->have_global_chrm=MagickTrue; } else mng_info->have_global_chrm=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_sRGB,4) == 0) { /* Read global sRGB. 
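             The sRGB chunk body is a single byte holding the PNG
             rendering intent: 0 perceptual, 1 relative colorimetric,
             2 saturation, 3 absolute colorimetric.  It is mapped
             through Magick_RenderingIntent_from_PNG_RenderingIntent()
             rather than cast, because the RenderingIntent enumeration
             does not share PNG's numeric values; the mapping amounts
             to a switch of the form

               switch (intent)
               {
                 case 0: return(PerceptualIntent);
                 case 1: return(RelativeIntent);
                 case 2: return(SaturationIntent);
                 case 3: return(AbsoluteIntent);
                 default: return(UndefinedIntent);
               }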
*/ if (length != 0) { mng_info->global_srgb_intent= Magick_RenderingIntent_from_PNG_RenderingIntent(p[0]); mng_info->have_global_srgb=MagickTrue; } else mng_info->have_global_srgb=MagickFalse; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_iCCP,4) == 0) { /* To do: */ /* Read global iCCP. */ if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_FRAM,4) == 0) { if (mng_type == 3) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"FRAM chunk found in MNG-VLC datastream","`%s'", image->filename); if ((mng_info->framing_mode == 2) || (mng_info->framing_mode == 4)) image->delay=frame_delay; frame_delay=default_frame_delay; frame_timeout=default_frame_timeout; fb=default_fb; if (length > 0) if (p[0]) mng_info->framing_mode=p[0]; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_mode=%d",mng_info->framing_mode); if (length > 6) { /* Note the delay and frame clipping boundaries. */ p++; /* framing mode */ while (*p && ((p-chunk) < (ssize_t) length)) p++; /* frame name */ p++; /* frame name terminator */ if ((p-chunk) < (ssize_t) (length-4)) { int change_delay, change_timeout, change_clipping; change_delay=(*p++); change_timeout=(*p++); change_clipping=(*p++); p++; /* change_sync */ if (change_delay && (p-chunk) < (ssize_t) (length-4)) { frame_delay=1UL*image->ticks_per_second* mng_get_long(p); if (mng_info->ticks_per_second != 0) frame_delay/=mng_info->ticks_per_second; else frame_delay=PNG_UINT_31_MAX; if (change_delay == 2) default_frame_delay=frame_delay; p+=4; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_delay=%.20g",(double) frame_delay); } if (change_timeout && (p-chunk) < (ssize_t) (length-4)) { frame_timeout=1UL*image->ticks_per_second* mng_get_long(p); if (mng_info->ticks_per_second != 0) frame_timeout/=mng_info->ticks_per_second; else frame_timeout=PNG_UINT_31_MAX; if (change_timeout == 2) default_frame_timeout=frame_timeout; p+=4; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Framing_timeout=%.20g",(double) frame_timeout); } if (change_clipping && (p-chunk) < (ssize_t) (length-17)) { fb=mng_read_box(previous_fb,(char) p[0],&p[1]); p+=17; previous_fb=fb; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Frame_clip: L=%.20g R=%.20g T=%.20g B=%.20g", (double) fb.left,(double) fb.right,(double) fb.top, (double) fb.bottom); if (change_clipping == 2) default_fb=fb; } } } mng_info->clip=fb; mng_info->clip=mng_minimum_box(fb,mng_info->frame); subframe_width=(size_t) (mng_info->clip.right -mng_info->clip.left); subframe_height=(size_t) (mng_info->clip.bottom -mng_info->clip.top); /* Insert a background layer behind the frame if framing_mode is 4. */ #if defined(MNG_INSERT_LAYERS) if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " subframe_width=%.20g, subframe_height=%.20g",(double) subframe_width,(double) subframe_height); if (insert_layers && (mng_info->framing_mode == 4) && (subframe_width) && (subframe_height)) { /* Allocate next image structure. 
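             The background layer synthesized here is a plain
             rectangle the size of the current frame clipping box: it
             takes its geometry from subframe_width and
             subframe_height, its placement from the clip box's
             left/top corner, and is flood-filled with
             mng_background_color by SetImageBackgroundColor().  Its
             delay is forced to zero so that a later coalesce pass
             treats it purely as a backdrop for the frame that
             follows, not as a visible frame of its own.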
*/ if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; image->columns=subframe_width; image->rows=subframe_height; image->page.width=subframe_width; image->page.height=subframe_height; image->page.x=mng_info->clip.left; image->page.y=mng_info->clip.top; image->background_color=mng_background_color; image->matte=MagickFalse; image->delay=0; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Insert backgd layer, L=%.20g, R=%.20g T=%.20g, B=%.20g", (double) mng_info->clip.left,(double) mng_info->clip.right, (double) mng_info->clip.top,(double) mng_info->clip.bottom); } #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_CLIP,4) == 0) { unsigned int first_object, last_object; /* Read CLIP. */ if (length > 3) { first_object=(p[0] << 8) | p[1]; last_object=(p[2] << 8) | p[3]; p+=4; for (i=(int) first_object; i <= (int) last_object; i++) { if (mng_info->exists[i] && !mng_info->frozen[i]) { MngBox box; box=mng_info->object_clip[i]; if ((p-chunk) < (ssize_t) (length-17)) mng_info->object_clip[i]= mng_read_box(box,(char) p[0],&p[1]); } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_SAVE,4) == 0) { for (i=1; i < MNG_MAX_OBJECTS; i++) if (mng_info->exists[i]) { mng_info->frozen[i]=MagickTrue; #ifdef MNG_OBJECT_BUFFERS if (mng_info->ob[i] != (MngBuffer *) NULL) mng_info->ob[i]->frozen=MagickTrue; #endif } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if ((memcmp(type,mng_DISC,4) == 0) || (memcmp(type,mng_SEEK,4) == 0)) { /* Read DISC or SEEK. */ if ((length == 0) || !memcmp(type,mng_SEEK,4)) { for (i=1; i < MNG_MAX_OBJECTS; i++) MngInfoDiscardObject(mng_info,i); } else { register ssize_t j; for (j=1; j < (ssize_t) length; j+=2) { i=p[j-1] << 8 | p[j]; MngInfoDiscardObject(mng_info,i); } } if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_MOVE,4) == 0) { size_t first_object, last_object; /* read MOVE */ if (length > 3) { first_object=(p[0] << 8) | p[1]; last_object=(p[2] << 8) | p[3]; p+=4; for (i=(ssize_t) first_object; i <= (ssize_t) last_object; i++) { if (mng_info->exists[i] && !mng_info->frozen[i] && (p-chunk) < (ssize_t) (length-8)) { MngPair new_pair; MngPair old_pair; old_pair.a=mng_info->x_off[i]; old_pair.b=mng_info->y_off[i]; new_pair=mng_read_pair(old_pair,(int) p[0],&p[1]); mng_info->x_off[i]=new_pair.a; mng_info->y_off[i]=new_pair.b; } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_LOOP,4) == 0) { ssize_t loop_iters=1; if (length > 4) { loop_level=chunk[0]; mng_info->loop_active[loop_level]=1; /* mark loop active */ /* Record starting point. 
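             A LOOP chunk is one byte of nest level followed by a
             32-bit iteration count (plus optional termination fields
             this reader ignores).  The byte offset returned by
             TellBlob() is stored in loop_jump[] so the matching ENDL
             handler can SeekBlob() back to the first chunk inside the
             loop body on each remaining iteration; a count of zero
             instead arms skipping_loop, and everything up to the
             matching ENDL is discarded.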
*/ loop_iters=mng_get_long(&chunk[1]); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " LOOP level %.20g has %.20g iterations ", (double) loop_level, (double) loop_iters); if (loop_iters == 0) skipping_loop=loop_level; else { mng_info->loop_jump[loop_level]=TellBlob(image); mng_info->loop_count[loop_level]=loop_iters; } mng_info->loop_iteration[loop_level]=0; } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_ENDL,4) == 0) { if (length > 0) { loop_level=chunk[0]; if (skipping_loop > 0) { if (skipping_loop == loop_level) { /* Found end of zero-iteration loop. */ skipping_loop=(-1); mng_info->loop_active[loop_level]=0; } } else { if (mng_info->loop_active[loop_level] == 1) { mng_info->loop_count[loop_level]--; mng_info->loop_iteration[loop_level]++; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ENDL: LOOP level %.20g has %.20g remaining iters ", (double) loop_level,(double) mng_info->loop_count[loop_level]); if (mng_info->loop_count[loop_level] != 0) { offset=SeekBlob(image, mng_info->loop_jump[loop_level], SEEK_SET); if (offset < 0) { chunk=(unsigned char *) RelinquishMagickMemory( chunk); ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } } else { short last_level; /* Finished loop. */ mng_info->loop_active[loop_level]=0; last_level=(-1); for (i=0; i < loop_level; i++) if (mng_info->loop_active[i] == 1) last_level=(short) i; loop_level=last_level; } } } } chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_CLON,4) == 0) { if (mng_info->clon_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"CLON is not implemented yet","`%s'", image->filename); mng_info->clon_warning++; } if (memcmp(type,mng_MAGN,4) == 0) { png_uint_16 magn_first, magn_last, magn_mb, magn_ml, magn_mr, magn_mt, magn_mx, magn_my, magn_methx, magn_methy; if (length > 1) magn_first=(p[0] << 8) | p[1]; else magn_first=0; if (length > 3) magn_last=(p[2] << 8) | p[3]; else magn_last=magn_first; #ifndef MNG_OBJECT_BUFFERS if (magn_first || magn_last) if (mng_info->magn_warning == 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "MAGN is not implemented yet for nonzero objects", "`%s'",image->filename); mng_info->magn_warning++; } #endif if (length > 4) magn_methx=p[4]; else magn_methx=0; if (length > 6) magn_mx=(p[5] << 8) | p[6]; else magn_mx=1; if (magn_mx == 0) magn_mx=1; if (length > 8) magn_my=(p[7] << 8) | p[8]; else magn_my=magn_mx; if (magn_my == 0) magn_my=1; if (length > 10) magn_ml=(p[9] << 8) | p[10]; else magn_ml=magn_mx; if (magn_ml == 0) magn_ml=1; if (length > 12) magn_mr=(p[11] << 8) | p[12]; else magn_mr=magn_mx; if (magn_mr == 0) magn_mr=1; if (length > 14) magn_mt=(p[13] << 8) | p[14]; else magn_mt=magn_my; if (magn_mt == 0) magn_mt=1; if (length > 16) magn_mb=(p[15] << 8) | p[16]; else magn_mb=magn_my; if (magn_mb == 0) magn_mb=1; if (length > 17) magn_methy=p[17]; else magn_methy=magn_methx; if (magn_methx > 5 || magn_methy > 5) if (mng_info->magn_warning == 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "Unknown MAGN method in MNG datastream","`%s'", image->filename); mng_info->magn_warning++; } #ifdef MNG_OBJECT_BUFFERS /* Magnify existing objects in the range magn_first to magn_last */ #endif if (magn_first == 0 || magn_last == 0) { /* Save the magnification factors for object 0 */ mng_info->magn_mb=magn_mb; mng_info->magn_ml=magn_ml; 
mng_info->magn_mr=magn_mr; mng_info->magn_mt=magn_mt; mng_info->magn_mx=magn_mx; mng_info->magn_my=magn_my; mng_info->magn_methx=magn_methx; mng_info->magn_methy=magn_methy; } } if (memcmp(type,mng_PAST,4) == 0) { if (mng_info->past_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"PAST is not implemented yet","`%s'", image->filename); mng_info->past_warning++; } if (memcmp(type,mng_SHOW,4) == 0) { if (mng_info->show_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"SHOW is not implemented yet","`%s'", image->filename); mng_info->show_warning++; } if (memcmp(type,mng_sBIT,4) == 0) { if (length < 4) mng_info->have_global_sbit=MagickFalse; else { mng_info->global_sbit.gray=p[0]; mng_info->global_sbit.red=p[0]; mng_info->global_sbit.green=p[1]; mng_info->global_sbit.blue=p[2]; mng_info->global_sbit.alpha=p[3]; mng_info->have_global_sbit=MagickTrue; } } if (memcmp(type,mng_pHYs,4) == 0) { if (length > 8) { mng_info->global_x_pixels_per_unit= (size_t) mng_get_long(p); mng_info->global_y_pixels_per_unit= (size_t) mng_get_long(&p[4]); mng_info->global_phys_unit_type=p[8]; mng_info->have_global_phys=MagickTrue; } else mng_info->have_global_phys=MagickFalse; } if (memcmp(type,mng_pHYg,4) == 0) { if (mng_info->phyg_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"pHYg is not implemented.","`%s'",image->filename); mng_info->phyg_warning++; } if (memcmp(type,mng_BASI,4) == 0) { skip_to_iend=MagickTrue; if (mng_info->basi_warning == 0) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"BASI is not implemented yet","`%s'", image->filename); mng_info->basi_warning++; #ifdef MNG_BASI_SUPPORTED if (length > 11) { basi_width=(size_t) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]); basi_height=(size_t) ((p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7]); basi_color_type=p[8]; basi_compression_method=p[9]; basi_filter_type=p[10]; basi_interlace_method=p[11]; } if (length > 13) basi_red=(p[12] << 8) & p[13]; else basi_red=0; if (length > 15) basi_green=(p[14] << 8) & p[15]; else basi_green=0; if (length > 17) basi_blue=(p[16] << 8) & p[17]; else basi_blue=0; if (length > 19) basi_alpha=(p[18] << 8) & p[19]; else { if (basi_sample_depth == 16) basi_alpha=65535L; else basi_alpha=255; } if (length > 20) basi_viewable=p[20]; else basi_viewable=0; #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } if (memcmp(type,mng_IHDR,4) #if defined(JNG_SUPPORTED) && memcmp(type,mng_JHDR,4) #endif ) { /* Not an IHDR or JHDR chunk */ if (length != 0) chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } /* Process IHDR */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Processing %c%c%c%c chunk",type[0],type[1],type[2],type[3]); mng_info->exists[object_id]=MagickTrue; mng_info->viewable[object_id]=MagickTrue; if (mng_info->invisible[object_id]) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping invisible object"); skip_to_iend=MagickTrue; chunk=(unsigned char *) RelinquishMagickMemory(chunk); continue; } #if defined(MNG_INSERT_LAYERS) if (length < 8) { chunk=(unsigned char *) RelinquishMagickMemory(chunk); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } image_width=(size_t) mng_get_long(p); image_height=(size_t) mng_get_long(&p[4]); #endif chunk=(unsigned char *) RelinquishMagickMemory(chunk); /* Insert a transparent background layer behind the entire 
animation if it is not full screen. */ #if defined(MNG_INSERT_LAYERS) if (insert_layers && mng_type && first_mng_object) { if ((mng_info->clip.left > 0) || (mng_info->clip.top > 0) || (image_width < mng_info->mng_width) || (mng_info->clip.right < (ssize_t) mng_info->mng_width) || (image_height < mng_info->mng_height) || (mng_info->clip.bottom < (ssize_t) mng_info->mng_height)) { if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; /* Make a background rectangle. */ image->delay=0; image->columns=mng_info->mng_width; image->rows=mng_info->mng_height; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=0; image->page.y=0; image->background_color=mng_background_color; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Inserted transparent background layer, W=%.20g, H=%.20g", (double) mng_info->mng_width,(double) mng_info->mng_height); } } /* Insert a background layer behind the upcoming image if framing_mode is 3, and we haven't already inserted one. */ if (insert_layers && (mng_info->framing_mode == 3) && (subframe_width) && (subframe_height) && (simplicity == 0 || (simplicity & 0x08))) { if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; if (term_chunk_found) { image->start_loop=MagickTrue; image->iterations=mng_iterations; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; image->delay=0; image->columns=subframe_width; image->rows=subframe_height; image->page.width=subframe_width; image->page.height=subframe_height; image->page.x=mng_info->clip.left; image->page.y=mng_info->clip.top; image->background_color=mng_background_color; image->matte=MagickFalse; (void) SetImageBackgroundColor(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Insert background layer, L=%.20g, R=%.20g T=%.20g, B=%.20g", (double) mng_info->clip.left,(double) mng_info->clip.right, (double) mng_info->clip.top,(double) mng_info->clip.bottom); } #endif /* MNG_INSERT_LAYERS */ first_mng_object=MagickFalse; if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. 
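         The image allocated here receives the pending frame timing:
         under framing modes 1 and 3 every embedded image ends a
         frame, so it takes frame_delay and the running delay is reset
         to the default, while under modes 2 and 4 only FRAM chunks
         end frames and the delay assigned here stays zero (the FRAM
         handler above assigns it instead).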
*/ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } mng_info->image=image; status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; if (term_chunk_found) { image->start_loop=MagickTrue; term_chunk_found=MagickFalse; } else image->start_loop=MagickFalse; if (mng_info->framing_mode == 1 || mng_info->framing_mode == 3) { image->delay=frame_delay; frame_delay=default_frame_delay; } else image->delay=0; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=mng_info->x_off[object_id]; image->page.y=mng_info->y_off[object_id]; image->iterations=mng_iterations; /* Seek back to the beginning of the IHDR or JHDR chunk's length field. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Seeking back to beginning of %c%c%c%c chunk",type[0],type[1], type[2],type[3]); offset=SeekBlob(image,-((ssize_t) length+12),SEEK_CUR); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } mng_info->image=image; mng_info->mng_type=mng_type; mng_info->object_id=object_id; if (memcmp(type,mng_IHDR,4) == 0) image=ReadOnePNGImage(mng_info,image_info,exception); #if defined(JNG_SUPPORTED) else image=ReadOneJNGImage(mng_info,image_info,exception); #endif if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadJNGImage() with error"); return((Image *) NULL); } if (image->columns == 0 || image->rows == 0) { (void) CloseBlob(image); return(DestroyImageList(image)); } mng_info->image=image; if (mng_type) { MngBox crop_box; if (mng_info->magn_methx || mng_info->magn_methy) { png_uint_32 magnified_height, magnified_width; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Processing MNG MAGN chunk"); if (mng_info->magn_methx == 1) { magnified_width=mng_info->magn_ml; if (image->columns > 1) magnified_width += mng_info->magn_mr; if (image->columns > 2) magnified_width += (png_uint_32) ((image->columns-2)*(mng_info->magn_mx)); } else { magnified_width=(png_uint_32) image->columns; if (image->columns > 1) magnified_width += mng_info->magn_ml-1; if (image->columns > 2) magnified_width += mng_info->magn_mr-1; if (image->columns > 3) magnified_width += (png_uint_32) ((image->columns-3)*(mng_info->magn_mx-1)); } if (mng_info->magn_methy == 1) { magnified_height=mng_info->magn_mt; if (image->rows > 1) magnified_height += mng_info->magn_mb; if (image->rows > 2) magnified_height += (png_uint_32) ((image->rows-2)*(mng_info->magn_my)); } else { magnified_height=(png_uint_32) image->rows; if (image->rows > 1) magnified_height += mng_info->magn_mt-1; if (image->rows > 2) magnified_height += mng_info->magn_mb-1; if (image->rows > 3) magnified_height += (png_uint_32) ((image->rows-3)*(mng_info->magn_my-1)); } if (magnified_height > image->rows || magnified_width > image->columns) { Image *large_image; int yy; ssize_t m, y; register ssize_t x; register PixelPacket *n, *q; PixelPacket *next, *prev; png_uint_16 magn_methx, magn_methy; /* Allocate next image structure. 
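                  Its dimensions are the magnified sizes computed
                  above.  Worked example for magn_methx == 1 (pixel
                  replication) with factors ml=2, mr=3, mx=4 on a
                  10-column image: the left edge column contributes ml
                  copies, the right edge column mr copies, and each of
                  the 8 interior columns mx copies, so

                    magnified_width = 2 + 3 + 8*4 = 37.

                  For the interpolation methods (magn_methx != 1) each
                  factor is reduced by one because adjacent output
                  runs share a boundary sample, giving
                  columns + (ml-1) + (mr-1) + (columns-3)*(mx-1).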
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocate magnified image"); AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); large_image=SyncNextImageInList(image); large_image->columns=magnified_width; large_image->rows=magnified_height; magn_methx=mng_info->magn_methx; magn_methy=mng_info->magn_methy; #if (MAGICKCORE_QUANTUM_DEPTH > 16) #define QM unsigned short if (magn_methx != 1 || magn_methy != 1) { /* Scale pixels to unsigned shorts to prevent overflow of intermediate values of interpolations */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1, exception); for (x=(ssize_t) image->columns-1; x >= 0; x--) { SetPixelRed(q,ScaleQuantumToShort( GetPixelRed(q))); SetPixelGreen(q,ScaleQuantumToShort( GetPixelGreen(q))); SetPixelBlue(q,ScaleQuantumToShort( GetPixelBlue(q))); SetPixelOpacity(q,ScaleQuantumToShort( GetPixelOpacity(q))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #else #define QM Quantum #endif if (image->matte != MagickFalse) (void) SetImageBackgroundColor(large_image); else { large_image->background_color.opacity=OpaqueOpacity; (void) SetImageBackgroundColor(large_image); if (magn_methx == 4) magn_methx=2; if (magn_methx == 5) magn_methx=3; if (magn_methy == 4) magn_methy=2; if (magn_methy == 5) magn_methy=3; } /* magnify the rows into the right side of the large image */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Magnify the rows to %.20g",(double) large_image->rows); m=(ssize_t) mng_info->magn_mt; yy=0; length=(size_t) image->columns; next=(PixelPacket *) AcquireQuantumMemory(length,sizeof(*next)); prev=(PixelPacket *) AcquireQuantumMemory(length,sizeof(*prev)); if ((prev == (PixelPacket *) NULL) || (next == (PixelPacket *) NULL)) { image=DestroyImageList(image); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } n=GetAuthenticPixels(image,0,0,image->columns,1,exception); (void) CopyMagickMemory(next,n,length); for (y=0; y < (ssize_t) image->rows; y++) { if (y == 0) m=(ssize_t) mng_info->magn_mt; else if (magn_methy > 1 && y == (ssize_t) image->rows-2) m=(ssize_t) mng_info->magn_mb; else if (magn_methy <= 1 && y == (ssize_t) image->rows-1) m=(ssize_t) mng_info->magn_mb; else if (magn_methy > 1 && y == (ssize_t) image->rows-1) m=1; else m=(ssize_t) mng_info->magn_my; n=prev; prev=next; next=n; if (y < (ssize_t) image->rows-1) { n=GetAuthenticPixels(image,0,y+1,image->columns,1, exception); (void) CopyMagickMemory(next,n,length); } for (i=0; i < m; i++, yy++) { register PixelPacket *pixels; assert(yy < (ssize_t) large_image->rows); pixels=prev; n=next; q=GetAuthenticPixels(large_image,0,yy,large_image->columns, 1,exception); q+=(large_image->columns-image->columns); for (x=(ssize_t) image->columns-1; x >= 0; x--) { /* To do: get color as function of indexes[x] */ /* if (image->storage_class == PseudoClass) { } */ if (magn_methy <= 1) { /* replicate previous */ SetPixelRGBO(q,(pixels)); } else if (magn_methy == 2 || magn_methy == 4) { if (i == 0) { SetPixelRGBO(q,(pixels)); } else { /* Interpolate */ SetPixelRed(q, ((QM) (((ssize_t) (2*i*(GetPixelRed(n) -GetPixelRed(pixels)+m))/ ((ssize_t) (m*2)) +GetPixelRed(pixels))))); SetPixelGreen(q, ((QM) (((ssize_t) (2*i*(GetPixelGreen(n) -GetPixelGreen(pixels)+m))/ ((ssize_t) (m*2)) +GetPixelGreen(pixels))))); SetPixelBlue(q, ((QM) (((ssize_t) (2*i*(GetPixelBlue(n) -GetPixelBlue(pixels)+m))/ 
((ssize_t) (m*2)) +GetPixelBlue(pixels))))); if (image->matte != MagickFalse) SetPixelOpacity(q, ((QM) (((ssize_t) (2*i*(GetPixelOpacity(n) -GetPixelOpacity(pixels)+m)) /((ssize_t) (m*2))+ GetPixelOpacity(pixels))))); } if (magn_methy == 4) { /* Replicate nearest */ if (i <= ((m+1) << 1)) SetPixelOpacity(q, (*pixels).opacity+0); else SetPixelOpacity(q, (*n).opacity+0); } } else /* if (magn_methy == 3 || magn_methy == 5) */ { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelRGBO(q,(pixels)); } else { SetPixelRGBO(q,(n)); } if (magn_methy == 5) { SetPixelOpacity(q, (QM) (((ssize_t) (2*i* (GetPixelOpacity(n) -GetPixelOpacity(pixels)) +m))/((ssize_t) (m*2)) +GetPixelOpacity(pixels))); } } n++; q++; pixels++; } /* x */ if (SyncAuthenticPixels(large_image,exception) == 0) break; } /* i */ } /* y */ prev=(PixelPacket *) RelinquishMagickMemory(prev); next=(PixelPacket *) RelinquishMagickMemory(next); length=image->columns; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Delete original image"); DeleteImageFromList(&image); image=large_image; mng_info->image=image; /* magnify the columns */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Magnify the columns to %.20g",(double) image->columns); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *pixels; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); pixels=q+(image->columns-length); n=pixels+1; for (x=(ssize_t) (image->columns-length); x < (ssize_t) image->columns; x++) { /* To do: Rewrite using Get/Set***PixelComponent() */ if (x == (ssize_t) (image->columns-length)) m=(ssize_t) mng_info->magn_ml; else if (magn_methx > 1 && x == (ssize_t) image->columns-2) m=(ssize_t) mng_info->magn_mr; else if (magn_methx <= 1 && x == (ssize_t) image->columns-1) m=(ssize_t) mng_info->magn_mr; else if (magn_methx > 1 && x == (ssize_t) image->columns-1) m=1; else m=(ssize_t) mng_info->magn_mx; for (i=0; i < m; i++) { if (magn_methx <= 1) { /* replicate previous */ SetPixelRGBO(q,(pixels)); } else if (magn_methx == 2 || magn_methx == 4) { if (i == 0) { SetPixelRGBO(q,(pixels)); } /* To do: Rewrite using Get/Set***PixelComponent() */ else { /* Interpolate */ SetPixelRed(q, (QM) ((2*i*( GetPixelRed(n) -GetPixelRed(pixels))+m) /((ssize_t) (m*2))+ GetPixelRed(pixels))); SetPixelGreen(q, (QM) ((2*i*( GetPixelGreen(n) -GetPixelGreen(pixels))+m) /((ssize_t) (m*2))+ GetPixelGreen(pixels))); SetPixelBlue(q, (QM) ((2*i*( GetPixelBlue(n) -GetPixelBlue(pixels))+m) /((ssize_t) (m*2))+ GetPixelBlue(pixels))); if (image->matte != MagickFalse) SetPixelOpacity(q, (QM) ((2*i*( GetPixelOpacity(n) -GetPixelOpacity(pixels))+m) /((ssize_t) (m*2))+ GetPixelOpacity(pixels))); } if (magn_methx == 4) { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelOpacity(q, GetPixelOpacity(pixels)+0); } else { SetPixelOpacity(q, GetPixelOpacity(n)+0); } } } else /* if (magn_methx == 3 || magn_methx == 5) */ { /* Replicate nearest */ if (i <= ((m+1) << 1)) { SetPixelRGBO(q,(pixels)); } else { SetPixelRGBO(q,(n)); } if (magn_methx == 5) { /* Interpolate */ SetPixelOpacity(q, (QM) ((2*i*( GetPixelOpacity(n) -GetPixelOpacity(pixels))+m)/ ((ssize_t) (m*2)) +GetPixelOpacity(pixels))); } } q++; } n++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } #if (MAGICKCORE_QUANTUM_DEPTH > 16) if (magn_methx != 1 || magn_methy != 1) { /* Rescale pixels to Quantum */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); for (x=(ssize_t) 
image->columns-1; x >= 0; x--) { SetPixelRed(q,ScaleShortToQuantum( GetPixelRed(q))); SetPixelGreen(q,ScaleShortToQuantum( GetPixelGreen(q))); SetPixelBlue(q,ScaleShortToQuantum( GetPixelBlue(q))); SetPixelOpacity(q,ScaleShortToQuantum( GetPixelOpacity(q))); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } #endif if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished MAGN processing"); } } /* Crop_box is with respect to the upper left corner of the MNG. */ crop_box.left=mng_info->image_box.left+mng_info->x_off[object_id]; crop_box.right=mng_info->image_box.right+mng_info->x_off[object_id]; crop_box.top=mng_info->image_box.top+mng_info->y_off[object_id]; crop_box.bottom=mng_info->image_box.bottom+mng_info->y_off[object_id]; crop_box=mng_minimum_box(crop_box,mng_info->clip); crop_box=mng_minimum_box(crop_box,mng_info->frame); crop_box=mng_minimum_box(crop_box,mng_info->object_clip[object_id]); if ((crop_box.left != (mng_info->image_box.left +mng_info->x_off[object_id])) || (crop_box.right != (mng_info->image_box.right +mng_info->x_off[object_id])) || (crop_box.top != (mng_info->image_box.top +mng_info->y_off[object_id])) || (crop_box.bottom != (mng_info->image_box.bottom +mng_info->y_off[object_id]))) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Crop the PNG image"); if ((crop_box.left < crop_box.right) && (crop_box.top < crop_box.bottom)) { Image *im; RectangleInfo crop_info; /* Crop_info is with respect to the upper left corner of the image. */ crop_info.x=(crop_box.left-mng_info->x_off[object_id]); crop_info.y=(crop_box.top-mng_info->y_off[object_id]); crop_info.width=(size_t) (crop_box.right-crop_box.left); crop_info.height=(size_t) (crop_box.bottom-crop_box.top); image->page.width=image->columns; image->page.height=image->rows; image->page.x=0; image->page.y=0; im=CropImage(image,&crop_info,exception); if (im != (Image *) NULL) { image->columns=im->columns; image->rows=im->rows; im=DestroyImage(im); image->page.width=image->columns; image->page.height=image->rows; image->page.x=crop_box.left; image->page.y=crop_box.top; } } else { /* No pixels in crop area. The MNG spec still requires a layer, though, so make a single transparent pixel in the top left corner. */ image->columns=1; image->rows=1; image->colors=2; (void) SetImageBackgroundColor(image); image->page.width=1; image->page.height=1; image->page.x=0; image->page.y=0; } } #ifndef PNG_READ_EMPTY_PLTE_SUPPORTED image=mng_info->image; #endif } #if (MAGICKCORE_QUANTUM_DEPTH > 16) /* PNG does not handle depths greater than 16 so reduce it even * if lossy, and promote any depths > 8 to 16. 
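 * On Q32 builds this step is lossy by construction, since a 32-bit
 * sample must be squeezed into the 16 bits libpng can encode.
 * LosslessReduceDepthOK() then probes whether every sample would
 * survive an 8-bit round trip (in 16-bit terms, the high and low
 * bytes of each sample are equal, i.e. the value is a multiple of
 * 257); only when that holds is the depth dropped further to 8.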
*/ if (image->depth > 16) image->depth=16; #endif #if (MAGICKCORE_QUANTUM_DEPTH > 8) if (image->depth > 8) { /* To do: fill low byte properly */ image->depth=16; } if (LosslessReduceDepthOK(image) != MagickFalse) image->depth = 8; #endif GetImageException(image,exception); if (image_info->number_scenes != 0) { if (mng_info->scenes_found > (ssize_t) (image_info->first_scene+image_info->number_scenes)) break; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished reading image datastream."); } while (LocaleCompare(image_info->magick,"MNG") == 0); (void) CloseBlob(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Finished reading all image datastreams."); #if defined(MNG_INSERT_LAYERS) if (insert_layers && !mng_info->image_found && (mng_info->mng_width) && (mng_info->mng_height)) { /* Insert a background layer if nothing else was found. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No images found. Inserting a background layer."); if (GetAuthenticPixelQueue(image) != (PixelPacket *) NULL) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocation failed, returning NULL."); return(DestroyImageList(image)); } image=SyncNextImageInList(image); } image->columns=mng_info->mng_width; image->rows=mng_info->mng_height; image->page.width=mng_info->mng_width; image->page.height=mng_info->mng_height; image->page.x=0; image->page.y=0; image->background_color=mng_background_color; image->matte=MagickFalse; if (image_info->ping == MagickFalse) (void) SetImageBackgroundColor(image); mng_info->image_found++; } #endif image->iterations=mng_iterations; if (mng_iterations == 1) image->start_loop=MagickTrue; while (GetPreviousImageInList(image) != (Image *) NULL) { image_count++; if (image_count > 10*mng_info->image_found) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," No beginning"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"Linked list is corrupted, beginning of list not found", "`%s'",image_info->filename); return(DestroyImageList(image)); } image=GetPreviousImageInList(image); if (GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule()," Corrupt list"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"Linked list is corrupted; next_image is NULL","`%s'", image_info->filename); } } if (mng_info->ticks_per_second && mng_info->image_found > 1 && GetNextImageInList(image) == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " First image null"); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"image->next for first image is NULL but shouldn't be.", "`%s'",image_info->filename); } if (mng_info->image_found == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No visible images found."); (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError,"No visible images in file","`%s'",image_info->filename); return(DestroyImageList(image)); } if (mng_info->ticks_per_second) final_delay=1UL*MagickMax(image->ticks_per_second,1L)* final_delay/mng_info->ticks_per_second; else image->start_loop=MagickTrue; /* Find final nonzero image delay */ 
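  /*
    final_delay was captured from the TERM chunk in MNG ticks and has
    just been rescaled to the image's ticks_per_second.  For example,
    a TERM delay of 50 with mng_info->ticks_per_second == 100 and an
    image ticks_per_second of 100 gives final_delay == 50 ticks, i.e.
    half a second.  The loop below then holds the last frame at least
    as long as the last nonzero per-frame delay already assigned.
  */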
  final_image_delay=0;

  while (GetNextImageInList(image) != (Image *) NULL)
  {
    if (image->delay)
      final_image_delay=image->delay;

    image=GetNextImageInList(image);
  }

  if (final_delay < final_image_delay)
    final_delay=final_image_delay;

  image->delay=final_delay;

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image->delay=%.20g, final_delay=%.20g",(double) image->delay,
      (double) final_delay);

  if (logging != MagickFalse)
    {
      int
        scene;

      scene=0;
      image=GetFirstImageInList(image);

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " Before coalesce:");

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " scene 0 delay=%.20g",(double) image->delay);

      while (GetNextImageInList(image) != (Image *) NULL)
      {
        image=GetNextImageInList(image);

        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " scene %.20g delay=%.20g",(double) ++scene,
          (double) image->delay);
      }
    }

  image=GetFirstImageInList(image);

#ifdef MNG_COALESCE_LAYERS
  if (insert_layers)
    {
      Image
        *next_image,
        *next;

      size_t
        scene;

      if (logging != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " Coalesce Images");

      scene=image->scene;
      next_image=CoalesceImages(image,&image->exception);

      if (next_image == (Image *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");

      image=DestroyImageList(image);
      image=next_image;

      for (next=image; next != (Image *) NULL; next=next_image)
      {
        next->page.width=mng_info->mng_width;
        next->page.height=mng_info->mng_height;
        next->page.x=0;
        next->page.y=0;
        next->scene=scene++;
        next_image=GetNextImageInList(next);

        if (next_image == (Image *) NULL)
          break;

        if (next->delay == 0)
          {
            scene--;
            next_image->previous=GetPreviousImageInList(next);
            if (GetPreviousImageInList(next) == (Image *) NULL)
              image=next_image;
            else
              next->previous->next=next_image;
            next=DestroyImage(next);
          }
      }
    }
#endif

  while (GetNextImageInList(image) != (Image *) NULL)
    image=GetNextImageInList(image);

  image->dispose=BackgroundDispose;

  if (logging != MagickFalse)
    {
      int
        scene;

      scene=0;
      image=GetFirstImageInList(image);

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " After coalesce:");

      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " scene 0 delay=%.20g dispose=%.20g",(double) image->delay,
        (double) image->dispose);

      while (GetNextImageInList(image) != (Image *) NULL)
      {
        image=GetNextImageInList(image);

        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " scene %.20g delay=%.20g dispose=%.20g",(double) ++scene,
          (double) image->delay,(double) image->dispose);
      }
    }

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " exit ReadOneMNGImage();");

  return(image);
}

static Image *ReadMNGImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    logging,
    status;

  MngInfo
    *mng_info;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
    image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter ReadMNGImage()");
  image=AcquireImage(image_info);
  mng_info=(MngInfo *) NULL;
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);

  if (status == MagickFalse)
    return((Image *) NULL);

  /*
    Allocate a MngInfo structure.
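    The structure is cleared wholesale with ResetMagickMemory()
    because every warning counter, object-offset slot and loop
    bookkeeping array in MngInfo must start at zero for the chunk
    handlers to behave; zeroing once here is simpler than
    initializing the members piecemeal.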
*/ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; image=ReadOneMNGImage(mng_info,image_info,exception); mng_info=MngInfoFreeStruct(mng_info); if (image == (Image *) NULL) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "exit ReadMNGImage() with error"); return((Image *) NULL); } (void) CloseBlob(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit ReadMNGImage()"); return(GetFirstImageInList(image)); } #else /* PNG_LIBPNG_VER > 10011 */ static Image *ReadPNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { printf("Your PNG library is too old: You have libpng-%s\n", PNG_LIBPNG_VER_STRING); (void) ThrowMagickException(exception,GetMagickModule(),CoderError, "PNG library is too old","`%s'",image_info->filename); return(Image *) NULL; } static Image *ReadMNGImage(const ImageInfo *image_info,ExceptionInfo *exception) { return(ReadPNGImage(image_info,exception)); } #endif /* PNG_LIBPNG_VER > 10011 */ #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPNGImage() adds properties for the PNG image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPNGImage method is: % % size_t RegisterPNGImage(void) % */ ModuleExport size_t RegisterPNGImage(void) { char version[MaxTextExtent]; MagickInfo *entry; static const char *PNGNote= { "See http://www.libpng.org/ for details about the PNG format." }, *JNGNote= { "See http://www.libpng.org/pub/mng/ for details about the JNG\n" "format." }, *MNGNote= { "See http://www.libpng.org/pub/mng/ for details about the MNG\n" "format." }; *version='\0'; #if defined(PNG_LIBPNG_VER_STRING) (void) ConcatenateMagickString(version,"libpng ",MaxTextExtent); (void) ConcatenateMagickString(version,PNG_LIBPNG_VER_STRING,MaxTextExtent); if (LocaleCompare(PNG_LIBPNG_VER_STRING,png_get_header_ver(NULL)) != 0) { (void) ConcatenateMagickString(version,",",MaxTextExtent); (void) ConcatenateMagickString(version,png_get_libpng_ver(NULL), MaxTextExtent); } #endif entry=SetMagickInfo("MNG"); entry->seekable_stream=MagickTrue; /* To do: eliminate this. 
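     The MNG decoder needs a seekable stream because LOOP iteration
     is implemented by rewinding: the ENDL handler calls SeekBlob()
     back to the offset recorded when the LOOP chunk was read, and
     each embedded IHDR/JHDR datastream is re-read after a
     SeekBlob(image,-(length+12),SEEK_CUR).  Eliminating the flag
     would presumably mean buffering loop bodies in memory instead.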
*/ #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadMNGImage; entry->encoder=(EncodeImageHandler *) WriteMNGImage; #endif entry->magick=(IsImageFormatHandler *) IsMNG; entry->description=ConstantString("Multiple-image Network Graphics"); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("video/x-mng"); entry->module=ConstantString("PNG"); entry->note=ConstantString(MNGNote); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("Portable Network Graphics"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); if (*version != '\0') entry->version=ConstantString(version); entry->note=ConstantString(PNGNote); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG8"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString( "8-bit indexed with optional binary transparency"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG24"); *version='\0'; #if defined(ZLIB_VERSION) (void) ConcatenateMagickString(version,"zlib ",MaxTextExtent); (void) ConcatenateMagickString(version,ZLIB_VERSION,MaxTextExtent); if (LocaleCompare(ZLIB_VERSION,zlib_version) != 0) { (void) ConcatenateMagickString(version,",",MaxTextExtent); (void) ConcatenateMagickString(version,zlib_version,MaxTextExtent); } #endif if (*version != '\0') entry->version=ConstantString(version); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or binary transparent 24-bit RGB"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG32"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or transparent 32-bit RGBA"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG48"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or binary transparent 48-bit RGB"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG64"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString("opaque or transparent 64-bit RGBA"); 
entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PNG00"); #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadPNGImage; entry->encoder=(EncodeImageHandler *) WritePNGImage; #endif entry->magick=(IsImageFormatHandler *) IsPNG; entry->adjoin=MagickFalse; entry->description=ConstantString( "PNG inheriting bit-depth, color-type from original if possible"); entry->mime_type=ConstantString("image/png"); entry->module=ConstantString("PNG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JNG"); #if defined(JNG_SUPPORTED) #if defined(MAGICKCORE_PNG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJNGImage; entry->encoder=(EncodeImageHandler *) WriteJNGImage; #endif #endif entry->magick=(IsImageFormatHandler *) IsJNG; entry->adjoin=MagickFalse; entry->description=ConstantString("JPEG Network Graphics"); entry->mime_type=ConstantString("image/x-jng"); entry->module=ConstantString("PNG"); entry->note=ConstantString(JNGNote); (void) RegisterMagickInfo(entry); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE ping_semaphore=AllocateSemaphoreInfo(); #endif return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPNGImage() removes format registrations made by the % PNG module from the list of supported formats. % % The format of the UnregisterPNGImage method is: % % UnregisterPNGImage(void) % */ ModuleExport void UnregisterPNGImage(void) { (void) UnregisterMagickInfo("MNG"); (void) UnregisterMagickInfo("PNG"); (void) UnregisterMagickInfo("PNG8"); (void) UnregisterMagickInfo("PNG24"); (void) UnregisterMagickInfo("PNG32"); (void) UnregisterMagickInfo("PNG48"); (void) UnregisterMagickInfo("PNG64"); (void) UnregisterMagickInfo("PNG00"); (void) UnregisterMagickInfo("JNG"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE if (ping_semaphore != (SemaphoreInfo *) NULL) DestroySemaphoreInfo(&ping_semaphore); #endif } #if defined(MAGICKCORE_PNG_DELEGATE) #if PNG_LIBPNG_VER > 10011 /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e M N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteMNGImage() writes an image in the Portable Network Graphics % Group's "Multiple-image Network Graphics" encoded image format. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WriteMNGImage method is: % % MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % % To do (as of version 5.5.2, November 26, 2002 -- glennrp -- see also % "To do" under ReadPNGImage): % % Preserve all unknown and not-yet-handled known chunks found in input % PNG file and copy them into output PNG files according to the PNG % copying rules. % % Write the iCCP chunk at MNG level when (icc profile length > 0) % % Improve selection of color type (use indexed-colour or indexed-colour % with tRNS when 256 or fewer unique RGBA values are present). % % Figure out what to do with "dispose=<restore-to-previous>" (dispose == 3) % This will be complicated if we limit ourselves to generating MNG-LC % files. 
For now we ignore disposal method 3 and simply overlay the next % image on it. % % Check for identical PLTE's or PLTE/tRNS combinations and use a % global MNG PLTE or PLTE/tRNS combination when appropriate. % [mostly done 15 June 1999 but still need to take care of tRNS] % % Check for identical sRGB and replace with a global sRGB (and remove % gAMA/cHRM if sRGB is found; check for identical gAMA/cHRM and % replace with global gAMA/cHRM (or with sRGB if appropriate; replace % local gAMA/cHRM with local sRGB if appropriate). % % Check for identical sBIT chunks and write global ones. % % Provide option to skip writing the signature tEXt chunks. % % Use signatures to detect identical objects and reuse the first % instance of such objects instead of writing duplicate objects. % % Use a smaller-than-32k value of compression window size when % appropriate. % % Encode JNG datastreams. Mostly done as of 5.5.2; need to write % ancillary text chunks and save profiles. % % Provide an option to force LC files (to ensure exact framing rate) % instead of VLC. % % Provide an option to force VLC files instead of LC, even when offsets % are present. This will involve expanding the embedded images with a % transparent region at the top and/or left. */ static void Magick_png_write_raw_profile(const ImageInfo *image_info,png_struct *ping, png_info *ping_info, unsigned char *profile_type, unsigned char *profile_description, unsigned char *profile_data, png_uint_32 length) { png_textp text; register ssize_t i; unsigned char *sp; png_charp dp; png_uint_32 allocated_length, description_length; unsigned char hex[16]={'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; if (LocaleNCompare((char *) profile_type+1, "ng-chunk-",9) == 0) return; if (image_info->verbose) { (void) printf("writing raw profile: type=%s, length=%.20g\n", (char *) profile_type, (double) length); } #if PNG_LIBPNG_VER >= 10400 text=(png_textp) png_malloc(ping,(png_alloc_size_t) sizeof(png_text)); #else text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text)); #endif description_length=(png_uint_32) strlen((const char *) profile_description); allocated_length=(png_uint_32) (length*2 + (length >> 5) + 20 + description_length); #if PNG_LIBPNG_VER >= 10400 text[0].text=(png_charp) png_malloc(ping, (png_alloc_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_alloc_size_t) 80); #else text[0].text=(png_charp) png_malloc(ping, (png_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_size_t) 80); #endif text[0].key[0]='\0'; (void) ConcatenateMagickString(text[0].key, "Raw profile type ",MaxTextExtent); (void) ConcatenateMagickString(text[0].key,(const char *) profile_type,62); sp=profile_data; dp=text[0].text; *dp++='\n'; (void) CopyMagickString(dp,(const char *) profile_description, allocated_length); dp+=description_length; *dp++='\n'; (void) FormatLocaleString(dp,allocated_length- (png_size_t) (dp-text[0].text),"%8lu ",(unsigned long) length); dp+=8; for (i=0; i < (ssize_t) length; i++) { if (i%36 == 0) *dp++='\n'; *(dp++)=(char) hex[((*sp >> 4) & 0x0f)]; *(dp++)=(char) hex[((*sp++ ) & 0x0f)]; } *dp++='\n'; *dp='\0'; text[0].text_length=(png_size_t) (dp-text[0].text); text[0].compression=image_info->compression == NoCompression || (image_info->compression == UndefinedCompression && text[0].text_length < 128) ? 
-1 : 0; if (text[0].text_length <= allocated_length) png_set_text(ping,ping_info,text,1); png_free(ping,text[0].text); png_free(ping,text[0].key); png_free(ping,text); } static MagickBooleanType Magick_png_write_chunk_from_profile(Image *image, const char *string, MagickBooleanType logging) { char *name; const StringInfo *profile; unsigned char *data; png_uint_32 length; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (const StringInfo *) NULL) { StringInfo *ping_profile; if (LocaleNCompare(name,string,11) == 0) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Found %s profile",name); ping_profile=CloneStringInfo(profile); data=GetStringInfoDatum(ping_profile), length=(png_uint_32) GetStringInfoLength(ping_profile); data[4]=data[3]; data[3]=data[2]; data[2]=data[1]; data[1]=data[0]; (void) WriteBlobMSBULong(image,length-5); /* data length */ (void) WriteBlob(image,length-1,data+1); (void) WriteBlobMSBULong(image,crc32(0,data+1,(uInt) length-1)); ping_profile=DestroyStringInfo(ping_profile); } } name=GetNextImageProfile(image); } return(MagickTrue); } #if defined(PNG_tIME_SUPPORTED) static void write_tIME_chunk(Image *image,png_struct *ping,png_info *info, const char *date) { unsigned int day, hour, minute, month, second, year; png_time ptime; time_t ttime; if (date != (const char *) NULL) { if (sscanf(date,"%d-%d-%dT%d:%d:%dZ",&year,&month,&day,&hour,&minute, &second) != 6) { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderError, "Invalid date format specified for png:tIME","`%s'", image->filename); return; } ptime.year=(png_uint_16) year; ptime.month=(png_byte) month; ptime.day=(png_byte) day; ptime.hour=(png_byte) hour; ptime.minute=(png_byte) minute; ptime.second=(png_byte) second; } else { time(&ttime); png_convert_from_time_t(&ptime,ttime); } png_set_tIME(ping,info,&ptime); } #endif /* Write one PNG image */ static MagickBooleanType WriteOnePNGImage(MngInfo *mng_info, const ImageInfo *image_info,Image *image) { char s[2]; char im_vers[32], libpng_runv[32], libpng_vers[32], zlib_runv[32], zlib_vers[32]; const char *name, *property, *value; const StringInfo *profile; int num_passes, pass, ping_wrote_caNv; png_byte ping_trans_alpha[256]; png_color palette[257]; png_color_16 ping_background, ping_trans_color; png_info *ping_info; png_struct *ping; png_uint_32 ping_height, ping_width; ssize_t y; MagickBooleanType image_matte, logging, matte, ping_have_blob, ping_have_cheap_transparency, ping_have_color, ping_have_non_bw, ping_have_PLTE, ping_have_bKGD, ping_have_eXIf, ping_have_iCCP, ping_have_pHYs, ping_have_sRGB, ping_have_tRNS, ping_exclude_bKGD, ping_exclude_cHRM, ping_exclude_date, /* ping_exclude_EXIF, */ ping_exclude_eXIf, ping_exclude_gAMA, ping_exclude_iCCP, /* ping_exclude_iTXt, */ ping_exclude_oFFs, ping_exclude_pHYs, ping_exclude_sRGB, ping_exclude_tEXt, ping_exclude_tIME, /* ping_exclude_tRNS, */ ping_exclude_vpAg, ping_exclude_caNv, ping_exclude_zCCP, /* hex-encoded iCCP */ ping_exclude_zTXt, ping_preserve_colormap, ping_preserve_iCCP, ping_need_colortype_warning, status, tried_332, tried_333, tried_444; MemoryInfo *volatile pixel_info; QuantumInfo *quantum_info; register ssize_t i, x; unsigned char *ping_pixels; volatile int image_colors, ping_bit_depth, ping_color_type, ping_interlace_method, ping_compression_method, ping_filter_method, ping_num_trans; volatile size_t image_depth, old_bit_depth; size_t quality, 
rowbytes, save_image_depth; int j, number_colors, number_opaque, number_semitransparent, number_transparent, ping_pHYs_unit_type; png_uint_32 ping_pHYs_x_resolution, ping_pHYs_y_resolution; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter WriteOnePNGImage()"); /* Define these outside of the following "if logging()" block so they will * show in debuggers. */ *im_vers='\0'; (void) ConcatenateMagickString(im_vers, MagickLibVersionText,MaxTextExtent); (void) ConcatenateMagickString(im_vers, MagickLibAddendum,MaxTextExtent); *libpng_vers='\0'; (void) ConcatenateMagickString(libpng_vers, PNG_LIBPNG_VER_STRING,32); *libpng_runv='\0'; (void) ConcatenateMagickString(libpng_runv, png_get_libpng_ver(NULL),32); *zlib_vers='\0'; (void) ConcatenateMagickString(zlib_vers, ZLIB_VERSION,32); *zlib_runv='\0'; (void) ConcatenateMagickString(zlib_runv, zlib_version,32); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," IM version = %s", im_vers); (void) LogMagickEvent(CoderEvent,GetMagickModule()," Libpng version = %s", libpng_vers); if (LocaleCompare(libpng_vers,libpng_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," running with %s", libpng_runv); } (void) LogMagickEvent(CoderEvent,GetMagickModule()," Zlib version = %s", zlib_vers); if (LocaleCompare(zlib_vers,zlib_runv) != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule()," running with %s", zlib_runv); } } /* Initialize some stuff */ ping_bit_depth=0, ping_color_type=0, ping_interlace_method=0, ping_compression_method=0, ping_filter_method=0, ping_num_trans = 0; ping_background.red = 0; ping_background.green = 0; ping_background.blue = 0; ping_background.gray = 0; ping_background.index = 0; ping_trans_color.red=0; ping_trans_color.green=0; ping_trans_color.blue=0; ping_trans_color.gray=0; ping_pHYs_unit_type = 0; ping_pHYs_x_resolution = 0; ping_pHYs_y_resolution = 0; ping_have_blob=MagickFalse; ping_have_cheap_transparency=MagickFalse; ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; ping_have_PLTE=MagickFalse; ping_have_bKGD=MagickFalse; ping_have_eXIf=MagickTrue; ping_have_iCCP=MagickFalse; ping_have_pHYs=MagickFalse; ping_have_sRGB=MagickFalse; ping_have_tRNS=MagickFalse; ping_exclude_bKGD=mng_info->ping_exclude_bKGD; ping_exclude_caNv=mng_info->ping_exclude_caNv; ping_exclude_cHRM=mng_info->ping_exclude_cHRM; ping_exclude_date=mng_info->ping_exclude_date; /* ping_exclude_EXIF=mng_info->ping_exclude_EXIF; */ ping_exclude_eXIf=mng_info->ping_exclude_eXIf; ping_exclude_gAMA=mng_info->ping_exclude_gAMA; ping_exclude_iCCP=mng_info->ping_exclude_iCCP; /* ping_exclude_iTXt=mng_info->ping_exclude_iTXt; */ ping_exclude_oFFs=mng_info->ping_exclude_oFFs; ping_exclude_pHYs=mng_info->ping_exclude_pHYs; ping_exclude_sRGB=mng_info->ping_exclude_sRGB; ping_exclude_tEXt=mng_info->ping_exclude_tEXt; ping_exclude_tIME=mng_info->ping_exclude_tIME; /* ping_exclude_tRNS=mng_info->ping_exclude_tRNS; */ ping_exclude_vpAg=mng_info->ping_exclude_vpAg; ping_exclude_zCCP=mng_info->ping_exclude_zCCP; /* hex-encoded iCCP in zTXt */ ping_exclude_zTXt=mng_info->ping_exclude_zTXt; ping_preserve_colormap = mng_info->ping_preserve_colormap; ping_preserve_iCCP = mng_info->ping_preserve_iCCP; ping_need_colortype_warning = MagickFalse; property=(const char *) NULL; /* Recognize the ICC sRGB profile and convert it to the sRGB chunk, * i.e., eliminate the ICC profile and set image->rendering_intent. 
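   * The recognition is length-and-CRC based: a profile is considered
   * for the sRGB substitution only if its byte count matches an entry
   * in the sRGB_info[] table, and the zlib crc32() of its data is then
   * compared against that entry's known checksum.
   *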
* Note that this will not involve any changes to the actual pixels * but merely passes information to applications that read the resulting * PNG image. * * To do: recognize other variants of the sRGB profile, using the CRC to * verify all recognized variants including the 7 already known. * * Work around libpng16+ rejecting some "known invalid sRGB profiles". * * Use something other than image->rendering_intent to record the fact * that the sRGB profile was found. * * Record the ICC version (currently v2 or v4) of the incoming sRGB ICC * profile. Record the Blackpoint Compensation, if any. */ if (ping_exclude_sRGB == MagickFalse && ping_preserve_iCCP == MagickFalse) { char *name; const StringInfo *profile; ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0)) { int icheck, got_crc=0; png_uint_32 length, profile_crc=0; unsigned char *data; length=(png_uint_32) GetStringInfoLength(profile); for (icheck=0; sRGB_info[icheck].len > 0; icheck++) { if (length == sRGB_info[icheck].len) { if (got_crc == 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile (potentially sRGB)", (unsigned long) length); data=GetStringInfoDatum(profile); profile_crc=crc32(0,data,length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " with crc=%8x",(unsigned int) profile_crc); got_crc++; } if (profile_crc == sRGB_info[icheck].crc) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " It is sRGB with rendering intent = %s", Magick_RenderingIntentString_from_PNG_RenderingIntent( sRGB_info[icheck].intent)); if (image->rendering_intent==UndefinedIntent) { image->rendering_intent= Magick_RenderingIntent_from_PNG_RenderingIntent( sRGB_info[icheck].intent); } ping_exclude_iCCP = MagickTrue; ping_exclude_zCCP = MagickTrue; ping_have_sRGB = MagickTrue; break; } } } if (sRGB_info[icheck].len == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Got a %lu-byte ICC profile not recognized as sRGB", (unsigned long) length); } } name=GetNextImageProfile(image); } } number_opaque = 0; number_semitransparent = 0; number_transparent = 0; if (logging != MagickFalse) { if (image->storage_class == UndefinedClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=UndefinedClass"); if (image->storage_class == DirectClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=DirectClass"); if (image->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->storage_class=PseudoClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_info->magick= %s",image_info->magick); (void) LogMagickEvent(CoderEvent,GetMagickModule(), image->taint ? 
" image->taint=MagickTrue": " image->taint=MagickFalse"); } if (image->storage_class == PseudoClass && (mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (mng_info->write_png_colortype != 1 && mng_info->write_png_colortype != 5))) { (void) SyncImage(image); image->storage_class = DirectClass; } if (ping_preserve_colormap == MagickFalse) { if (image->storage_class != PseudoClass && image->colormap != NULL) { /* Free the bogus colormap; it can cause trouble later */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Freeing bogus colormap"); (void) RelinquishMagickMemory(image->colormap); image->colormap=NULL; } } if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace); /* Sometimes we get PseudoClass images whose RGB values don't match the colors in the colormap. This code syncs the RGB values. */ if (image->depth <= 8 && image->taint && image->storage_class == PseudoClass) (void) SyncImage(image); #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (image->depth > 8) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reducing PNG bit depth to 8 since this is a Q8 build."); image->depth=8; } #endif /* Respect the -depth option */ if (image->depth < 4) { register PixelPacket *r; ExceptionInfo *exception; exception=(&image->exception); if (image->depth > 2) { /* Scale to 4-bit */ LBR04PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR04PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR04PacketRGBO(image->colormap[i]); } } } else if (image->depth > 1) { /* Scale to 2-bit */ LBR02PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR02PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR02PacketRGBO(image->colormap[i]); } } } else { /* Scale to 1-bit */ LBR01PacketRGBO(image->background_color); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { LBR01PixelRGBO(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } if (image->storage_class == PseudoClass && image->colormap != NULL) { for (i=0; i < (ssize_t) image->colors; i++) { LBR01PacketRGBO(image->colormap[i]); } } } } /* To do: set to next higher multiple of 8 */ if (image->depth < 8) image->depth=8; #if (MAGICKCORE_QUANTUM_DEPTH > 16) /* PNG does not handle depths greater than 16 so reduce it even * if lossy */ if (image->depth > 8) image->depth=16; #endif #if (MAGICKCORE_QUANTUM_DEPTH > 8) if (image->depth > 8) { /* To do: fill low byte properly */ image->depth=16; } if (image->depth == 16 && mng_info->write_png_depth != 16) if (mng_info->write_png8 || LosslessReduceDepthOK(image) != MagickFalse) image->depth = 8; #endif image_colors = (int) 
image->colors; if (mng_info->write_png_colortype && (mng_info->write_png_colortype > 4 || (mng_info->write_png_depth >= 8 && mng_info->write_png_colortype < 4 && image->matte == MagickFalse))) { /* Avoid the expensive BUILD_PALETTE operation if we're sure that we * are not going to need the result. */ number_opaque = (int) image->colors; if (mng_info->write_png_colortype == 1 || mng_info->write_png_colortype == 5) ping_have_color=MagickFalse; else ping_have_color=MagickTrue; ping_have_non_bw=MagickFalse; if (image->matte != MagickFalse) { number_transparent = 2; number_semitransparent = 1; } else { number_transparent = 0; number_semitransparent = 0; } } if (mng_info->write_png_colortype < 7) { /* BUILD_PALETTE * * Normally we run this just once, but in the case of writing PNG8 * we reduce the transparency to binary and run again, then if there * are still too many colors we reduce to a simple 4-4-4-1, then 3-3-3-1 * RGBA palette and run again, and then to a simple 3-3-2-1 RGBA * palette. Then (To do) we take care of a final reduction that is only * needed if there are still 256 colors present and one of them has both * transparent and opaque instances. */ tried_332 = MagickFalse; tried_333 = MagickFalse; tried_444 = MagickFalse; for (j=0; j<6; j++) { /* * Sometimes we get DirectClass images that have 256 colors or fewer. * This code will build a colormap. * * Also, sometimes we get PseudoClass images with an out-of-date * colormap. This code will replace the colormap with a new one. * Sometimes we get PseudoClass images that have more than 256 colors. * This code will delete the colormap and change the image to * DirectClass. * * If image->matte is MagickFalse, we ignore the opacity channel * even though it sometimes contains left-over non-opaque values. * * Also we gather some information (number of opaque, transparent, * and semitransparent pixels, and whether the image has any non-gray * pixels or only black-and-white pixels) that we might need later. * * Even if the user wants to force GrayAlpha or RGBA (colortype 4 or 6) * we need to check for bogus non-opaque values, at least. 
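       * The scan below records at most 259 distinct colors in each of
       * the opaque[], transparent[], and semitransparent[] arrays; once
       * a tally reaches 259, further colors of that class are not
       * recorded, which is already enough to know that the 256-color
       * limit for a PLTE has been exceeded.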
*/ ExceptionInfo *exception; int n; PixelPacket opaque[260], semitransparent[260], transparent[260]; register IndexPacket *indexes; register const PixelPacket *s, *q; register PixelPacket *r; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Enter BUILD_PALETTE:"); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->columns=%.20g",(double) image->columns); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->rows=%.20g",(double) image->rows); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->matte=%.20g",(double) image->matte); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth=%.20g",(double) image->depth); if (image->storage_class == PseudoClass && image->colormap != NULL) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Original colormap:"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " i (red,green,blue,opacity)"); /* Dump no more entries than were allocated, to avoid an out-of-bounds read when image->colors < 256 */ for (i=0; i < (ssize_t) MagickMin(image->colors,256); i++) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } /* Clamp the start index at zero so that image->colors < 10 cannot underflow in the size_t subtraction */ for (i=MagickMax((ssize_t) image->colors-10,0); i < (ssize_t) image->colors; i++) { if (i > 255) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } } } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d",(int) image->colors); if (image->colors == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " (zero means unknown)"); if (ping_preserve_colormap == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Regenerate the colormap"); } exception=(&image->exception); image_colors=0; number_opaque = 0; number_semitransparent = 0; number_transparent = 0; for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (image->matte == MagickFalse || GetPixelOpacity(q) == OpaqueOpacity) { if (number_opaque < 259) { if (number_opaque == 0) { GetPixelRGB(q, opaque); opaque[0].opacity=OpaqueOpacity; number_opaque=1; } for (i=0; i< (ssize_t) number_opaque; i++) { if (IsColorEqual(q, opaque+i)) break; } if (i == (ssize_t) number_opaque && number_opaque < 259) { number_opaque++; GetPixelRGB(q, opaque+i); opaque[i].opacity=OpaqueOpacity; } } } else if (q->opacity == TransparentOpacity) { if (number_transparent < 259) { if (number_transparent == 0) { GetPixelRGBO(q, transparent); ping_trans_color.red= (unsigned short) GetPixelRed(q); ping_trans_color.green= (unsigned short) GetPixelGreen(q); ping_trans_color.blue= (unsigned short) GetPixelBlue(q); ping_trans_color.gray= (unsigned short) GetPixelRed(q); number_transparent = 1; } for (i=0; i< (ssize_t) number_transparent; i++) { if (IsColorEqual(q, transparent+i)) break; } if (i == (ssize_t) number_transparent && number_transparent < 259) { number_transparent++; GetPixelRGBO(q, transparent+i); } } } else { if (number_semitransparent < 259) { if (number_semitransparent == 0) { GetPixelRGBO(q, semitransparent); number_semitransparent = 1; } for (i=0; i< (ssize_t) number_semitransparent; i++) { if (IsColorEqual(q, semitransparent+i) && GetPixelOpacity(q) == semitransparent[i].opacity) break; } if (i == (ssize_t) number_semitransparent && number_semitransparent < 259) {
number_semitransparent++; GetPixelRGBO(q, semitransparent+i); } } } q++; } } if (mng_info->write_png8 == MagickFalse && ping_exclude_bKGD == MagickFalse) { /* Add the background color to the palette, if it * isn't already there. */ if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Check colormap for background (%d,%d,%d)", (int) image->background_color.red, (int) image->background_color.green, (int) image->background_color.blue); } for (i=0; i<number_opaque; i++) { if (opaque[i].red == image->background_color.red && opaque[i].green == image->background_color.green && opaque[i].blue == image->background_color.blue) break; } if (number_opaque < 259 && i == number_opaque) { opaque[i] = image->background_color; ping_background.index = i; number_opaque++; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d",(int) i); } } else if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No room in the colormap to add background color"); } image_colors=number_opaque+number_transparent+number_semitransparent; if (logging != MagickFalse) { if (image_colors > 256) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has more than 256 colors"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has %d colors",image_colors); } if (ping_preserve_colormap != MagickFalse) break; if (mng_info->write_png_colortype != 7) /* We won't need this info */ { ping_have_color=MagickFalse; ping_have_non_bw=MagickFalse; if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "incompatible colorspace"); ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; } if(image_colors > 256) { for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; s=q; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelRed(s) != GetPixelGreen(s) || GetPixelRed(s) != GetPixelBlue(s)) { ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; break; } s++; } if (ping_have_color != MagickFalse) break; /* Worst case is black-and-white; we are looking at every * pixel twice. */ if (ping_have_non_bw == MagickFalse) { s=q; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelRed(s) != 0 && GetPixelRed(s) != QuantumRange) { ping_have_non_bw=MagickTrue; break; } s++; } } } } } if (image_colors < 257) { PixelPacket colormap[260]; /* * Initialize image colormap. 
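             * Transparent and semitransparent entries are sorted to the
             * front so that, when color type 3 is selected, the tRNS
             * chunk (one alpha byte per palette index) can stop after
             * the last non-opaque entry.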
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Sort the new colormap"); /* Sort palette, transparent first */; n = 0; for (i=0; i<number_transparent; i++) colormap[n++] = transparent[i]; for (i=0; i<number_semitransparent; i++) colormap[n++] = semitransparent[i]; for (i=0; i<number_opaque; i++) colormap[n++] = opaque[i]; ping_background.index += (number_transparent + number_semitransparent); /* image_colors < 257; search the colormap instead of the pixels * to get ping_have_color and ping_have_non_bw */ for (i=0; i<n; i++) { if (ping_have_color == MagickFalse) { if (colormap[i].red != colormap[i].green || colormap[i].red != colormap[i].blue) { ping_have_color=MagickTrue; ping_have_non_bw=MagickTrue; break; } } if (ping_have_non_bw == MagickFalse) { if (colormap[i].red != 0 && colormap[i].red != QuantumRange) ping_have_non_bw=MagickTrue; } } if ((mng_info->ping_exclude_tRNS == MagickFalse || (number_transparent == 0 && number_semitransparent == 0)) && (((mng_info->write_png_colortype-1) == PNG_COLOR_TYPE_PALETTE) || (mng_info->write_png_colortype == 0))) { if (logging != MagickFalse) { if (n != (ssize_t) image_colors) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_colors (%d) and n (%d) don't match", image_colors, n); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " AcquireImageColormap"); } image->colors = image_colors; if (AcquireImageColormap(image,image_colors) == MagickFalse) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); for (i=0; i< (ssize_t) image_colors; i++) image->colormap[i] = colormap[i]; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d (%d)", (int) image->colors, image_colors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Update the pixel indexes"); } /* Sync the pixel indices with the new colormap */ for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i< (ssize_t) image_colors; i++) { if ((image->matte == MagickFalse || image->colormap[i].opacity == GetPixelOpacity(q)) && image->colormap[i].red == GetPixelRed(q) && image->colormap[i].green == GetPixelGreen(q) && image->colormap[i].blue == GetPixelBlue(q)) { SetPixelIndex(indexes+x,i); break; } } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->colors=%d", (int) image->colors); if (image->colormap != NULL) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " i (red,green,blue,opacity)"); for (i=0; i < (ssize_t) image->colors; i++) { if (i < 300 || i >= (ssize_t) image->colors - 10) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %d (%d,%d,%d,%d)", (int) i, (int) image->colormap[i].red, (int) image->colormap[i].green, (int) image->colormap[i].blue, (int) image->colormap[i].opacity); } } } if (number_transparent < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_transparent = %d", number_transparent); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_transparent > 256"); if (number_opaque < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_opaque = %d", number_opaque); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_opaque > 256"); if (number_semitransparent < 257) (void) LogMagickEvent(CoderEvent,GetMagickModule(), 
" number_semitransparent = %d", number_semitransparent); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " number_semitransparent > 256"); if (ping_have_non_bw == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " All pixels and the background are black or white"); else if (ping_have_color == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " All pixels and the background are gray"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " At least one pixel or the background is non-gray"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Exit BUILD_PALETTE:"); } if (mng_info->write_png8 == MagickFalse) break; /* Make any reductions necessary for the PNG8 format */ if (image_colors <= 256 && image_colors != 0 && image->colormap != NULL && number_semitransparent == 0 && number_transparent <= 1) break; /* PNG8 can't have semitransparent colors so we threshold the * opacity to 0 or OpaqueOpacity, and PNG8 can only have one * transparent color so if more than one is transparent we merge * them into image->background_color. */ if (number_semitransparent != 0 || number_transparent > 1) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Thresholding the alpha channel to binary"); for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) > TransparentOpacity/2) { SetPixelOpacity(r,TransparentOpacity); SetPixelRgb(r,&image->background_color); } else SetPixelOpacity(r,OpaqueOpacity); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image_colors != 0 && image_colors <= 256 && image->colormap != NULL) for (i=0; i<image_colors; i++) image->colormap[i].opacity = (image->colormap[i].opacity > TransparentOpacity/2 ? TransparentOpacity : OpaqueOpacity); } continue; } /* PNG8 can't have more than 256 colors so we quantize the pixels and * background color to the 4-4-4-1, 3-3-3-1 or 3-3-2-1 palette. If the * image is mostly gray, the 4-4-4-1 palette is likely to end up with 256 * colors or less. 
*/ if (tried_444 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 4-4-4"); tried_444 = MagickTrue; LBR04PacketRGB(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 4-4-4"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR04PixelRGB(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 4-4-4"); for (i=0; i<image_colors; i++) { LBR04PacketRGB(image->colormap[i]); } } continue; } if (tried_333 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 3-3-3"); tried_333 = MagickTrue; LBR03PacketRGB(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 3-3-3-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR03PixelRGB(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 3-3-3-1"); for (i=0; i<image_colors; i++) { LBR03PacketRGB(image->colormap[i]); } } continue; } if (tried_332 == MagickFalse && (image_colors == 0 || image_colors > 256)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the background color to 3-3-2"); tried_332 = MagickTrue; /* Red and green were already done so we only quantize the blue * channel */ LBR02PacketBlue(image->background_color); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the pixel colors to 3-3-2-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(r) == OpaqueOpacity) LBR02PixelBlue(r); r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else /* Should not reach this; colormap already exists and must be <= 256 */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Quantizing the colormap to 3-3-2-1"); for (i=0; i<image_colors; i++) { LBR02PacketBlue(image->colormap[i]); } } continue; } if (image_colors == 0 || image_colors > 256) { /* Take care of special case with 256 opaque colors + 1 transparent * color. We don't need to quantize to 2-3-2-1; we only need to * eliminate one color, so we'll merge the two darkest red * colors (0x49, 0, 0) -> (0x24, 0, 0). 
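        * Under 3-bit replication the two smallest nonzero red levels are
        * 001 -> 0x24 and 010 -> 0x49, so mapping 0x49 down to 0x24
        * collapses two adjacent palette levels and frees exactly one
        * slot.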
*/ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Merging two dark red background colors to 3-3-2-1"); if (ScaleQuantumToChar(image->background_color.red) == 0x49 && ScaleQuantumToChar(image->background_color.green) == 0x00 && ScaleQuantumToChar(image->background_color.blue) == 0x00) { image->background_color.red=ScaleCharToQuantum(0x24); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Merging two dark red pixel colors to 3-3-2-1"); if (image->colormap == NULL) { for (y=0; y < (ssize_t) image->rows; y++) { r=GetAuthenticPixels(image,0,y,image->columns,1, exception); if (r == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (ScaleQuantumToChar(GetPixelRed(r)) == 0x49 && ScaleQuantumToChar(GetPixelGreen(r)) == 0x00 && ScaleQuantumToChar(GetPixelBlue(r)) == 0x00 && GetPixelOpacity(r) == OpaqueOpacity) { SetPixelRed(r,ScaleCharToQuantum(0x24)); } r++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else { for (i=0; i<image_colors; i++) { if (ScaleQuantumToChar(image->colormap[i].red) == 0x49 && ScaleQuantumToChar(image->colormap[i].green) == 0x00 && ScaleQuantumToChar(image->colormap[i].blue) == 0x00) { image->colormap[i].red=ScaleCharToQuantum(0x24); } } } } } } /* END OF BUILD_PALETTE */ /* If we are excluding the tRNS chunk and there is transparency, * then we must write a Gray-Alpha (color-type 4) or RGBA (color-type 6) * PNG. */ if (mng_info->ping_exclude_tRNS != MagickFalse && (number_transparent != 0 || number_semitransparent != 0)) { unsigned int colortype=mng_info->write_png_colortype; if (ping_have_color == MagickFalse) mng_info->write_png_colortype = 5; else mng_info->write_png_colortype = 7; if (colortype != 0 && mng_info->write_png_colortype != colortype) ping_need_colortype_warning=MagickTrue; } /* See if cheap transparency is possible. It is only possible * when there is a single transparent color, no semitransparent * color, and no opaque color that has the same RGB components * as the transparent color. We only need this information if * we are writing a PNG with colortype 0 or 2, and we have not * excluded the tRNS chunk. */ if (number_transparent == 1 && mng_info->write_png_colortype < 4) { ping_have_cheap_transparency = MagickTrue; if (number_semitransparent != 0) ping_have_cheap_transparency = MagickFalse; else if (image_colors == 0 || image_colors > 256 || image->colormap == NULL) { ExceptionInfo *exception; register const PixelPacket *q; exception=(&image->exception); for (y=0; y < (ssize_t) image->rows; y++) { q=GetVirtualPixels(image,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity != TransparentOpacity && (unsigned short) GetPixelRed(q) == ping_trans_color.red && (unsigned short) GetPixelGreen(q) == ping_trans_color.green && (unsigned short) GetPixelBlue(q) == ping_trans_color.blue) { ping_have_cheap_transparency = MagickFalse; break; } q++; } if (ping_have_cheap_transparency == MagickFalse) break; } } else { /* Assuming that image->colormap[0] is the one transparent color * and that all others are opaque. 
*/ if (image_colors > 1) for (i=1; i<image_colors; i++) if (image->colormap[i].red == image->colormap[0].red && image->colormap[i].green == image->colormap[0].green && image->colormap[i].blue == image->colormap[0].blue) { ping_have_cheap_transparency = MagickFalse; break; } } if (logging != MagickFalse) { if (ping_have_cheap_transparency == MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Cheap transparency is not possible."); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Cheap transparency is possible."); } } else ping_have_cheap_transparency = MagickFalse; image_depth=image->depth; quantum_info = (QuantumInfo *) NULL; number_colors=0; image_colors=(int) image->colors; image_matte=image->matte; if (mng_info->write_png_colortype < 5) mng_info->IsPalette=image->storage_class == PseudoClass && image_colors <= 256 && image->colormap != NULL; else mng_info->IsPalette = MagickFalse; if ((mng_info->write_png_colortype == 4 || mng_info->write_png8) && (image->colors == 0 || image->colormap == NULL)) { (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderError, "Cannot write PNG8 or color-type 3; colormap is NULL", "`%s'",image->filename); return(MagickFalse); } /* Allocate the PNG structures */ #ifdef PNG_USER_MEM_SUPPORTED ping=png_create_write_struct_2(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler,(void *) NULL, (png_malloc_ptr) Magick_png_malloc,(png_free_ptr) Magick_png_free); #else ping=png_create_write_struct(PNG_LIBPNG_VER_STRING,image, MagickPNGErrorHandler,MagickPNGWarningHandler); #endif if (ping == (png_struct *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); ping_info=png_create_info_struct(ping); if (ping_info == (png_info *) NULL) { png_destroy_write_struct(&ping,(png_info **) NULL); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } png_set_write_fn(ping,image,png_put_data,png_flush_data); pixel_info=(MemoryInfo *) NULL; if (setjmp(png_jmpbuf(ping))) { /* PNG write failed. */ #ifdef PNG_DEBUG if (image_info->verbose) (void) printf("PNG write has failed.\n"); #endif png_destroy_write_struct(&ping,&ping_info); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); return(MagickFalse); } /* { For navigation to end of SETJMP-protected block. Within this * block, use png_error() instead of Throwing an Exception, to ensure * that libpng is able to clean up, and that the semaphore is unlocked. */ #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE LockSemaphoreInfo(ping_semaphore); #endif #ifdef PNG_BENIGN_ERRORS_SUPPORTED /* Allow benign errors */ png_set_benign_errors(ping, 1); #endif #ifdef PNG_SET_USER_LIMITS_SUPPORTED /* Reject images with too many rows or columns */ png_set_user_limits(ping, (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(WidthResource)), (png_uint_32) MagickMin(0x7fffffffL, GetMagickResourceLimit(HeightResource))); #endif /* PNG_SET_USER_LIMITS_SUPPORTED */ /* Prepare PNG for writing. 
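   * PNG permits only bit depths 1, 2, 4, 8, and 16, so any requested
   * depth (png:bit-depth) is rounded up below: 3 becomes 4, 5 through 7
   * become 8, and anything above 8 becomes 16.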
*/ #if defined(PNG_MNG_FEATURES_SUPPORTED) if (mng_info->write_mng) { (void) png_permit_mng_features(ping,PNG_ALL_MNG_FEATURES); # ifdef PNG_WRITE_CHECK_FOR_INVALID_INDEX_SUPPORTED /* Disable new libpng-1.5.10 feature when writing a MNG because * zero-length PLTE is OK */ png_set_check_for_invalid_index (ping, 0); # endif } #else # ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED if (mng_info->write_mng) png_permit_empty_plte(ping,MagickTrue); # endif #endif x=0; ping_width=(png_uint_32) image->columns; ping_height=(png_uint_32) image->rows; if (mng_info->write_png8 || mng_info->write_png24 || mng_info->write_png32) image_depth=8; if (mng_info->write_png48 || mng_info->write_png64) image_depth=16; if (mng_info->write_png_depth != 0) image_depth=mng_info->write_png_depth; /* Adjust requested depth to next higher valid depth if necessary */ if (image_depth > 8) image_depth=16; if ((image_depth > 4) && (image_depth < 8)) image_depth=8; if (image_depth == 3) image_depth=4; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " width=%.20g",(double) ping_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " height=%.20g",(double) ping_height); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_matte=%.20g",(double) image->matte); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth=%.20g",(double) image->depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative ping_bit_depth=%.20g",(double) image_depth); } save_image_depth=image_depth; ping_bit_depth=(png_byte) save_image_depth; #if defined(PNG_pHYs_SUPPORTED) if (ping_exclude_pHYs == MagickFalse) { if ((image->x_resolution != 0) && (image->y_resolution != 0) && (!mng_info->write_mng || !mng_info->equal_physs)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up pHYs chunk"); if (image->units == PixelsPerInchResolution) { ping_pHYs_unit_type=PNG_RESOLUTION_METER; ping_pHYs_x_resolution= (png_uint_32) ((100.0*image->x_resolution+0.5)/2.54); ping_pHYs_y_resolution= (png_uint_32) ((100.0*image->y_resolution+0.5)/2.54); } else if (image->units == PixelsPerCentimeterResolution) { ping_pHYs_unit_type=PNG_RESOLUTION_METER; ping_pHYs_x_resolution=(png_uint_32) (100.0*image->x_resolution+0.5); ping_pHYs_y_resolution=(png_uint_32) (100.0*image->y_resolution+0.5); } else { ping_pHYs_unit_type=PNG_RESOLUTION_UNKNOWN; ping_pHYs_x_resolution=(png_uint_32) image->x_resolution; ping_pHYs_y_resolution=(png_uint_32) image->y_resolution; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Set up PNG pHYs chunk: xres: %.20g, yres: %.20g, units: %d.", (double) ping_pHYs_x_resolution,(double) ping_pHYs_y_resolution, (int) ping_pHYs_unit_type); ping_have_pHYs = MagickTrue; } } #endif if (ping_exclude_bKGD == MagickFalse) { if ((!mng_info->adjoin || !mng_info->equal_backgrounds)) { unsigned int mask; mask=0xffff; if (ping_bit_depth == 8) mask=0x00ff; if (ping_bit_depth == 4) mask=0x000f; if (ping_bit_depth == 2) mask=0x0003; if (ping_bit_depth == 1) mask=0x0001; ping_background.red=(png_uint_16) (ScaleQuantumToShort(image->background_color.red) & mask); ping_background.green=(png_uint_16) (ScaleQuantumToShort(image->background_color.green) & mask); ping_background.blue=(png_uint_16) (ScaleQuantumToShort(image->background_color.blue) & mask); ping_background.gray=(png_uint_16) ping_background.green; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk (1)"); (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d", (int) ping_background.index); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_bit_depth=%d",ping_bit_depth); } ping_have_bKGD = MagickTrue; } /* Select the color type. */ matte=image_matte; old_bit_depth=0; if (mng_info->IsPalette && mng_info->write_png8) { /* To do: make this a function cause it's used twice, except for reducing the sample depth from 8. */ number_colors=image_colors; ping_have_tRNS=MagickFalse; /* Set image palette. */ ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up PLTE chunk with %d colors (%d)", number_colors, image_colors); for (i=0; i < (ssize_t) number_colors; i++) { palette[i].red=ScaleQuantumToChar(image->colormap[i].red); palette[i].green=ScaleQuantumToChar(image->colormap[i].green); palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), #if MAGICKCORE_QUANTUM_DEPTH == 8 " %3ld (%3d,%3d,%3d)", #else " %5ld (%5d,%5d,%5d)", #endif (long) i,palette[i].red,palette[i].green,palette[i].blue); } ping_have_PLTE=MagickTrue; image_depth=ping_bit_depth; ping_num_trans=0; if (matte != MagickFalse) { /* Identify which colormap entry is transparent. */ assert(number_colors <= 256); assert(image->colormap != NULL); for (i=0; i < (ssize_t) number_transparent; i++) ping_trans_alpha[i]=0; ping_num_trans=(unsigned short) (number_transparent + number_semitransparent); if (ping_num_trans == 0) ping_have_tRNS=MagickFalse; else ping_have_tRNS=MagickTrue; } if (ping_exclude_bKGD == MagickFalse) { /* * Identify which colormap entry is the background color. */ for (i=0; i < (ssize_t) MagickMax(1L*number_colors-1L,1L); i++) if (IsPNGColorEqual(ping_background,image->colormap[i])) break; ping_background.index=(png_byte) i; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background_color index is %d", (int) ping_background.index); } } } /* end of write_png8 */ else if (mng_info->write_png_colortype == 1) { image_matte=MagickFalse; ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY; } else if (mng_info->write_png24 || mng_info->write_png48 || mng_info->write_png_colortype == 3) { image_matte=MagickFalse; ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; } else if (mng_info->write_png32 || mng_info->write_png64 || mng_info->write_png_colortype == 7) { image_matte=MagickTrue; ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA; } else /* mng_info->write_pngNN not specified */ { image_depth=ping_bit_depth; if (mng_info->write_png_colortype != 0) { ping_color_type=(png_byte) mng_info->write_png_colortype-1; if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA || ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) image_matte=MagickTrue; else image_matte=MagickFalse; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG colortype %d was specified:",(int) ping_color_type); } else /* write_png_colortype not specified */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Selecting PNG colortype:"); ping_color_type=(png_byte) ((matte != MagickFalse)? 
PNG_COLOR_TYPE_RGB_ALPHA:PNG_COLOR_TYPE_RGB); if (image_info->type == TrueColorType) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; image_matte=MagickFalse; } if (image_info->type == TrueColorMatteType) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB_ALPHA; image_matte=MagickTrue; } if (image_info->type == PaletteType || image_info->type == PaletteMatteType) ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (mng_info->write_png_colortype == 0 && image_info->type == UndefinedType) { if (ping_have_color == MagickFalse) { if (image_matte == MagickFalse) { ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY; image_matte=MagickFalse; } else { ping_color_type=(png_byte) PNG_COLOR_TYPE_GRAY_ALPHA; image_matte=MagickTrue; } } else { if (image_matte == MagickFalse) { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGB; image_matte=MagickFalse; } else { ping_color_type=(png_byte) PNG_COLOR_TYPE_RGBA; image_matte=MagickTrue; } } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Selected PNG colortype=%d",ping_color_type); if (ping_bit_depth < 8) { if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA || ping_color_type == PNG_COLOR_TYPE_RGB || ping_color_type == PNG_COLOR_TYPE_RGB_ALPHA) ping_bit_depth=8; } old_bit_depth=ping_bit_depth; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (image->matte == MagickFalse && ping_have_non_bw == MagickFalse) ping_bit_depth=1; } if (ping_color_type == PNG_COLOR_TYPE_PALETTE) { size_t one = 1; ping_bit_depth=1; if (image->colors == 0) { /* DO SOMETHING */ png_error(ping,"image has 0 colors"); } while ((int) (one << ping_bit_depth) < (ssize_t) image_colors) ping_bit_depth <<= 1; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %.20g",(double) image_colors); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative PNG bit depth: %d",ping_bit_depth); } if (ping_bit_depth < (int) mng_info->write_png_depth) ping_bit_depth = mng_info->write_png_depth; } image_depth=ping_bit_depth; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Tentative PNG color type: %s (%.20g)", PngColorTypeToString(ping_color_type), (double) ping_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_info->type: %.20g",(double) image_info->type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image_depth: %.20g",(double) image_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image->depth: %.20g",(double) image->depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_bit_depth: %.20g",(double) ping_bit_depth); } if (matte != MagickFalse) { if (mng_info->IsPalette) { if (mng_info->write_png_colortype == 0) { ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA; if (ping_have_color != MagickFalse) ping_color_type=PNG_COLOR_TYPE_RGBA; } /* * Determine if there is any transparent color. */ if (number_transparent + number_semitransparent == 0) { /* No transparent pixels are present. Change 4 or 6 to 0 or 2. 
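            * In the PNG color-type numbering (0=gray, 2=RGB, 3=palette,
            * 4=gray+alpha, 6=RGBA) the alpha variants differ from their
            * opaque counterparts only in bit 2, so masking with 0x03
            * strips the alpha flag: 4 becomes 0 and 6 becomes 2.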
*/ image_matte=MagickFalse; if (mng_info->write_png_colortype == 0) ping_color_type&=0x03; } else { unsigned int mask; mask=0xffff; if (ping_bit_depth == 8) mask=0x00ff; if (ping_bit_depth == 4) mask=0x000f; if (ping_bit_depth == 2) mask=0x0003; if (ping_bit_depth == 1) mask=0x0001; ping_trans_color.red=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].red) & mask); ping_trans_color.green=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].green) & mask); ping_trans_color.blue=(png_uint_16) (ScaleQuantumToShort(image->colormap[0].blue) & mask); ping_trans_color.gray=(png_uint_16) (ScaleQuantumToShort(ClampToQuantum(GetPixelLuma(image, image->colormap))) & mask); ping_trans_color.index=(png_byte) 0; ping_have_tRNS=MagickTrue; } if (ping_have_tRNS != MagickFalse) { /* * Determine if there is one and only one transparent color * and if so if it is fully transparent. */ if (ping_have_cheap_transparency == MagickFalse) ping_have_tRNS=MagickFalse; } if (ping_have_tRNS != MagickFalse) { if (mng_info->write_png_colortype == 0) ping_color_type &= 0x03; /* changes 4 or 6 to 0 or 2 */ if (image_depth == 8) { ping_trans_color.red&=0xff; ping_trans_color.green&=0xff; ping_trans_color.blue&=0xff; ping_trans_color.gray&=0xff; } } } else { if (image_depth == 8) { ping_trans_color.red&=0xff; ping_trans_color.green&=0xff; ping_trans_color.blue&=0xff; ping_trans_color.gray&=0xff; } } } matte=image_matte; if (ping_have_tRNS != MagickFalse) image_matte=MagickFalse; if ((mng_info->IsPalette) && mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE && ping_have_color == MagickFalse && (image_matte == MagickFalse || image_depth >= 8)) { size_t one=1; if (image_matte != MagickFalse) ping_color_type=PNG_COLOR_TYPE_GRAY_ALPHA; else if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_GRAY_ALPHA) { ping_color_type=PNG_COLOR_TYPE_GRAY; if (save_image_depth == 16 && image_depth == 8) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color (0)"); } ping_trans_color.gray*=0x0101; } } if (image_depth > MAGICKCORE_QUANTUM_DEPTH) image_depth=MAGICKCORE_QUANTUM_DEPTH; if ((image_colors == 0) || ((ssize_t) (image_colors-1) > (ssize_t) MaxColormapSize)) image_colors=(int) (one << image_depth); if (image_depth > 8) ping_bit_depth=16; else { ping_bit_depth=8; if ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { if(!mng_info->write_png_depth) { ping_bit_depth=1; while ((int) (one << ping_bit_depth) < (ssize_t) image_colors) ping_bit_depth <<= 1; } } else if (ping_color_type == PNG_COLOR_TYPE_GRAY && image_colors < 17 && mng_info->IsPalette) { /* Check if grayscale is reducible */ int depth_4_ok=MagickTrue, depth_2_ok=MagickTrue, depth_1_ok=MagickTrue; for (i=0; i < (ssize_t) image_colors; i++) { unsigned char intensity; intensity=ScaleQuantumToChar(image->colormap[i].red); if ((intensity & 0x0f) != ((intensity & 0xf0) >> 4)) depth_4_ok=depth_2_ok=depth_1_ok=MagickFalse; else if ((intensity & 0x03) != ((intensity & 0x0c) >> 2)) depth_2_ok=depth_1_ok=MagickFalse; else if ((intensity & 0x01) != ((intensity & 0x02) >> 1)) depth_1_ok=MagickFalse; } if (depth_1_ok && mng_info->write_png_depth <= 1) ping_bit_depth=1; else if (depth_2_ok && mng_info->write_png_depth <= 2) ping_bit_depth=2; else if (depth_4_ok && mng_info->write_png_depth <= 4) ping_bit_depth=4; } } image_depth=ping_bit_depth; } else if (mng_info->IsPalette) { number_colors=image_colors; if (image_depth <= 8) { /* Set image palette. 
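          * PLTE samples are always 8 bits regardless of the image depth;
          * the bit depth computed below is just the smallest of 1, 2, 4,
          * or 8 whose range covers number_colors (e.g., 5 colors need
          * depth 4).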
*/ ping_color_type=(png_byte) PNG_COLOR_TYPE_PALETTE; if (!(mng_info->have_write_global_plte && matte == MagickFalse)) { for (i=0; i < (ssize_t) number_colors; i++) { palette[i].red=ScaleQuantumToChar(image->colormap[i].red); palette[i].green=ScaleQuantumToChar(image->colormap[i].green); palette[i].blue=ScaleQuantumToChar(image->colormap[i].blue); } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up PLTE chunk with %d colors", number_colors); ping_have_PLTE=MagickTrue; } /* color_type is PNG_COLOR_TYPE_PALETTE */ if (mng_info->write_png_depth == 0) { size_t one; ping_bit_depth=1; one=1; while ((one << ping_bit_depth) < (size_t) number_colors) ping_bit_depth <<= 1; } ping_num_trans=0; if (matte != MagickFalse) { /* * Set up trans_colors array. */ assert(number_colors <= 256); ping_num_trans=(unsigned short) (number_transparent + number_semitransparent); if (ping_num_trans == 0) ping_have_tRNS=MagickFalse; else { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color (1)"); } ping_have_tRNS=MagickTrue; for (i=0; i < ping_num_trans; i++) { ping_trans_alpha[i]= (png_byte) (255- ScaleQuantumToChar(image->colormap[i].opacity)); } } } } } else { if (image_depth < 8) image_depth=8; if ((save_image_depth == 16) && (image_depth == 8)) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color from (%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } ping_trans_color.red*=0x0101; ping_trans_color.green*=0x0101; ping_trans_color.blue*=0x0101; ping_trans_color.gray*=0x0101; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " to (%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } } } if (ping_bit_depth < (ssize_t) mng_info->write_png_depth) ping_bit_depth = (ssize_t) mng_info->write_png_depth; /* Adjust background and transparency samples in sub-8-bit grayscale files. */ if (ping_bit_depth < 8 && ping_color_type == PNG_COLOR_TYPE_GRAY) { png_uint_16 maxval; size_t one=1; maxval=(png_uint_16) ((one << ping_bit_depth)-1); if (ping_exclude_bKGD == MagickFalse) { ping_background.gray=(png_uint_16) ((maxval/65535.)*(ScaleQuantumToShort((Quantum) GetPixelLuma(image,&image->background_color)))+.5); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk (2)"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_background.index is %d", (int) ping_background.index); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_background.gray is %d", (int) ping_background.gray); } ping_have_bKGD = MagickTrue; } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scaling ping_trans_color.gray from %d", (int)ping_trans_color.gray); ping_trans_color.gray=(png_uint_16) ((maxval/255.)*( ping_trans_color.gray)+.5); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " to %d", (int)ping_trans_color.gray); } if (ping_exclude_bKGD == MagickFalse) { if (mng_info->IsPalette && (int) ping_color_type == PNG_COLOR_TYPE_PALETTE) { /* Identify which colormap entry is the background color. 
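      * This is a linear search of the palette; if the background color
      * is not found there, the bKGD chunk is simply suppressed (the
      * "Can't happen" branch below).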
*/ number_colors=image_colors; for (i=0; i < (ssize_t) MagickMax(1L*number_colors,1L); i++) if (IsPNGColorEqual(image->background_color,image->colormap[i])) break; ping_background.index=(png_byte) i; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk with index=%d",(int) i); } if (i < (ssize_t) number_colors) { ping_have_bKGD = MagickTrue; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background =(%d,%d,%d)", (int) ping_background.red, (int) ping_background.green, (int) ping_background.blue); } } else /* Can't happen */ { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " No room in PLTE to add bKGD color"); ping_have_bKGD = MagickFalse; } } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG color type: %s (%d)", PngColorTypeToString(ping_color_type), ping_color_type); /* Initialize compression level and filtering. */ if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up deflate compression"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression buffer size: 32768"); } png_set_compression_buffer_size(ping,32768L); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression mem level: 9"); png_set_compression_mem_level(ping, 9); /* Untangle the "-quality" setting: Undefined is 0; the default is used. Default is 75 10's digit: 0 or omitted: Use Z_HUFFMAN_ONLY strategy with the zlib default compression level 1-9: the zlib compression level 1's digit: 0-4: the PNG filter method 5: libpng adaptive filtering if compression level > 5 libpng filter type "none" if compression level <= 5 or if image is grayscale or palette 6: libpng adaptive filtering 7: "LOCO" filtering (intrapixel differing) if writing a MNG, otherwise "none". Did not work in IM-6.7.0-9 and earlier because of a missing "else". 8: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), adaptive filtering. Unused prior to IM-6.7.0-10, was same as 6 9: Z_RLE strategy (or Z_HUFFMAN_ONLY if quality < 10), no PNG filters Unused prior to IM-6.7.0-10, was same as 6 Note that using the -quality option, not all combinations of PNG filter type, zlib compression level, and zlib compression strategy are possible. This is addressed by using "-define png:compression-strategy", etc., which takes precedence over -quality. */ quality=image_info->quality == UndefinedCompressionQuality ? 
75UL : image_info->quality; if (quality <= 9) { if (mng_info->write_png_compression_strategy == 0) mng_info->write_png_compression_strategy = Z_HUFFMAN_ONLY+1; } else if (mng_info->write_png_compression_level == 0) { int level; level=(int) MagickMin((ssize_t) quality/10,9); mng_info->write_png_compression_level = level+1; } if (mng_info->write_png_compression_strategy == 0) { if ((quality %10) == 8 || (quality %10) == 9) #ifdef Z_RLE /* Z_RLE was added to zlib-1.2.0 */ mng_info->write_png_compression_strategy=Z_RLE+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif } if (mng_info->write_png_compression_filter == 0) mng_info->write_png_compression_filter=((int) quality % 10) + 1; if (logging != MagickFalse) { if (mng_info->write_png_compression_level) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression level: %d", (int) mng_info->write_png_compression_level-1); if (mng_info->write_png_compression_strategy) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Compression strategy: %d", (int) mng_info->write_png_compression_strategy-1); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up filtering"); if (mng_info->write_png_compression_filter == 6) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: ADAPTIVE"); else if (mng_info->write_png_compression_filter == 0 || mng_info->write_png_compression_filter == 1) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: NONE"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Base filter method: %d", (int) mng_info->write_png_compression_filter-1); } if (mng_info->write_png_compression_level != 0) png_set_compression_level(ping,mng_info->write_png_compression_level-1); if (mng_info->write_png_compression_filter == 6) { if (((int) ping_color_type == PNG_COLOR_TYPE_GRAY) || ((int) ping_color_type == PNG_COLOR_TYPE_PALETTE) || (quality < 50)) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); else png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS); } else if (mng_info->write_png_compression_filter == 7 || mng_info->write_png_compression_filter == 10) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_ALL_FILTERS); else if (mng_info->write_png_compression_filter == 8) { #if defined(PNG_MNG_FEATURES_SUPPORTED) && defined(PNG_INTRAPIXEL_DIFFERENCING) if (mng_info->write_mng) { if (((int) ping_color_type == PNG_COLOR_TYPE_RGB) || ((int) ping_color_type == PNG_COLOR_TYPE_RGBA)) ping_filter_method=PNG_INTRAPIXEL_DIFFERENCING; } #endif png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); } else if (mng_info->write_png_compression_filter == 9) png_set_filter(ping,PNG_FILTER_TYPE_BASE,PNG_NO_FILTERS); else if (mng_info->write_png_compression_filter != 0) png_set_filter(ping,PNG_FILTER_TYPE_BASE, mng_info->write_png_compression_filter-1); if (mng_info->write_png_compression_strategy != 0) png_set_compression_strategy(ping, mng_info->write_png_compression_strategy-1); ping_interlace_method=image_info->interlace != NoInterlace; if (mng_info->write_mng) png_set_sig_bytes(ping,8); /* Bail out if cannot meet defined png:bit-depth or png:color-type */ if (mng_info->write_png_colortype != 0) { if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY) if (ping_have_color != MagickFalse) { ping_color_type = PNG_COLOR_TYPE_RGB; if (ping_bit_depth < 8) ping_bit_depth=8; } if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_GRAY_ALPHA) if (ping_have_color != MagickFalse) ping_color_type = PNG_COLOR_TYPE_RGB_ALPHA; } if (ping_need_colortype_warning 
!= MagickFalse || ((mng_info->write_png_depth && (int) mng_info->write_png_depth != ping_bit_depth) || (mng_info->write_png_colortype && ((int) mng_info->write_png_colortype-1 != ping_color_type && mng_info->write_png_colortype != 7 && !(mng_info->write_png_colortype == 5 && ping_color_type == 0))))) { if (logging != MagickFalse) { if (ping_need_colortype_warning != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image has transparency but tRNS chunk was excluded"); } if (mng_info->write_png_depth) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:bit-depth=%u, Computed depth=%u", mng_info->write_png_depth, ping_bit_depth); } if (mng_info->write_png_colortype) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:color-type=%u, Computed color type=%u", mng_info->write_png_colortype-1, ping_color_type); } } png_warning(ping, "Cannot write image with defined png:bit-depth or png:color-type."); } if (image_matte != MagickFalse && image->matte == MagickFalse) { /* Add an opaque matte channel */ image->matte = MagickTrue; (void) SetImageOpacity(image,0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Added an opaque matte channel"); } if (number_transparent != 0 || number_semitransparent != 0) { if (ping_color_type < 4) { ping_have_tRNS=MagickTrue; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting ping_have_tRNS=MagickTrue."); } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG header chunks"); png_set_IHDR(ping,ping_info,ping_width,ping_height, ping_bit_depth,ping_color_type, ping_interlace_method,ping_compression_method, ping_filter_method); if (ping_color_type == 3 && ping_have_PLTE != MagickFalse) { if (mng_info->have_write_global_plte && matte == MagickFalse) { png_set_PLTE(ping,ping_info,NULL,0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up empty PLTE chunk"); } else png_set_PLTE(ping,ping_info,palette,number_colors); if (logging != MagickFalse) { for (i=0; i< (ssize_t) number_colors; i++) { if (i < ping_num_trans) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PLTE[%d] = (%d,%d,%d), tRNS[%d] = (%d)", (int) i, (int) palette[i].red, (int) palette[i].green, (int) palette[i].blue, (int) i, (int) ping_trans_alpha[i]); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PLTE[%d] = (%d,%d,%d)", (int) i, (int) palette[i].red, (int) palette[i].green, (int) palette[i].blue); } } } /* Only write the iCCP chunk if we are not writing the sRGB chunk. 
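     The block below walks the image profiles with the usual MagickCore
     iterator pattern; a minimal sketch of that pattern (error handling
     omitted):

       ResetImageProfileIterator(image);
       for (name=GetNextImageProfile(image); name != (const char *) NULL;
            name=GetNextImageProfile(image))
       {
         const StringInfo
           *profile=GetImageProfile(image,name);

         /* "ICC"/"ICM" profiles become an iCCP chunk; other profiles are
            written via Magick_png_write_raw_profile() as zTXt */
       }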
*/ if (ping_exclude_sRGB != MagickFalse || (!png_get_valid(ping,ping_info,PNG_INFO_sRGB))) { if ((ping_exclude_tEXt == MagickFalse || ping_exclude_zTXt == MagickFalse) && (ping_exclude_iCCP == MagickFalse || ping_exclude_zCCP == MagickFalse)) { ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { #ifdef PNG_WRITE_iCCP_SUPPORTED if ((LocaleCompare(name,"ICC") == 0) || (LocaleCompare(name,"ICM") == 0)) { if (ping_exclude_iCCP == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up iCCP chunk"); png_set_iCCP(ping,ping_info,(const png_charp) name,0, #if (PNG_LIBPNG_VER < 10500) (png_charp) GetStringInfoDatum(profile), #else (const png_byte *) GetStringInfoDatum(profile), #endif (png_uint_32) GetStringInfoLength(profile)); ping_have_iCCP = MagickTrue; } } else #endif if (ping_exclude_zCCP == MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up zTXT chunk with uuencoded ICC"); Magick_png_write_raw_profile(image_info,ping,ping_info, (unsigned char *) name,(unsigned char *) name, GetStringInfoDatum(profile), (png_uint_32) GetStringInfoLength(profile)); ping_have_iCCP = MagickTrue; } } if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up text chunk with %s profile",name); name=GetNextImageProfile(image); } } } #if defined(PNG_WRITE_sRGB_SUPPORTED) if ((mng_info->have_write_global_srgb == 0) && ping_have_iCCP != MagickTrue && (ping_have_sRGB != MagickFalse || png_get_valid(ping,ping_info,PNG_INFO_sRGB))) { if (ping_exclude_sRGB == MagickFalse) { /* Note image rendering intent. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up sRGB chunk"); (void) png_set_sRGB(ping,ping_info,( Magick_RenderingIntent_to_PNG_RenderingIntent( image->rendering_intent))); ping_have_sRGB = MagickTrue; } } if ((!mng_info->write_mng) || (!png_get_valid(ping,ping_info,PNG_INFO_sRGB))) #endif { if (ping_exclude_gAMA == MagickFalse && ping_have_iCCP == MagickFalse && ping_have_sRGB == MagickFalse && (ping_exclude_sRGB == MagickFalse || (image->gamma < .45 || image->gamma > .46))) { if ((mng_info->have_write_global_gama == 0) && (image->gamma != 0.0)) { /* Note image gamma. To do: check for cHRM+gAMA == sRGB, and write sRGB instead. */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up gAMA chunk"); png_set_gAMA(ping,ping_info,image->gamma); } } if (ping_exclude_cHRM == MagickFalse && ping_have_sRGB == MagickFalse) { if ((mng_info->have_write_global_chrm == 0) && (image->chromaticity.red_primary.x != 0.0)) { /* Note image chromaticity. Note: if cHRM+gAMA == sRGB write sRGB instead. 
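     The "write sRGB instead" to-do would compare these primaries against
     the sRGB values; a sketch of such a test (the 0.01 tolerance is an
     illustrative assumption, and fabs() is from <math.h>):

       #define NearTo(a,b) (fabs((a)-(b)) < 0.01)

       int chrm_is_srgb =
         NearTo(wp.x,0.3127) && NearTo(wp.y,0.3290) &&
         NearTo(rp.x,0.64)   && NearTo(rp.y,0.33)   &&
         NearTo(gp.x,0.30)   && NearTo(gp.y,0.60)   &&
         NearTo(bp.x,0.15)   && NearTo(bp.y,0.06);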
*/ PrimaryInfo bp, gp, rp, wp; wp=image->chromaticity.white_point; rp=image->chromaticity.red_primary; gp=image->chromaticity.green_primary; bp=image->chromaticity.blue_primary; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up cHRM chunk"); png_set_cHRM(ping,ping_info,wp.x,wp.y,rp.x,rp.y,gp.x,gp.y, bp.x,bp.y); } } } if (ping_exclude_bKGD == MagickFalse) { if (ping_have_bKGD != MagickFalse) { png_set_bKGD(ping,ping_info,&ping_background); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up bKGD chunk"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " background color = (%d,%d,%d)", (int) ping_background.red, (int) ping_background.green, (int) ping_background.blue); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " index = %d, gray=%d", (int) ping_background.index, (int) ping_background.gray); } } } if (ping_exclude_pHYs == MagickFalse) { if (ping_have_pHYs != MagickFalse) { png_set_pHYs(ping,ping_info, ping_pHYs_x_resolution, ping_pHYs_y_resolution, ping_pHYs_unit_type); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up pHYs chunk"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " x_resolution=%lu", (unsigned long) ping_pHYs_x_resolution); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " y_resolution=%lu", (unsigned long) ping_pHYs_y_resolution); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " unit_type=%lu", (unsigned long) ping_pHYs_unit_type); } } } #if defined(PNG_tIME_SUPPORTED) if (ping_exclude_tIME == MagickFalse) { const char *timestamp; if (image->taint == MagickFalse) { timestamp=GetImageOption(image_info,"png:tIME"); if (timestamp == (const char *) NULL) timestamp=GetImageProperty(image,"png:tIME"); } else { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Reset tIME in tainted image"); timestamp=GetImageProperty(image,"date:modify"); } if (timestamp != (const char *) NULL) write_tIME_chunk(image,ping,ping_info,timestamp); } #endif if (mng_info->need_blob != MagickFalse) { if (OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception) == MagickFalse) png_error(ping,"WriteBlob Failed"); ping_have_blob=MagickTrue; (void) ping_have_blob; } png_write_info_before_PLTE(ping, ping_info); if (ping_have_tRNS != MagickFalse && ping_color_type < 4) { if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Calling png_set_tRNS with num_trans=%d",ping_num_trans); } if (ping_color_type == 3) (void) png_set_tRNS(ping, ping_info, ping_trans_alpha, ping_num_trans, NULL); else { (void) png_set_tRNS(ping, ping_info, NULL, 0, &ping_trans_color); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tRNS color =(%d,%d,%d)", (int) ping_trans_color.red, (int) ping_trans_color.green, (int) ping_trans_color.blue); } } } /* write any png-chunk-b profiles */ (void) Magick_png_write_chunk_from_profile(image,"PNG-chunk-b",logging); png_write_info(ping,ping_info); /* write any PNG-chunk-m profiles */ (void) Magick_png_write_chunk_from_profile(image,"PNG-chunk-m",logging); ping_wrote_caNv = MagickFalse; /* write caNv chunk */ if (ping_exclude_caNv == MagickFalse) { if ((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows) || image->page.x != 0 || image->page.y != 0) { unsigned char chunk[20]; (void) WriteBlobMSBULong(image,16L); /* data length=8 */ PNGType(chunk,mng_caNv); LogPNGChunk(logging,mng_caNv,16L); 
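          /*
            caNv payload layout (16 data bytes, all big-endian):
            bytes 0-3 canvas width, 4-7 canvas height, 8-11 x offset
            (signed), 12-15 y offset (signed).  The 20-byte WriteBlob()
            and crc32() below cover the 4-byte chunk name plus these 16
            data bytes, matching the length word of 16 written above.
          */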
PNGLong(chunk+4,(png_uint_32) image->page.width); PNGLong(chunk+8,(png_uint_32) image->page.height); PNGsLong(chunk+12,(png_int_32) image->page.x); PNGsLong(chunk+16,(png_int_32) image->page.y); (void) WriteBlob(image,20,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,20)); ping_wrote_caNv = MagickTrue; } } #if defined(PNG_oFFs_SUPPORTED) if (ping_exclude_oFFs == MagickFalse && ping_wrote_caNv == MagickFalse) { if (image->page.x || image->page.y) { png_set_oFFs(ping,ping_info,(png_int_32) image->page.x, (png_int_32) image->page.y, 0); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Setting up oFFs chunk with x=%d, y=%d, units=0", (int) image->page.x, (int) image->page.y); } } #endif /* write vpAg chunk (deprecated, replaced by caNv) */ if (ping_exclude_vpAg == MagickFalse && ping_wrote_caNv == MagickFalse) { if ((image->page.width != 0 && image->page.width != image->columns) || (image->page.height != 0 && image->page.height != image->rows)) { unsigned char chunk[14]; (void) WriteBlobMSBULong(image,9L); /* data length=8 */ PNGType(chunk,mng_vpAg); LogPNGChunk(logging,mng_vpAg,9L); PNGLong(chunk+4,(png_uint_32) image->page.width); PNGLong(chunk+8,(png_uint_32) image->page.height); chunk[12]=0; /* unit = pixels */ (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } } #if (PNG_LIBPNG_VER == 10206) /* avoid libpng-1.2.6 bug by setting PNG_HAVE_IDAT flag */ #define PNG_HAVE_IDAT 0x04 ping->mode |= PNG_HAVE_IDAT; #undef PNG_HAVE_IDAT #endif png_set_packing(ping); /* Allocate memory. */ rowbytes=image->columns; if (image_depth > 8) rowbytes*=2; switch (ping_color_type) { case PNG_COLOR_TYPE_RGB: rowbytes*=3; break; case PNG_COLOR_TYPE_GRAY_ALPHA: rowbytes*=2; break; case PNG_COLOR_TYPE_RGBA: rowbytes*=4; break; default: break; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG image data"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Allocating %.20g bytes of memory for pixels",(double) rowbytes); } pixel_info=AcquireVirtualMemory(rowbytes,sizeof(*ping_pixels)); if (pixel_info == (MemoryInfo *) NULL) png_error(ping,"Allocation of memory for pixels failed"); ping_pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); /* Initialize image scanlines. */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) png_error(ping,"Memory allocation for quantum_info failed"); quantum_info->format=UndefinedQuantumFormat; SetQuantumDepth(image,quantum_info,image_depth); (void) SetQuantumEndian(image,quantum_info,MSBEndian); num_passes=png_set_interlace_handling(ping); if ((!mng_info->write_png8 && !mng_info->write_png24 && !mng_info->write_png48 && !mng_info->write_png64 && !mng_info->write_png32) && (mng_info->IsPalette || (image_info->type == BilevelType)) && image_matte == MagickFalse && ping_have_non_bw == MagickFalse) { /* Palette, Bilevel, or Opaque Monochrome */ register const PixelPacket *p; SetQuantumDepth(image,quantum_info,8); for (pass=0; pass < num_passes; pass++) { /* Convert PseudoClass image to a PNG monochrome image. 
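     In the loop below each row is exported as one 8-bit sample per
     pixel; when a palette color type was not explicitly requested, the
     samples are thresholded to bilevel, i.e. (sketch):

       *(ping_pixels+i)=(unsigned char) ((*(ping_pixels+i) > 127) ? 255 : 0);

     png_set_packing(), requested earlier, then packs these one-byte
     samples down to the sub-8-bit output depth.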
*/ for (y=0; y < (ssize_t) image->rows; y++) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (0)"); p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; if (mng_info->IsPalette) { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); if (mng_info->write_png_colortype-1 == PNG_COLOR_TYPE_PALETTE && mng_info->write_png_depth && mng_info->write_png_depth != old_bit_depth) { /* Undo pixel scaling */ for (i=0; i < (ssize_t) image->columns; i++) *(ping_pixels+i)=(unsigned char) (*(ping_pixels+i) >> (8-old_bit_depth)); } } else { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); } if (mng_info->write_png_colortype-1 != PNG_COLOR_TYPE_PALETTE) for (i=0; i < (ssize_t) image->columns; i++) *(ping_pixels+i)=(unsigned char) ((*(ping_pixels+i) > 127) ? 255 : 0); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (1)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } else /* Not Palette, Bilevel, or Opaque Monochrome */ { if ((!mng_info->write_png8 && !mng_info->write_png24 && !mng_info->write_png48 && !mng_info->write_png64 && !mng_info->write_png32) && (image_matte != MagickFalse || (ping_bit_depth >= MAGICKCORE_QUANTUM_DEPTH)) && (mng_info->IsPalette) && ping_have_color == MagickFalse) { register const PixelPacket *p; for (pass=0; pass < num_passes; pass++) { for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (mng_info->IsPalette) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY PNG pixels (2)"); } else /* PNG_COLOR_TYPE_GRAY_ALPHA */ { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (2)"); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels,&image->exception); } if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (2)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } else { register const PixelPacket *p; for (pass=0; pass < num_passes; pass++) { if ((image_depth > 8) || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (!mng_info->write_png8 && !mng_info->IsPalette)) { for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { if (image->storage_class == DirectClass) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RedQuantum,ping_pixels,&image->exception); else (void) 
ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); } else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels, &image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (3)"); } else if (image_matte != MagickFalse) (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RGBAQuantum,ping_pixels,&image->exception); else (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,RGBQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of pixels (3)"); png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } else /* not ((image_depth > 8) || mng_info->write_png24 || mng_info->write_png32 || mng_info->write_png48 || mng_info->write_png64 || (!mng_info->write_png8 && !mng_info->IsPalette)) */ { if ((ping_color_type != PNG_COLOR_TYPE_GRAY) && (ping_color_type != PNG_COLOR_TYPE_GRAY_ALPHA)) { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pass %d, Image Is not GRAY or GRAY_ALPHA",pass); SetQuantumDepth(image,quantum_info,8); image_depth=8; } for (y=0; y < (ssize_t) image->rows; y++) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " pass %d, Image Is RGB, 16-bit GRAY, or GRAY_ALPHA",pass); p=GetVirtualPixels(image,0,y,image->columns,1, &image->exception); if (p == (const PixelPacket *) NULL) break; if (ping_color_type == PNG_COLOR_TYPE_GRAY) { SetQuantumDepth(image,quantum_info,image->depth); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayQuantum,ping_pixels,&image->exception); } else if (ping_color_type == PNG_COLOR_TYPE_GRAY_ALPHA) { if (logging != MagickFalse && y == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing GRAY_ALPHA PNG pixels (4)"); (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,GrayAlphaQuantum,ping_pixels, &image->exception); } else { (void) ExportQuantumPixels(image,(const CacheView *) NULL, quantum_info,IndexQuantum,ping_pixels,&image->exception); if (logging != MagickFalse && y <= 2) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing row of non-gray pixels (4)"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " ping_pixels[0]=%d,ping_pixels[1]=%d", (int)ping_pixels[0],(int)ping_pixels[1]); } } png_write_row(ping,ping_pixels); status=SetImageProgress(image,SaveImageTag, (MagickOffsetType) (pass * image->rows + y), num_passes * image->rows); if (status == MagickFalse) break; } } } } } if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Wrote PNG image data"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Width: %.20g",(double) ping_width); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Height: %.20g",(double) ping_height); if (mng_info->write_png_depth) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:bit-depth: %d",mng_info->write_png_depth); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG bit-depth written: %d",ping_bit_depth); if (mng_info->write_png_colortype) { 
(void) LogMagickEvent(CoderEvent,GetMagickModule(), " Defined png:color-type: %d",mng_info->write_png_colortype-1); } (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG color-type written: %d",ping_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " PNG Interlace method: %d",ping_interlace_method); } /* Generate text chunks after IDAT. */ if (ping_exclude_tEXt == MagickFalse || ping_exclude_zTXt == MagickFalse) { ResetImagePropertyIterator(image); property=GetNextImageProperty(image); while (property != (const char *) NULL) { png_textp text; value=GetImageProperty(image,property); /* Don't write any "png:" or "jpeg:" properties; those are just for * "identify" or for passing through to another JPEG */ if ((LocaleNCompare(property,"png:",4) != 0 && LocaleNCompare(property,"jpeg:",5) != 0) && /* Suppress density and units if we wrote a pHYs chunk */ (ping_exclude_pHYs != MagickFalse || LocaleCompare(property,"density") != 0 || LocaleCompare(property,"units") != 0) && /* Suppress the IM-generated Date:create and Date:modify */ (ping_exclude_date == MagickFalse || LocaleNCompare(property, "Date:",5) != 0)) { if (value != (const char *) NULL) { #if PNG_LIBPNG_VER >= 10400 text=(png_textp) png_malloc(ping, (png_alloc_size_t) sizeof(png_text)); #else text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text)); #endif text[0].key=(char *) property; text[0].text=(char *) value; text[0].text_length=strlen(value); if (ping_exclude_tEXt != MagickFalse) text[0].compression=PNG_TEXT_COMPRESSION_zTXt; else if (ping_exclude_zTXt != MagickFalse) text[0].compression=PNG_TEXT_COMPRESSION_NONE; else { text[0].compression=image_info->compression == NoCompression || (image_info->compression == UndefinedCompression && text[0].text_length < 128) ? 
PNG_TEXT_COMPRESSION_NONE : PNG_TEXT_COMPRESSION_zTXt ;
                }

              if (logging != MagickFalse)
                {
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    "  Setting up text chunk");

                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    "    keyword: '%s'",text[0].key);
                }

              png_set_text(ping,ping_info,text,1);
              png_free(ping,text);
            }
        }
      property=GetNextImageProperty(image);
    }
  }

  /* write any PNG-chunk-e profiles */
  (void) Magick_png_write_chunk_from_profile(image,"PNG-chunk-e",logging);

  /* write exIf profile */
  if (ping_have_eXIf != MagickFalse && ping_exclude_eXIf == MagickFalse)
    {
      char
        *name;

      ResetImageProfileIterator(image);

      for (name=GetNextImageProfile(image); name != (const char *) NULL; )
      {
        if (LocaleCompare(name,"exif") == 0)
          {
            const StringInfo
              *profile;

            profile=GetImageProfile(image,name);

            if (profile != (StringInfo *) NULL)
              {
                png_uint_32
                  length;

                unsigned char
                  chunk[4],
                  *data;

                StringInfo
                  *ping_profile;

                (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                  "  Have eXIf profile");

                ping_profile=CloneStringInfo(profile);
                data=GetStringInfoDatum(ping_profile),
                length=(png_uint_32) GetStringInfoLength(ping_profile);

#if 0  /* eXIf chunk is registered */
                PNGType(chunk,mng_eXIf);
#else  /* eXIf chunk not yet registered; write exIf instead */
                PNGType(chunk,mng_exIf);
#endif
                if (length < 7)
                  break;  /* otherwise crashes */

                /* skip the "Exif\0\0" JFIF Exif Header ID */
                length -= 6;

                LogPNGChunk(logging,chunk,length);
                (void) WriteBlobMSBULong(image,length);
                (void) WriteBlob(image,4,chunk);
                (void) WriteBlob(image,length,data+6);
                (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),
                  data+6, (uInt) length));
                break;
              }
          }
        name=GetNextImageProfile(image);
      }
    }

  if (logging != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  Writing PNG end info");

  png_write_end(ping,ping_info);

  if (mng_info->need_fram && (int) image->dispose == BackgroundDispose)
    {
      if (mng_info->page.x || mng_info->page.y ||
          (ping_width != mng_info->page.width) ||
          (ping_height != mng_info->page.height))
        {
          unsigned char
            chunk[32];

          /*
            Write FRAM 4 with clipping boundaries followed by FRAM 1.
          */
          (void) WriteBlobMSBULong(image,27L);  /* data length=27 */
          PNGType(chunk,mng_FRAM);
          LogPNGChunk(logging,mng_FRAM,27L);
          chunk[4]=4;
          chunk[5]=0;  /* frame name separator (no name) */
          chunk[6]=1;  /* flag for changing delay, for next frame only */
          chunk[7]=0;  /* flag for changing frame timeout */
          chunk[8]=1;  /* flag for changing frame clipping for next frame */
          chunk[9]=0;  /* flag for changing frame sync_id */
          PNGLong(chunk+10,(png_uint_32) (0L));  /* temporary 0 delay */
          chunk[14]=0;  /* clipping boundaries delta type */
          PNGLong(chunk+15,(png_uint_32) (mng_info->page.x));  /* left cb */
          PNGLong(chunk+19,
            (png_uint_32) (mng_info->page.x + ping_width));
          PNGLong(chunk+23,(png_uint_32) (mng_info->page.y));  /* top cb */
          PNGLong(chunk+27,
            (png_uint_32) (mng_info->page.y + ping_height));
          (void) WriteBlob(image,31,chunk);
          (void) WriteBlobMSBULong(image,crc32(0,chunk,31));
          mng_info->old_framing_mode=4;
          mng_info->framing_mode=1;
        }

      else
        mng_info->framing_mode=3;
    }

  if (mng_info->write_mng && !mng_info->need_fram &&
      ((int) image->dispose == 3))
    png_error(ping, "Cannot convert GIF with disposal method 3 to MNG-LC");

  /* Free PNG resources.
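     The teardown below destroys the libpng write and info structs (and
     NULLs both pointers), releases the row buffer, and then records the
     bit depth actually written in the "png:bit-depth-written" property,
     which the JNG encoder reads back to learn the alpha sample depth.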
*/ png_destroy_write_struct(&ping,&ping_info); pixel_info=RelinquishVirtualMemory(pixel_info); /* Store bit depth actually written */ s[0]=(char) ping_bit_depth; s[1]='\0'; (void) SetImageProperty(image,"png:bit-depth-written",s); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteOnePNGImage()"); #ifdef IMPNG_SETJMP_NOT_THREAD_SAFE UnlockSemaphoreInfo(ping_semaphore); #endif /* } for navigation to beginning of SETJMP-protected block. Revert to * Throwing an Exception when an error occurs. */ return(MagickTrue); /* End write one PNG image */ } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePNGImage() writes a Portable Network Graphics (PNG) or % Multiple-image Network Graphics (MNG) image file. % % MNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WritePNGImage method is: % % MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % % Returns MagickTrue on success, MagickFalse on failure. % % Communicating with the PNG encoder: % % While the datastream written is always in PNG format and normally would % be given the "png" file extension, this method also writes the following % pseudo-formats which are subsets of png: % % o PNG8: An 8-bit indexed PNG datastream is written. If the image has % a depth greater than 8, the depth is reduced. If transparency % is present, the tRNS chunk must only have values 0 and 255 % (i.e., transparency is binary: fully opaque or fully % transparent). If other values are present they will be % 50%-thresholded to binary transparency. If more than 256 % colors are present, they will be quantized to the 4-4-4-1, % 3-3-3-1, or 3-3-2-1 palette. The underlying RGB color % of any resulting fully-transparent pixels is changed to % the image's background color. % % If you want better quantization or dithering of the colors % or alpha than that, you need to do it before calling the % PNG encoder. The pixels contain 8-bit indices even if % they could be represented with 1, 2, or 4 bits. Grayscale % images will be written as indexed PNG files even though the % PNG grayscale type might be slightly more efficient. Please % note that writing to the PNG8 format may result in loss % of color and alpha data. % % o PNG24: An 8-bit per sample RGB PNG datastream is written. The tRNS % chunk can be present to convey binary transparency by naming % one of the colors as transparent. The only loss incurred % is reduction of sample depth to 8. If the image has more % than one transparent color, has semitransparent pixels, or % has an opaque pixel with the same RGB components as the % transparent color, an image is not written. % % o PNG32: An 8-bit per sample RGBA PNG is written. Partial % transparency is permitted, i.e., the alpha sample for % each pixel can have any value from 0 to 255. The alpha % channel is present even if the image is fully opaque. % The only loss in data is the reduction of the sample depth % to 8. % % o PNG48: A 16-bit per sample RGB PNG datastream is written. The tRNS % chunk can be present to convey binary transparency by naming % one of the colors as transparent. 
If the image has more % than one transparent color, has semitransparent pixels, or % has an opaque pixel with the same RGB components as the % transparent color, an image is not written. % % o PNG64: A 16-bit per sample RGBA PNG is written. Partial % transparency is permitted, i.e., the alpha sample for % each pixel can have any value from 0 to 65535. The alpha % channel is present even if the image is fully opaque. % % o PNG00: A PNG that inherits its colortype and bit-depth from the input % image, if the input was a PNG, is written. If these values % cannot be found, or if the pixels have been changed in a way % that makes this impossible, then "PNG00" falls back to the % regular "PNG" format. % % o -define: For more precise control of the PNG output, you can use the % Image options "png:bit-depth" and "png:color-type". These % can be set from the commandline with "-define" and also % from the application programming interfaces. The options % are case-independent and are converted to lowercase before % being passed to this encoder. % % png:color-type can be 0, 2, 3, 4, or 6. % % When png:color-type is 0 (Grayscale), png:bit-depth can % be 1, 2, 4, 8, or 16. % % When png:color-type is 2 (RGB), png:bit-depth can % be 8 or 16. % % When png:color-type is 3 (Indexed), png:bit-depth can % be 1, 2, 4, or 8. This refers to the number of bits % used to store the index. The color samples always have % bit-depth 8 in indexed PNG files. % % When png:color-type is 4 (Gray-Matte) or 6 (RGB-Matte), % png:bit-depth can be 8 or 16. % % If the image cannot be written without loss with the % requested bit-depth and color-type, a PNG file will not % be written, a warning will be issued, and the encoder will % return MagickFalse. % % Since image encoders should not be responsible for the "heavy lifting", % the user should make sure that ImageMagick has already reduced the % image depth and number of colors and limit transparency to binary % transparency prior to attempting to write the image with depth, color, % or transparency limitations. % % To do: Enforce the previous paragraph. % % Note that another definition, "png:bit-depth-written" exists, but it % is not intended for external use. It is only used internally by the % PNG encoder to inform the JNG encoder of the depth of the alpha channel. % % It is possible to request that the PNG encoder write previously-formatted % ancillary chunks in the output PNG file, using the "-profile" commandline % option as shown below or by setting the profile via a programming % interface: % % -profile PNG-chunk-x:<file> % % where x is a location flag and <file> is a file containing the chunk % name in the first 4 bytes, then a colon (":"), followed by the chunk data. % This encoder will compute the chunk length and CRC, so those must not % be included in the file. % % "x" can be "b" (before PLTE), "m" (middle, i.e., between PLTE and IDAT), % or "e" (end, i.e., after IDAT). If you want to write multiple chunks % of the same type, then add a short unique string after the "x" to prevent % subsequent profiles from overwriting the preceding ones, e.g., % % -profile PNG-chunk-b01:file01 -profile PNG-chunk-b02:file02 % % As of version 6.6.6 the following optimizations are always done: % % o 32-bit depth is reduced to 16. % o 16-bit depth is reduced to 8 if all pixels contain samples whose % high byte and low byte are identical. % o Palette is sorted to remove unused entries and to put a % transparent color first, if BUILD_PNG_PALETTE is defined. 
% o Opaque matte channel is removed when writing an indexed PNG. % o Grayscale images are reduced to 1, 2, or 4 bit depth if % this can be done without loss and a larger bit depth N was not % requested via the "-define png:bit-depth=N" option. % o If matte channel is present but only one transparent color is % present, RGB+tRNS is written instead of RGBA % o Opaque matte channel is removed (or added, if color-type 4 or 6 % was requested when converting an opaque image). % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% */ static MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) { MagickBooleanType excluding, logging, status; MngInfo *mng_info; const char *value; int source; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WritePNGImage()"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; mng_info->equal_backgrounds=MagickTrue; /* See if user has requested a specific PNG subformat */ mng_info->write_png8=LocaleCompare(image_info->magick,"PNG8") == 0; mng_info->write_png24=LocaleCompare(image_info->magick,"PNG24") == 0; mng_info->write_png32=LocaleCompare(image_info->magick,"PNG32") == 0; mng_info->write_png48=LocaleCompare(image_info->magick,"PNG48") == 0; mng_info->write_png64=LocaleCompare(image_info->magick,"PNG64") == 0; value=GetImageOption(image_info,"png:format"); if (value != (char *) NULL || LocaleCompare(image_info->magick,"PNG00") == 0) { mng_info->write_png8 = MagickFalse; mng_info->write_png24 = MagickFalse; mng_info->write_png32 = MagickFalse; mng_info->write_png48 = MagickFalse; mng_info->write_png64 = MagickFalse; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Format=%s",value); if (LocaleCompare(value,"png8") == 0) mng_info->write_png8 = MagickTrue; else if (LocaleCompare(value,"png24") == 0) mng_info->write_png24 = MagickTrue; else if (LocaleCompare(value,"png32") == 0) mng_info->write_png32 = MagickTrue; else if (LocaleCompare(value,"png48") == 0) mng_info->write_png48 = MagickTrue; else if (LocaleCompare(value,"png64") == 0) mng_info->write_png64 = MagickTrue; else if ((LocaleCompare(value,"png00") == 0) || LocaleCompare(image_info->magick,"PNG00") == 0) { /* Retrieve png:IHDR.bit-depth-orig and png:IHDR.color-type-orig */ value=GetImageProperty(image,"png:IHDR.bit-depth-orig"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png00 inherited bit depth=%s",value); if (value != (char *) NULL) { if (LocaleCompare(value,"1") == 0) mng_info->write_png_depth = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_depth = 2; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_depth = 4; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_depth = 8; else if (LocaleCompare(value,"16") == 0) mng_info->write_png_depth = 16; } value=GetImageProperty(image,"png:IHDR.color-type-orig"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png00 inherited color type=%s",value); if (value != (char *) NULL) { if (LocaleCompare(value,"0") == 0) 
mng_info->write_png_colortype = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_colortype = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_colortype = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_colortype = 5; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_colortype = 7; } } } if (mng_info->write_png8) { mng_info->write_png_colortype = /* 3 */ 4; mng_info->write_png_depth = 8; image->depth = 8; } if (mng_info->write_png24) { mng_info->write_png_colortype = /* 2 */ 3; mng_info->write_png_depth = 8; image->depth = 8; if (image->matte != MagickFalse) (void) SetImageType(image,TrueColorMatteType); else (void) SetImageType(image,TrueColorType); (void) SyncImage(image); } if (mng_info->write_png32) { mng_info->write_png_colortype = /* 6 */ 7; mng_info->write_png_depth = 8; image->depth = 8; image->matte = MagickTrue; (void) SetImageType(image,TrueColorMatteType); (void) SyncImage(image); } if (mng_info->write_png48) { mng_info->write_png_colortype = /* 2 */ 3; mng_info->write_png_depth = 16; image->depth = 16; if (image->matte != MagickFalse) (void) SetImageType(image,TrueColorMatteType); else (void) SetImageType(image,TrueColorType); (void) SyncImage(image); } if (mng_info->write_png64) { mng_info->write_png_colortype = /* 6 */ 7; mng_info->write_png_depth = 16; image->depth = 16; image->matte = MagickTrue; (void) SetImageType(image,TrueColorMatteType); (void) SyncImage(image); } value=GetImageOption(image_info,"png:bit-depth"); if (value != (char *) NULL) { if (LocaleCompare(value,"1") == 0) mng_info->write_png_depth = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_depth = 2; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_depth = 4; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_depth = 8; else if (LocaleCompare(value,"16") == 0) mng_info->write_png_depth = 16; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:bit-depth", "=%s",value); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:bit-depth=%d was defined.\n",mng_info->write_png_depth); } value=GetImageOption(image_info,"png:color-type"); if (value != (char *) NULL) { /* We must store colortype+1 because 0 is a valid colortype */ if (LocaleCompare(value,"0") == 0) mng_info->write_png_colortype = 1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_colortype = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_colortype = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_colortype = 5; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_colortype = 7; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:color-type", "=%s",value); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:color-type=%d was defined.\n",mng_info->write_png_colortype-1); } /* Check for chunks to be excluded: * * The default is to not exclude any known chunks except for any * listed in the "unused_chunks" array, above. * * Chunks can be listed for exclusion via a "png:exclude-chunk" * define (in the image properties or in the image artifacts) * or via a mng_info member. For convenience, in addition * to or instead of a comma-separated list of chunks, the * "exclude-chunk" string can be simply "all" or "none". * * The exclude-chunk define takes priority over the mng_info. 
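   *
   * Illustrative command-line usage (comma-separated chunk names, as in
   * the -strip "none,trns,gama" example later in this comment):
   *
   *   convert in.png -define png:exclude-chunk=date,time,gama out.png
   *   convert in.png -define png:include-chunk=none,trns out.png
   *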
* * A "png:include-chunk" define takes priority over both the * mng_info and the "png:exclude-chunk" define. Like the * "exclude-chunk" string, it can define "all" or "none" as * well as a comma-separated list. Chunks that are unknown to * ImageMagick are always excluded, regardless of their "copy-safe" * status according to the PNG specification, and even if they * appear in the "include-chunk" list. Such defines appearing among * the image options take priority over those found among the image * artifacts. * * Finally, all chunks listed in the "unused_chunks" array are * automatically excluded, regardless of the other instructions * or lack thereof. * * if you exclude sRGB but not gAMA (recommended), then sRGB chunk * will not be written and the gAMA chunk will only be written if it * is not between .45 and .46, or approximately (1.0/2.2). * * If you exclude tRNS and the image has transparency, the colortype * is forced to be 4 or 6 (GRAY_ALPHA or RGB_ALPHA). * * The -strip option causes StripImage() to set the png:include-chunk * artifact to "none,trns,gama". */ mng_info->ping_exclude_bKGD=MagickFalse; mng_info->ping_exclude_caNv=MagickFalse; mng_info->ping_exclude_cHRM=MagickFalse; mng_info->ping_exclude_date=MagickFalse; mng_info->ping_exclude_eXIf=MagickFalse; mng_info->ping_exclude_EXIF=MagickFalse; /* hex-encoded EXIF in zTXt */ mng_info->ping_exclude_gAMA=MagickFalse; mng_info->ping_exclude_iCCP=MagickFalse; /* mng_info->ping_exclude_iTXt=MagickFalse; */ mng_info->ping_exclude_oFFs=MagickFalse; mng_info->ping_exclude_pHYs=MagickFalse; mng_info->ping_exclude_sRGB=MagickFalse; mng_info->ping_exclude_tEXt=MagickFalse; mng_info->ping_exclude_tIME=MagickFalse; mng_info->ping_exclude_tRNS=MagickFalse; mng_info->ping_exclude_vpAg=MagickFalse; mng_info->ping_exclude_zCCP=MagickFalse; /* hex-encoded iCCP in zTXt */ mng_info->ping_exclude_zTXt=MagickFalse; mng_info->ping_preserve_colormap=MagickFalse; value=GetImageOption(image_info,"png:preserve-colormap"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-colormap"); if (value != NULL) mng_info->ping_preserve_colormap=MagickTrue; mng_info->ping_preserve_iCCP=MagickFalse; value=GetImageOption(image_info,"png:preserve-iCCP"); if (value == NULL) value=GetImageArtifact(image,"png:preserve-iCCP"); if (value != NULL) mng_info->ping_preserve_iCCP=MagickTrue; /* These compression-level, compression-strategy, and compression-filter * defines take precedence over values from the -quality option. */ value=GetImageOption(image_info,"png:compression-level"); if (value == NULL) value=GetImageArtifact(image,"png:compression-level"); if (value != NULL) { /* To do: use a "LocaleInteger:()" function here. */ /* We have to add 1 to everything because 0 is a valid input, * and we want to use 0 (the default) to mean undefined. 
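   *
   * A sketch of the +1 encoding (user_level is illustrative):
   *
   *   mng_info->write_png_compression_level = user_level+1;
   *   ...
   *   if (mng_info->write_png_compression_level != 0)
   *     png_set_compression_level(ping,
   *        mng_info->write_png_compression_level-1);
   *
   * which is exactly how WriteOnePNGImage(), earlier in this file,
   * consumes the stored value.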
*/ if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_level = 1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_level = 2; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_level = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_compression_level = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_compression_level = 5; else if (LocaleCompare(value,"5") == 0) mng_info->write_png_compression_level = 6; else if (LocaleCompare(value,"6") == 0) mng_info->write_png_compression_level = 7; else if (LocaleCompare(value,"7") == 0) mng_info->write_png_compression_level = 8; else if (LocaleCompare(value,"8") == 0) mng_info->write_png_compression_level = 9; else if (LocaleCompare(value,"9") == 0) mng_info->write_png_compression_level = 10; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-level", "=%s",value); } value=GetImageOption(image_info,"png:compression-strategy"); if (value == NULL) value=GetImageArtifact(image,"png:compression-strategy"); if (value != NULL) { if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_strategy = Z_FILTERED+1; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_strategy = Z_HUFFMAN_ONLY+1; else if (LocaleCompare(value,"3") == 0) #ifdef Z_RLE /* Z_RLE was added to zlib-1.2.0 */ mng_info->write_png_compression_strategy = Z_RLE+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif else if (LocaleCompare(value,"4") == 0) #ifdef Z_FIXED /* Z_FIXED was added to zlib-1.2.2.2 */ mng_info->write_png_compression_strategy = Z_FIXED+1; #else mng_info->write_png_compression_strategy = Z_DEFAULT_STRATEGY+1; #endif else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-strategy", "=%s",value); } value=GetImageOption(image_info,"png:compression-filter"); if (value == NULL) value=GetImageArtifact(image,"png:compression-filter"); if (value != NULL) { /* To do: combinations of filters allowed by libpng * masks 0x08 through 0xf8 * * Implement this as a comma-separated list of 0,1,2,3,4,5 * where 5 is a special case meaning PNG_ALL_FILTERS. 
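   *
   * The fallback consumer in WriteOnePNGImage(), earlier in this file,
   * decodes the stored value+1 back to a filter setting with:
   *
   *   else if (mng_info->write_png_compression_filter != 0)
   *     png_set_filter(ping,PNG_FILTER_TYPE_BASE,
   *        mng_info->write_png_compression_filter-1);
   *
   * after the quality-derived special cases have been handled.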
*/ if (LocaleCompare(value,"0") == 0) mng_info->write_png_compression_filter = 1; else if (LocaleCompare(value,"1") == 0) mng_info->write_png_compression_filter = 2; else if (LocaleCompare(value,"2") == 0) mng_info->write_png_compression_filter = 3; else if (LocaleCompare(value,"3") == 0) mng_info->write_png_compression_filter = 4; else if (LocaleCompare(value,"4") == 0) mng_info->write_png_compression_filter = 5; else if (LocaleCompare(value,"5") == 0) mng_info->write_png_compression_filter = 6; else (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "ignoring invalid defined png:compression-filter", "=%s",value); } for (source=0; source<8; source++) { value = NULL; if (source == 0) value=GetImageOption(image_info,"png:exclude-chunks"); if (source == 1) value=GetImageArtifact(image,"png:exclude-chunks"); if (source == 2) value=GetImageOption(image_info,"png:exclude-chunk"); if (source == 3) value=GetImageArtifact(image,"png:exclude-chunk"); if (source == 4) value=GetImageOption(image_info,"png:include-chunks"); if (source == 5) value=GetImageArtifact(image,"png:include-chunks"); if (source == 6) value=GetImageOption(image_info,"png:include-chunk"); if (source == 7) value=GetImageArtifact(image,"png:include-chunk"); if (value == NULL) continue; if (source < 4) excluding = MagickTrue; else excluding = MagickFalse; if (logging != MagickFalse) { if (source == 0 || source == 2) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:exclude-chunk=%s found in image options.\n", value); else if (source == 1 || source == 3) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:exclude-chunk=%s found in image artifacts.\n", value); else if (source == 4 || source == 6) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:include-chunk=%s found in image options.\n", value); else /* if (source == 5 || source == 7) */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " png:include-chunk=%s found in image artifacts.\n", value); } if (IsOptionMember("all",value) != MagickFalse) { mng_info->ping_exclude_bKGD=excluding; mng_info->ping_exclude_caNv=excluding; mng_info->ping_exclude_cHRM=excluding; mng_info->ping_exclude_date=excluding; mng_info->ping_exclude_EXIF=excluding; mng_info->ping_exclude_eXIf=excluding; mng_info->ping_exclude_gAMA=excluding; mng_info->ping_exclude_iCCP=excluding; /* mng_info->ping_exclude_iTXt=excluding; */ mng_info->ping_exclude_oFFs=excluding; mng_info->ping_exclude_pHYs=excluding; mng_info->ping_exclude_sRGB=excluding; mng_info->ping_exclude_tIME=excluding; mng_info->ping_exclude_tEXt=excluding; mng_info->ping_exclude_tRNS=excluding; mng_info->ping_exclude_vpAg=excluding; mng_info->ping_exclude_zCCP=excluding; mng_info->ping_exclude_zTXt=excluding; } if (IsOptionMember("none",value) != MagickFalse) { mng_info->ping_exclude_bKGD=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_caNv=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_cHRM=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_date=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_eXIf=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_EXIF=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_gAMA=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_iCCP=excluding != MagickFalse ? MagickFalse : MagickTrue; /* mng_info->ping_exclude_iTXt=!excluding; */ mng_info->ping_exclude_oFFs=excluding != MagickFalse ? 
MagickFalse : MagickTrue; mng_info->ping_exclude_pHYs=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_sRGB=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tEXt=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tIME=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_tRNS=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_vpAg=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_zCCP=excluding != MagickFalse ? MagickFalse : MagickTrue; mng_info->ping_exclude_zTXt=excluding != MagickFalse ? MagickFalse : MagickTrue; } if (IsOptionMember("bkgd",value) != MagickFalse) mng_info->ping_exclude_bKGD=excluding; if (IsOptionMember("caNv",value) != MagickFalse) mng_info->ping_exclude_caNv=excluding; if (IsOptionMember("chrm",value) != MagickFalse) mng_info->ping_exclude_cHRM=excluding; if (IsOptionMember("date",value) != MagickFalse) mng_info->ping_exclude_date=excluding; if (IsOptionMember("exif",value) != MagickFalse) { mng_info->ping_exclude_EXIF=excluding; mng_info->ping_exclude_eXIf=excluding; } if (IsOptionMember("gama",value) != MagickFalse) mng_info->ping_exclude_gAMA=excluding; if (IsOptionMember("iccp",value) != MagickFalse) mng_info->ping_exclude_iCCP=excluding; #if 0 if (IsOptionMember("itxt",value) != MagickFalse) mng_info->ping_exclude_iTXt=excluding; #endif if (IsOptionMember("offs",value) != MagickFalse) mng_info->ping_exclude_oFFs=excluding; if (IsOptionMember("phys",value) != MagickFalse) mng_info->ping_exclude_pHYs=excluding; if (IsOptionMember("srgb",value) != MagickFalse) mng_info->ping_exclude_sRGB=excluding; if (IsOptionMember("text",value) != MagickFalse) mng_info->ping_exclude_tEXt=excluding; if (IsOptionMember("time",value) != MagickFalse) mng_info->ping_exclude_tIME=excluding; if (IsOptionMember("trns",value) != MagickFalse) mng_info->ping_exclude_tRNS=excluding; if (IsOptionMember("vpag",value) != MagickFalse) mng_info->ping_exclude_vpAg=excluding; if (IsOptionMember("zccp",value) != MagickFalse) mng_info->ping_exclude_zCCP=excluding; if (IsOptionMember("ztxt",value) != MagickFalse) mng_info->ping_exclude_zTXt=excluding; } if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Chunks to be excluded from the output png:"); if (mng_info->ping_exclude_bKGD != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " bKGD"); if (mng_info->ping_exclude_caNv != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " caNv"); if (mng_info->ping_exclude_cHRM != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " cHRM"); if (mng_info->ping_exclude_date != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " date"); if (mng_info->ping_exclude_EXIF != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " EXIF"); if (mng_info->ping_exclude_eXIf != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " eXIf"); if (mng_info->ping_exclude_gAMA != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " gAMA"); if (mng_info->ping_exclude_iCCP != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " iCCP"); #if 0 if (mng_info->ping_exclude_iTXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " iTXt"); #endif if (mng_info->ping_exclude_oFFs != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " oFFs"); if (mng_info->ping_exclude_pHYs != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " pHYs"); if (mng_info->ping_exclude_sRGB != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " sRGB"); if (mng_info->ping_exclude_tEXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tEXt"); if (mng_info->ping_exclude_tIME != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tIME"); if (mng_info->ping_exclude_tRNS != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " tRNS"); if (mng_info->ping_exclude_vpAg != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " vpAg"); if (mng_info->ping_exclude_zCCP != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " zCCP"); if (mng_info->ping_exclude_zTXt != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " zTXt"); } mng_info->need_blob = MagickTrue; status=WriteOnePNGImage(mng_info,image_info,image); (void) CloseBlob(image); mng_info=MngInfoFreeStruct(mng_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit WritePNGImage()"); return(status); } #if defined(JNG_SUPPORTED) /* Write one JNG image */ static MagickBooleanType WriteOneJNGImage(MngInfo *mng_info, const ImageInfo *image_info,Image *image) { Image *jpeg_image; ImageInfo *jpeg_image_info; int unique_filenames; MagickBooleanType logging, status; size_t length; unsigned char *blob, chunk[80], *p; unsigned int jng_alpha_compression_method, jng_alpha_sample_depth, jng_color_type, transparent; size_t jng_alpha_quality, jng_quality; logging=LogMagickEvent(CoderEvent,GetMagickModule(), " Enter WriteOneJNGImage()"); blob=(unsigned char *) NULL; jpeg_image=(Image *) NULL; jpeg_image_info=(ImageInfo *) NULL; length=0; unique_filenames=0; status=MagickTrue; transparent=image_info->type==GrayscaleMatteType || image_info->type==TrueColorMatteType || image->matte != MagickFalse; jng_alpha_sample_depth = 0; jng_quality=image_info->quality == 0UL ? 75UL : image_info->quality%1000; jng_alpha_compression_method=image->compression==JPEGCompression? 8 : 0; jng_alpha_quality=image_info->quality == 0UL ? 
75UL : image_info->quality; if (jng_alpha_quality >= 1000) jng_alpha_quality /= 1000; if (transparent != 0) { jng_color_type=14; /* Create JPEG blob, image, and image_info */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image_info for opacity."); jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info); if (jpeg_image_info == (ImageInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image."); jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image == (Image *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); status=SeparateImageChannel(jpeg_image,OpacityChannel); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); status=NegateImage(jpeg_image,MagickFalse); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); jpeg_image->matte=MagickFalse; jpeg_image_info->type=GrayscaleType; jpeg_image->quality=jng_alpha_quality; (void) SetImageType(jpeg_image,GrayscaleType); (void) AcquireUniqueFilename(jpeg_image->filename); unique_filenames++; (void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent, "%s",jpeg_image->filename); } else { jng_alpha_compression_method=0; jng_color_type=10; jng_alpha_sample_depth=0; } /* To do: check bit depth of PNG alpha channel */ /* Check if image is grayscale. */ if (image_info->type != TrueColorMatteType && image_info->type != TrueColorType && SetImageGray(image,&image->exception)) jng_color_type-=2; if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Quality = %d",(int) jng_quality); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Color Type = %d",jng_color_type); if (transparent != 0) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Compression = %d",jng_alpha_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Depth = %d",jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG Alpha Quality = %d",(int) jng_alpha_quality); } } if (transparent != 0) { if (jng_alpha_compression_method==0) { const char *value; /* Encode opacity as a grayscale PNG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating PNG blob for alpha."); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); length=0; (void) CopyMagickString(jpeg_image_info->magick,"PNG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"PNG",MaxTextExtent); jpeg_image_info->interlace=NoInterlace; /* Exclude all ancillary chunks */ (void) SetImageArtifact(jpeg_image,"png:exclude-chunks","all"); blob=ImageToBlob(jpeg_image_info,jpeg_image,&length, &image->exception); /* Retrieve sample depth used */ value=GetImageProperty(jpeg_image,"png:bit-depth-written"); if (value != (char *) NULL) jng_alpha_sample_depth= (unsigned int) value[0]; } else { /* Encode opacity as a grayscale JPEG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating JPEG blob for alpha."); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (status == MagickFalse) 
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); jpeg_image_info->interlace=NoInterlace; blob=ImageToBlob(jpeg_image_info,jpeg_image,&length, &image->exception); jng_alpha_sample_depth=8; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Successfully read jpeg_image into a blob, length=%.20g.", (double) length); } /* Destroy JPEG image and image_info */ jpeg_image=DestroyImage(jpeg_image); (void) RelinquishUniqueFileResource(jpeg_image_info->filename); unique_filenames--; jpeg_image_info=DestroyImageInfo(jpeg_image_info); } /* Write JHDR chunk */ (void) WriteBlobMSBULong(image,16L); /* chunk data length=16 */ PNGType(chunk,mng_JHDR); LogPNGChunk(logging,mng_JHDR,16L); PNGLong(chunk+4,(png_uint_32) image->columns); PNGLong(chunk+8,(png_uint_32) image->rows); chunk[12]=jng_color_type; chunk[13]=8; /* sample depth */ chunk[14]=8; /*jng_image_compression_method */ chunk[15]=(unsigned char) (image_info->interlace == NoInterlace ? 0 : 8); chunk[16]=jng_alpha_sample_depth; chunk[17]=jng_alpha_compression_method; chunk[18]=0; /*jng_alpha_filter_method */ chunk[19]=0; /*jng_alpha_interlace_method */ (void) WriteBlob(image,20,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,20)); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG width:%15lu",(unsigned long) image->columns); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG height:%14lu",(unsigned long) image->rows); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG color type:%10d",jng_color_type); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG sample depth:%8d",8); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG compression:%9d",8); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG interlace:%11d",0); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha depth:%9d",jng_alpha_sample_depth); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha compression:%3d",jng_alpha_compression_method); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha filter:%8d",0); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " JNG alpha interlace:%5d",0); } /* Write any JNG-chunk-b profiles */ (void) Magick_png_write_chunk_from_profile(image,"JNG-chunk-b",logging); /* Write leading ancillary chunks */ if (transparent != 0) { /* Write JNG bKGD chunk */ unsigned char blue, green, red; ssize_t num_bytes; if (jng_color_type == 8 || jng_color_type == 12) num_bytes=6L; else num_bytes=10L; (void) WriteBlobMSBULong(image,(size_t) (num_bytes-4L)); PNGType(chunk,mng_bKGD); LogPNGChunk(logging,mng_bKGD,(size_t) (num_bytes-4L)); red=ScaleQuantumToChar(image->background_color.red); green=ScaleQuantumToChar(image->background_color.green); blue=ScaleQuantumToChar(image->background_color.blue); *(chunk+4)=0; *(chunk+5)=red; *(chunk+6)=0; *(chunk+7)=green; *(chunk+8)=0; *(chunk+9)=blue; (void) WriteBlob(image,(size_t) num_bytes,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) num_bytes)); } if ((image->colorspace == sRGBColorspace || image->rendering_intent)) { /* Write JNG sRGB chunk */ (void) WriteBlobMSBULong(image,1L); PNGType(chunk,mng_sRGB); LogPNGChunk(logging,mng_sRGB,1L); if (image->rendering_intent != UndefinedIntent) chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (image->rendering_intent)); else chunk[4]=(unsigned char) 
Magick_RenderingIntent_to_PNG_RenderingIntent( (PerceptualIntent));
      (void) WriteBlob(image,5,chunk);
      (void) WriteBlobMSBULong(image,crc32(0,chunk,5));
    }

  else
    {
      if (image->gamma != 0.0)
        {
          /* Write JNG gAMA chunk */
          (void) WriteBlobMSBULong(image,4L);
          PNGType(chunk,mng_gAMA);
          LogPNGChunk(logging,mng_gAMA,4L);
          PNGLong(chunk+4,(png_uint_32) (100000*image->gamma+0.5));
          (void) WriteBlob(image,8,chunk);
          (void) WriteBlobMSBULong(image,crc32(0,chunk,8));
        }

      if ((mng_info->equal_chrms == MagickFalse) &&
          (image->chromaticity.red_primary.x != 0.0))
        {
          PrimaryInfo
            primary;

          /* Write JNG cHRM chunk */
          (void) WriteBlobMSBULong(image,32L);
          PNGType(chunk,mng_cHRM);
          LogPNGChunk(logging,mng_cHRM,32L);
          primary=image->chromaticity.white_point;
          PNGLong(chunk+4,(png_uint_32) (100000*primary.x+0.5));
          PNGLong(chunk+8,(png_uint_32) (100000*primary.y+0.5));
          primary=image->chromaticity.red_primary;
          PNGLong(chunk+12,(png_uint_32) (100000*primary.x+0.5));
          PNGLong(chunk+16,(png_uint_32) (100000*primary.y+0.5));
          primary=image->chromaticity.green_primary;
          PNGLong(chunk+20,(png_uint_32) (100000*primary.x+0.5));
          PNGLong(chunk+24,(png_uint_32) (100000*primary.y+0.5));
          primary=image->chromaticity.blue_primary;
          PNGLong(chunk+28,(png_uint_32) (100000*primary.x+0.5));
          PNGLong(chunk+32,(png_uint_32) (100000*primary.y+0.5));
          (void) WriteBlob(image,36,chunk);
          (void) WriteBlobMSBULong(image,crc32(0,chunk,36));
        }
    }

  if (image->x_resolution && image->y_resolution && !mng_info->equal_physs)
    {
      /* Write JNG pHYs chunk */
      (void) WriteBlobMSBULong(image,9L);
      PNGType(chunk,mng_pHYs);
      LogPNGChunk(logging,mng_pHYs,9L);

      if (image->units == PixelsPerInchResolution)
        {
          PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0/2.54+0.5));
          PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0/2.54+0.5));
          chunk[12]=1;
        }

      else
        {
          if (image->units == PixelsPerCentimeterResolution)
            {
              PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0+0.5));
              PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0+0.5));
              chunk[12]=1;
            }

          else
            {
              PNGLong(chunk+4,(png_uint_32) (image->x_resolution+0.5));
              PNGLong(chunk+8,(png_uint_32) (image->y_resolution+0.5));
              chunk[12]=0;
            }
        }
      (void) WriteBlob(image,13,chunk);
      (void) WriteBlobMSBULong(image,crc32(0,chunk,13));
    }

  if (mng_info->write_mng == 0 && (image->page.x || image->page.y))
    {
      /* Write JNG oFFs chunk */
      (void) WriteBlobMSBULong(image,9L);
      PNGType(chunk,mng_oFFs);
      LogPNGChunk(logging,mng_oFFs,9L);
      PNGsLong(chunk+4,(ssize_t) (image->page.x));
      PNGsLong(chunk+8,(ssize_t) (image->page.y));
      chunk[12]=0;
      (void) WriteBlob(image,13,chunk);
      (void) WriteBlobMSBULong(image,crc32(0,chunk,13));
    }

  if (mng_info->write_mng == 0 && (image->page.width || image->page.height))
    {
      (void) WriteBlobMSBULong(image,9L);  /* data length=9 */
      PNGType(chunk,mng_vpAg);
      LogPNGChunk(logging,mng_vpAg,9L);
      PNGLong(chunk+4,(png_uint_32) image->page.width);
      PNGLong(chunk+8,(png_uint_32) image->page.height);
      chunk[12]=0;   /* unit = pixels */
      (void) WriteBlob(image,13,chunk);
      (void) WriteBlobMSBULong(image,crc32(0,chunk,13));
    }

  if (transparent != 0)
    {
      if (jng_alpha_compression_method==0)
        {
          register ssize_t
            i;

          size_t
            len;

          /* Write IDAT chunk header */
          if (logging != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " Write IDAT chunks from blob, length=%.20g.",(double) length);

          /* Copy IDAT chunks */
          len=0;
          p=blob+8;

          for (i=8; i<(ssize_t) length; i+=len+12)
            {
              len=(size_t) (*p) << 24;
              len|=(size_t) (*(p+1)) << 16;
              len|=(size_t) (*(p+2)) << 8;
              len|=(size_t) (*(p+3));
              p+=4;

              if (*(p)==73 && *(p+1)==68 && *(p+2)==65 && *(p+3)==84) /* IDAT */
                {
                  /*
Found an IDAT chunk. */ (void) WriteBlobMSBULong(image,len); LogPNGChunk(logging,mng_IDAT,len); (void) WriteBlob(image,len+4,p); (void) WriteBlobMSBULong(image,crc32(0,p,(uInt) len+4)); } else { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Skipping %c%c%c%c chunk, length=%.20g.", *(p),*(p+1),*(p+2),*(p+3),(double) len); } p+=(8+len); } } else if (length != 0) { /* Write JDAA chunk header */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write JDAA chunk, length=%.20g.",(double) length); (void) WriteBlobMSBULong(image,(size_t) length); PNGType(chunk,mng_JDAA); LogPNGChunk(logging,mng_JDAA,length); /* Write JDAT chunk(s) data */ (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,blob); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob, (uInt) length)); } blob=(unsigned char *) RelinquishMagickMemory(blob); } /* Encode image as a JPEG blob */ if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image_info."); jpeg_image_info=(ImageInfo *) CloneImageInfo(image_info); if (jpeg_image_info == (ImageInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating jpeg_image."); jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image == (Image *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); (void) AcquireUniqueFilename(jpeg_image->filename); unique_filenames++; (void) FormatLocaleString(jpeg_image_info->filename,MaxTextExtent,"%s", jpeg_image->filename); status=OpenBlob(jpeg_image_info,jpeg_image,WriteBinaryBlobMode, &image->exception); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Created jpeg_image, %.20g x %.20g.",(double) jpeg_image->columns, (double) jpeg_image->rows); if (status == MagickFalse) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); if (jng_color_type == 8 || jng_color_type == 12) jpeg_image_info->type=GrayscaleType; jpeg_image_info->quality=jng_quality; jpeg_image->quality=jng_quality; (void) CopyMagickString(jpeg_image_info->magick,"JPEG",MaxTextExtent); (void) CopyMagickString(jpeg_image->magick,"JPEG",MaxTextExtent); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Creating blob."); blob=ImageToBlob(jpeg_image_info,jpeg_image,&length,&image->exception); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Successfully read jpeg_image into a blob, length=%.20g.", (double) length); (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Write JDAT chunk, length=%.20g.",(double) length); } /* Write JDAT chunk(s) */ (void) WriteBlobMSBULong(image,(size_t) length); PNGType(chunk,mng_JDAT); LogPNGChunk(logging,mng_JDAT,length); (void) WriteBlob(image,4,chunk); (void) WriteBlob(image,length,blob); (void) WriteBlobMSBULong(image,crc32(crc32(0,chunk,4),blob,(uInt) length)); jpeg_image=DestroyImage(jpeg_image); (void) RelinquishUniqueFileResource(jpeg_image_info->filename); unique_filenames--; jpeg_image_info=DestroyImageInfo(jpeg_image_info); blob=(unsigned char *) RelinquishMagickMemory(blob); /* Write any JNG-chunk-e profiles */ (void) Magick_png_write_chunk_from_profile(image,"JNG-chunk-e",logging); /* Write IEND chunk */ (void) WriteBlobMSBULong(image,0L); PNGType(chunk,mng_IEND); LogPNGChunk(logging,mng_IEND,0); 
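  /*
    The IDAT-copying loop above (the jng_alpha_compression_method==0 case)
    walks the embedded PNG blob chunk by chunk: each chunk is a 4-byte
    big-endian length, a 4-byte type, the data, and a 4-byte CRC computed
    over type+data.  The disabled sketch below shows the same walking
    pattern in isolation.  It is illustrative only, not part of the coder;
    walk_png_chunks() and its printf reporting are assumptions made for
    the example.
  */
#if 0
#include <stddef.h>
#include <stdio.h>

/* Walk the chunks of a PNG stream that begins with the 8-byte signature.
   Returns 0 on success, -1 if a chunk runs past the end of the buffer. */
static int walk_png_chunks(const unsigned char *blob,size_t length)
{
  size_t i=8;  /* skip the 8-byte PNG signature */

  while (i+12 <= length)  /* room for length(4)+type(4)+CRC(4) */
  {
    size_t len=((size_t) blob[i] << 24) | ((size_t) blob[i+1] << 16) |
      ((size_t) blob[i+2] << 8) | (size_t) blob[i+3];
    const unsigned char *type=blob+i+4;

    if (i+12+len > length)
      return(-1);  /* truncated chunk */

    (void) printf("%c%c%c%c chunk, %lu data bytes\n",type[0],type[1],
      type[2],type[3],(unsigned long) len);
    i+=12+len;  /* length + type + data + CRC */
  }
  return(0);
}
#endif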
(void) WriteBlob(image,4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,4)); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteOneJNGImage(); unique_filenames=%d",unique_filenames); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e J N G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteJNGImage() writes a JPEG Network Graphics (JNG) image file. % % JNG support written by Glenn Randers-Pehrson, glennrp@image... % % The format of the WriteJNGImage method is: % % MagickBooleanType WriteJNGImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: The image. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% */ static MagickBooleanType WriteJNGImage(const ImageInfo *image_info,Image *image) { MagickBooleanType logging, status; MngInfo *mng_info; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WriteJNGImage()"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); if ((image->columns > 65535UL) || (image->rows > 65535UL)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); /* Allocate a MngInfo structure. */ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; (void) WriteBlob(image,8,(const unsigned char *) "\213JNG\r\n\032\n"); status=WriteOneJNGImage(mng_info,image_info,image); mng_info=MngInfoFreeStruct(mng_info); (void) CloseBlob(image); (void) CatchImageException(image); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " exit WriteJNGImage()"); return(status); } #endif static MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) { const char *option; Image *next_image; MagickBooleanType status; volatile MagickBooleanType logging; MngInfo *mng_info; int image_count, need_iterations, need_matte; volatile int #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) need_local_plte, #endif all_images_are_gray, need_defi, use_global_plte; register ssize_t i; unsigned char chunk[800]; volatile unsigned int write_jng, write_mng; volatile size_t scene; size_t final_delay=0, initial_delay; #if (PNG_LIBPNG_VER < 10200) if (image_info->verbose) printf("Your PNG library (libpng-%s) is rather old.\n", PNG_LIBPNG_VER_STRING); #endif /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); logging=LogMagickEvent(CoderEvent,GetMagickModule(),"Enter WriteMNGImage()"); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); /* Allocate a MngInfo structure. 
*/ mng_info=(MngInfo *) AcquireMagickMemory(sizeof(MngInfo)); if (mng_info == (MngInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); /* Initialize members of the MngInfo structure. */ (void) ResetMagickMemory(mng_info,0,sizeof(MngInfo)); mng_info->image=image; write_mng=LocaleCompare(image_info->magick,"MNG") == 0; /* * See if user has requested a specific PNG subformat to be used * for all of the PNGs in the MNG being written, e.g., * * convert *.png png8:animation.mng * * To do: check -define png:bit_depth and png:color_type as well, * or perhaps use mng:bit_depth and mng:color_type instead for * global settings. */ mng_info->write_png8=LocaleCompare(image_info->magick,"PNG8") == 0; mng_info->write_png24=LocaleCompare(image_info->magick,"PNG24") == 0; mng_info->write_png32=LocaleCompare(image_info->magick,"PNG32") == 0; write_jng=MagickFalse; if (image_info->compression == JPEGCompression) write_jng=MagickTrue; mng_info->adjoin=image_info->adjoin && (GetNextImageInList(image) != (Image *) NULL) && write_mng; if (logging != MagickFalse) { /* Log some info about the input */ Image *p; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Checking input image(s)\n" " Image_info depth: %.20g, Type: %d", (double) image_info->depth, image_info->type); scene=0; for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Scene: %.20g\n, Image depth: %.20g", (double) scene++, (double) p->depth); if (p->matte) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Matte: True"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Matte: False"); if (p->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Storage class: PseudoClass"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Storage class: DirectClass"); if (p->colors) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: %.20g",(double) p->colors); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Number of colors: unspecified"); if (mng_info->adjoin == MagickFalse) break; } } use_global_plte=MagickFalse; all_images_are_gray=MagickFalse; #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED need_local_plte=MagickTrue; #endif need_defi=MagickFalse; need_matte=MagickFalse; mng_info->framing_mode=1; mng_info->old_framing_mode=1; if (write_mng) if (image_info->page != (char *) NULL) { /* Determine image bounding box. */ SetGeometry(image,&mng_info->page); (void) ParseMetaGeometry(image_info->page,&mng_info->page.x, &mng_info->page.y,&mng_info->page.width,&mng_info->page.height); } if (write_mng) { unsigned int need_geom; unsigned short red, green, blue; mng_info->page=image->page; need_geom=MagickTrue; if (mng_info->page.width || mng_info->page.height) need_geom=MagickFalse; /* Check all the scenes. 
*/ initial_delay=image->delay; need_iterations=MagickFalse; mng_info->equal_chrms=image->chromaticity.red_primary.x != 0.0; mng_info->equal_physs=MagickTrue, mng_info->equal_gammas=MagickTrue; mng_info->equal_srgbs=MagickTrue; mng_info->equal_backgrounds=MagickTrue; image_count=0; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) all_images_are_gray=MagickTrue; mng_info->equal_palettes=MagickFalse; need_local_plte=MagickFalse; #endif for (next_image=image; next_image != (Image *) NULL; ) { if (need_geom) { if ((next_image->columns+next_image->page.x) > mng_info->page.width) mng_info->page.width=next_image->columns+next_image->page.x; if ((next_image->rows+next_image->page.y) > mng_info->page.height) mng_info->page.height=next_image->rows+next_image->page.y; } if (next_image->page.x || next_image->page.y) need_defi=MagickTrue; if (next_image->matte) need_matte=MagickTrue; if ((int) next_image->dispose >= BackgroundDispose) if (next_image->matte || next_image->page.x || next_image->page.y || ((next_image->columns < mng_info->page.width) && (next_image->rows < mng_info->page.height))) mng_info->need_fram=MagickTrue; if (next_image->iterations) need_iterations=MagickTrue; final_delay=next_image->delay; if (final_delay != initial_delay || final_delay > 1UL* next_image->ticks_per_second) mng_info->need_fram=1; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) /* check for global palette possibility. */ if (image->matte != MagickFalse) need_local_plte=MagickTrue; if (need_local_plte == 0) { if (SetImageGray(image,&image->exception) == MagickFalse) all_images_are_gray=MagickFalse; mng_info->equal_palettes=PalettesAreEqual(image,next_image); if (use_global_plte == 0) use_global_plte=mng_info->equal_palettes; need_local_plte=!mng_info->equal_palettes; } #endif if (GetNextImageInList(next_image) != (Image *) NULL) { if (next_image->background_color.red != next_image->next->background_color.red || next_image->background_color.green != next_image->next->background_color.green || next_image->background_color.blue != next_image->next->background_color.blue) mng_info->equal_backgrounds=MagickFalse; if (next_image->gamma != next_image->next->gamma) mng_info->equal_gammas=MagickFalse; if (next_image->rendering_intent != next_image->next->rendering_intent) mng_info->equal_srgbs=MagickFalse; if ((next_image->units != next_image->next->units) || (next_image->x_resolution != next_image->next->x_resolution) || (next_image->y_resolution != next_image->next->y_resolution)) mng_info->equal_physs=MagickFalse; if (mng_info->equal_chrms) { if (next_image->chromaticity.red_primary.x != next_image->next->chromaticity.red_primary.x || next_image->chromaticity.red_primary.y != next_image->next->chromaticity.red_primary.y || next_image->chromaticity.green_primary.x != next_image->next->chromaticity.green_primary.x || next_image->chromaticity.green_primary.y != next_image->next->chromaticity.green_primary.y || next_image->chromaticity.blue_primary.x != next_image->next->chromaticity.blue_primary.x || next_image->chromaticity.blue_primary.y != next_image->next->chromaticity.blue_primary.y || next_image->chromaticity.white_point.x != next_image->next->chromaticity.white_point.x || next_image->chromaticity.white_point.y != next_image->next->chromaticity.white_point.y) mng_info->equal_chrms=MagickFalse; } } image_count++; next_image=GetNextImageInList(next_image); } if (image_count < 2) { mng_info->equal_backgrounds=MagickFalse; 
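      /*
        The scan above derives the MNG frame size as the union of the
        per-image page placements: width is the largest columns+page.x
        over all frames, and height the largest rows+page.y.  The disabled
        sketch below shows the same computation in isolation; the Frame
        type and the assumption of non-negative page offsets (as in the
        need_geom path) are illustrative only.
      */
#if 0
#include <stddef.h>

typedef struct
{
  size_t
    columns,
    rows,
    page_x,  /* non-negative placement offsets */
    page_y;
} Frame;

static void mng_bounding_box(const Frame *frames,size_t count,
  size_t *width,size_t *height)
{
  size_t
    i;

  *width=0;
  *height=0;

  for (i=0; i < count; i++)
  {
    if (frames[i].columns+frames[i].page_x > *width)
      *width=frames[i].columns+frames[i].page_x;

    if (frames[i].rows+frames[i].page_y > *height)
      *height=frames[i].rows+frames[i].page_y;
  }
}
#endif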
mng_info->equal_chrms=MagickFalse; mng_info->equal_gammas=MagickFalse; mng_info->equal_srgbs=MagickFalse; mng_info->equal_physs=MagickFalse; use_global_plte=MagickFalse; #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED need_local_plte=MagickTrue; #endif need_iterations=MagickFalse; } if (mng_info->need_fram == MagickFalse) { /* Only certain framing rates 100/n are exactly representable without the FRAM chunk but we'll allow some slop in VLC files */ if (final_delay == 0) { if (need_iterations != MagickFalse) { /* It's probably a GIF with loop; don't run it *too* fast. */ if (mng_info->adjoin) { final_delay=10; (void) ThrowMagickException(&image->exception, GetMagickModule(),CoderWarning, "input has zero delay between all frames; assuming", " 10 cs `%s'",""); } } else mng_info->ticks_per_second=0; } if (final_delay != 0) mng_info->ticks_per_second=(png_uint_32) (image->ticks_per_second/final_delay); if (final_delay > 50) mng_info->ticks_per_second=2; if (final_delay > 75) mng_info->ticks_per_second=1; if (final_delay > 125) mng_info->need_fram=MagickTrue; if (need_defi && final_delay > 2 && (final_delay != 4) && (final_delay != 5) && (final_delay != 10) && (final_delay != 20) && (final_delay != 25) && (final_delay != 50) && (final_delay != (size_t) image->ticks_per_second)) mng_info->need_fram=MagickTrue; /* make it exact; cannot be VLC */ } if (mng_info->need_fram != MagickFalse) mng_info->ticks_per_second=1UL*image->ticks_per_second; /* If pseudocolor, we should also check to see if all the palettes are identical and write a global PLTE if they are. ../glennrp Feb 99. */ /* Write the MNG version 1.0 signature and MHDR chunk. */ (void) WriteBlob(image,8,(const unsigned char *) "\212MNG\r\n\032\n"); (void) WriteBlobMSBULong(image,28L); /* chunk data length=28 */ PNGType(chunk,mng_MHDR); LogPNGChunk(logging,mng_MHDR,28L); PNGLong(chunk+4,(png_uint_32) mng_info->page.width); PNGLong(chunk+8,(png_uint_32) mng_info->page.height); PNGLong(chunk+12,mng_info->ticks_per_second); PNGLong(chunk+16,0L); /* layer count=unknown */ PNGLong(chunk+20,0L); /* frame count=unknown */ PNGLong(chunk+24,0L); /* play time=unknown */ if (write_jng) { if (need_matte) { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,27L); /* simplicity=LC+JNG */ else PNGLong(chunk+28,25L); /* simplicity=VLC+JNG */ } else { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,19L); /* simplicity=LC+JNG, no transparency */ else PNGLong(chunk+28,17L); /* simplicity=VLC+JNG, no transparency */ } } else { if (need_matte) { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,11L); /* simplicity=LC */ else PNGLong(chunk+28,9L); /* simplicity=VLC */ } else { if (need_defi || mng_info->need_fram || use_global_plte) PNGLong(chunk+28,3L); /* simplicity=LC, no transparency */ else PNGLong(chunk+28,1L); /* simplicity=VLC, no transparency */ } } (void) WriteBlob(image,32,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,32)); option=GetImageOption(image_info,"mng:need-cacheoff"); if (option != (const char *) NULL) { size_t length; /* Write "nEED CACHEOFF" to turn playback caching off for streaming MNG. 
*/ PNGType(chunk,mng_nEED); length=CopyMagickString((char *) chunk+4,"CACHEOFF",20); (void) WriteBlobMSBULong(image,(size_t) length); LogPNGChunk(logging,mng_nEED,(size_t) length); length+=4; (void) WriteBlob(image,length,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) length)); } if ((GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) != (Image *) NULL) && (image->iterations != 1)) { /* Write MNG TERM chunk */ (void) WriteBlobMSBULong(image,10L); /* data length=10 */ PNGType(chunk,mng_TERM); LogPNGChunk(logging,mng_TERM,10L); chunk[4]=3; /* repeat animation */ chunk[5]=0; /* show last frame when done */ PNGLong(chunk+6,(png_uint_32) (mng_info->ticks_per_second* final_delay/MagickMax(image->ticks_per_second,1))); if (image->iterations == 0) PNGLong(chunk+10,PNG_UINT_31_MAX); else PNGLong(chunk+10,(png_uint_32) image->iterations); if (logging != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), " TERM delay: %.20g",(double) (mng_info->ticks_per_second* final_delay/MagickMax(image->ticks_per_second,1))); if (image->iterations == 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " TERM iterations: %.20g",(double) PNG_UINT_31_MAX); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image iterations: %.20g",(double) image->iterations); } (void) WriteBlob(image,14,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,14)); } /* To do: check for cHRM+gAMA == sRGB, and write sRGB instead. */ if ((image->colorspace == sRGBColorspace || image->rendering_intent) && mng_info->equal_srgbs) { /* Write MNG sRGB chunk */ (void) WriteBlobMSBULong(image,1L); PNGType(chunk,mng_sRGB); LogPNGChunk(logging,mng_sRGB,1L); if (image->rendering_intent != UndefinedIntent) chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (image->rendering_intent)); else chunk[4]=(unsigned char) Magick_RenderingIntent_to_PNG_RenderingIntent( (PerceptualIntent)); (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); mng_info->have_write_global_srgb=MagickTrue; } else { if (image->gamma && mng_info->equal_gammas) { /* Write MNG gAMA chunk */ (void) WriteBlobMSBULong(image,4L); PNGType(chunk,mng_gAMA); LogPNGChunk(logging,mng_gAMA,4L); PNGLong(chunk+4,(png_uint_32) (100000*image->gamma+0.5)); (void) WriteBlob(image,8,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,8)); mng_info->have_write_global_gama=MagickTrue; } if (mng_info->equal_chrms) { PrimaryInfo primary; /* Write MNG cHRM chunk */ (void) WriteBlobMSBULong(image,32L); PNGType(chunk,mng_cHRM); LogPNGChunk(logging,mng_cHRM,32L); primary=image->chromaticity.white_point; PNGLong(chunk+4,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+8,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.red_primary; PNGLong(chunk+12,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+16,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.green_primary; PNGLong(chunk+20,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+24,(png_uint_32) (100000*primary.y+0.5)); primary=image->chromaticity.blue_primary; PNGLong(chunk+28,(png_uint_32) (100000*primary.x+0.5)); PNGLong(chunk+32,(png_uint_32) (100000*primary.y+0.5)); (void) WriteBlob(image,36,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,36)); mng_info->have_write_global_chrm=MagickTrue; } } if (image->x_resolution && image->y_resolution && mng_info->equal_physs) { /* Write MNG pHYs chunk */ (void) WriteBlobMSBULong(image,9L); PNGType(chunk,mng_pHYs); LogPNGChunk(logging,mng_pHYs,9L); if 
(image->units == PixelsPerInchResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0/2.54+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0/2.54+0.5)); chunk[12]=1; } else { if (image->units == PixelsPerCentimeterResolution) { PNGLong(chunk+4,(png_uint_32) (image->x_resolution*100.0+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution*100.0+0.5)); chunk[12]=1; } else { PNGLong(chunk+4,(png_uint_32) (image->x_resolution+0.5)); PNGLong(chunk+8,(png_uint_32) (image->y_resolution+0.5)); chunk[12]=0; } } (void) WriteBlob(image,13,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,13)); } /* Write MNG BACK chunk and global bKGD chunk, if the image is transparent or does not cover the entire frame. */ if (write_mng && (image->matte || image->page.x > 0 || image->page.y > 0 || (image->page.width && (image->page.width+image->page.x < mng_info->page.width)) || (image->page.height && (image->page.height+image->page.y < mng_info->page.height)))) { (void) WriteBlobMSBULong(image,6L); PNGType(chunk,mng_BACK); LogPNGChunk(logging,mng_BACK,6L); red=ScaleQuantumToShort(image->background_color.red); green=ScaleQuantumToShort(image->background_color.green); blue=ScaleQuantumToShort(image->background_color.blue); PNGShort(chunk+4,red); PNGShort(chunk+6,green); PNGShort(chunk+8,blue); (void) WriteBlob(image,10,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,10)); if (mng_info->equal_backgrounds) { (void) WriteBlobMSBULong(image,6L); PNGType(chunk,mng_bKGD); LogPNGChunk(logging,mng_bKGD,6L); (void) WriteBlob(image,10,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,10)); } } #ifdef PNG_WRITE_EMPTY_PLTE_SUPPORTED if ((need_local_plte == MagickFalse) && (image->storage_class == PseudoClass) && (all_images_are_gray == MagickFalse)) { size_t data_length; /* Write MNG PLTE chunk */ data_length=3*image->colors; (void) WriteBlobMSBULong(image,data_length); PNGType(chunk,mng_PLTE); LogPNGChunk(logging,mng_PLTE,data_length); for (i=0; i < (ssize_t) image->colors; i++) { chunk[4+i*3]=ScaleQuantumToChar(image->colormap[i].red) & 0xff; chunk[5+i*3]=ScaleQuantumToChar(image->colormap[i].green) & 0xff; chunk[6+i*3]=ScaleQuantumToChar(image->colormap[i].blue) & 0xff; } (void) WriteBlob(image,data_length+4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,(uInt) (data_length+4))); mng_info->have_write_global_plte=MagickTrue; } #endif } scene=0; mng_info->delay=0; #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) mng_info->equal_palettes=MagickFalse; #endif do { if (mng_info->adjoin) { #if defined(PNG_WRITE_EMPTY_PLTE_SUPPORTED) || \ defined(PNG_MNG_FEATURES_SUPPORTED) /* If we aren't using a global palette for the entire MNG, check to see if we can use one for two or more consecutive images. 
*/ if (need_local_plte && use_global_plte && !all_images_are_gray) { if (mng_info->IsPalette) { /* When equal_palettes is true, this image has the same palette as the previous PseudoClass image */ mng_info->have_write_global_plte=mng_info->equal_palettes; mng_info->equal_palettes=PalettesAreEqual(image,image->next); if (mng_info->equal_palettes && !mng_info->have_write_global_plte) { /* Write MNG PLTE chunk */ size_t data_length; data_length=3*image->colors; (void) WriteBlobMSBULong(image,data_length); PNGType(chunk,mng_PLTE); LogPNGChunk(logging,mng_PLTE,data_length); for (i=0; i < (ssize_t) image->colors; i++) { chunk[4+i*3]=ScaleQuantumToChar(image->colormap[i].red); chunk[5+i*3]=ScaleQuantumToChar(image->colormap[i].green); chunk[6+i*3]=ScaleQuantumToChar(image->colormap[i].blue); } (void) WriteBlob(image,data_length+4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk, (uInt) (data_length+4))); mng_info->have_write_global_plte=MagickTrue; } } else mng_info->have_write_global_plte=MagickFalse; } #endif if (need_defi) { ssize_t previous_x, previous_y; if (scene != 0) { previous_x=mng_info->page.x; previous_y=mng_info->page.y; } else { previous_x=0; previous_y=0; } mng_info->page=image->page; if ((mng_info->page.x != previous_x) || (mng_info->page.y != previous_y)) { (void) WriteBlobMSBULong(image,12L); /* data length=12 */ PNGType(chunk,mng_DEFI); LogPNGChunk(logging,mng_DEFI,12L); chunk[4]=0; /* object 0 MSB */ chunk[5]=0; /* object 0 LSB */ chunk[6]=0; /* visible */ chunk[7]=0; /* abstract */ PNGLong(chunk+8,(png_uint_32) mng_info->page.x); PNGLong(chunk+12,(png_uint_32) mng_info->page.y); (void) WriteBlob(image,16,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,16)); } } } mng_info->write_mng=write_mng; if ((int) image->dispose >= 3) mng_info->framing_mode=3; if (mng_info->need_fram && mng_info->adjoin && ((image->delay != mng_info->delay) || (mng_info->framing_mode != mng_info->old_framing_mode))) { if (image->delay == mng_info->delay) { /* Write a MNG FRAM chunk with the new framing mode. */ (void) WriteBlobMSBULong(image,1L); /* data length=1 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,1L); chunk[4]=(unsigned char) mng_info->framing_mode; (void) WriteBlob(image,5,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,5)); } else { /* Write a MNG FRAM chunk with the delay. */ (void) WriteBlobMSBULong(image,10L); /* data length=10 */ PNGType(chunk,mng_FRAM); LogPNGChunk(logging,mng_FRAM,10L); chunk[4]=(unsigned char) mng_info->framing_mode; chunk[5]=0; /* frame name separator (no name) */ chunk[6]=2; /* flag for changing default delay */ chunk[7]=0; /* flag for changing frame timeout */ chunk[8]=0; /* flag for changing frame clipping */ chunk[9]=0; /* flag for changing frame sync_id */ PNGLong(chunk+10,(png_uint_32) ((mng_info->ticks_per_second* image->delay)/MagickMax(image->ticks_per_second,1))); (void) WriteBlob(image,14,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,14)); mng_info->delay=(png_uint_32) image->delay; } mng_info->old_framing_mode=mng_info->framing_mode; } #if defined(JNG_SUPPORTED) if (image_info->compression == JPEGCompression) { ImageInfo *write_info; if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing JNG object."); /* To do: specify the desired alpha compression method. 
*/ write_info=CloneImageInfo(image_info); write_info->compression=UndefinedCompression; status=WriteOneJNGImage(mng_info,write_info,image); write_info=DestroyImageInfo(write_info); } else #endif { if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Writing PNG object."); mng_info->need_blob = MagickFalse; mng_info->ping_preserve_colormap = MagickFalse; /* We don't want any ancillary chunks written */ mng_info->ping_exclude_bKGD=MagickTrue; mng_info->ping_exclude_caNv=MagickTrue; mng_info->ping_exclude_cHRM=MagickTrue; mng_info->ping_exclude_date=MagickTrue; mng_info->ping_exclude_EXIF=MagickTrue; mng_info->ping_exclude_eXIf=MagickTrue; mng_info->ping_exclude_gAMA=MagickTrue; mng_info->ping_exclude_iCCP=MagickTrue; /* mng_info->ping_exclude_iTXt=MagickTrue; */ mng_info->ping_exclude_oFFs=MagickTrue; mng_info->ping_exclude_pHYs=MagickTrue; mng_info->ping_exclude_sRGB=MagickTrue; mng_info->ping_exclude_tEXt=MagickTrue; mng_info->ping_exclude_tRNS=MagickTrue; mng_info->ping_exclude_vpAg=MagickTrue; mng_info->ping_exclude_zCCP=MagickTrue; mng_info->ping_exclude_zTXt=MagickTrue; status=WriteOnePNGImage(mng_info,image_info,image); } if (status == MagickFalse) { mng_info=MngInfoFreeStruct(mng_info); (void) CloseBlob(image); return(MagickFalse); } (void) CatchImageException(image); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++, GetImageListLength(image)); if (status == MagickFalse) break; } while (mng_info->adjoin); if (write_mng) { while (GetPreviousImageInList(image) != (Image *) NULL) image=GetPreviousImageInList(image); /* Write the MEND chunk. */ (void) WriteBlobMSBULong(image,0x00000000L); PNGType(chunk,mng_MEND); LogPNGChunk(logging,mng_MEND,0L); (void) WriteBlob(image,4,chunk); (void) WriteBlobMSBULong(image,crc32(0,chunk,4)); } /* Relinquish resources. */ (void) CloseBlob(image); mng_info=MngInfoFreeStruct(mng_info); if (logging != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"exit WriteMNGImage()"); return(MagickTrue); } #else /* PNG_LIBPNG_VER > 10011 */ static MagickBooleanType WritePNGImage(const ImageInfo *image_info,Image *image) { (void) image; printf("Your PNG library is too old: You have libpng-%s\n", PNG_LIBPNG_VER_STRING); ThrowBinaryException(CoderError,"PNG library is too old", image_info->filename); } static MagickBooleanType WriteMNGImage(const ImageInfo *image_info,Image *image) { return(WritePNGImage(image_info,image)); } #endif /* PNG_LIBPNG_VER > 10011 */ #endif
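/*
  Note on the MHDR "simplicity profile" words written by WriteMNGImage()
  above (1, 3, 9, 11, 17, 19, 25, 27): they decompose into independent
  bits of the MNG simplicity profile.  Bit 0 marks the profile word as
  valid, bit 1 advertises simple MNG features (set when DEFI/FRAM or a
  global PLTE is needed), bit 3 advertises transparency, and bit 4
  advertises JNG.  The disabled sketch below recomposes the same eight
  values; the function name is illustrative only.
*/
#if 0
static unsigned long mng_simplicity(int need_lc,int need_matte,
  int write_jng)
{
  unsigned long
    profile = 1UL;      /* bit 0: profile word is valid */

  if (need_lc)          /* need_defi || need_fram || use_global_plte */
    profile|=1UL << 1;  /* bit 1: simple MNG features */

  if (need_matte)
    profile|=1UL << 3;  /* bit 3: transparency */

  if (write_jng)
    profile|=1UL << 4;  /* bit 4: JNG */

  return(profile);      /* e.g., LC+JNG with transparency -> 27 */
}
#endif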
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* kdc/do_tgs_req.c - KDC Routines to deal with TGS_REQ's */ /* * Copyright 1990, 1991, 2001, 2007, 2008, 2009, 2013, 2014 by the * Massachusetts Institute of Technology. All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ /* * Copyright (c) 2006-2008, Novell, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The copyright holder's name is not used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "k5-int.h" #include <syslog.h> #ifdef HAVE_NETINET_IN_H #include <sys/types.h> #include <netinet/in.h> #ifndef hpux #include <arpa/inet.h> #endif #endif #include "kdc_util.h" #include "kdc_audit.h" #include "policy.h" #include "extern.h" #include "adm_proto.h" #include <ctype.h> static krb5_error_code find_alternate_tgs(kdc_realm_t *, krb5_principal, krb5_db_entry **, const char**); static krb5_error_code prepare_error_tgs(struct kdc_request_state *, krb5_kdc_req *,krb5_ticket *,int, krb5_principal,krb5_data **,const char *, krb5_pa_data **); static krb5_error_code decrypt_2ndtkt(kdc_realm_t *, krb5_kdc_req *, krb5_flags, krb5_db_entry **, const char **); static krb5_error_code gen_session_key(kdc_realm_t *, krb5_kdc_req *, krb5_db_entry *, krb5_keyblock *, const char **); static krb5_int32 find_referral_tgs(kdc_realm_t *, krb5_kdc_req *, krb5_principal *); static krb5_error_code db_get_svc_princ(krb5_context, krb5_principal, krb5_flags, krb5_db_entry **, const char **); static krb5_error_code search_sprinc(kdc_realm_t *, krb5_kdc_req *, krb5_flags, krb5_db_entry **, const char **); /*ARGSUSED*/ krb5_error_code process_tgs_req(struct server_handle *handle, krb5_data *pkt, const krb5_fulladdr *from, krb5_data **response) { krb5_keyblock * subkey = 0; krb5_keyblock *header_key = NULL; krb5_kdc_req *request = 0; krb5_db_entry *server = NULL; krb5_db_entry *stkt_server = NULL; krb5_kdc_rep reply; krb5_enc_kdc_rep_part reply_encpart; krb5_ticket ticket_reply, *header_ticket = 0; int st_idx = 0; krb5_enc_tkt_part enc_tkt_reply; int newtransited = 0; krb5_error_code retval = 0; krb5_keyblock encrypting_key; krb5_timestamp kdc_time, authtime = 0; krb5_keyblock session_key; krb5_keyblock *reply_key = NULL; krb5_key_data *server_key; krb5_principal cprinc = NULL, sprinc = NULL, altcprinc = NULL; krb5_last_req_entry *nolrarray[2], nolrentry; int errcode; const char *status = 0; krb5_enc_tkt_part *header_enc_tkt = NULL; /* TGT */ krb5_enc_tkt_part *subject_tkt = NULL; /* TGT or evidence ticket */ krb5_db_entry *client = NULL, *header_server = NULL; krb5_db_entry *local_tgt, *local_tgt_storage = NULL; krb5_pa_s4u_x509_user *s4u_x509_user = NULL; /* protocol transition request */ krb5_authdata **kdc_issued_auth_data = NULL; /* auth data issued by KDC */ unsigned int c_flags = 0, s_flags = 0; /* client/server KDB flags */ krb5_boolean is_referral; const char *emsg = NULL; krb5_kvno ticket_kvno = 0; struct kdc_request_state *state = NULL; krb5_pa_data *pa_tgs_req; /*points into request*/ krb5_data scratch; krb5_pa_data **e_data = NULL; kdc_realm_t *kdc_active_realm = NULL; krb5_audit_state *au_state = NULL; krb5_data **auth_indicators = NULL; memset(&reply, 0, sizeof(reply)); memset(&reply_encpart, 0, sizeof(reply_encpart)); memset(&ticket_reply, 0, sizeof(ticket_reply)); memset(&enc_tkt_reply, 0, sizeof(enc_tkt_reply)); session_key.contents = NULL; retval = decode_krb5_tgs_req(pkt, &request); if (retval) return retval; /* Save pointer to client-requested service principal, in case of * errors before a successful call to search_sprinc(). */ sprinc = request->server; if (request->msg_type != KRB5_TGS_REQ) { krb5_free_kdc_req(handle->kdc_err_context, request); return KRB5_BADMSGTYPE; } /* * setup_server_realm() sets up the global realm-specific data pointer. 
*/ kdc_active_realm = setup_server_realm(handle, request->server); if (kdc_active_realm == NULL) { krb5_free_kdc_req(handle->kdc_err_context, request); return KRB5KDC_ERR_WRONG_REALM; } errcode = kdc_make_rstate(kdc_active_realm, &state); if (errcode !=0) { krb5_free_kdc_req(handle->kdc_err_context, request); return errcode; } /* Initialize audit state. */ errcode = kau_init_kdc_req(kdc_context, request, from, &au_state); if (errcode) { krb5_free_kdc_req(handle->kdc_err_context, request); return errcode; } /* Seed the audit trail with the request ID and basic information. */ kau_tgs_req(kdc_context, TRUE, au_state); errcode = kdc_process_tgs_req(kdc_active_realm, request, from, pkt, &header_ticket, &header_server, &header_key, &subkey, &pa_tgs_req); if (header_ticket && header_ticket->enc_part2) cprinc = header_ticket->enc_part2->client; if (errcode) { status = "PROCESS_TGS"; goto cleanup; } if (!header_ticket) { errcode = KRB5_NO_TKT_SUPPLIED; /* XXX? */ status="UNEXPECTED NULL in header_ticket"; goto cleanup; } errcode = kau_make_tkt_id(kdc_context, header_ticket, &au_state->tkt_in_id); if (errcode) { status = "GENERATE_TICKET_ID"; goto cleanup; } scratch.length = pa_tgs_req->length; scratch.data = (char *) pa_tgs_req->contents; errcode = kdc_find_fast(&request, &scratch, subkey, header_ticket->enc_part2->session, state, NULL); /* Reset sprinc because kdc_find_fast() can replace request. */ sprinc = request->server; if (errcode !=0) { status = "FIND_FAST"; goto cleanup; } errcode = get_local_tgt(kdc_context, &sprinc->realm, header_server, &local_tgt, &local_tgt_storage); if (errcode) { status = "GET_LOCAL_TGT"; goto cleanup; } /* Ignore (for now) the request modification due to FAST processing. */ au_state->request = request; /* * Pointer to the encrypted part of the header ticket, which may be * replaced to point to the encrypted part of the evidence ticket * if constrained delegation is used. This simplifies the number of * special cases for constrained delegation. */ header_enc_tkt = header_ticket->enc_part2; /* * We've already dealt with the AP_REQ authentication, so we can * use header_ticket freely. The encrypted part (if any) has been * decrypted with the session key. */ au_state->stage = SRVC_PRINC; /* XXX make sure server here has the proper realm...taken from AP_REQ header? */ setflag(s_flags, KRB5_KDB_FLAG_ALIAS_OK); if (isflagset(request->kdc_options, KDC_OPT_CANONICALIZE)) { setflag(c_flags, KRB5_KDB_FLAG_CANONICALIZE); setflag(s_flags, KRB5_KDB_FLAG_CANONICALIZE); } errcode = search_sprinc(kdc_active_realm, request, s_flags, &server, &status); if (errcode != 0) goto cleanup; sprinc = server->princ; /* If we got a cross-realm TGS which is not the requested server, we are * issuing a referral (or alternate TGT, which we treat similarly). 
*/ is_referral = is_cross_tgs_principal(server->princ) && !krb5_principal_compare(kdc_context, request->server, server->princ); au_state->stage = VALIDATE_POL; if ((errcode = krb5_timeofday(kdc_context, &kdc_time))) { status = "TIME_OF_DAY"; goto cleanup; } if ((retval = validate_tgs_request(kdc_active_realm, request, *server, header_ticket, kdc_time, &status, &e_data))) { if (!status) status = "UNKNOWN_REASON"; if (retval == KDC_ERR_POLICY || retval == KDC_ERR_BADOPTION) au_state->violation = PROT_CONSTRAINT; errcode = retval + ERROR_TABLE_BASE_krb5; goto cleanup; } if (!is_local_principal(kdc_active_realm, header_enc_tkt->client)) setflag(c_flags, KRB5_KDB_FLAG_CROSS_REALM); /* Check for protocol transition */ errcode = kdc_process_s4u2self_req(kdc_active_realm, request, header_enc_tkt->client, server, subkey, header_enc_tkt->session, kdc_time, &s4u_x509_user, &client, &status); if (s4u_x509_user != NULL || errcode != 0) { if (s4u_x509_user != NULL) au_state->s4u2self_user = s4u_x509_user->user_id.user; if (errcode == KDC_ERR_POLICY || errcode == KDC_ERR_BADOPTION) au_state->violation = PROT_CONSTRAINT; au_state->status = status; kau_s4u2self(kdc_context, errcode ? FALSE : TRUE, au_state); au_state->s4u2self_user = NULL; } if (errcode) goto cleanup; if (s4u_x509_user != NULL) { setflag(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION); if (is_referral) { /* The requesting server appears to no longer exist, and we found * a referral instead. Treat this as a server lookup failure. */ errcode = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; status = "LOOKING_UP_SERVER"; goto cleanup; } } /* Deal with user-to-user and constrained delegation */ errcode = decrypt_2ndtkt(kdc_active_realm, request, c_flags, &stkt_server, &status); if (errcode) goto cleanup; if (isflagset(request->kdc_options, KDC_OPT_CNAME_IN_ADDL_TKT)) { /* Do constrained delegation protocol and authorization checks */ errcode = kdc_process_s4u2proxy_req(kdc_active_realm, request, request->second_ticket[st_idx]->enc_part2, stkt_server, header_ticket->enc_part2->client, request->server, &status); if (errcode == KDC_ERR_POLICY || errcode == KDC_ERR_BADOPTION) au_state->violation = PROT_CONSTRAINT; else if (errcode) au_state->violation = LOCAL_POLICY; au_state->status = status; retval = kau_make_tkt_id(kdc_context, request->second_ticket[st_idx], &au_state->evid_tkt_id); if (retval) { status = "GENERATE_TICKET_ID"; errcode = retval; goto cleanup; } kau_s4u2proxy(kdc_context, errcode ? FALSE : TRUE, au_state); if (errcode) goto cleanup; setflag(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION); assert(krb5_is_tgs_principal(header_ticket->server)); assert(client == NULL); /* assured by kdc_process_s4u2self_req() */ client = stkt_server; stkt_server = NULL; } else if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) { krb5_db_free_principal(kdc_context, stkt_server); stkt_server = NULL; } else assert(stkt_server == NULL); au_state->stage = ISSUE_TKT; errcode = gen_session_key(kdc_active_realm, request, server, &session_key, &status); if (errcode) goto cleanup; /* * subject_tkt will refer to the evidence ticket (for constrained * delegation) or the TGT. The distinction from header_enc_tkt is * necessary because the TGS signature only protects some fields: * the others could be forged by a malicious server. 
*/ if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) subject_tkt = request->second_ticket[st_idx]->enc_part2; else subject_tkt = header_enc_tkt; authtime = subject_tkt->times.authtime; /* Extract auth indicators from the subject ticket, except for S4U2Proxy * requests (where the client didn't authenticate). */ if (s4u_x509_user == NULL) { errcode = get_auth_indicators(kdc_context, subject_tkt, local_tgt, &auth_indicators); if (errcode) { status = "GET_AUTH_INDICATORS"; goto cleanup; } } errcode = check_indicators(kdc_context, server, auth_indicators); if (errcode) { status = "HIGHER_AUTHENTICATION_REQUIRED"; goto cleanup; } if (is_referral) ticket_reply.server = server->princ; else ticket_reply.server = request->server; /* XXX careful for realm... */ enc_tkt_reply.flags = OPTS2FLAGS(request->kdc_options); enc_tkt_reply.flags |= COPY_TKT_FLAGS(header_enc_tkt->flags); enc_tkt_reply.times.starttime = 0; if (isflagset(server->attributes, KRB5_KDB_OK_AS_DELEGATE)) setflag(enc_tkt_reply.flags, TKT_FLG_OK_AS_DELEGATE); /* Indicate support for encrypted padata (RFC 6806). */ setflag(enc_tkt_reply.flags, TKT_FLG_ENC_PA_REP); /* don't use new addresses unless forwarded, see below */ enc_tkt_reply.caddrs = header_enc_tkt->caddrs; /* noaddrarray[0] = 0; */ reply_encpart.caddrs = 0;/* optional...don't put it in */ reply_encpart.enc_padata = NULL; /* * It should be noted that local policy may affect the * processing of any of these flags. For example, some * realms may refuse to issue renewable tickets */ if (isflagset(request->kdc_options, KDC_OPT_FORWARDABLE)) { if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) { /* * If S4U2Self principal is not forwardable, then mark ticket as * unforwardable. This behaviour matches Windows, but it is * different to the MIT AS-REQ path, which returns an error * (KDC_ERR_POLICY) if forwardable tickets cannot be issued. * * Consider this block the S4U2Self equivalent to * validate_forwardable(). */ if (client != NULL && isflagset(client->attributes, KRB5_KDB_DISALLOW_FORWARDABLE)) clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); /* * Forwardable flag is propagated along referral path. */ else if (!isflagset(header_enc_tkt->flags, TKT_FLG_FORWARDABLE)) clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); /* * OK_TO_AUTH_AS_DELEGATE must be set on the service requesting * S4U2Self in order for forwardable tickets to be returned. */ else if (!is_referral && !isflagset(server->attributes, KRB5_KDB_OK_TO_AUTH_AS_DELEGATE)) clear(enc_tkt_reply.flags, TKT_FLG_FORWARDABLE); } } if (isflagset(request->kdc_options, KDC_OPT_FORWARDED) || isflagset(request->kdc_options, KDC_OPT_PROXY)) { /* include new addresses in ticket & reply */ enc_tkt_reply.caddrs = request->addresses; reply_encpart.caddrs = request->addresses; } /* We don't currently handle issuing anonymous tickets based on * non-anonymous ones, so just ignore the option. 
*/ if (isflagset(request->kdc_options, KDC_OPT_REQUEST_ANONYMOUS) && !isflagset(header_enc_tkt->flags, TKT_FLG_ANONYMOUS)) clear(enc_tkt_reply.flags, TKT_FLG_ANONYMOUS); if (isflagset(request->kdc_options, KDC_OPT_POSTDATED)) { setflag(enc_tkt_reply.flags, TKT_FLG_INVALID); enc_tkt_reply.times.starttime = request->from; } else enc_tkt_reply.times.starttime = kdc_time; if (isflagset(request->kdc_options, KDC_OPT_VALIDATE)) { assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0); /* BEWARE of allocation hanging off of ticket & enc_part2, it belongs to the caller */ ticket_reply = *(header_ticket); enc_tkt_reply = *(header_ticket->enc_part2); enc_tkt_reply.authorization_data = NULL; clear(enc_tkt_reply.flags, TKT_FLG_INVALID); } if (isflagset(request->kdc_options, KDC_OPT_RENEW)) { krb5_timestamp old_starttime; krb5_deltat old_life; assert(isflagset(c_flags, KRB5_KDB_FLAGS_S4U) == 0); /* BEWARE of allocation hanging off of ticket & enc_part2, it belongs to the caller */ ticket_reply = *(header_ticket); enc_tkt_reply = *(header_ticket->enc_part2); enc_tkt_reply.authorization_data = NULL; old_starttime = enc_tkt_reply.times.starttime ? enc_tkt_reply.times.starttime : enc_tkt_reply.times.authtime; old_life = ts_delta(enc_tkt_reply.times.endtime, old_starttime); enc_tkt_reply.times.starttime = kdc_time; enc_tkt_reply.times.endtime = ts_min(header_ticket->enc_part2->times.renew_till, ts_incr(kdc_time, old_life)); } else { /* not a renew request */ enc_tkt_reply.times.starttime = kdc_time; kdc_get_ticket_endtime(kdc_active_realm, enc_tkt_reply.times.starttime, header_enc_tkt->times.endtime, request->till, client, server, &enc_tkt_reply.times.endtime); } kdc_get_ticket_renewtime(kdc_active_realm, request, header_enc_tkt, client, server, &enc_tkt_reply); /* * Set authtime to be the same as header or evidence ticket's */ enc_tkt_reply.times.authtime = authtime; /* starttime is optional, and treated as authtime if not present. so we can nuke it if it matches */ if (enc_tkt_reply.times.starttime == enc_tkt_reply.times.authtime) enc_tkt_reply.times.starttime = 0; if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION)) { altcprinc = s4u_x509_user->user_id.user; } else if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) { altcprinc = subject_tkt->client; } else { altcprinc = NULL; } if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) { krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2; encrypting_key = *(t2enc->session); } else { /* * Find the server key */ if ((errcode = krb5_dbe_find_enctype(kdc_context, server, -1, /* ignore keytype */ -1, /* Ignore salttype */ 0, /* Get highest kvno */ &server_key))) { status = "FINDING_SERVER_KEY"; goto cleanup; } /* * Convert server.key into a real key * (it may be encrypted in the database) */ if ((errcode = krb5_dbe_decrypt_key_data(kdc_context, NULL, server_key, &encrypting_key, NULL))) { status = "DECRYPT_SERVER_KEY"; goto cleanup; } } if (isflagset(c_flags, KRB5_KDB_FLAG_CONSTRAINED_DELEGATION)) { /* * Don't allow authorization data to be disabled if constrained * delegation is requested. We don't want to deny the server * the ability to validate that delegation was used. */ clear(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED); } if (isflagset(server->attributes, KRB5_KDB_NO_AUTH_DATA_REQUIRED) == 0) { /* * If we are not doing protocol transition/constrained delegation * try to lookup the client principal so plugins can add additional * authorization information. 
* * Always validate authorization data for constrained delegation * because we must validate the KDC signatures. */ if (!isflagset(c_flags, KRB5_KDB_FLAGS_S4U)) { /* Generate authorization data so we can include it in ticket */ setflag(c_flags, KRB5_KDB_FLAG_INCLUDE_PAC); /* Map principals from foreign (possibly non-AD) realms */ setflag(c_flags, KRB5_KDB_FLAG_MAP_PRINCIPALS); assert(client == NULL); /* should not have been set already */ errcode = krb5_db_get_principal(kdc_context, subject_tkt->client, c_flags, &client); } } if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) && !isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM)) enc_tkt_reply.client = s4u_x509_user->user_id.user; else enc_tkt_reply.client = subject_tkt->client; enc_tkt_reply.session = &session_key; enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS; enc_tkt_reply.transited.tr_contents = empty_string; /* equivalent of "" */ /* * Only add the realm of the presented tgt to the transited list if * it is different than the local realm (cross-realm) and it is different * than the realm of the client (since the realm of the client is already * implicitly part of the transited list and should not be explicitly * listed). */ /* realm compare is like strcmp, but knows how to deal with these args */ if (krb5_realm_compare(kdc_context, header_ticket->server, tgs_server) || krb5_realm_compare(kdc_context, header_ticket->server, enc_tkt_reply.client)) { /* tgt issued by local realm or issued by realm of client */ enc_tkt_reply.transited = header_enc_tkt->transited; } else { /* tgt issued by some other realm and not the realm of the client */ /* assemble new transited field into allocated storage */ if (header_enc_tkt->transited.tr_type != KRB5_DOMAIN_X500_COMPRESS) { status = "VALIDATE_TRANSIT_TYPE"; errcode = KRB5KDC_ERR_TRTYPE_NOSUPP; goto cleanup; } memset(&enc_tkt_reply.transited, 0, sizeof(enc_tkt_reply.transited)); enc_tkt_reply.transited.tr_type = KRB5_DOMAIN_X500_COMPRESS; if ((errcode = add_to_transited(&header_enc_tkt->transited.tr_contents, &enc_tkt_reply.transited.tr_contents, header_ticket->server, enc_tkt_reply.client, request->server))) { status = "ADD_TO_TRANSITED_LIST"; goto cleanup; } newtransited = 1; } if (isflagset(c_flags, KRB5_KDB_FLAG_CROSS_REALM)) { errcode = validate_transit_path(kdc_context, header_enc_tkt->client, server, header_server); if (errcode) { status = "NON_TRANSITIVE"; goto cleanup; } } if (!isflagset (request->kdc_options, KDC_OPT_DISABLE_TRANSITED_CHECK)) { errcode = kdc_check_transited_list (kdc_active_realm, &enc_tkt_reply.transited.tr_contents, krb5_princ_realm (kdc_context, header_enc_tkt->client), krb5_princ_realm (kdc_context, request->server)); if (errcode == 0) { setflag (enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED); } else { log_tgs_badtrans(kdc_context, cprinc, sprinc, &enc_tkt_reply.transited.tr_contents, errcode); } } else krb5_klog_syslog(LOG_INFO, _("not checking transit path")); if (kdc_active_realm->realm_reject_bad_transit && !isflagset(enc_tkt_reply.flags, TKT_FLG_TRANSIT_POLICY_CHECKED)) { errcode = KRB5KDC_ERR_POLICY; status = "BAD_TRANSIT"; au_state->violation = LOCAL_POLICY; goto cleanup; } errcode = handle_authdata(kdc_context, c_flags, client, server, header_server, local_tgt, subkey != NULL ? subkey : header_ticket->enc_part2->session, &encrypting_key, /* U2U or server key */ header_key, pkt, request, s4u_x509_user ? 
s4u_x509_user->user_id.user : NULL, subject_tkt, auth_indicators, &enc_tkt_reply); if (errcode) { krb5_klog_syslog(LOG_INFO, _("TGS_REQ : handle_authdata (%d)"), errcode); status = "HANDLE_AUTHDATA"; goto cleanup; } ticket_reply.enc_part2 = &enc_tkt_reply; /* * If we are doing user-to-user authentication, then make sure * that the client for the second ticket matches the request * server, and then encrypt the ticket using the session key of * the second ticket. */ if (isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) { /* * Make sure the client for the second ticket matches * requested server. */ krb5_enc_tkt_part *t2enc = request->second_ticket[st_idx]->enc_part2; krb5_principal client2 = t2enc->client; if (!krb5_principal_compare(kdc_context, request->server, client2)) { altcprinc = client2; errcode = KRB5KDC_ERR_SERVER_NOMATCH; status = "2ND_TKT_MISMATCH"; au_state->status = status; kau_u2u(kdc_context, FALSE, au_state); goto cleanup; } ticket_kvno = 0; ticket_reply.enc_part.enctype = t2enc->session->enctype; kau_u2u(kdc_context, TRUE, au_state); st_idx++; } else { ticket_kvno = server_key->key_data_kvno; } errcode = krb5_encrypt_tkt_part(kdc_context, &encrypting_key, &ticket_reply); if (!isflagset(request->kdc_options, KDC_OPT_ENC_TKT_IN_SKEY)) krb5_free_keyblock_contents(kdc_context, &encrypting_key); if (errcode) { status = "ENCRYPT_TICKET"; goto cleanup; } ticket_reply.enc_part.kvno = ticket_kvno; /* Start assembling the response */ au_state->stage = ENCR_REP; reply.msg_type = KRB5_TGS_REP; if (isflagset(c_flags, KRB5_KDB_FLAG_PROTOCOL_TRANSITION) && krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_S4U_X509_USER) != NULL) { errcode = kdc_make_s4u2self_rep(kdc_context, subkey, header_ticket->enc_part2->session, s4u_x509_user, &reply, &reply_encpart); if (errcode) { status = "MAKE_S4U2SELF_PADATA"; au_state->status = status; } kau_s4u2self(kdc_context, errcode ? FALSE : TRUE, au_state); if (errcode) goto cleanup; } reply.client = enc_tkt_reply.client; reply.enc_part.kvno = 0;/* We are using the session key */ reply.ticket = &ticket_reply; reply_encpart.session = &session_key; reply_encpart.nonce = request->nonce; /* copy the time fields */ reply_encpart.times = enc_tkt_reply.times; nolrentry.lr_type = KRB5_LRQ_NONE; nolrentry.value = 0; nolrentry.magic = 0; nolrarray[0] = &nolrentry; nolrarray[1] = 0; reply_encpart.last_req = nolrarray; /* not available for TGS reqs */ reply_encpart.key_exp = 0;/* ditto */ reply_encpart.flags = enc_tkt_reply.flags; reply_encpart.server = ticket_reply.server; /* use the session key in the ticket, unless there's a subsession key in the AP_REQ */ reply.enc_part.enctype = subkey ? subkey->enctype : header_ticket->enc_part2->session->enctype; errcode = kdc_fast_response_handle_padata(state, request, &reply, subkey ? 
subkey->enctype : header_ticket->enc_part2->session->enctype); if (errcode != 0) { status = "MAKE_FAST_RESPONSE"; goto cleanup; } errcode = kdc_fast_handle_reply_key(state, subkey ? subkey : header_ticket->enc_part2->session, &reply_key); if (errcode) { status = "MAKE_FAST_REPLY_KEY"; goto cleanup; } errcode = return_enc_padata(kdc_context, pkt, request, reply_key, server, &reply_encpart, is_referral && isflagset(s_flags, KRB5_KDB_FLAG_CANONICALIZE)); if (errcode) { status = "KDC_RETURN_ENC_PADATA"; goto cleanup; } errcode = kau_make_tkt_id(kdc_context, &ticket_reply, &au_state->tkt_out_id); if (errcode) { status = "GENERATE_TICKET_ID"; goto cleanup; } if (kdc_fast_hide_client(state)) reply.client = (krb5_principal)krb5_anonymous_principal(); errcode = krb5_encode_kdc_rep(kdc_context, KRB5_TGS_REP, &reply_encpart, subkey ? 1 : 0, reply_key, &reply, response); if (errcode) { status = "ENCODE_KDC_REP"; } else { status = "ISSUE"; } memset(ticket_reply.enc_part.ciphertext.data, 0, ticket_reply.enc_part.ciphertext.length); free(ticket_reply.enc_part.ciphertext.data); /* these parts are left on as a courtesy from krb5_encode_kdc_rep so we can use them in raw form if needed. But, we don't... */ memset(reply.enc_part.ciphertext.data, 0, reply.enc_part.ciphertext.length); free(reply.enc_part.ciphertext.data); cleanup: if (status == NULL) status = "UNKNOWN_REASON"; if (reply_key) krb5_free_keyblock(kdc_context, reply_key); if (errcode) emsg = krb5_get_error_message(kdc_context, errcode); au_state->status = status; if (!errcode) au_state->reply = &reply; kau_tgs_req(kdc_context, errcode ? FALSE : TRUE, au_state); kau_free_kdc_req(au_state); log_tgs_req(kdc_context, from, request, &reply, cprinc, sprinc, altcprinc, authtime, c_flags, status, errcode, emsg); if (errcode) { krb5_free_error_message(kdc_context, emsg); emsg = NULL; } if (errcode) { int got_err = 0; if (status == 0) { status = krb5_get_error_message(kdc_context, errcode); got_err = 1; } errcode -= ERROR_TABLE_BASE_krb5; if (errcode < 0 || errcode > KRB_ERR_MAX) errcode = KRB_ERR_GENERIC; retval = prepare_error_tgs(state, request, header_ticket, errcode, (server != NULL) ?
server->princ : NULL, response, status, e_data); if (got_err) { krb5_free_error_message (kdc_context, status); status = 0; } } if (header_ticket != NULL) krb5_free_ticket(kdc_context, header_ticket); if (request != NULL) krb5_free_kdc_req(kdc_context, request); if (state) kdc_free_rstate(state); krb5_db_free_principal(kdc_context, server); krb5_db_free_principal(kdc_context, stkt_server); krb5_db_free_principal(kdc_context, header_server); krb5_db_free_principal(kdc_context, client); krb5_db_free_principal(kdc_context, local_tgt_storage); if (session_key.contents != NULL) krb5_free_keyblock_contents(kdc_context, &session_key); if (newtransited) free(enc_tkt_reply.transited.tr_contents.data); if (s4u_x509_user != NULL) krb5_free_pa_s4u_x509_user(kdc_context, s4u_x509_user); if (kdc_issued_auth_data != NULL) krb5_free_authdata(kdc_context, kdc_issued_auth_data); if (subkey != NULL) krb5_free_keyblock(kdc_context, subkey); if (header_key != NULL) krb5_free_keyblock(kdc_context, header_key); if (reply.padata) krb5_free_pa_data(kdc_context, reply.padata); if (reply_encpart.enc_padata) krb5_free_pa_data(kdc_context, reply_encpart.enc_padata); if (enc_tkt_reply.authorization_data != NULL) krb5_free_authdata(kdc_context, enc_tkt_reply.authorization_data); krb5_free_pa_data(kdc_context, e_data); k5_free_data_ptr_list(auth_indicators); return retval; } static krb5_error_code prepare_error_tgs (struct kdc_request_state *state, krb5_kdc_req *request, krb5_ticket *ticket, int error, krb5_principal canon_server, krb5_data **response, const char *status, krb5_pa_data **e_data) { krb5_error errpkt; krb5_error_code retval = 0; krb5_data *scratch, *e_data_asn1 = NULL, *fast_edata = NULL; kdc_realm_t *kdc_active_realm = state->realm_data; errpkt.magic = KV5M_ERROR; errpkt.ctime = request->nonce; errpkt.cusec = 0; if ((retval = krb5_us_timeofday(kdc_context, &errpkt.stime, &errpkt.susec))) return(retval); errpkt.error = error; errpkt.server = request->server; if (ticket && ticket->enc_part2) errpkt.client = ticket->enc_part2->client; else errpkt.client = NULL; errpkt.text.length = strlen(status); if (!(errpkt.text.data = strdup(status))) return ENOMEM; if (!(scratch = (krb5_data *)malloc(sizeof(*scratch)))) { free(errpkt.text.data); return ENOMEM; } if (e_data != NULL) { retval = encode_krb5_padata_sequence(e_data, &e_data_asn1); if (retval) { free(scratch); free(errpkt.text.data); return retval; } errpkt.e_data = *e_data_asn1; } else errpkt.e_data = empty_data(); retval = kdc_fast_handle_error(kdc_context, state, request, e_data, &errpkt, &fast_edata); if (retval) { free(scratch); free(errpkt.text.data); krb5_free_data(kdc_context, e_data_asn1); return retval; } if (fast_edata) errpkt.e_data = *fast_edata; if (kdc_fast_hide_client(state) && errpkt.client != NULL) errpkt.client = (krb5_principal)krb5_anonymous_principal(); retval = krb5_mk_error(kdc_context, &errpkt, scratch); free(errpkt.text.data); krb5_free_data(kdc_context, e_data_asn1); krb5_free_data(kdc_context, fast_edata); if (retval) free(scratch); else *response = scratch; return retval; } /* KDC options that require a second ticket */ #define STKT_OPTIONS (KDC_OPT_CNAME_IN_ADDL_TKT | KDC_OPT_ENC_TKT_IN_SKEY) /* * Get the key for the second ticket, if any, and decrypt it. 
*/ static krb5_error_code decrypt_2ndtkt(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req, krb5_flags flags, krb5_db_entry **server_out, const char **status) { krb5_error_code retval; krb5_db_entry *server = NULL; krb5_keyblock *key; krb5_kvno kvno; krb5_ticket *stkt; if (!(req->kdc_options & STKT_OPTIONS)) return 0; stkt = req->second_ticket[0]; retval = kdc_get_server_key(kdc_context, stkt, flags, TRUE, /* match_enctype */ &server, &key, &kvno); if (retval != 0) { *status = "2ND_TKT_SERVER"; goto cleanup; } retval = krb5_decrypt_tkt_part(kdc_context, key, req->second_ticket[0]); krb5_free_keyblock(kdc_context, key); if (retval != 0) { *status = "2ND_TKT_DECRYPT"; goto cleanup; } *server_out = server; server = NULL; cleanup: krb5_db_free_principal(kdc_context, server); return retval; } static krb5_error_code get_2ndtkt_enctype(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req, krb5_enctype *useenctype, const char **status) { krb5_enctype etype; krb5_ticket *stkt = req->second_ticket[0]; int i; etype = stkt->enc_part2->session->enctype; if (!krb5_c_valid_enctype(etype)) { *status = "BAD_ETYPE_IN_2ND_TKT"; return KRB5KDC_ERR_ETYPE_NOSUPP; } for (i = 0; i < req->nktypes; i++) { if (req->ktype[i] == etype) { *useenctype = etype; break; } } return 0; } static krb5_error_code gen_session_key(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req, krb5_db_entry *server, krb5_keyblock *skey, const char **status) { krb5_error_code retval; krb5_enctype useenctype = 0; /* * Some special care needs to be taken in the user-to-user * case, since we don't know what keytypes the application server * which is doing user-to-user authentication can support. We * know that it at least must be able to support the encryption * type of the session key in the TGT, since otherwise it won't be * able to decrypt the U2U ticket! So we use that in preference * to anything else. */ if (req->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) { retval = get_2ndtkt_enctype(kdc_active_realm, req, &useenctype, status); if (retval != 0) goto cleanup; } if (useenctype == 0) { useenctype = select_session_keytype(kdc_active_realm, server, req->nktypes, req->ktype); } if (useenctype == 0) { /* unsupported ktype */ *status = "BAD_ENCRYPTION_TYPE"; retval = KRB5KDC_ERR_ETYPE_NOSUPP; goto cleanup; } retval = krb5_c_make_random_key(kdc_context, useenctype, skey); if (retval != 0) { /* random key failed */ *status = "MAKE_RANDOM_KEY"; goto cleanup; } cleanup: return retval; } /* * The request seems to be for a ticket-granting service somewhere else, * but we don't have a ticket for the final TGS. Try to give the requestor * some intermediate realm. 
*/ static krb5_error_code find_alternate_tgs(kdc_realm_t *kdc_active_realm, krb5_principal princ, krb5_db_entry **server_ptr, const char **status) { krb5_error_code retval; krb5_principal *plist = NULL, *pl2; krb5_data tmp; krb5_db_entry *server = NULL; *server_ptr = NULL; assert(is_cross_tgs_principal(princ)); if ((retval = krb5_walk_realm_tree(kdc_context, krb5_princ_realm(kdc_context, princ), krb5_princ_component(kdc_context, princ, 1), &plist, KRB5_REALM_BRANCH_CHAR))) { goto cleanup; } /* move to the end */ for (pl2 = plist; *pl2; pl2++); /* the first entry in this array is for krbtgt/local@local, so we ignore it */ while (--pl2 > plist) { tmp = *krb5_princ_realm(kdc_context, *pl2); krb5_princ_set_realm(kdc_context, *pl2, krb5_princ_realm(kdc_context, princ)); retval = db_get_svc_princ(kdc_context, *pl2, 0, &server, status); krb5_princ_set_realm(kdc_context, *pl2, &tmp); if (retval == KRB5_KDB_NOENTRY) continue; else if (retval) goto cleanup; log_tgs_alt_tgt(kdc_context, server->princ); *server_ptr = server; server = NULL; goto cleanup; } cleanup: if (retval == 0 && *server_ptr == NULL) retval = KRB5_KDB_NOENTRY; if (retval != 0) *status = "UNKNOWN_SERVER"; krb5_free_realm_tree(kdc_context, plist); krb5_db_free_principal(kdc_context, server); return retval; } /* Return true if item is an element of the space/comma-separated list. */ static krb5_boolean in_list(const char *list, const char *item) { const char *p; int len = strlen(item); if (list == NULL) return FALSE; for (p = strstr(list, item); p != NULL; p = strstr(p + 1, item)) { if ((p == list || isspace((unsigned char)p[-1]) || p[-1] == ',') && (p[len] == '\0' || isspace((unsigned char)p[len]) || p[len] == ',')) return TRUE; } return FALSE; } /* * Check whether the request satisfies the conditions for generating a referral * TGT. The caller checks whether the hostname component looks like a FQDN. */ static krb5_boolean is_referral_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request) { krb5_boolean ret = FALSE; char *stype = NULL; char *hostbased = kdc_active_realm->realm_hostbased; char *no_referral = kdc_active_realm->realm_no_referral; if (!(request->kdc_options & KDC_OPT_CANONICALIZE)) return FALSE; if (request->kdc_options & KDC_OPT_ENC_TKT_IN_SKEY) return FALSE; if (krb5_princ_size(kdc_context, request->server) != 2) return FALSE; stype = data2string(krb5_princ_component(kdc_context, request->server, 0)); if (stype == NULL) return FALSE; switch (krb5_princ_type(kdc_context, request->server)) { case KRB5_NT_UNKNOWN: /* Allow referrals for NT-UNKNOWN principals, if configured. */ if (!in_list(hostbased, stype) && !in_list(hostbased, "*")) goto cleanup; /* FALLTHROUGH */ case KRB5_NT_SRV_HST: case KRB5_NT_SRV_INST: /* Deny referrals for specific service types, if configured. */ if (in_list(no_referral, stype) || in_list(no_referral, "*")) goto cleanup; ret = TRUE; break; default: goto cleanup; } cleanup: free(stype); return ret; } /* * Find a remote realm TGS principal for an unknown host-based service * principal. 
*/ static krb5_int32 find_referral_tgs(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, krb5_principal *krbtgt_princ) { krb5_error_code retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; char **realms = NULL, *hostname = NULL; krb5_data srealm = request->server->realm; if (!is_referral_req(kdc_active_realm, request)) goto cleanup; hostname = data2string(krb5_princ_component(kdc_context, request->server, 1)); if (hostname == NULL) { retval = ENOMEM; goto cleanup; } /* If the hostname doesn't contain a '.', it's not a FQDN. */ if (strchr(hostname, '.') == NULL) goto cleanup; retval = krb5_get_host_realm(kdc_context, hostname, &realms); if (retval) { /* no match found */ kdc_err(kdc_context, retval, "unable to find realm of host"); goto cleanup; } /* Don't return a referral to the empty realm or the service realm. */ if (realms == NULL || realms[0] == NULL || *realms[0] == '\0' || data_eq_string(srealm, realms[0])) { retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; goto cleanup; } retval = krb5_build_principal(kdc_context, krbtgt_princ, srealm.length, srealm.data, "krbtgt", realms[0], (char *)0); cleanup: krb5_free_host_realm(kdc_context, realms); free(hostname); return retval; } static krb5_error_code db_get_svc_princ(krb5_context ctx, krb5_principal princ, krb5_flags flags, krb5_db_entry **server, const char **status) { krb5_error_code ret; ret = krb5_db_get_principal(ctx, princ, flags, server); if (ret == KRB5_KDB_CANTLOCK_DB) ret = KRB5KDC_ERR_SVC_UNAVAILABLE; if (ret != 0) { *status = "LOOKING_UP_SERVER"; } return ret; } static krb5_error_code search_sprinc(kdc_realm_t *kdc_active_realm, krb5_kdc_req *req, krb5_flags flags, krb5_db_entry **server, const char **status) { krb5_error_code ret; krb5_principal princ = req->server; krb5_principal reftgs = NULL; krb5_boolean allow_referral; /* Do not allow referrals for u2u or ticket modification requests, because * the server is supposed to match an already-issued ticket. */ allow_referral = !(req->kdc_options & NO_REFERRAL_OPTION); if (!allow_referral) flags &= ~KRB5_KDB_FLAG_CANONICALIZE; ret = db_get_svc_princ(kdc_context, princ, flags, server, status); if (ret == 0 || ret != KRB5_KDB_NOENTRY || !allow_referral) goto cleanup; if (!is_cross_tgs_principal(req->server)) { ret = find_referral_tgs(kdc_active_realm, req, &reftgs); if (ret != 0) goto cleanup; ret = db_get_svc_princ(kdc_context, reftgs, flags, server, status); if (ret == 0 || ret != KRB5_KDB_NOENTRY) goto cleanup; princ = reftgs; } ret = find_alternate_tgs(kdc_active_realm, princ, server, status); cleanup: if (ret != 0 && ret != KRB5KDC_ERR_SVC_UNAVAILABLE) { ret = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; if (*status == NULL) *status = "LOOKING_UP_SERVER"; } krb5_free_principal(kdc_context, reftgs); return ret; }
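/*
 * A short, self-contained usage sketch for the in_list() helper above.
 * The IN_LIST_EXAMPLE guard and main() are hypothetical illustration
 * only, not part of the KDC sources; the sketch shows that a match
 * must be delimited by the start/end of the string, whitespace, or a
 * comma, so "host" does not match inside "hostname".
 */
#ifdef IN_LIST_EXAMPLE
#include <stdio.h>

int
main(void)
{
    printf("%d\n", in_list("host ldap,cifs", "host"));  /* 1: leading, space-delimited */
    printf("%d\n", in_list("ldap,host", "host"));       /* 1: comma-delimited, at end of list */
    printf("%d\n", in_list("hostname ldap", "host"));   /* 0: only a substring of a longer token */
    printf("%d\n", in_list(NULL, "host"));              /* 0: a NULL list never matches */
    return 0;
}
#endif /* IN_LIST_EXAMPLE */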
./CrossVul/dataset_final_sorted/CWE-617/c/good_2571_1
crossvul-cpp_data_bad_1771_3
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #include <unistd.h> #include <errno.h> #include <assert.h> #include <time.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/ioctl.h> inline static int lldpd_af_to_lldp_proto(int af) { switch (af) { case LLDPD_AF_IPV4: return LLDP_MGMT_ADDR_IP4; case LLDPD_AF_IPV6: return LLDP_MGMT_ADDR_IP6; default: return LLDP_MGMT_ADDR_NONE; } } inline static int lldpd_af_from_lldp_proto(int proto) { switch (proto) { case LLDP_MGMT_ADDR_IP4: return LLDPD_AF_IPV4; case LLDP_MGMT_ADDR_IP6: return LLDPD_AF_IPV6; default: return LLDPD_AF_UNSPEC; } } static int _lldp_send(struct lldpd *global, struct lldpd_hardware *hardware, u_int8_t c_id_subtype, char *c_id, int c_id_len, u_int8_t p_id_subtype, char *p_id, int p_id_len, int shutdown) { struct lldpd_port *port; struct lldpd_chassis *chassis; struct lldpd_frame *frame; int length; u_int8_t *packet, *pos, *tlv; struct lldpd_mgmt *mgmt; int proto; u_int8_t mcastaddr[] = LLDP_MULTICAST_ADDR; #ifdef ENABLE_DOT1 const u_int8_t dot1[] = LLDP_TLV_ORG_DOT1; struct lldpd_vlan *vlan; struct lldpd_ppvid *ppvid; struct lldpd_pi *pi; #endif #ifdef ENABLE_DOT3 const u_int8_t dot3[] = LLDP_TLV_ORG_DOT3; #endif #ifdef ENABLE_LLDPMED int i; const u_int8_t med[] = LLDP_TLV_ORG_MED; #endif #ifdef ENABLE_CUSTOM struct lldpd_custom *custom; #endif port = &hardware->h_lport; chassis = port->p_chassis; length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( /* LLDP multicast address */ POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && /* Source MAC address */ POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && /* LLDP frame */ POKE_UINT16(ETHERTYPE_LLDP))) goto toobig; /* Chassis ID */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_CHASSIS_ID) && POKE_UINT8(c_id_subtype) && POKE_BYTES(c_id, c_id_len) && POKE_END_LLDP_TLV)) goto toobig; /* Port ID */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_PORT_ID) && POKE_UINT8(p_id_subtype) && POKE_BYTES(p_id, p_id_len) && POKE_END_LLDP_TLV)) goto toobig; /* Time to live */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_TTL) && POKE_UINT16(shutdown?0:chassis->c_ttl) && POKE_END_LLDP_TLV)) goto toobig; if (shutdown) goto end; /* System name */ if (chassis->c_name && *chassis->c_name != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_NAME) && POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) && POKE_END_LLDP_TLV)) goto toobig; } /* System description (skip it if empty) */ if (chassis->c_descr && *chassis->c_descr != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_DESCR) && POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)) && POKE_END_LLDP_TLV)) goto toobig; } /* System capabilities */ if (global->g_config.c_cap_advertise && 
chassis->c_cap_available) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_CAP) && POKE_UINT16(chassis->c_cap_available) && POKE_UINT16(chassis->c_cap_enabled) && POKE_END_LLDP_TLV)) goto toobig; } /* Management addresses */ TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { proto = lldpd_af_to_lldp_proto(mgmt->m_family); assert(proto != LLDP_MGMT_ADDR_NONE); if (!( POKE_START_LLDP_TLV(LLDP_TLV_MGMT_ADDR) && /* Size of the address, including its type */ POKE_UINT8(mgmt->m_addrsize + 1) && POKE_UINT8(proto) && POKE_BYTES(&mgmt->m_addr, mgmt->m_addrsize))) goto toobig; /* Interface port type, OID */ if (mgmt->m_iface == 0) { if (!( /* We don't know the management interface */ POKE_UINT8(LLDP_MGMT_IFACE_UNKNOWN) && POKE_UINT32(0))) goto toobig; } else { if (!( /* We have the index of the management interface */ POKE_UINT8(LLDP_MGMT_IFACE_IFINDEX) && POKE_UINT32(mgmt->m_iface))) goto toobig; } if (!( /* We don't provide an OID for management */ POKE_UINT8(0) && POKE_END_LLDP_TLV)) goto toobig; } /* Port description */ if (port->p_descr && *port->p_descr != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_PORT_DESCR) && POKE_BYTES(port->p_descr, strlen(port->p_descr)) && POKE_END_LLDP_TLV)) goto toobig; } #ifdef ENABLE_DOT1 /* Port VLAN ID */ if(port->p_pvid != 0) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PVID) && POKE_UINT16(port->p_pvid) && POKE_END_LLDP_TLV)) { goto toobig; } } /* Port and Protocol VLAN IDs */ TAILQ_FOREACH(ppvid, &port->p_ppvids, p_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PPVID) && POKE_UINT8(ppvid->p_cap_status) && POKE_UINT16(ppvid->p_ppvid) && POKE_END_LLDP_TLV)) { goto toobig; } } /* VLANs */ TAILQ_FOREACH(vlan, &port->p_vlans, v_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_VLANNAME) && POKE_UINT16(vlan->v_vid) && POKE_UINT8(strlen(vlan->v_name)) && POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) && POKE_END_LLDP_TLV)) goto toobig; } /* Protocol Identities */ TAILQ_FOREACH(pi, &port->p_pids, p_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PI) && POKE_UINT8(pi->p_pi_len) && POKE_BYTES(pi->p_pi, pi->p_pi_len) && POKE_END_LLDP_TLV)) goto toobig; } #endif #ifdef ENABLE_DOT3 /* Aggregation status */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_LA) && /* Bit 0 = capability ; Bit 1 = status */ POKE_UINT8((port->p_aggregid) ? 
3:1) && POKE_UINT32(port->p_aggregid) && POKE_END_LLDP_TLV)) goto toobig; /* MAC/PHY */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_MAC) && POKE_UINT8(port->p_macphy.autoneg_support | (port->p_macphy.autoneg_enabled << 1)) && POKE_UINT16(port->p_macphy.autoneg_advertised) && POKE_UINT16(port->p_macphy.mau_type) && POKE_END_LLDP_TLV)) goto toobig; /* MFS */ if (port->p_mfs) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_MFS) && POKE_UINT16(port->p_mfs) && POKE_END_LLDP_TLV)) goto toobig; } /* Power */ if (port->p_power.devicetype) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_POWER) && POKE_UINT8(( (((2 - port->p_power.devicetype) %(1<< 1))<<0) | (( port->p_power.supported %(1<< 1))<<1) | (( port->p_power.enabled %(1<< 1))<<2) | (( port->p_power.paircontrol %(1<< 1))<<3))) && POKE_UINT8(port->p_power.pairs) && POKE_UINT8(port->p_power.class))) goto toobig; /* 802.3at */ if (port->p_power.powertype != LLDP_DOT3_POWER_8023AT_OFF) { if (!( POKE_UINT8(( (((port->p_power.powertype == LLDP_DOT3_POWER_8023AT_TYPE1)?1:0) << 7) | (((port->p_power.devicetype == LLDP_DOT3_POWER_PSE)?0:1) << 6) | ((port->p_power.source %(1<< 2))<<4) | ((port->p_power.priority %(1<< 2))<<0))) && POKE_UINT16(port->p_power.requested) && POKE_UINT16(port->p_power.allocated))) goto toobig; } if (!(POKE_END_LLDP_TLV)) goto toobig; } #endif #ifdef ENABLE_LLDPMED if (port->p_med_cap_enabled) { /* LLDP-MED cap */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_CAP) && POKE_UINT16(chassis->c_med_cap_available) && POKE_UINT8(chassis->c_med_type) && POKE_END_LLDP_TLV)) goto toobig; /* LLDP-MED inventory */ #define LLDP_INVENTORY(value, subtype) \ if (value) { \ if (!( \ POKE_START_LLDP_TLV(LLDP_TLV_ORG) && \ POKE_BYTES(med, sizeof(med)) && \ POKE_UINT8(subtype) && \ POKE_BYTES(value, \ (strlen(value)>32)?32:strlen(value)) && \ POKE_END_LLDP_TLV)) \ goto toobig; \ } if (port->p_med_cap_enabled & LLDP_MED_CAP_IV) { LLDP_INVENTORY(chassis->c_med_hw, LLDP_TLV_MED_IV_HW); LLDP_INVENTORY(chassis->c_med_fw, LLDP_TLV_MED_IV_FW); LLDP_INVENTORY(chassis->c_med_sw, LLDP_TLV_MED_IV_SW); LLDP_INVENTORY(chassis->c_med_sn, LLDP_TLV_MED_IV_SN); LLDP_INVENTORY(chassis->c_med_manuf, LLDP_TLV_MED_IV_MANUF); LLDP_INVENTORY(chassis->c_med_model, LLDP_TLV_MED_IV_MODEL); LLDP_INVENTORY(chassis->c_med_asset, LLDP_TLV_MED_IV_ASSET); } /* LLDP-MED location */ for (i = 0; i < LLDP_MED_LOCFORMAT_LAST; i++) { if (port->p_med_location[i].format == i + 1) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_LOCATION) && POKE_UINT8(port->p_med_location[i].format) && POKE_BYTES(port->p_med_location[i].data, port->p_med_location[i].data_len) && POKE_END_LLDP_TLV)) goto toobig; } } /* LLDP-MED network policy */ for (i = 0; i < LLDP_MED_APPTYPE_LAST; i++) { if (port->p_med_policy[i].type == i + 1) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_POLICY) && POKE_UINT32(( ((port->p_med_policy[i].type %(1<< 8))<<24) | ((port->p_med_policy[i].unknown %(1<< 1))<<23) | ((port->p_med_policy[i].tagged %(1<< 1))<<22) | /*((0 %(1<< 1))<<21) |*/ ((port->p_med_policy[i].vid %(1<<12))<< 9) | ((port->p_med_policy[i].priority %(1<< 3))<< 6) | ((port->p_med_policy[i].dscp %(1<< 6))<< 0) )) && POKE_END_LLDP_TLV)) goto toobig; } } /* LLDP-MED POE-MDI */ if 
((port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PSE) || (port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PD)) { int devicetype = 0, source = 0; if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_MDI))) goto toobig; switch (port->p_med_power.devicetype) { case LLDP_MED_POW_TYPE_PSE: devicetype = 0; switch (port->p_med_power.source) { case LLDP_MED_POW_SOURCE_PRIMARY: source = 1; break; case LLDP_MED_POW_SOURCE_BACKUP: source = 2; break; case LLDP_MED_POW_SOURCE_RESERVED: source = 3; break; default: source = 0; break; } break; case LLDP_MED_POW_TYPE_PD: devicetype = 1; switch (port->p_med_power.source) { case LLDP_MED_POW_SOURCE_PSE: source = 1; break; case LLDP_MED_POW_SOURCE_LOCAL: source = 2; break; case LLDP_MED_POW_SOURCE_BOTH: source = 3; break; default: source = 0; break; } break; } if (!( POKE_UINT8(( ((devicetype %(1<< 2))<<6) | ((source %(1<< 2))<<4) | ((port->p_med_power.priority %(1<< 4))<<0) )) && POKE_UINT16(port->p_med_power.val) && POKE_END_LLDP_TLV)) goto toobig; } } #endif #ifdef ENABLE_CUSTOM TAILQ_FOREACH(custom, &port->p_custom_list, next) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(custom->oui, sizeof(custom->oui)) && POKE_UINT8(custom->subtype) && POKE_BYTES(custom->oui_info, custom->oui_info_len) && POKE_END_LLDP_TLV)) goto toobig; } #endif end: /* END */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_END) && POKE_END_LLDP_TLV)) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, pos - packet) == -1) { log_warn("lldp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } hardware->h_tx_cnt++; /* We assume that LLDP frame is the reference */ if (!shutdown && (frame = (struct lldpd_frame*)malloc( sizeof(int) + pos - packet)) != NULL) { frame->size = pos - packet; memcpy(&frame->frame, packet, frame->size); if ((hardware->h_lport.p_lastframe == NULL) || (hardware->h_lport.p_lastframe->size != frame->size) || (memcmp(hardware->h_lport.p_lastframe->frame, frame->frame, frame->size) != 0)) { free(hardware->h_lport.p_lastframe); hardware->h_lport.p_lastframe = frame; hardware->h_lport.p_lastchange = time(NULL); } else free(frame); } free(packet); return 0; toobig: free(packet); return E2BIG; } /* Send a shutdown LLDPDU. */ int lldp_send_shutdown(struct lldpd *global, struct lldpd_hardware *hardware) { if (hardware->h_lchassis_previous_id == NULL || hardware->h_lport_previous_id == NULL) return 0; return _lldp_send(global, hardware, hardware->h_lchassis_previous_id_subtype, hardware->h_lchassis_previous_id, hardware->h_lchassis_previous_id_len, hardware->h_lport_previous_id_subtype, hardware->h_lport_previous_id, hardware->h_lport_previous_id_len, 1); } int lldp_send(struct lldpd *global, struct lldpd_hardware *hardware) { struct lldpd_port *port = &hardware->h_lport; struct lldpd_chassis *chassis = port->p_chassis; int ret; /* Check if we have a change. 
*/ if (hardware->h_lchassis_previous_id != NULL && hardware->h_lport_previous_id != NULL && (hardware->h_lchassis_previous_id_subtype != chassis->c_id_subtype || hardware->h_lchassis_previous_id_len != chassis->c_id_len || hardware->h_lport_previous_id_subtype != port->p_id_subtype || hardware->h_lport_previous_id_len != port->p_id_len || memcmp(hardware->h_lchassis_previous_id, chassis->c_id, chassis->c_id_len) || memcmp(hardware->h_lport_previous_id, port->p_id, port->p_id_len))) { log_info("lldp", "MSAP has changed for port %s, sending a shutdown LLDPDU", hardware->h_ifname); if ((ret = lldp_send_shutdown(global, hardware)) != 0) return ret; } log_debug("lldp", "send LLDP PDU to %s", hardware->h_ifname); if ((ret = _lldp_send(global, hardware, chassis->c_id_subtype, chassis->c_id, chassis->c_id_len, port->p_id_subtype, port->p_id, port->p_id_len, 0)) != 0) return ret; /* Record current chassis and port ID */ free(hardware->h_lchassis_previous_id); hardware->h_lchassis_previous_id_subtype = chassis->c_id_subtype; hardware->h_lchassis_previous_id_len = chassis->c_id_len; if ((hardware->h_lchassis_previous_id = malloc(chassis->c_id_len)) != NULL) memcpy(hardware->h_lchassis_previous_id, chassis->c_id, chassis->c_id_len); free(hardware->h_lport_previous_id); hardware->h_lport_previous_id_subtype = port->p_id_subtype; hardware->h_lport_previous_id_len = port->p_id_len; if ((hardware->h_lport_previous_id = malloc(port->p_id_len)) != NULL) memcpy(hardware->h_lport_previous_id, port->p_id, port->p_id_len); return 0; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_size < (x)) { \ log_warnx("lldp", name " TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) int lldp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; const char lldpaddr[] = LLDP_MULTICAST_ADDR; const char dot1[] = LLDP_TLV_ORG_DOT1; const char dot3[] = LLDP_TLV_ORG_DOT3; const char med[] = LLDP_TLV_ORG_MED; const char dcbx[] = LLDP_TLV_ORG_DCBX; unsigned char orgid[3]; int length, gotend = 0, ttl_received = 0; int tlv_size, tlv_type, tlv_subtype; u_int8_t *pos, *tlv; char *b; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan = NULL; int vlan_len; struct lldpd_ppvid *ppvid; struct lldpd_pi *pi = NULL; #endif struct lldpd_mgmt *mgmt; int af; u_int8_t addr_str_length, addr_str_buffer[32]; u_int8_t addr_family, addr_length, *addr_ptr, iface_subtype; u_int32_t iface_number, iface; #ifdef ENABLE_CUSTOM struct lldpd_custom *custom = NULL; #endif log_debug("lldp", "receive LLDP PDU on %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("lldp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("lldp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); TAILQ_INIT(&port->p_ppvids); TAILQ_INIT(&port->p_pids); #endif #ifdef ENABLE_CUSTOM TAILQ_INIT(&port->p_custom_list); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t)) { log_warnx("lldp", "too short frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(lldpaddr, ETHER_ADDR_LEN) != 0) { log_info("lldp", "frame not targeted at LLDP multicast address received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(ETHER_ADDR_LEN); /* Skip source 
address */ if (PEEK_UINT16 != ETHERTYPE_LLDP) { log_info("lldp", "non LLDP frame received on %s", hardware->h_ifname); goto malformed; } while (length && (!gotend)) { if (length < 2) { log_warnx("lldp", "tlv header too short received on %s", hardware->h_ifname); goto malformed; } tlv_size = PEEK_UINT16; tlv_type = tlv_size >> 9; tlv_size = tlv_size & 0x1ff; (void)PEEK_SAVE(tlv); if (length < tlv_size) { log_warnx("lldp", "frame too short for tlv received on %s", hardware->h_ifname); goto malformed; } switch (tlv_type) { case LLDP_TLV_END: if (tlv_size != 0) { log_warnx("lldp", "lldp end received with size not null on %s", hardware->h_ifname); goto malformed; } if (length) log_debug("lldp", "extra data after lldp end on %s", hardware->h_ifname); gotend = 1; break; case LLDP_TLV_CHASSIS_ID: case LLDP_TLV_PORT_ID: CHECK_TLV_SIZE(2, "Port Id"); tlv_subtype = PEEK_UINT8; if ((tlv_subtype == 0) || (tlv_subtype > 7)) { log_warnx("lldp", "unknown subtype for tlv id received on %s", hardware->h_ifname); goto malformed; } if ((b = (char *)calloc(1, tlv_size - 1)) == NULL) { log_warn("lldp", "unable to allocate memory for id tlv " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size - 1); if (tlv_type == LLDP_TLV_PORT_ID) { port->p_id_subtype = tlv_subtype; port->p_id = b; port->p_id_len = tlv_size - 1; } else { chassis->c_id_subtype = tlv_subtype; chassis->c_id = b; chassis->c_id_len = tlv_size - 1; } break; case LLDP_TLV_TTL: CHECK_TLV_SIZE(2, "TTL"); chassis->c_ttl = PEEK_UINT16; ttl_received = 1; break; case LLDP_TLV_PORT_DESCR: case LLDP_TLV_SYSTEM_NAME: case LLDP_TLV_SYSTEM_DESCR: if (tlv_size < 1) { log_debug("lldp", "empty tlv received on %s", hardware->h_ifname); break; } if ((b = (char *)calloc(1, tlv_size + 1)) == NULL) { log_warn("lldp", "unable to allocate memory for string tlv " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size); if (tlv_type == LLDP_TLV_PORT_DESCR) port->p_descr = b; else if (tlv_type == LLDP_TLV_SYSTEM_NAME) chassis->c_name = b; else chassis->c_descr = b; break; case LLDP_TLV_SYSTEM_CAP: CHECK_TLV_SIZE(4, "System capabilities"); chassis->c_cap_available = PEEK_UINT16; chassis->c_cap_enabled = PEEK_UINT16; break; case LLDP_TLV_MGMT_ADDR: CHECK_TLV_SIZE(1, "Management address"); addr_str_length = PEEK_UINT8; if (addr_str_length > sizeof(addr_str_buffer)) { log_warnx("lldp", "too large management address on %s", hardware->h_ifname); goto malformed; } CHECK_TLV_SIZE(1 + addr_str_length, "Management address"); PEEK_BYTES(addr_str_buffer, addr_str_length); addr_length = addr_str_length - 1; addr_family = addr_str_buffer[0]; addr_ptr = &addr_str_buffer[1]; CHECK_TLV_SIZE(1 + addr_str_length + 5, "Management address"); iface_subtype = PEEK_UINT8; iface_number = PEEK_UINT32; af = lldpd_af_from_lldp_proto(addr_family); if (af == LLDPD_AF_UNSPEC) break; if (iface_subtype == LLDP_MGMT_IFACE_IFINDEX) iface = iface_number; else iface = 0; mgmt = lldpd_alloc_mgmt(af, addr_ptr, addr_length, iface); if (mgmt == NULL) { assert(errno == ENOMEM); log_warn("lldp", "unable to allocate memory " "for management address"); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); break; case LLDP_TLV_ORG: CHECK_TLV_SIZE(1 + (int)sizeof(orgid), "Organisational"); PEEK_BYTES(orgid, sizeof(orgid)); tlv_subtype = PEEK_UINT8; if (memcmp(dot1, orgid, sizeof(orgid)) == 0) { #ifndef ENABLE_DOT1 hardware->h_rx_unrecognized_cnt++; #else /* Dot1 */ switch (tlv_subtype) { case LLDP_TLV_DOT1_VLANNAME: CHECK_TLV_SIZE(7, "VLAN"); if 
((vlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("lldp", "unable to alloc vlan " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } vlan->v_vid = PEEK_UINT16; vlan_len = PEEK_UINT8; CHECK_TLV_SIZE(7 + vlan_len, "VLAN"); if ((vlan->v_name = (char *)calloc(1, vlan_len + 1)) == NULL) { log_warn("lldp", "unable to alloc vlan name for " "tlv received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(vlan->v_name, vlan_len); TAILQ_INSERT_TAIL(&port->p_vlans, vlan, v_entries); vlan = NULL; break; case LLDP_TLV_DOT1_PVID: CHECK_TLV_SIZE(6, "PVID"); port->p_pvid = PEEK_UINT16; break; case LLDP_TLV_DOT1_PPVID: CHECK_TLV_SIZE(7, "PPVID"); /* validation needed */ /* PPVID has to be unique if more than one PPVID TLVs are received - discard if duplicate */ /* if support bit is not set and enabled bit is set - PPVID TLV is considered error and discarded */ /* if PPVID > 4096 - bad and discard */ if ((ppvid = (struct lldpd_ppvid *)calloc(1, sizeof(struct lldpd_ppvid))) == NULL) { log_warn("lldp", "unable to alloc ppvid " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } ppvid->p_cap_status = PEEK_UINT8; ppvid->p_ppvid = PEEK_UINT16; TAILQ_INSERT_TAIL(&port->p_ppvids, ppvid, p_entries); break; case LLDP_TLV_DOT1_PI: /* validation needed */ /* PI has to be unique if more than one PI TLVs are received - discard if duplicate ?? */ CHECK_TLV_SIZE(5, "PI"); if ((pi = (struct lldpd_pi *)calloc(1, sizeof(struct lldpd_pi))) == NULL) { log_warn("lldp", "unable to alloc PI " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } pi->p_pi_len = PEEK_UINT8; CHECK_TLV_SIZE(5 + pi->p_pi_len, "PI"); if ((pi->p_pi = (char *)calloc(1, pi->p_pi_len)) == NULL) { log_warn("lldp", "unable to alloc pid name for " "tlv received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(pi->p_pi, pi->p_pi_len); TAILQ_INSERT_TAIL(&port->p_pids, pi, p_entries); pi = NULL; break; default: /* Unknown Dot1 TLV, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif } else if (memcmp(dot3, orgid, sizeof(orgid)) == 0) { #ifndef ENABLE_DOT3 hardware->h_rx_unrecognized_cnt++; #else /* Dot3 */ switch (tlv_subtype) { case LLDP_TLV_DOT3_MAC: CHECK_TLV_SIZE(9, "MAC/PHY"); port->p_macphy.autoneg_support = PEEK_UINT8; port->p_macphy.autoneg_enabled = (port->p_macphy.autoneg_support & 0x2) >> 1; port->p_macphy.autoneg_support = port->p_macphy.autoneg_support & 0x1; port->p_macphy.autoneg_advertised = PEEK_UINT16; port->p_macphy.mau_type = PEEK_UINT16; break; case LLDP_TLV_DOT3_LA: CHECK_TLV_SIZE(9, "Link aggregation"); PEEK_DISCARD_UINT8; port->p_aggregid = PEEK_UINT32; break; case LLDP_TLV_DOT3_MFS: CHECK_TLV_SIZE(6, "MFS"); port->p_mfs = PEEK_UINT16; break; case LLDP_TLV_DOT3_POWER: CHECK_TLV_SIZE(7, "Power"); port->p_power.devicetype = PEEK_UINT8; port->p_power.supported = (port->p_power.devicetype & 0x2) >> 1; port->p_power.enabled = (port->p_power.devicetype & 0x4) >> 2; port->p_power.paircontrol = (port->p_power.devicetype & 0x8) >> 3; port->p_power.devicetype = (port->p_power.devicetype & 0x1)? LLDP_DOT3_POWER_PSE:LLDP_DOT3_POWER_PD; port->p_power.pairs = PEEK_UINT8; port->p_power.class = PEEK_UINT8; /* 802.3at? */ if (tlv_size >= 12) { port->p_power.powertype = PEEK_UINT8; port->p_power.source = (port->p_power.powertype & (1<<5 | 1<<4)) >> 4; port->p_power.priority = (port->p_power.powertype & (1<<1 | 1<<0)); port->p_power.powertype = (port->p_power.powertype & (1<<7))? 
LLDP_DOT3_POWER_8023AT_TYPE1: LLDP_DOT3_POWER_8023AT_TYPE2; port->p_power.requested = PEEK_UINT16; port->p_power.allocated = PEEK_UINT16; } else port->p_power.powertype = LLDP_DOT3_POWER_8023AT_OFF; break; default: /* Unknown Dot3 TLV, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif } else if (memcmp(med, orgid, sizeof(orgid)) == 0) { /* LLDP-MED */ #ifndef ENABLE_LLDPMED hardware->h_rx_unrecognized_cnt++; #else u_int32_t policy; unsigned loctype; unsigned power; switch (tlv_subtype) { case LLDP_TLV_MED_CAP: CHECK_TLV_SIZE(7, "LLDP-MED capabilities"); chassis->c_med_cap_available = PEEK_UINT16; chassis->c_med_type = PEEK_UINT8; port->p_med_cap_enabled |= LLDP_MED_CAP_CAP; break; case LLDP_TLV_MED_POLICY: CHECK_TLV_SIZE(8, "LLDP-MED policy"); policy = PEEK_UINT32; if (((policy >> 24) < 1) || ((policy >> 24) > LLDP_MED_APPTYPE_LAST)) { log_info("lldp", "unknown policy field %d " "received on %s", policy, hardware->h_ifname); break; } port->p_med_policy[(policy >> 24) - 1].type = (policy >> 24); port->p_med_policy[(policy >> 24) - 1].unknown = ((policy & 0x800000) != 0); port->p_med_policy[(policy >> 24) - 1].tagged = ((policy & 0x400000) != 0); port->p_med_policy[(policy >> 24) - 1].vid = (policy & 0x001FFE00) >> 9; port->p_med_policy[(policy >> 24) - 1].priority = (policy & 0x1C0) >> 6; port->p_med_policy[(policy >> 24) - 1].dscp = policy & 0x3F; port->p_med_cap_enabled |= LLDP_MED_CAP_POLICY; break; case LLDP_TLV_MED_LOCATION: CHECK_TLV_SIZE(5, "LLDP-MED Location"); loctype = PEEK_UINT8; if ((loctype < 1) || (loctype > LLDP_MED_LOCFORMAT_LAST)) { log_info("lldp", "unknown location type " "received on %s", hardware->h_ifname); break; } if ((port->p_med_location[loctype - 1].data = (char*)malloc(tlv_size - 5)) == NULL) { log_warn("lldp", "unable to allocate memory " "for LLDP-MED location for " "frame received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(port->p_med_location[loctype - 1].data, tlv_size - 5); port->p_med_location[loctype - 1].data_len = tlv_size - 5; port->p_med_location[loctype - 1].format = loctype; port->p_med_cap_enabled |= LLDP_MED_CAP_LOCATION; break; case LLDP_TLV_MED_MDI: CHECK_TLV_SIZE(7, "LLDP-MED PoE-MDI"); power = PEEK_UINT8; switch (power & 0xC0) { case 0x0: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PSE; port->p_med_cap_enabled |= LLDP_MED_CAP_MDI_PSE; switch (power & 0x30) { case 0x0: port->p_med_power.source = LLDP_MED_POW_SOURCE_UNKNOWN; break; case 0x10: port->p_med_power.source = LLDP_MED_POW_SOURCE_PRIMARY; break; case 0x20: port->p_med_power.source = LLDP_MED_POW_SOURCE_BACKUP; break; default: port->p_med_power.source = LLDP_MED_POW_SOURCE_RESERVED; } break; case 0x40: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PD; port->p_med_cap_enabled |= LLDP_MED_CAP_MDI_PD; switch (power & 0x30) { case 0x0: port->p_med_power.source = LLDP_MED_POW_SOURCE_UNKNOWN; break; case 0x10: port->p_med_power.source = LLDP_MED_POW_SOURCE_PSE; break; case 0x20: port->p_med_power.source = LLDP_MED_POW_SOURCE_LOCAL; break; default: port->p_med_power.source = LLDP_MED_POW_SOURCE_BOTH; } break; default: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_RESERVED; } if ((power & 0x0F) > LLDP_MED_POW_PRIO_LOW) port->p_med_power.priority = LLDP_MED_POW_PRIO_UNKNOWN; else port->p_med_power.priority = power & 0x0F; port->p_med_power.val = PEEK_UINT16; break; case LLDP_TLV_MED_IV_HW: case LLDP_TLV_MED_IV_SW: case LLDP_TLV_MED_IV_FW: case LLDP_TLV_MED_IV_SN: case LLDP_TLV_MED_IV_MANUF: case LLDP_TLV_MED_IV_MODEL: case LLDP_TLV_MED_IV_ASSET: if (tlv_size <= 4) 
b = NULL; else { if ((b = (char*)malloc(tlv_size - 3)) == NULL) { log_warn("lldp", "unable to allocate " "memory for LLDP-MED " "inventory for frame " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size - 4); b[tlv_size - 4] = '\0'; } switch (tlv_subtype) { case LLDP_TLV_MED_IV_HW: chassis->c_med_hw = b; break; case LLDP_TLV_MED_IV_FW: chassis->c_med_fw = b; break; case LLDP_TLV_MED_IV_SW: chassis->c_med_sw = b; break; case LLDP_TLV_MED_IV_SN: chassis->c_med_sn = b; break; case LLDP_TLV_MED_IV_MANUF: chassis->c_med_manuf = b; break; case LLDP_TLV_MED_IV_MODEL: chassis->c_med_model = b; break; case LLDP_TLV_MED_IV_ASSET: chassis->c_med_asset = b; break; } port->p_med_cap_enabled |= LLDP_MED_CAP_IV; break; default: /* Unknown LLDP MED, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif /* ENABLE_LLDPMED */ } else if (memcmp(dcbx, orgid, sizeof(orgid)) == 0) { log_debug("lldp", "unsupported DCBX tlv received on %s - ignore", hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } else { log_debug("lldp", "unknown org tlv [%02x:%02x:%02x] received on %s", orgid[0], orgid[1], orgid[2], hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; #ifdef ENABLE_CUSTOM custom = (struct lldpd_custom*)calloc(1, sizeof(struct lldpd_custom)); if (!custom) { log_warn("lldp", "unable to allocate memory for custom TLV"); goto malformed; } custom->oui_info_len = tlv_size > 4 ? tlv_size - 4 : 0; memcpy(custom->oui, orgid, sizeof(custom->oui)); custom->subtype = tlv_subtype; if (custom->oui_info_len > 0) { custom->oui_info = malloc(custom->oui_info_len); if (!custom->oui_info) { log_warn("lldp", "unable to allocate memory for custom TLV data"); goto malformed; } PEEK_BYTES(custom->oui_info, custom->oui_info_len); } TAILQ_INSERT_TAIL(&port->p_custom_list, custom, next); custom = NULL; #endif } break; default: log_warnx("lldp", "unknown tlv (%d) received on %s", tlv_type, hardware->h_ifname); goto malformed; } if (pos > tlv + tlv_size) { log_warnx("lldp", "BUG: already past TLV!"); goto malformed; } PEEK_DISCARD(tlv + tlv_size - pos); } /* Some random check */ if ((chassis->c_id == NULL) || (port->p_id == NULL) || (!ttl_received) || (gotend == 0)) { log_warnx("lldp", "some mandatory tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: #ifdef ENABLE_CUSTOM free(custom); #endif #ifdef ENABLE_DOT1 free(vlan); free(pi); #endif lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; }
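/*
 * A small sketch of the TLV header layout that lldp_decode() unpacks
 * above: one 16-bit big-endian word carrying a 7-bit type in bits
 * 15..9 and a 9-bit length (up to 511) in bits 8..0. The
 * LLDP_TLV_EXAMPLE guard and the helper names are hypothetical and
 * not part of lldpd; they only mirror the ">> 9" / "& 0x1ff"
 * unpacking used on the receive path.
 */
#ifdef LLDP_TLV_EXAMPLE
#include <assert.h>
#include <stdint.h>

static uint16_t
lldp_example_tlv_header(uint16_t type, uint16_t len)
{
	/* Type in the high 7 bits, length in the low 9 bits. */
	return (uint16_t)((type << 9) | (len & 0x1ff));
}

static void
lldp_example_tlv_roundtrip(void)
{
	uint16_t hdr = lldp_example_tlv_header(LLDP_TLV_CHASSIS_ID, 7);

	/* Same unpacking as lldp_decode() performs on a received frame. */
	assert((hdr >> 9) == LLDP_TLV_CHASSIS_ID);
	assert((hdr & 0x1ff) == 7);
}
#endif /* LLDP_TLV_EXAMPLE */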
./CrossVul/dataset_final_sorted/CWE-617/c/bad_1771_3
crossvul-cpp_data_good_2602_0
/* * Copyright (C) 2012,2013 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> * * Derived from arch/arm/kvm/coproc.c: * Copyright (C) 2012 - Virtual Open Systems and Columbia University * Authors: Rusty Russell <rusty@rustcorp.com.au> * Christoffer Dall <c.dall@virtualopensystems.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/bsearch.h> #include <linux/kvm_host.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include <asm/debug-monitors.h> #include <asm/esr.h> #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_coproc.h> #include <asm/kvm_emulate.h> #include <asm/kvm_host.h> #include <asm/kvm_mmu.h> #include <asm/perf_event.h> #include <asm/sysreg.h> #include <trace/events/kvm.h> #include "sys_regs.h" #include "trace.h" /* * All of this file is extremely similar to the ARM coproc.c, but the * types are different. My gut feeling is that it should be pretty * easy to merge, but that would be an ABI breakage -- again. VFP * would also need to be abstracted. * * For AArch32, we only take care of what is being trapped. Anything * that has to do with init and userspace access has to go via the * 64bit interface. */ /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ static u32 cache_levels; /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ #define CSSELR_MAX 12 /* Which cache CCSIDR represents depends on CSSELR value. */ static u32 get_ccsidr(u32 csselr) { u32 ccsidr; /* Make sure no one else changes CSSELR during this! */ local_irq_disable(); write_sysreg(csselr, csselr_el1); isb(); ccsidr = read_sysreg(ccsidr_el1); local_irq_enable(); return ccsidr; } /* * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). */ static bool access_dcsw(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (!p->is_write) return read_from_write_only(vcpu, p); kvm_set_way_flush(vcpu); return true; } /* * Generic accessor for VM registers. Only called as long as HCR_TVM * is set. If the guest enables the MMU, we stop trapping the VM * sys_regs and leave it in complete control of the caches. */ static bool access_vm_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { bool was_enabled = vcpu_has_cache_enabled(vcpu); BUG_ON(!p->is_write); if (!p->is_aarch32) { vcpu_sys_reg(vcpu, r->reg) = p->regval; } else { if (!p->is_32bit) vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval); vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval); } kvm_toggle_cache(vcpu, was_enabled); return true; } /* * Trap handler for the GICv3 SGI generation system register. * Forward the request to the VGIC emulation. * The cp15_64 code makes sure this automatically works * for both AArch64 and AArch32 accesses.
*/ static bool access_gic_sgi(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (!p->is_write) return read_from_write_only(vcpu, p); vgic_v3_dispatch_sgi(vcpu, p->regval); return true; } static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) return ignore_write(vcpu, p); p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; return true; } static bool trap_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) return ignore_write(vcpu, p); else return read_zero(vcpu, p); } static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { return ignore_write(vcpu, p); } else { p->regval = (1 << 3); return true; } } static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { return ignore_write(vcpu, p); } else { p->regval = read_sysreg(dbgauthstatus_el1); return true; } } /* * We want to avoid world-switching all the DBG registers all the * time: * * - If we've touched any debug register, it is likely that we're * going to touch more of them. It then makes sense to disable the * traps and start doing the save/restore dance. * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is * then mandatory to save/restore the registers, as the guest * depends on them. * * For this, we use a DIRTY bit, indicating the guest has modified the * debug registers, used as follows: * * On guest entry: * - If the dirty bit is set (because we're coming back from trapping), * disable the traps, save host registers, restore guest registers. * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), * set the dirty bit, disable the traps, save host registers, * restore guest registers. * - Otherwise, enable the traps. * * On guest exit: * - If the dirty bit is set, save guest registers, restore host * registers and clear the dirty bit. This ensures that the host can * now use the debug registers. */ static bool trap_debug_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { vcpu_sys_reg(vcpu, r->reg) = p->regval; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { p->regval = vcpu_sys_reg(vcpu, r->reg); } trace_trap_reg(__func__, r->reg, p->is_write, p->regval); return true; } /* * reg_to_dbg/dbg_to_reg * * A 32 bit write to a debug register leaves the top bits alone. * A 32 bit read from a debug register only returns the bottom bits. * * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the * hyp.S code switches between host and guest values in the future.
*/ static void reg_to_dbg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, u64 *dbg_reg) { u64 val = p->regval; if (p->is_32bit) { val &= 0xffffffffUL; val |= ((*dbg_reg >> 32) << 32); } *dbg_reg = val; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } static void dbg_to_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p, u64 *dbg_reg) { p->regval = *dbg_reg; if (p->is_32bit) p->regval &= 0xffffffffUL; } static bool trap_bvr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; if (p->is_write) reg_to_dbg(vcpu, p, dbg_reg); else dbg_to_reg(vcpu, p, dbg_reg); trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); return true; } static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static void reset_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val; } static bool trap_bcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; if (p->is_write) reg_to_dbg(vcpu, p, dbg_reg); else dbg_to_reg(vcpu, p, dbg_reg); trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); return true; } static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static void reset_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val; } static bool trap_wvr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; if (p->is_write) reg_to_dbg(vcpu, p, dbg_reg); else dbg_to_reg(vcpu, p, dbg_reg); trace_trap_reg(__func__, rd->reg, p->is_write, vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]); return true; } static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static void reset_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; } static bool trap_wcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = 
&vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; if (p->is_write) reg_to_dbg(vcpu, p, dbg_reg); else dbg_to_reg(vcpu, p, dbg_reg); trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); return true; } static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; return 0; } static void reset_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val; } static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1); } static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 mpidr; /* * Map the vcpu_id into the first three affinity level fields of * the MPIDR. We limit the number of VCPUs in level 0 due to a * limitation to 16 CPUs in that level in the ICC_SGIxR registers * of the GICv3 to be able to address each CPU directly when * sending IPIs. */ mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr; } static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 pmcr, val; pmcr = read_sysreg(pmcr_el0); /* * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN * except PMCR.E resetting to zero. 
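* The 0xdecafbad constant used just below is simply an arbitrary bit pattern standing in for that UNKNOWN value: the non-writable bits are taken from the host's PMCR_EL0, and PMCR.E is then cleared.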
*/ val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); vcpu_sys_reg(vcpu, PMCR_EL0) = val; } static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu) { u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu)); } static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu) { u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN)) || vcpu_mode_priv(vcpu)); } static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu) { u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN)) || vcpu_mode_priv(vcpu)); } static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu) { u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0); return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN)) || vcpu_mode_priv(vcpu)); } static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 val; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_el0_disabled(vcpu)) return false; if (p->is_write) { /* Only update writeable bits of PMCR */ val = vcpu_sys_reg(vcpu, PMCR_EL0); val &= ~ARMV8_PMU_PMCR_MASK; val |= p->regval & ARMV8_PMU_PMCR_MASK; vcpu_sys_reg(vcpu, PMCR_EL0) = val; kvm_pmu_handle_pmcr(vcpu, val); } else { /* PMCR.P & PMCR.C are RAZ */ val = vcpu_sys_reg(vcpu, PMCR_EL0) & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); p->regval = val; } return true; } static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_event_counter_el0_disabled(vcpu)) return false; if (p->is_write) vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; else /* return PMSELR.SEL field */ p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; return true; } static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 pmceid; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); BUG_ON(p->is_write); if (pmu_access_el0_disabled(vcpu)) return false; if (!(p->Op2 & 1)) pmceid = read_sysreg(pmceid0_el0); else pmceid = read_sysreg(pmceid1_el0); p->regval = pmceid; return true; } static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) { u64 pmcr, val; pmcr = vcpu_sys_reg(vcpu, PMCR_EL0); val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) return false; return true; } static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 idx; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (r->CRn == 9 && r->CRm == 13) { if (r->Op2 == 2) { /* PMXEVCNTR_EL0 */ if (pmu_access_event_counter_el0_disabled(vcpu)) return false; idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; } else if (r->Op2 == 0) { /* PMCCNTR_EL0 */ if (pmu_access_cycle_counter_el0_disabled(vcpu)) return false; idx = ARMV8_PMU_CYCLE_IDX; } else { return false; } } else if (r->CRn == 0 && r->CRm == 9) { /* PMCCNTR */ if (pmu_access_event_counter_el0_disabled(vcpu)) return false; idx = ARMV8_PMU_CYCLE_IDX; } else if (r->CRn == 14 && (r->CRm & 12) == 8) { /* PMEVCNTRn_EL0 */ if (pmu_access_event_counter_el0_disabled(vcpu)) return false; idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); } else { return false; } if (!pmu_counter_idx_valid(vcpu, idx)) return false; if 
(p->is_write) { if (pmu_access_el0_disabled(vcpu)) return false; kvm_pmu_set_counter_value(vcpu, idx, p->regval); } else { p->regval = kvm_pmu_get_counter_value(vcpu, idx); } return true; } static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 idx, reg; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_el0_disabled(vcpu)) return false; if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { /* PMXEVTYPER_EL0 */ idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; reg = PMEVTYPER0_EL0 + idx; } else if (r->CRn == 14 && (r->CRm & 12) == 12) { idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); if (idx == ARMV8_PMU_CYCLE_IDX) reg = PMCCFILTR_EL0; else /* PMEVTYPERn_EL0 */ reg = PMEVTYPER0_EL0 + idx; } else { BUG(); } if (!pmu_counter_idx_valid(vcpu, idx)) return false; if (p->is_write) { kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; } else { p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; } return true; } static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 val, mask; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_el0_disabled(vcpu)) return false; mask = kvm_pmu_valid_counter_mask(vcpu); if (p->is_write) { val = p->regval & mask; if (r->Op2 & 0x1) { /* accessing PMCNTENSET_EL0 */ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; kvm_pmu_enable_counter(vcpu, val); } else { /* accessing PMCNTENCLR_EL0 */ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; kvm_pmu_disable_counter(vcpu, val); } } else { p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask; } return true; } static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 mask = kvm_pmu_valid_counter_mask(vcpu); if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (!vcpu_mode_priv(vcpu)) return false; if (p->is_write) { u64 val = p->regval & mask; if (r->Op2 & 0x1) /* accessing PMINTENSET_EL1 */ vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val; else /* accessing PMINTENCLR_EL1 */ vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val; } else { p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask; } return true; } static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 mask = kvm_pmu_valid_counter_mask(vcpu); if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_access_el0_disabled(vcpu)) return false; if (p->is_write) { if (r->CRm & 0x2) /* accessing PMOVSSET_EL0 */ kvm_pmu_overflow_set(vcpu, p->regval & mask); else /* accessing PMOVSCLR_EL0 */ vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); } else { p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask; } return true; } static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { u64 mask; if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (pmu_write_swinc_el0_disabled(vcpu)) return false; if (p->is_write) { mask = kvm_pmu_valid_counter_mask(vcpu); kvm_pmu_software_increment(vcpu, p->regval & mask); return true; } return false; } static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (!kvm_arm_pmu_v3_ready(vcpu)) return trap_raz_wi(vcpu, p, r); if (p->is_write) { if (!vcpu_mode_priv(vcpu)) return false; vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval & ARMV8_PMU_USERENR_MASK; } else { p->regval = 
vcpu_sys_reg(vcpu, PMUSERENR_EL0) & ARMV8_PMU_USERENR_MASK; } return true; } /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ /* DBGBVRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \ trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \ /* DBGBCRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \ trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \ /* DBGWVRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \ trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \ /* DBGWCRn_EL1 */ \ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \ trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } /* Macro to expand the PMEVCNTRn_EL0 register */ #define PMU_PMEVCNTR_EL0(n) \ /* PMEVCNTRn_EL0 */ \ { Op0(0b11), Op1(0b011), CRn(0b1110), \ CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), } /* Macro to expand the PMEVTYPERn_EL0 register */ #define PMU_PMEVTYPER_EL0(n) \ /* PMEVTYPERn_EL0 */ \ { Op0(0b11), Op1(0b011), CRn(0b1110), \ CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } /* * Architected system registers. * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 * * Debug handling: We do trap most, if not all, debug-related system * registers. The implementation is good enough to ensure that a guest * can use these with minimal performance degradation. The drawback is * that we don't implement any of the external debug nor the * OSlock protocol. This should be revisited if we ever encounter a * more demanding guest... */ static const struct sys_reg_desc sys_reg_descs[] = { /* DC ISW */ { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010), access_dcsw }, /* DC CSW */ { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010), access_dcsw }, /* DC CISW */ { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), access_dcsw }, DBG_BCR_BVR_WCR_WVR_EL1(0), DBG_BCR_BVR_WCR_WVR_EL1(1), /* MDCCINT_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, /* MDSCR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), trap_debug_regs, reset_val, MDSCR_EL1, 0 }, DBG_BCR_BVR_WCR_WVR_EL1(2), DBG_BCR_BVR_WCR_WVR_EL1(3), DBG_BCR_BVR_WCR_WVR_EL1(4), DBG_BCR_BVR_WCR_WVR_EL1(5), DBG_BCR_BVR_WCR_WVR_EL1(6), DBG_BCR_BVR_WCR_WVR_EL1(7), DBG_BCR_BVR_WCR_WVR_EL1(8), DBG_BCR_BVR_WCR_WVR_EL1(9), DBG_BCR_BVR_WCR_WVR_EL1(10), DBG_BCR_BVR_WCR_WVR_EL1(11), DBG_BCR_BVR_WCR_WVR_EL1(12), DBG_BCR_BVR_WCR_WVR_EL1(13), DBG_BCR_BVR_WCR_WVR_EL1(14), DBG_BCR_BVR_WCR_WVR_EL1(15), /* MDRAR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), trap_raz_wi }, /* OSLAR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100), trap_raz_wi }, /* OSLSR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100), trap_oslsr_el1 }, /* OSDLR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100), trap_raz_wi }, /* DBGPRCR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100), trap_raz_wi }, /* DBGCLAIMSET_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110), trap_raz_wi }, /* DBGCLAIMCLR_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110), trap_raz_wi }, /* DBGAUTHSTATUS_EL1 */ { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), trap_dbgauthstatus_el1 }, /* MDCCSR_EL1 */ { Op0(0b10),
Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), trap_raz_wi }, /* DBGDTR_EL0 */ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000), trap_raz_wi }, /* DBGDTR[TR]X_EL0 */ { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000), trap_raz_wi }, /* DBGVCR32_EL2 */ { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), NULL, reset_val, DBGVCR32_EL2, 0 }, /* MPIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101), NULL, reset_mpidr, MPIDR_EL1 }, /* SCTLR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, /* CPACR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), NULL, reset_val, CPACR_EL1, 0 }, /* TTBR0_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000), access_vm_reg, reset_unknown, TTBR0_EL1 }, /* TTBR1_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001), access_vm_reg, reset_unknown, TTBR1_EL1 }, /* TCR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010), access_vm_reg, reset_val, TCR_EL1, 0 }, /* AFSR0_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000), access_vm_reg, reset_unknown, AFSR0_EL1 }, /* AFSR1_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001), access_vm_reg, reset_unknown, AFSR1_EL1 }, /* ESR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000), access_vm_reg, reset_unknown, ESR_EL1 }, /* FAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), access_vm_reg, reset_unknown, FAR_EL1 }, /* PAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), NULL, reset_unknown, PAR_EL1 }, /* PMINTENSET_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), access_pminten, reset_unknown, PMINTENSET_EL1 }, /* PMINTENCLR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), access_pminten, NULL, PMINTENSET_EL1 }, /* MAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), access_vm_reg, reset_unknown, MAIR_EL1 }, /* AMAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, /* VBAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), NULL, reset_val, VBAR_EL1, 0 }, /* ICC_SGI1R_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101), access_gic_sgi }, /* ICC_SRE_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), access_gic_sre }, /* CONTEXTIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, /* TPIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100), NULL, reset_unknown, TPIDR_EL1 }, /* CNTKCTL_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000), NULL, reset_val, CNTKCTL_EL1, 0}, /* CSSELR_EL1 */ { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), NULL, reset_unknown, CSSELR_EL1 }, /* PMCR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), access_pmcr, reset_pmcr, }, /* PMCNTENSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, /* PMCNTENCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), access_pmcnten, NULL, PMCNTENSET_EL0 }, /* PMOVSCLR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), access_pmovs, NULL, PMOVSSET_EL0 }, /* PMSWINC_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), access_pmswinc, 
reset_unknown, PMSWINC_EL0 }, /* PMSELR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), access_pmselr, reset_unknown, PMSELR_EL0 }, /* PMCEID0_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), access_pmceid }, /* PMCEID1_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), access_pmceid }, /* PMCCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 }, /* PMXEVTYPER_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), access_pmu_evtyper }, /* PMXEVCNTR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), access_pmu_evcntr }, /* PMUSERENR_EL0 * This register resets as unknown in 64bit mode while it resets as zero * in 32bit mode. Here we choose to reset it as zero for consistency. */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 }, /* PMOVSSET_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), access_pmovs, reset_unknown, PMOVSSET_EL0 }, /* TPIDR_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), NULL, reset_unknown, TPIDR_EL0 }, /* TPIDRRO_EL0 */ { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), NULL, reset_unknown, TPIDRRO_EL0 }, /* PMEVCNTRn_EL0 */ PMU_PMEVCNTR_EL0(0), PMU_PMEVCNTR_EL0(1), PMU_PMEVCNTR_EL0(2), PMU_PMEVCNTR_EL0(3), PMU_PMEVCNTR_EL0(4), PMU_PMEVCNTR_EL0(5), PMU_PMEVCNTR_EL0(6), PMU_PMEVCNTR_EL0(7), PMU_PMEVCNTR_EL0(8), PMU_PMEVCNTR_EL0(9), PMU_PMEVCNTR_EL0(10), PMU_PMEVCNTR_EL0(11), PMU_PMEVCNTR_EL0(12), PMU_PMEVCNTR_EL0(13), PMU_PMEVCNTR_EL0(14), PMU_PMEVCNTR_EL0(15), PMU_PMEVCNTR_EL0(16), PMU_PMEVCNTR_EL0(17), PMU_PMEVCNTR_EL0(18), PMU_PMEVCNTR_EL0(19), PMU_PMEVCNTR_EL0(20), PMU_PMEVCNTR_EL0(21), PMU_PMEVCNTR_EL0(22), PMU_PMEVCNTR_EL0(23), PMU_PMEVCNTR_EL0(24), PMU_PMEVCNTR_EL0(25), PMU_PMEVCNTR_EL0(26), PMU_PMEVCNTR_EL0(27), PMU_PMEVCNTR_EL0(28), PMU_PMEVCNTR_EL0(29), PMU_PMEVCNTR_EL0(30), /* PMEVTYPERn_EL0 */ PMU_PMEVTYPER_EL0(0), PMU_PMEVTYPER_EL0(1), PMU_PMEVTYPER_EL0(2), PMU_PMEVTYPER_EL0(3), PMU_PMEVTYPER_EL0(4), PMU_PMEVTYPER_EL0(5), PMU_PMEVTYPER_EL0(6), PMU_PMEVTYPER_EL0(7), PMU_PMEVTYPER_EL0(8), PMU_PMEVTYPER_EL0(9), PMU_PMEVTYPER_EL0(10), PMU_PMEVTYPER_EL0(11), PMU_PMEVTYPER_EL0(12), PMU_PMEVTYPER_EL0(13), PMU_PMEVTYPER_EL0(14), PMU_PMEVTYPER_EL0(15), PMU_PMEVTYPER_EL0(16), PMU_PMEVTYPER_EL0(17), PMU_PMEVTYPER_EL0(18), PMU_PMEVTYPER_EL0(19), PMU_PMEVTYPER_EL0(20), PMU_PMEVTYPER_EL0(21), PMU_PMEVTYPER_EL0(22), PMU_PMEVTYPER_EL0(23), PMU_PMEVTYPER_EL0(24), PMU_PMEVTYPER_EL0(25), PMU_PMEVTYPER_EL0(26), PMU_PMEVTYPER_EL0(27), PMU_PMEVTYPER_EL0(28), PMU_PMEVTYPER_EL0(29), PMU_PMEVTYPER_EL0(30), /* PMCCFILTR_EL0 * This register resets as unknown in 64bit mode while it resets as zero * in 32bit mode. Here we choose to reset it as zero for consistency. 
*/ { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 }, /* DACR32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), NULL, reset_unknown, DACR32_EL2 }, /* IFSR32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001), NULL, reset_unknown, IFSR32_EL2 }, /* FPEXC32_EL2 */ { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000), NULL, reset_val, FPEXC32_EL2, 0x70 }, }; static bool trap_dbgidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { return ignore_write(vcpu, p); } else { u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1); u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1); u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) | (6 << 16) | (el3 << 14) | (el3 << 12)); return true; } } static bool trap_debug32(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { if (p->is_write) { vcpu_cp14(vcpu, r->reg) = p->regval; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { p->regval = vcpu_cp14(vcpu, r->reg); } return true; } /* AArch32 debug register mappings * * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0] * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32] * * All control registers and watchpoint value registers are mapped to * the lower 32 bits of their AArch64 equivalents. We share the trap * handlers with the above AArch64 code, which checks what mode the * system is in. */ static bool trap_xvr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; if (p->is_write) { u64 val = *dbg_reg; val &= 0xffffffffUL; val |= p->regval << 32; *dbg_reg = val; vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; } else { p->regval = *dbg_reg >> 32; } trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); return true; } #define DBG_BCR_BVR_WCR_WVR(n) \ /* DBGBVRn */ \ { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \ /* DBGBCRn */ \ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \ /* DBGWVRn */ \ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \ /* DBGWCRn */ \ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n } #define DBGBXVR(n) \ { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n } /* * Trapped cp14 registers. We generally ignore most of the external * debug, on the principle that they don't really make sense to a * guest. Revisit this one day, should this principle ever change.
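* Most of the external-debug entries below are therefore handled by trap_raz_wi (reads return zero, writes are ignored).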
*/ static const struct sys_reg_desc cp14_regs[] = { /* DBGIDR */ { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, /* DBGDTRRXext */ { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(0), /* DBGDSCRint */ { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(1), /* DBGDCCINT */ { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, /* DBGDSCRext */ { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, DBG_BCR_BVR_WCR_WVR(2), /* DBGDTR[RT]Xint */ { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, /* DBGDTR[RT]Xext */ { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(3), DBG_BCR_BVR_WCR_WVR(4), DBG_BCR_BVR_WCR_WVR(5), /* DBGWFAR */ { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, /* DBGOSECCR */ { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, DBG_BCR_BVR_WCR_WVR(6), /* DBGVCR */ { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, DBG_BCR_BVR_WCR_WVR(7), DBG_BCR_BVR_WCR_WVR(8), DBG_BCR_BVR_WCR_WVR(9), DBG_BCR_BVR_WCR_WVR(10), DBG_BCR_BVR_WCR_WVR(11), DBG_BCR_BVR_WCR_WVR(12), DBG_BCR_BVR_WCR_WVR(13), DBG_BCR_BVR_WCR_WVR(14), DBG_BCR_BVR_WCR_WVR(15), /* DBGDRAR (32bit) */ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, DBGBXVR(0), /* DBGOSLAR */ { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, DBGBXVR(1), /* DBGOSLSR */ { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 }, DBGBXVR(2), DBGBXVR(3), /* DBGOSDLR */ { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, DBGBXVR(4), /* DBGPRCR */ { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, DBGBXVR(5), DBGBXVR(6), DBGBXVR(7), DBGBXVR(8), DBGBXVR(9), DBGBXVR(10), DBGBXVR(11), DBGBXVR(12), DBGBXVR(13), DBGBXVR(14), DBGBXVR(15), /* DBGDSAR (32bit) */ { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, /* DBGDEVID2 */ { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, /* DBGDEVID1 */ { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, /* DBGDEVID */ { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, /* DBGCLAIMSET */ { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, /* DBGCLAIMCLR */ { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, /* DBGAUTHSTATUS */ { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, }; /* Trapped cp14 64bit registers */ static const struct sys_reg_desc cp14_64_regs[] = { /* DBGDRAR (64bit) */ { Op1( 0), CRm( 1), .access = trap_raz_wi }, /* DBGDSAR (64bit) */ { Op1( 0), CRm( 2), .access = trap_raz_wi }, }; /* Macro to expand the PMEVCNTRn register */ #define PMU_PMEVCNTR(n) \ /* PMEVCNTRn */ \ { Op1(0), CRn(0b1110), \ CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evcntr } /* Macro to expand the PMEVTYPERn register */ #define PMU_PMEVTYPER(n) \ /* PMEVTYPERn */ \ { Op1(0), CRn(0b1110), \ CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ access_pmu_evtyper } /* * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, * depending on the way they are accessed (as a 32bit or a 64bit * register). 
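* Concretely, a 32-bit MCR/MRC access to TTBR0 matches the c2_TTBR0 entry in cp15_regs below, while a 64-bit MCRR/MRRC access matches the c2_TTBR0 entry in cp15_64_regs; both share the access_vm_reg handler.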
*/ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR }, { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR }, { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR }, { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR }, /* * DC{C,I,CI}SW operations: */ { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, /* PMU */ { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc }, { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, /* ICC_SRE */ { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre }, { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, /* PMEVCNTRn */ PMU_PMEVCNTR(0), PMU_PMEVCNTR(1), PMU_PMEVCNTR(2), PMU_PMEVCNTR(3), PMU_PMEVCNTR(4), PMU_PMEVCNTR(5), PMU_PMEVCNTR(6), PMU_PMEVCNTR(7), PMU_PMEVCNTR(8), PMU_PMEVCNTR(9), PMU_PMEVCNTR(10), PMU_PMEVCNTR(11), PMU_PMEVCNTR(12), PMU_PMEVCNTR(13), PMU_PMEVCNTR(14), PMU_PMEVCNTR(15), PMU_PMEVCNTR(16), PMU_PMEVCNTR(17), PMU_PMEVCNTR(18), PMU_PMEVCNTR(19), PMU_PMEVCNTR(20), PMU_PMEVCNTR(21), PMU_PMEVCNTR(22), PMU_PMEVCNTR(23), PMU_PMEVCNTR(24), PMU_PMEVCNTR(25), PMU_PMEVCNTR(26), PMU_PMEVCNTR(27), PMU_PMEVCNTR(28), PMU_PMEVCNTR(29), PMU_PMEVCNTR(30), /* PMEVTYPERn */ PMU_PMEVTYPER(0), PMU_PMEVTYPER(1), PMU_PMEVTYPER(2), PMU_PMEVTYPER(3), PMU_PMEVTYPER(4), PMU_PMEVTYPER(5), PMU_PMEVTYPER(6), PMU_PMEVTYPER(7), PMU_PMEVTYPER(8), PMU_PMEVTYPER(9), PMU_PMEVTYPER(10), PMU_PMEVTYPER(11), PMU_PMEVTYPER(12), PMU_PMEVTYPER(13), PMU_PMEVTYPER(14), PMU_PMEVTYPER(15), PMU_PMEVTYPER(16), PMU_PMEVTYPER(17), PMU_PMEVTYPER(18), PMU_PMEVTYPER(19), PMU_PMEVTYPER(20), PMU_PMEVTYPER(21), PMU_PMEVTYPER(22), PMU_PMEVTYPER(23), PMU_PMEVTYPER(24), PMU_PMEVTYPER(25), PMU_PMEVTYPER(26), PMU_PMEVTYPER(27), PMU_PMEVTYPER(28), PMU_PMEVTYPER(29), PMU_PMEVTYPER(30), /* PMCCFILTR */ { Op1(0), CRn(14), 
CRm(15), Op2(7), access_pmu_evtyper }, }; static const struct sys_reg_desc cp15_64_regs[] = { { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr }, { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, }; /* Target specific emulation tables */ static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS]; void kvm_register_target_sys_reg_table(unsigned int target, struct kvm_sys_reg_target_table *table) { target_tables[target] = table; } /* Get specific register table for this target. */ static const struct sys_reg_desc *get_target_table(unsigned target, bool mode_is_64, size_t *num) { struct kvm_sys_reg_target_table *table; table = target_tables[target]; if (mode_is_64) { *num = table->table64.num; return table->table64.table; } else { *num = table->table32.num; return table->table32.table; } } #define reg_to_match_value(x) \ ({ \ unsigned long val; \ val = (x)->Op0 << 14; \ val |= (x)->Op1 << 11; \ val |= (x)->CRn << 7; \ val |= (x)->CRm << 3; \ val |= (x)->Op2; \ val; \ }) static int match_sys_reg(const void *key, const void *elt) { const unsigned long pval = (unsigned long)key; const struct sys_reg_desc *r = elt; return pval - reg_to_match_value(r); } static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[], unsigned int num) { unsigned long pval = reg_to_match_value(params); return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg); } int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) { kvm_inject_undefined(vcpu); return 1; } /* * emulate_cp -- tries to match a sys_reg access in a handling table, and * call the corresponding trap handler. * * @params: pointer to the descriptor of the access * @table: array of trap descriptors * @num: size of the trap descriptor array * * Return 0 if the access has been handled, and -1 if not. */ static int emulate_cp(struct kvm_vcpu *vcpu, struct sys_reg_params *params, const struct sys_reg_desc *table, size_t num) { const struct sys_reg_desc *r; if (!table) return -1; /* Not handled */ r = find_reg(params, table, num); if (r) { /* * Not having an accessor means that we have * configured a trap that we don't know how to * handle. This certainly qualifies as a gross bug * that should be fixed right away. 
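* (Hence the BUG_ON() just below, rather than quietly injecting an undefined exception into the guest.)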
*/ BUG_ON(!r->access); if (likely(r->access(vcpu, params, r))) { /* Skip instruction, since it was emulated */ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); /* Handled */ return 0; } } /* Not handled */ return -1; } static void unhandled_cp_access(struct kvm_vcpu *vcpu, struct sys_reg_params *params) { u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); int cp = -1; switch(hsr_ec) { case ESR_ELx_EC_CP15_32: case ESR_ELx_EC_CP15_64: cp = 15; break; case ESR_ELx_EC_CP14_MR: case ESR_ELx_EC_CP14_64: cp = 14; break; default: WARN_ON(1); } kvm_err("Unsupported guest CP%d access at: %08lx\n", cp, *vcpu_pc(vcpu)); print_sys_reg_instr(params); kvm_inject_undefined(vcpu); } /** * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, const struct sys_reg_desc *global, size_t nr_global, const struct sys_reg_desc *target_specific, size_t nr_specific) { struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); int Rt = (hsr >> 5) & 0xf; int Rt2 = (hsr >> 10) & 0xf; params.is_aarch32 = true; params.is_32bit = false; params.CRm = (hsr >> 1) & 0xf; params.is_write = ((hsr & 1) == 0); params.Op0 = 0; params.Op1 = (hsr >> 16) & 0xf; params.Op2 = 0; params.CRn = 0; /* * Make a 64-bit value out of Rt and Rt2. As we use the same trap * backends between AArch32 and AArch64, we get away with it. */ if (params.is_write) { params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; } if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) goto out; if (!emulate_cp(vcpu, &params, global, nr_global)) goto out; unhandled_cp_access(vcpu, &params); out: /* Split up the value between registers for the read side */ if (!params.is_write) { vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); } return 1; } /** * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, const struct sys_reg_desc *global, size_t nr_global, const struct sys_reg_desc *target_specific, size_t nr_specific) { struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); int Rt = (hsr >> 5) & 0xf; params.is_aarch32 = true; params.is_32bit = true; params.CRm = (hsr >> 1) & 0xf; params.regval = vcpu_get_reg(vcpu, Rt); params.is_write = ((hsr & 1) == 0); params.CRn = (hsr >> 10) & 0xf; params.Op0 = 0; params.Op1 = (hsr >> 14) & 0x7; params.Op2 = (hsr >> 17) & 0x7; if (!emulate_cp(vcpu, &params, target_specific, nr_specific) || !emulate_cp(vcpu, &params, global, nr_global)) { if (!params.is_write) vcpu_set_reg(vcpu, Rt, params.regval); return 1; } unhandled_cp_access(vcpu, &params); return 1; } int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) { const struct sys_reg_desc *target_specific; size_t num; target_specific = get_target_table(vcpu->arch.target, false, &num); return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs), target_specific, num); } int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) { const struct sys_reg_desc *target_specific; size_t num; target_specific = get_target_table(vcpu->arch.target, false, &num); return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), target_specific, num); } int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) { return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs), NULL, 0); } 
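/* * Note on the cp14 entry points here: unlike cp15, there are no target-specific cp14 tables, which is why kvm_handle_cp_64/kvm_handle_cp_32 are invoked with NULL and 0 for the target-specific table arguments. */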
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) { return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs), NULL, 0); } static int emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params) { size_t num; const struct sys_reg_desc *table, *r; table = get_target_table(vcpu->arch.target, true, &num); /* Search target-specific then generic table. */ r = find_reg(params, table, num); if (!r) r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); if (likely(r)) { /* * Not having an accessor means that we have * configured a trap that we don't know how to * handle. This certainly qualifies as a gross bug * that should be fixed right away. */ BUG_ON(!r->access); if (likely(r->access(vcpu, params, r))) { /* Skip instruction, since it was emulated */ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } /* If access function fails, it should complain. */ } else { kvm_err("Unsupported guest sys_reg access at: %lx\n", *vcpu_pc(vcpu)); print_sys_reg_instr(params); } kvm_inject_undefined(vcpu); return 1; } static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, const struct sys_reg_desc *table, size_t num) { unsigned long i; for (i = 0; i < num; i++) if (table[i].reset) table[i].reset(vcpu, &table[i]); } /** * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct sys_reg_params params; unsigned long esr = kvm_vcpu_get_hsr(vcpu); int Rt = (esr >> 5) & 0x1f; int ret; trace_kvm_handle_sys_reg(esr); params.is_aarch32 = false; params.is_32bit = false; params.Op0 = (esr >> 20) & 3; params.Op1 = (esr >> 14) & 0x7; params.CRn = (esr >> 10) & 0xf; params.CRm = (esr >> 1) & 0xf; params.Op2 = (esr >> 17) & 0x7; params.regval = vcpu_get_reg(vcpu, Rt); params.is_write = !(esr & 1); ret = emulate_sys_reg(vcpu, &params); if (!params.is_write) vcpu_set_reg(vcpu, Rt, params.regval); return ret; } /****************************************************************************** * Userspace API *****************************************************************************/ static bool index_to_params(u64 id, struct sys_reg_params *params) { switch (id & KVM_REG_SIZE_MASK) { case KVM_REG_SIZE_U64: /* Any unused index bits means it's not valid. */ if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK | KVM_REG_ARM64_SYSREG_OP0_MASK | KVM_REG_ARM64_SYSREG_OP1_MASK | KVM_REG_ARM64_SYSREG_CRN_MASK | KVM_REG_ARM64_SYSREG_CRM_MASK | KVM_REG_ARM64_SYSREG_OP2_MASK)) return false; params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); return true; default: return false; } } /* Decode an index value, and find the sys_reg_desc entry. */ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id) { size_t num; const struct sys_reg_desc *table, *r; struct sys_reg_params params; /* We only do sys_reg for now. 
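* That is, only indices in the KVM_REG_ARM64_SYSREG coprocessor space are decoded here; demux (cache geometry) indices are handled separately by demux_c15_get/demux_c15_set.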
*/ if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) return NULL; if (!index_to_params(id, &params)) return NULL; table = get_target_table(vcpu->arch.target, true, &num); r = find_reg(&params, table, num); if (!r) r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); /* Not saved in the sys_reg array? */ if (r && !r->reg) r = NULL; return r; } /* * These are the invariant sys_reg registers: we let the guest see the * host versions of these, so they're part of the guest state. * * A future CPU may provide a mechanism to present different values to * the guest, or a future kvm may trap them. */ #define FUNCTION_INVARIANT(reg) \ static void get_##reg(struct kvm_vcpu *v, \ const struct sys_reg_desc *r) \ { \ ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \ } FUNCTION_INVARIANT(midr_el1) FUNCTION_INVARIANT(ctr_el0) FUNCTION_INVARIANT(revidr_el1) FUNCTION_INVARIANT(id_pfr0_el1) FUNCTION_INVARIANT(id_pfr1_el1) FUNCTION_INVARIANT(id_dfr0_el1) FUNCTION_INVARIANT(id_afr0_el1) FUNCTION_INVARIANT(id_mmfr0_el1) FUNCTION_INVARIANT(id_mmfr1_el1) FUNCTION_INVARIANT(id_mmfr2_el1) FUNCTION_INVARIANT(id_mmfr3_el1) FUNCTION_INVARIANT(id_isar0_el1) FUNCTION_INVARIANT(id_isar1_el1) FUNCTION_INVARIANT(id_isar2_el1) FUNCTION_INVARIANT(id_isar3_el1) FUNCTION_INVARIANT(id_isar4_el1) FUNCTION_INVARIANT(id_isar5_el1) FUNCTION_INVARIANT(clidr_el1) FUNCTION_INVARIANT(aidr_el1) /* ->val is filled in by kvm_sys_reg_table_init() */ static struct sys_reg_desc invariant_sys_regs[] = { { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000), NULL, get_midr_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110), NULL, get_revidr_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000), NULL, get_id_pfr0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001), NULL, get_id_pfr1_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010), NULL, get_id_dfr0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011), NULL, get_id_afr0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100), NULL, get_id_mmfr0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101), NULL, get_id_mmfr1_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110), NULL, get_id_mmfr2_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111), NULL, get_id_mmfr3_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), NULL, get_id_isar0_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001), NULL, get_id_isar1_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), NULL, get_id_isar2_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011), NULL, get_id_isar3_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100), NULL, get_id_isar4_el1 }, { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101), NULL, get_id_isar5_el1 }, { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001), NULL, get_clidr_el1 }, { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111), NULL, get_aidr_el1 }, { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001), NULL, get_ctr_el0 }, }; static int reg_from_user(u64 *val, const void __user *uaddr, u64 id) { if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) return -EFAULT; return 0; } static int reg_to_user(void __user *uaddr, const u64 *val, u64 id) { if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) return -EFAULT; return 0; } static int get_invariant_sys_reg(u64 id, void __user *uaddr) { struct 
sys_reg_params params; const struct sys_reg_desc *r; if (!index_to_params(id, &params)) return -ENOENT; r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); if (!r) return -ENOENT; return reg_to_user(uaddr, &r->val, id); } static int set_invariant_sys_reg(u64 id, void __user *uaddr) { struct sys_reg_params params; const struct sys_reg_desc *r; int err; u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ if (!index_to_params(id, &params)) return -ENOENT; r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); if (!r) return -ENOENT; err = reg_from_user(&val, uaddr, id); if (err) return err; /* This is what we mean by invariant: you can't change it. */ if (r->val != val) return -EINVAL; return 0; } static bool is_valid_cache(u32 val) { u32 level, ctype; if (val >= CSSELR_MAX) return false; /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ level = (val >> 1); ctype = (cache_levels >> (level * 3)) & 7; switch (ctype) { case 0: /* No cache */ return false; case 1: /* Instruction cache only */ return (val & 1); case 2: /* Data cache only */ case 4: /* Unified cache */ return !(val & 1); case 3: /* Separate instruction and data caches */ return true; default: /* Reserved: we can't know instruction or data. */ return false; } } static int demux_c15_get(u64 id, void __user *uaddr) { u32 val; u32 __user *uval = uaddr; /* Fail if we have unknown bits set. */ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) return -ENOENT; switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { case KVM_REG_ARM_DEMUX_ID_CCSIDR: if (KVM_REG_SIZE(id) != 4) return -ENOENT; val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) >> KVM_REG_ARM_DEMUX_VAL_SHIFT; if (!is_valid_cache(val)) return -ENOENT; return put_user(get_ccsidr(val), uval); default: return -ENOENT; } } static int demux_c15_set(u64 id, void __user *uaddr) { u32 val, newval; u32 __user *uval = uaddr; /* Fail if we have unknown bits set. */ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) return -ENOENT; switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { case KVM_REG_ARM_DEMUX_ID_CCSIDR: if (KVM_REG_SIZE(id) != 4) return -ENOENT; val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) >> KVM_REG_ARM_DEMUX_VAL_SHIFT; if (!is_valid_cache(val)) return -ENOENT; if (get_user(newval, uval)) return -EFAULT; /* This is also invariant: you can't change it. 
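* Userspace may only write back the exact value it would read (get_ccsidr()); anything else fails with -EINVAL, as checked just below.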
*/ if (newval != get_ccsidr(val)) return -EINVAL; return 0; default: return -ENOENT; } } int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { const struct sys_reg_desc *r; void __user *uaddr = (void __user *)(unsigned long)reg->addr; if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) return demux_c15_get(reg->id, uaddr); if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) return -ENOENT; r = index_to_sys_reg_desc(vcpu, reg->id); if (!r) return get_invariant_sys_reg(reg->id, uaddr); if (r->get_user) return (r->get_user)(vcpu, r, reg, uaddr); return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id); } int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { const struct sys_reg_desc *r; void __user *uaddr = (void __user *)(unsigned long)reg->addr; if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) return demux_c15_set(reg->id, uaddr); if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) return -ENOENT; r = index_to_sys_reg_desc(vcpu, reg->id); if (!r) return set_invariant_sys_reg(reg->id, uaddr); if (r->set_user) return (r->set_user)(vcpu, r, reg, uaddr); return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id); } static unsigned int num_demux_regs(void) { unsigned int i, count = 0; for (i = 0; i < CSSELR_MAX; i++) if (is_valid_cache(i)) count++; return count; } static int write_demux_regids(u64 __user *uindices) { u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; unsigned int i; val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; for (i = 0; i < CSSELR_MAX; i++) { if (!is_valid_cache(i)) continue; if (put_user(val | i, uindices)) return -EFAULT; uindices++; } return 0; } static u64 sys_reg_to_index(const struct sys_reg_desc *reg) { return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG | (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); } static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) { if (!*uind) return true; if (put_user(sys_reg_to_index(reg), *uind)) return false; (*uind)++; return true; } /* Assumed ordered tables, see kvm_sys_reg_table_init. */ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) { const struct sys_reg_desc *i1, *i2, *end1, *end2; unsigned int total = 0; size_t num; /* We check for duplicates here, to allow arch-specific overrides. */ i1 = get_target_table(vcpu->arch.target, true, &num); end1 = i1 + num; i2 = sys_reg_descs; end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); BUG_ON(i1 == end1 || i2 == end2); /* Walk carefully, as both tables may refer to the same register. */ while (i1 || i2) { int cmp = cmp_sys_reg(i1, i2); /* target-specific overrides generic entry. */ if (cmp <= 0) { /* Ignore registers we trap but don't save. */ if (i1->reg) { if (!copy_reg_to_user(i1, &uind)) return -EFAULT; total++; } } else { /* Ignore registers we trap but don't save. */ if (i2->reg) { if (!copy_reg_to_user(i2, &uind)) return -EFAULT; total++; } } if (cmp <= 0 && ++i1 == end1) i1 = NULL; if (cmp >= 0 && ++i2 == end2) i2 = NULL; } return total; } unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) { return ARRAY_SIZE(invariant_sys_regs) + num_demux_regs() + walk_sys_regs(vcpu, (u64 __user *)NULL); } int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) { unsigned int i; int err; /* Then give them all the invariant registers' indices. 
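* The ordering matters: invariant registers first, then the indices produced by walk_sys_regs(), then the demux ids, matching the total returned by kvm_arm_num_sys_reg_descs().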
*/ for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) return -EFAULT; uindices++; } err = walk_sys_regs(vcpu, uindices); if (err < 0) return err; uindices += err; return write_demux_regids(uindices); } static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n) { unsigned int i; for (i = 1; i < n; i++) { if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) { kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1); return 1; } } return 0; } void kvm_sys_reg_table_init(void) { unsigned int i; struct sys_reg_desc clidr; /* Make sure tables are unique and in order. */ BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs))); BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs))); BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs))); BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs))); BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs))); BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs))); /* We abuse the reset function to overwrite the table itself. */ for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]); /* * CLIDR format is awkward, so clean it up. See ARM B4.1.20: * * If software reads the Cache Type fields from Ctype1 * upwards, once it has seen a value of 0b000, no caches * exist at further-out levels of the hierarchy. So, for * example, if Ctype3 is the first Cache Type field with a * value of 0b000, the values of Ctype4 to Ctype7 must be * ignored. */ get_clidr_el1(NULL, &clidr); /* Ugly... */ cache_levels = clidr.val; for (i = 0; i < 7; i++) if (((cache_levels >> (i*3)) & 7) == 0) break; /* Clear all higher bits. */ cache_levels &= (1 << (i*3))-1; } /** * kvm_reset_sys_regs - sets system registers to reset value * @vcpu: The VCPU pointer * * This function finds the right table above and sets the registers on the * virtual CPU struct to their architecturally defined reset values. */ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) { size_t num; const struct sys_reg_desc *table; /* Catch someone adding a register without putting in reset entry. */ memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); /* Generic chip reset first (so target could override). */ reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); table = get_target_table(vcpu->arch.target, true, &num); reset_sys_reg_descs(vcpu, table, num); for (num = 1; num < NR_SYS_REGS; num++) if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242) panic("Didn't reset vcpu_sys_reg(%zi)", num); }
./CrossVul/dataset_final_sorted/CWE-617/c/good_2602_0
crossvul-cpp_data_good_1771_3
/* -*- mode: c; c-file-style: "openbsd" -*- */ /* * Copyright (c) 2008 Vincent Bernat <bernat@luffy.cx> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "lldpd.h" #include "frame.h" #include <unistd.h> #include <errno.h> #include <time.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/ioctl.h> inline static int lldpd_af_to_lldp_proto(int af) { switch (af) { case LLDPD_AF_IPV4: return LLDP_MGMT_ADDR_IP4; case LLDPD_AF_IPV6: return LLDP_MGMT_ADDR_IP6; default: return LLDP_MGMT_ADDR_NONE; } } inline static int lldpd_af_from_lldp_proto(int proto) { switch (proto) { case LLDP_MGMT_ADDR_IP4: return LLDPD_AF_IPV4; case LLDP_MGMT_ADDR_IP6: return LLDPD_AF_IPV6; default: return LLDPD_AF_UNSPEC; } } static int _lldp_send(struct lldpd *global, struct lldpd_hardware *hardware, u_int8_t c_id_subtype, char *c_id, int c_id_len, u_int8_t p_id_subtype, char *p_id, int p_id_len, int shutdown) { struct lldpd_port *port; struct lldpd_chassis *chassis; struct lldpd_frame *frame; int length; u_int8_t *packet, *pos, *tlv; struct lldpd_mgmt *mgmt; int proto; u_int8_t mcastaddr[] = LLDP_MULTICAST_ADDR; #ifdef ENABLE_DOT1 const u_int8_t dot1[] = LLDP_TLV_ORG_DOT1; struct lldpd_vlan *vlan; struct lldpd_ppvid *ppvid; struct lldpd_pi *pi; #endif #ifdef ENABLE_DOT3 const u_int8_t dot3[] = LLDP_TLV_ORG_DOT3; #endif #ifdef ENABLE_LLDPMED int i; const u_int8_t med[] = LLDP_TLV_ORG_MED; #endif #ifdef ENABLE_CUSTOM struct lldpd_custom *custom; #endif port = &hardware->h_lport; chassis = port->p_chassis; length = hardware->h_mtu; if ((packet = (u_int8_t*)calloc(1, length)) == NULL) return ENOMEM; pos = packet; /* Ethernet header */ if (!( /* LLDP multicast address */ POKE_BYTES(mcastaddr, sizeof(mcastaddr)) && /* Source MAC address */ POKE_BYTES(&hardware->h_lladdr, ETHER_ADDR_LEN) && /* LLDP frame */ POKE_UINT16(ETHERTYPE_LLDP))) goto toobig; /* Chassis ID */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_CHASSIS_ID) && POKE_UINT8(c_id_subtype) && POKE_BYTES(c_id, c_id_len) && POKE_END_LLDP_TLV)) goto toobig; /* Port ID */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_PORT_ID) && POKE_UINT8(p_id_subtype) && POKE_BYTES(p_id, p_id_len) && POKE_END_LLDP_TLV)) goto toobig; /* Time to live */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_TTL) && POKE_UINT16(shutdown?0:chassis->c_ttl) && POKE_END_LLDP_TLV)) goto toobig; if (shutdown) goto end; /* System name */ if (chassis->c_name && *chassis->c_name != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_NAME) && POKE_BYTES(chassis->c_name, strlen(chassis->c_name)) && POKE_END_LLDP_TLV)) goto toobig; } /* System description (skip it if empty) */ if (chassis->c_descr && *chassis->c_descr != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_DESCR) && POKE_BYTES(chassis->c_descr, strlen(chassis->c_descr)) && POKE_END_LLDP_TLV)) goto toobig; } /* System capabilities */ if (global->g_config.c_cap_advertise && 
chassis->c_cap_available) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_SYSTEM_CAP) && POKE_UINT16(chassis->c_cap_available) && POKE_UINT16(chassis->c_cap_enabled) && POKE_END_LLDP_TLV)) goto toobig; } /* Management addresses */ TAILQ_FOREACH(mgmt, &chassis->c_mgmt, m_entries) { proto = lldpd_af_to_lldp_proto(mgmt->m_family); if (proto == LLDP_MGMT_ADDR_NONE) continue; if (!( POKE_START_LLDP_TLV(LLDP_TLV_MGMT_ADDR) && /* Size of the address, including its type */ POKE_UINT8(mgmt->m_addrsize + 1) && POKE_UINT8(proto) && POKE_BYTES(&mgmt->m_addr, mgmt->m_addrsize))) goto toobig; /* Interface port type, OID */ if (mgmt->m_iface == 0) { if (!( /* We don't know the management interface */ POKE_UINT8(LLDP_MGMT_IFACE_UNKNOWN) && POKE_UINT32(0))) goto toobig; } else { if (!( /* We have the index of the management interface */ POKE_UINT8(LLDP_MGMT_IFACE_IFINDEX) && POKE_UINT32(mgmt->m_iface))) goto toobig; } if (!( /* We don't provide an OID for management */ POKE_UINT8(0) && POKE_END_LLDP_TLV)) goto toobig; } /* Port description */ if (port->p_descr && *port->p_descr != '\0') { if (!( POKE_START_LLDP_TLV(LLDP_TLV_PORT_DESCR) && POKE_BYTES(port->p_descr, strlen(port->p_descr)) && POKE_END_LLDP_TLV)) goto toobig; } #ifdef ENABLE_DOT1 /* Port VLAN ID */ if(port->p_pvid != 0) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PVID) && POKE_UINT16(port->p_pvid) && POKE_END_LLDP_TLV)) { goto toobig; } } /* Port and Protocol VLAN IDs */ TAILQ_FOREACH(ppvid, &port->p_ppvids, p_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PPVID) && POKE_UINT8(ppvid->p_cap_status) && POKE_UINT16(ppvid->p_ppvid) && POKE_END_LLDP_TLV)) { goto toobig; } } /* VLANs */ TAILQ_FOREACH(vlan, &port->p_vlans, v_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_VLANNAME) && POKE_UINT16(vlan->v_vid) && POKE_UINT8(strlen(vlan->v_name)) && POKE_BYTES(vlan->v_name, strlen(vlan->v_name)) && POKE_END_LLDP_TLV)) goto toobig; } /* Protocol Identities */ TAILQ_FOREACH(pi, &port->p_pids, p_entries) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot1, sizeof(dot1)) && POKE_UINT8(LLDP_TLV_DOT1_PI) && POKE_UINT8(pi->p_pi_len) && POKE_BYTES(pi->p_pi, pi->p_pi_len) && POKE_END_LLDP_TLV)) goto toobig; } #endif #ifdef ENABLE_DOT3 /* Aggregation status */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_LA) && /* Bit 0 = capability ; Bit 1 = status */ POKE_UINT8((port->p_aggregid) ? 
3:1) && POKE_UINT32(port->p_aggregid) && POKE_END_LLDP_TLV)) goto toobig; /* MAC/PHY */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_MAC) && POKE_UINT8(port->p_macphy.autoneg_support | (port->p_macphy.autoneg_enabled << 1)) && POKE_UINT16(port->p_macphy.autoneg_advertised) && POKE_UINT16(port->p_macphy.mau_type) && POKE_END_LLDP_TLV)) goto toobig; /* MFS */ if (port->p_mfs) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_MFS) && POKE_UINT16(port->p_mfs) && POKE_END_LLDP_TLV)) goto toobig; } /* Power */ if (port->p_power.devicetype) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(dot3, sizeof(dot3)) && POKE_UINT8(LLDP_TLV_DOT3_POWER) && POKE_UINT8(( (((2 - port->p_power.devicetype) %(1<< 1))<<0) | (( port->p_power.supported %(1<< 1))<<1) | (( port->p_power.enabled %(1<< 1))<<2) | (( port->p_power.paircontrol %(1<< 1))<<3))) && POKE_UINT8(port->p_power.pairs) && POKE_UINT8(port->p_power.class))) goto toobig; /* 802.3at */ if (port->p_power.powertype != LLDP_DOT3_POWER_8023AT_OFF) { if (!( POKE_UINT8(( (((port->p_power.powertype == LLDP_DOT3_POWER_8023AT_TYPE1)?1:0) << 7) | (((port->p_power.devicetype == LLDP_DOT3_POWER_PSE)?0:1) << 6) | ((port->p_power.source %(1<< 2))<<4) | ((port->p_power.priority %(1<< 2))<<0))) && POKE_UINT16(port->p_power.requested) && POKE_UINT16(port->p_power.allocated))) goto toobig; } if (!(POKE_END_LLDP_TLV)) goto toobig; } #endif #ifdef ENABLE_LLDPMED if (port->p_med_cap_enabled) { /* LLDP-MED cap */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_CAP) && POKE_UINT16(chassis->c_med_cap_available) && POKE_UINT8(chassis->c_med_type) && POKE_END_LLDP_TLV)) goto toobig; /* LLDP-MED inventory */ #define LLDP_INVENTORY(value, subtype) \ if (value) { \ if (!( \ POKE_START_LLDP_TLV(LLDP_TLV_ORG) && \ POKE_BYTES(med, sizeof(med)) && \ POKE_UINT8(subtype) && \ POKE_BYTES(value, \ (strlen(value)>32)?32:strlen(value)) && \ POKE_END_LLDP_TLV)) \ goto toobig; \ } if (port->p_med_cap_enabled & LLDP_MED_CAP_IV) { LLDP_INVENTORY(chassis->c_med_hw, LLDP_TLV_MED_IV_HW); LLDP_INVENTORY(chassis->c_med_fw, LLDP_TLV_MED_IV_FW); LLDP_INVENTORY(chassis->c_med_sw, LLDP_TLV_MED_IV_SW); LLDP_INVENTORY(chassis->c_med_sn, LLDP_TLV_MED_IV_SN); LLDP_INVENTORY(chassis->c_med_manuf, LLDP_TLV_MED_IV_MANUF); LLDP_INVENTORY(chassis->c_med_model, LLDP_TLV_MED_IV_MODEL); LLDP_INVENTORY(chassis->c_med_asset, LLDP_TLV_MED_IV_ASSET); } /* LLDP-MED location */ for (i = 0; i < LLDP_MED_LOCFORMAT_LAST; i++) { if (port->p_med_location[i].format == i + 1) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_LOCATION) && POKE_UINT8(port->p_med_location[i].format) && POKE_BYTES(port->p_med_location[i].data, port->p_med_location[i].data_len) && POKE_END_LLDP_TLV)) goto toobig; } } /* LLDP-MED network policy */ for (i = 0; i < LLDP_MED_APPTYPE_LAST; i++) { if (port->p_med_policy[i].type == i + 1) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_POLICY) && POKE_UINT32(( ((port->p_med_policy[i].type %(1<< 8))<<24) | ((port->p_med_policy[i].unknown %(1<< 1))<<23) | ((port->p_med_policy[i].tagged %(1<< 1))<<22) | /*((0 %(1<< 1))<<21) |*/ ((port->p_med_policy[i].vid %(1<<12))<< 9) | ((port->p_med_policy[i].priority %(1<< 3))<< 6) | ((port->p_med_policy[i].dscp %(1<< 6))<< 0) )) && POKE_END_LLDP_TLV)) goto toobig; } } /* LLDP-MED POE-MDI */ if 
((port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PSE) || (port->p_med_power.devicetype == LLDP_MED_POW_TYPE_PD)) { int devicetype = 0, source = 0; if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(med, sizeof(med)) && POKE_UINT8(LLDP_TLV_MED_MDI))) goto toobig; switch (port->p_med_power.devicetype) { case LLDP_MED_POW_TYPE_PSE: devicetype = 0; switch (port->p_med_power.source) { case LLDP_MED_POW_SOURCE_PRIMARY: source = 1; break; case LLDP_MED_POW_SOURCE_BACKUP: source = 2; break; case LLDP_MED_POW_SOURCE_RESERVED: source = 3; break; default: source = 0; break; } break; case LLDP_MED_POW_TYPE_PD: devicetype = 1; switch (port->p_med_power.source) { case LLDP_MED_POW_SOURCE_PSE: source = 1; break; case LLDP_MED_POW_SOURCE_LOCAL: source = 2; break; case LLDP_MED_POW_SOURCE_BOTH: source = 3; break; default: source = 0; break; } break; } if (!( POKE_UINT8(( ((devicetype %(1<< 2))<<6) | ((source %(1<< 2))<<4) | ((port->p_med_power.priority %(1<< 4))<<0) )) && POKE_UINT16(port->p_med_power.val) && POKE_END_LLDP_TLV)) goto toobig; } } #endif #ifdef ENABLE_CUSTOM TAILQ_FOREACH(custom, &port->p_custom_list, next) { if (!( POKE_START_LLDP_TLV(LLDP_TLV_ORG) && POKE_BYTES(custom->oui, sizeof(custom->oui)) && POKE_UINT8(custom->subtype) && POKE_BYTES(custom->oui_info, custom->oui_info_len) && POKE_END_LLDP_TLV)) goto toobig; } #endif end: /* END */ if (!( POKE_START_LLDP_TLV(LLDP_TLV_END) && POKE_END_LLDP_TLV)) goto toobig; if (interfaces_send_helper(global, hardware, (char *)packet, pos - packet) == -1) { log_warn("lldp", "unable to send packet on real device for %s", hardware->h_ifname); free(packet); return ENETDOWN; } hardware->h_tx_cnt++; /* We assume that LLDP frame is the reference */ if (!shutdown && (frame = (struct lldpd_frame*)malloc( sizeof(int) + pos - packet)) != NULL) { frame->size = pos - packet; memcpy(&frame->frame, packet, frame->size); if ((hardware->h_lport.p_lastframe == NULL) || (hardware->h_lport.p_lastframe->size != frame->size) || (memcmp(hardware->h_lport.p_lastframe->frame, frame->frame, frame->size) != 0)) { free(hardware->h_lport.p_lastframe); hardware->h_lport.p_lastframe = frame; hardware->h_lport.p_lastchange = time(NULL); } else free(frame); } free(packet); return 0; toobig: free(packet); return E2BIG; } /* Send a shutdown LLDPDU. */ int lldp_send_shutdown(struct lldpd *global, struct lldpd_hardware *hardware) { if (hardware->h_lchassis_previous_id == NULL || hardware->h_lport_previous_id == NULL) return 0; return _lldp_send(global, hardware, hardware->h_lchassis_previous_id_subtype, hardware->h_lchassis_previous_id, hardware->h_lchassis_previous_id_len, hardware->h_lport_previous_id_subtype, hardware->h_lport_previous_id, hardware->h_lport_previous_id_len, 1); } int lldp_send(struct lldpd *global, struct lldpd_hardware *hardware) { struct lldpd_port *port = &hardware->h_lport; struct lldpd_chassis *chassis = port->p_chassis; int ret; /* Check if we have a change. 
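 * An LLDPDU's source is identified by its MSAP: the chassis ID
 * (subtype + value) combined with the port ID (subtype + value). If any
 * of those four components differs from what was advertised in the
 * previous PDU, neighbors would see the new PDU as coming from a
 * different agent, so a shutdown LLDPDU (TTL 0) is sent for the old
 * MSAP before the new one is advertised.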
*/ if (hardware->h_lchassis_previous_id != NULL && hardware->h_lport_previous_id != NULL && (hardware->h_lchassis_previous_id_subtype != chassis->c_id_subtype || hardware->h_lchassis_previous_id_len != chassis->c_id_len || hardware->h_lport_previous_id_subtype != port->p_id_subtype || hardware->h_lport_previous_id_len != port->p_id_len || memcmp(hardware->h_lchassis_previous_id, chassis->c_id, chassis->c_id_len) || memcmp(hardware->h_lport_previous_id, port->p_id, port->p_id_len))) { log_info("lldp", "MSAP has changed for port %s, sending a shutdown LLDPDU", hardware->h_ifname); if ((ret = lldp_send_shutdown(global, hardware)) != 0) return ret; } log_debug("lldp", "send LLDP PDU to %s", hardware->h_ifname); if ((ret = _lldp_send(global, hardware, chassis->c_id_subtype, chassis->c_id, chassis->c_id_len, port->p_id_subtype, port->p_id, port->p_id_len, 0)) != 0) return ret; /* Record current chassis and port ID */ free(hardware->h_lchassis_previous_id); hardware->h_lchassis_previous_id_subtype = chassis->c_id_subtype; hardware->h_lchassis_previous_id_len = chassis->c_id_len; if ((hardware->h_lchassis_previous_id = malloc(chassis->c_id_len)) != NULL) memcpy(hardware->h_lchassis_previous_id, chassis->c_id, chassis->c_id_len); free(hardware->h_lport_previous_id); hardware->h_lport_previous_id_subtype = port->p_id_subtype; hardware->h_lport_previous_id_len = port->p_id_len; if ((hardware->h_lport_previous_id = malloc(port->p_id_len)) != NULL) memcpy(hardware->h_lport_previous_id, port->p_id, port->p_id_len); return 0; } #define CHECK_TLV_SIZE(x, name) \ do { if (tlv_size < (x)) { \ log_warnx("lldp", name " TLV too short received on %s", \ hardware->h_ifname); \ goto malformed; \ } } while (0) int lldp_decode(struct lldpd *cfg, char *frame, int s, struct lldpd_hardware *hardware, struct lldpd_chassis **newchassis, struct lldpd_port **newport) { struct lldpd_chassis *chassis; struct lldpd_port *port; const char lldpaddr[] = LLDP_MULTICAST_ADDR; const char dot1[] = LLDP_TLV_ORG_DOT1; const char dot3[] = LLDP_TLV_ORG_DOT3; const char med[] = LLDP_TLV_ORG_MED; const char dcbx[] = LLDP_TLV_ORG_DCBX; unsigned char orgid[3]; int length, gotend = 0, ttl_received = 0; int tlv_size, tlv_type, tlv_subtype; u_int8_t *pos, *tlv; char *b; #ifdef ENABLE_DOT1 struct lldpd_vlan *vlan = NULL; int vlan_len; struct lldpd_ppvid *ppvid; struct lldpd_pi *pi = NULL; #endif struct lldpd_mgmt *mgmt; int af; u_int8_t addr_str_length, addr_str_buffer[32]; u_int8_t addr_family, addr_length, *addr_ptr, iface_subtype; u_int32_t iface_number, iface; #ifdef ENABLE_CUSTOM struct lldpd_custom *custom = NULL; #endif log_debug("lldp", "receive LLDP PDU on %s", hardware->h_ifname); if ((chassis = calloc(1, sizeof(struct lldpd_chassis))) == NULL) { log_warn("lldp", "failed to allocate remote chassis"); return -1; } TAILQ_INIT(&chassis->c_mgmt); if ((port = calloc(1, sizeof(struct lldpd_port))) == NULL) { log_warn("lldp", "failed to allocate remote port"); free(chassis); return -1; } #ifdef ENABLE_DOT1 TAILQ_INIT(&port->p_vlans); TAILQ_INIT(&port->p_ppvids); TAILQ_INIT(&port->p_pids); #endif #ifdef ENABLE_CUSTOM TAILQ_INIT(&port->p_custom_list); #endif length = s; pos = (u_int8_t*)frame; if (length < 2*ETHER_ADDR_LEN + sizeof(u_int16_t)) { log_warnx("lldp", "too short frame received on %s", hardware->h_ifname); goto malformed; } if (PEEK_CMP(lldpaddr, ETHER_ADDR_LEN) != 0) { log_info("lldp", "frame not targeted at LLDP multicast address received on %s", hardware->h_ifname); goto malformed; } PEEK_DISCARD(ETHER_ADDR_LEN); /* Skip source 
address */ if (PEEK_UINT16 != ETHERTYPE_LLDP) { log_info("lldp", "non LLDP frame received on %s", hardware->h_ifname); goto malformed; } while (length && (!gotend)) { if (length < 2) { log_warnx("lldp", "tlv header too short received on %s", hardware->h_ifname); goto malformed; } tlv_size = PEEK_UINT16; tlv_type = tlv_size >> 9; tlv_size = tlv_size & 0x1ff; (void)PEEK_SAVE(tlv); if (length < tlv_size) { log_warnx("lldp", "frame too short for tlv received on %s", hardware->h_ifname); goto malformed; } switch (tlv_type) { case LLDP_TLV_END: if (tlv_size != 0) { log_warnx("lldp", "lldp end received with size not null on %s", hardware->h_ifname); goto malformed; } if (length) log_debug("lldp", "extra data after lldp end on %s", hardware->h_ifname); gotend = 1; break; case LLDP_TLV_CHASSIS_ID: case LLDP_TLV_PORT_ID: CHECK_TLV_SIZE(2, "Port Id"); tlv_subtype = PEEK_UINT8; if ((tlv_subtype == 0) || (tlv_subtype > 7)) { log_warnx("lldp", "unknown subtype for tlv id received on %s", hardware->h_ifname); goto malformed; } if ((b = (char *)calloc(1, tlv_size - 1)) == NULL) { log_warn("lldp", "unable to allocate memory for id tlv " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size - 1); if (tlv_type == LLDP_TLV_PORT_ID) { port->p_id_subtype = tlv_subtype; port->p_id = b; port->p_id_len = tlv_size - 1; } else { chassis->c_id_subtype = tlv_subtype; chassis->c_id = b; chassis->c_id_len = tlv_size - 1; } break; case LLDP_TLV_TTL: CHECK_TLV_SIZE(2, "TTL"); chassis->c_ttl = PEEK_UINT16; ttl_received = 1; break; case LLDP_TLV_PORT_DESCR: case LLDP_TLV_SYSTEM_NAME: case LLDP_TLV_SYSTEM_DESCR: if (tlv_size < 1) { log_debug("lldp", "empty tlv received on %s", hardware->h_ifname); break; } if ((b = (char *)calloc(1, tlv_size + 1)) == NULL) { log_warn("lldp", "unable to allocate memory for string tlv " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size); if (tlv_type == LLDP_TLV_PORT_DESCR) port->p_descr = b; else if (tlv_type == LLDP_TLV_SYSTEM_NAME) chassis->c_name = b; else chassis->c_descr = b; break; case LLDP_TLV_SYSTEM_CAP: CHECK_TLV_SIZE(4, "System capabilities"); chassis->c_cap_available = PEEK_UINT16; chassis->c_cap_enabled = PEEK_UINT16; break; case LLDP_TLV_MGMT_ADDR: CHECK_TLV_SIZE(1, "Management address"); addr_str_length = PEEK_UINT8; if (addr_str_length > sizeof(addr_str_buffer)) { log_warnx("lldp", "too large management address on %s", hardware->h_ifname); goto malformed; } CHECK_TLV_SIZE(1 + addr_str_length, "Management address"); PEEK_BYTES(addr_str_buffer, addr_str_length); addr_length = addr_str_length - 1; addr_family = addr_str_buffer[0]; addr_ptr = &addr_str_buffer[1]; CHECK_TLV_SIZE(1 + addr_str_length + 5, "Management address"); iface_subtype = PEEK_UINT8; iface_number = PEEK_UINT32; af = lldpd_af_from_lldp_proto(addr_family); if (af == LLDPD_AF_UNSPEC) break; if (iface_subtype == LLDP_MGMT_IFACE_IFINDEX) iface = iface_number; else iface = 0; mgmt = lldpd_alloc_mgmt(af, addr_ptr, addr_length, iface); if (mgmt == NULL) { if (errno == ENOMEM) log_warn("lldp", "unable to allocate memory " "for management address"); else log_warn("lldp", "too large management address " "received on %s", hardware->h_ifname); goto malformed; } TAILQ_INSERT_TAIL(&chassis->c_mgmt, mgmt, m_entries); break; case LLDP_TLV_ORG: CHECK_TLV_SIZE(1 + (int)sizeof(orgid), "Organisational"); PEEK_BYTES(orgid, sizeof(orgid)); tlv_subtype = PEEK_UINT8; if (memcmp(dot1, orgid, sizeof(orgid)) == 0) { #ifndef ENABLE_DOT1 hardware->h_rx_unrecognized_cnt++; #else /* Dot1 
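 * IEEE 802.1 organizationally-specific TLVs. The subtypes handled below
 * are VLAN Name, Port VLAN ID, Port and Protocol VLAN ID, and Protocol
 * Identity; each is length-checked with CHECK_TLV_SIZE before its
 * fields are peeked, and unknown subtypes only bump
 * h_rx_unrecognized_cnt.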
*/ switch (tlv_subtype) { case LLDP_TLV_DOT1_VLANNAME: CHECK_TLV_SIZE(7, "VLAN"); if ((vlan = (struct lldpd_vlan *)calloc(1, sizeof(struct lldpd_vlan))) == NULL) { log_warn("lldp", "unable to alloc vlan " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } vlan->v_vid = PEEK_UINT16; vlan_len = PEEK_UINT8; CHECK_TLV_SIZE(7 + vlan_len, "VLAN"); if ((vlan->v_name = (char *)calloc(1, vlan_len + 1)) == NULL) { log_warn("lldp", "unable to alloc vlan name for " "tlv received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(vlan->v_name, vlan_len); TAILQ_INSERT_TAIL(&port->p_vlans, vlan, v_entries); vlan = NULL; break; case LLDP_TLV_DOT1_PVID: CHECK_TLV_SIZE(6, "PVID"); port->p_pvid = PEEK_UINT16; break; case LLDP_TLV_DOT1_PPVID: CHECK_TLV_SIZE(7, "PPVID"); /* validation needed */ /* PPVID has to be unique if more than one PPVID TLVs are received - discard if duplicate */ /* if support bit is not set and enabled bit is set - PPVID TLV is considered error and discarded */ /* if PPVID > 4096 - bad and discard */ if ((ppvid = (struct lldpd_ppvid *)calloc(1, sizeof(struct lldpd_ppvid))) == NULL) { log_warn("lldp", "unable to alloc ppvid " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } ppvid->p_cap_status = PEEK_UINT8; ppvid->p_ppvid = PEEK_UINT16; TAILQ_INSERT_TAIL(&port->p_ppvids, ppvid, p_entries); break; case LLDP_TLV_DOT1_PI: /* validation needed */ /* PI has to be unique if more than one PI TLVs are received - discard if duplicate ?? */ CHECK_TLV_SIZE(5, "PI"); if ((pi = (struct lldpd_pi *)calloc(1, sizeof(struct lldpd_pi))) == NULL) { log_warn("lldp", "unable to alloc PI " "structure for " "tlv received on %s", hardware->h_ifname); goto malformed; } pi->p_pi_len = PEEK_UINT8; CHECK_TLV_SIZE(5 + pi->p_pi_len, "PI"); if ((pi->p_pi = (char *)calloc(1, pi->p_pi_len)) == NULL) { log_warn("lldp", "unable to alloc pid name for " "tlv received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(pi->p_pi, pi->p_pi_len); TAILQ_INSERT_TAIL(&port->p_pids, pi, p_entries); pi = NULL; break; default: /* Unknown Dot1 TLV, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif } else if (memcmp(dot3, orgid, sizeof(orgid)) == 0) { #ifndef ENABLE_DOT3 hardware->h_rx_unrecognized_cnt++; #else /* Dot3 */ switch (tlv_subtype) { case LLDP_TLV_DOT3_MAC: CHECK_TLV_SIZE(9, "MAC/PHY"); port->p_macphy.autoneg_support = PEEK_UINT8; port->p_macphy.autoneg_enabled = (port->p_macphy.autoneg_support & 0x2) >> 1; port->p_macphy.autoneg_support = port->p_macphy.autoneg_support & 0x1; port->p_macphy.autoneg_advertised = PEEK_UINT16; port->p_macphy.mau_type = PEEK_UINT16; break; case LLDP_TLV_DOT3_LA: CHECK_TLV_SIZE(9, "Link aggregation"); PEEK_DISCARD_UINT8; port->p_aggregid = PEEK_UINT32; break; case LLDP_TLV_DOT3_MFS: CHECK_TLV_SIZE(6, "MFS"); port->p_mfs = PEEK_UINT16; break; case LLDP_TLV_DOT3_POWER: CHECK_TLV_SIZE(7, "Power"); port->p_power.devicetype = PEEK_UINT8; port->p_power.supported = (port->p_power.devicetype & 0x2) >> 1; port->p_power.enabled = (port->p_power.devicetype & 0x4) >> 2; port->p_power.paircontrol = (port->p_power.devicetype & 0x8) >> 3; port->p_power.devicetype = (port->p_power.devicetype & 0x1)? LLDP_DOT3_POWER_PSE:LLDP_DOT3_POWER_PD; port->p_power.pairs = PEEK_UINT8; port->p_power.class = PEEK_UINT8; /* 802.3at? 
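 * The basic IEEE 802.3 power TLV carries three value octets, checked
 * above as CHECK_TLV_SIZE(7, ...) (3 OUI octets + 1 subtype octet +
 * 3 power octets). The 802.3at extension appends a type/source/priority
 * octet and 16-bit requested and allocated power values, so an
 * 802.3at-capable TLV is at least 12 octets long; shorter TLVs are
 * treated as pre-802.3at (powertype LLDP_DOT3_POWER_8023AT_OFF).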
*/ if (tlv_size >= 12) { port->p_power.powertype = PEEK_UINT8; port->p_power.source = (port->p_power.powertype & (1<<5 | 1<<4)) >> 4; port->p_power.priority = (port->p_power.powertype & (1<<1 | 1<<0)); port->p_power.powertype = (port->p_power.powertype & (1<<7))? LLDP_DOT3_POWER_8023AT_TYPE1: LLDP_DOT3_POWER_8023AT_TYPE2; port->p_power.requested = PEEK_UINT16; port->p_power.allocated = PEEK_UINT16; } else port->p_power.powertype = LLDP_DOT3_POWER_8023AT_OFF; break; default: /* Unknown Dot3 TLV, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif } else if (memcmp(med, orgid, sizeof(orgid)) == 0) { /* LLDP-MED */ #ifndef ENABLE_LLDPMED hardware->h_rx_unrecognized_cnt++; #else u_int32_t policy; unsigned loctype; unsigned power; switch (tlv_subtype) { case LLDP_TLV_MED_CAP: CHECK_TLV_SIZE(7, "LLDP-MED capabilities"); chassis->c_med_cap_available = PEEK_UINT16; chassis->c_med_type = PEEK_UINT8; port->p_med_cap_enabled |= LLDP_MED_CAP_CAP; break; case LLDP_TLV_MED_POLICY: CHECK_TLV_SIZE(8, "LLDP-MED policy"); policy = PEEK_UINT32; if (((policy >> 24) < 1) || ((policy >> 24) > LLDP_MED_APPTYPE_LAST)) { log_info("lldp", "unknown policy field %d " "received on %s", policy, hardware->h_ifname); break; } port->p_med_policy[(policy >> 24) - 1].type = (policy >> 24); port->p_med_policy[(policy >> 24) - 1].unknown = ((policy & 0x800000) != 0); port->p_med_policy[(policy >> 24) - 1].tagged = ((policy & 0x400000) != 0); port->p_med_policy[(policy >> 24) - 1].vid = (policy & 0x001FFE00) >> 9; port->p_med_policy[(policy >> 24) - 1].priority = (policy & 0x1C0) >> 6; port->p_med_policy[(policy >> 24) - 1].dscp = policy & 0x3F; port->p_med_cap_enabled |= LLDP_MED_CAP_POLICY; break; case LLDP_TLV_MED_LOCATION: CHECK_TLV_SIZE(5, "LLDP-MED Location"); loctype = PEEK_UINT8; if ((loctype < 1) || (loctype > LLDP_MED_LOCFORMAT_LAST)) { log_info("lldp", "unknown location type " "received on %s", hardware->h_ifname); break; } if ((port->p_med_location[loctype - 1].data = (char*)malloc(tlv_size - 5)) == NULL) { log_warn("lldp", "unable to allocate memory " "for LLDP-MED location for " "frame received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(port->p_med_location[loctype - 1].data, tlv_size - 5); port->p_med_location[loctype - 1].data_len = tlv_size - 5; port->p_med_location[loctype - 1].format = loctype; port->p_med_cap_enabled |= LLDP_MED_CAP_LOCATION; break; case LLDP_TLV_MED_MDI: CHECK_TLV_SIZE(7, "LLDP-MED PoE-MDI"); power = PEEK_UINT8; switch (power & 0xC0) { case 0x0: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PSE; port->p_med_cap_enabled |= LLDP_MED_CAP_MDI_PSE; switch (power & 0x30) { case 0x0: port->p_med_power.source = LLDP_MED_POW_SOURCE_UNKNOWN; break; case 0x10: port->p_med_power.source = LLDP_MED_POW_SOURCE_PRIMARY; break; case 0x20: port->p_med_power.source = LLDP_MED_POW_SOURCE_BACKUP; break; default: port->p_med_power.source = LLDP_MED_POW_SOURCE_RESERVED; } break; case 0x40: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_PD; port->p_med_cap_enabled |= LLDP_MED_CAP_MDI_PD; switch (power & 0x30) { case 0x0: port->p_med_power.source = LLDP_MED_POW_SOURCE_UNKNOWN; break; case 0x10: port->p_med_power.source = LLDP_MED_POW_SOURCE_PSE; break; case 0x20: port->p_med_power.source = LLDP_MED_POW_SOURCE_LOCAL; break; default: port->p_med_power.source = LLDP_MED_POW_SOURCE_BOTH; } break; default: port->p_med_power.devicetype = LLDP_MED_POW_TYPE_RESERVED; } if ((power & 0x0F) > LLDP_MED_POW_PRIO_LOW) port->p_med_power.priority = LLDP_MED_POW_PRIO_UNKNOWN; else 
port->p_med_power.priority = power & 0x0F; port->p_med_power.val = PEEK_UINT16; break; case LLDP_TLV_MED_IV_HW: case LLDP_TLV_MED_IV_SW: case LLDP_TLV_MED_IV_FW: case LLDP_TLV_MED_IV_SN: case LLDP_TLV_MED_IV_MANUF: case LLDP_TLV_MED_IV_MODEL: case LLDP_TLV_MED_IV_ASSET: if (tlv_size <= 4) b = NULL; else { if ((b = (char*)malloc(tlv_size - 3)) == NULL) { log_warn("lldp", "unable to allocate " "memory for LLDP-MED " "inventory for frame " "received on %s", hardware->h_ifname); goto malformed; } PEEK_BYTES(b, tlv_size - 4); b[tlv_size - 4] = '\0'; } switch (tlv_subtype) { case LLDP_TLV_MED_IV_HW: chassis->c_med_hw = b; break; case LLDP_TLV_MED_IV_FW: chassis->c_med_fw = b; break; case LLDP_TLV_MED_IV_SW: chassis->c_med_sw = b; break; case LLDP_TLV_MED_IV_SN: chassis->c_med_sn = b; break; case LLDP_TLV_MED_IV_MANUF: chassis->c_med_manuf = b; break; case LLDP_TLV_MED_IV_MODEL: chassis->c_med_model = b; break; case LLDP_TLV_MED_IV_ASSET: chassis->c_med_asset = b; break; } port->p_med_cap_enabled |= LLDP_MED_CAP_IV; break; default: /* Unknown LLDP MED, ignore it */ hardware->h_rx_unrecognized_cnt++; } #endif /* ENABLE_LLDPMED */ } else if (memcmp(dcbx, orgid, sizeof(orgid)) == 0) { log_debug("lldp", "unsupported DCBX tlv received on %s - ignore", hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; } else { log_debug("lldp", "unknown org tlv [%02x:%02x:%02x] received on %s", orgid[0], orgid[1], orgid[2], hardware->h_ifname); hardware->h_rx_unrecognized_cnt++; #ifdef ENABLE_CUSTOM custom = (struct lldpd_custom*)calloc(1, sizeof(struct lldpd_custom)); if (!custom) { log_warn("lldp", "unable to allocate memory for custom TLV"); goto malformed; } custom->oui_info_len = tlv_size > 4 ? tlv_size - 4 : 0; memcpy(custom->oui, orgid, sizeof(custom->oui)); custom->subtype = tlv_subtype; if (custom->oui_info_len > 0) { custom->oui_info = malloc(custom->oui_info_len); if (!custom->oui_info) { log_warn("lldp", "unable to allocate memory for custom TLV data"); goto malformed; } PEEK_BYTES(custom->oui_info, custom->oui_info_len); } TAILQ_INSERT_TAIL(&port->p_custom_list, custom, next); custom = NULL; #endif } break; default: log_warnx("lldp", "unknown tlv (%d) received on %s", tlv_type, hardware->h_ifname); goto malformed; } if (pos > tlv + tlv_size) { log_warnx("lldp", "BUG: already past TLV!"); goto malformed; } PEEK_DISCARD(tlv + tlv_size - pos); } /* Some random check */ if ((chassis->c_id == NULL) || (port->p_id == NULL) || (!ttl_received) || (gotend == 0)) { log_warnx("lldp", "some mandatory tlv are missing for frame received on %s", hardware->h_ifname); goto malformed; } *newchassis = chassis; *newport = port; return 1; malformed: #ifdef ENABLE_CUSTOM free(custom); #endif #ifdef ENABLE_DOT1 free(vlan); free(pi); #endif lldpd_chassis_cleanup(chassis, 1); lldpd_port_cleanup(port, 1); free(port); return -1; }
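
/*
 * Editor's sketch (not part of lldpd): lldp_decode above unpacks the
 * 16-bit TLV header inline ("tlv_type = tlv_size >> 9; tlv_size =
 * tlv_size & 0x1ff"). On the wire, the top 7 bits carry the TLV type
 * and the low 9 bits the value length (0..511 octets). A minimal
 * standalone equivalent, assuming only <stdint.h>:
 */
#include <stdint.h>

struct lldp_tlv_hdr {
	uint8_t  type;   /* 7-bit TLV type, 0..127 */
	uint16_t length; /* 9-bit value length in octets, 0..511 */
};

/* Decode a TLV header from two network-order (big-endian) octets. */
static inline struct lldp_tlv_hdr
lldp_tlv_hdr_decode(const uint8_t buf[2])
{
	uint16_t raw = (uint16_t)((buf[0] << 8) | buf[1]);
	struct lldp_tlv_hdr hdr;

	hdr.type = (uint8_t)(raw >> 9);
	hdr.length = (uint16_t)(raw & 0x1ff);
	return hdr;
}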
./CrossVul/dataset_final_sorted/CWE-617/c/good_1771_3
crossvul-cpp_data_bad_2489_1
/* Copyright (c) 2001 Matej Pfajfar. * Copyright (c) 2001-2004, Roger Dingledine. * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. * Copyright (c) 2007-2013, The Tor Project, Inc. */ /* See LICENSE for licensing information */ /** * \file relay.c * \brief Handle relay cell encryption/decryption, plus packaging and * receiving from circuits, plus queuing on circuits. **/ #define RELAY_PRIVATE #include "or.h" #include "addressmap.h" #include "buffers.h" #include "channel.h" #include "circuitbuild.h" #include "circuitlist.h" #include "circuituse.h" #include "config.h" #include "connection.h" #include "connection_edge.h" #include "connection_or.h" #include "control.h" #include "geoip.h" #include "main.h" #include "mempool.h" #include "networkstatus.h" #include "nodelist.h" #include "onion.h" #include "policies.h" #include "reasons.h" #include "relay.h" #include "rendcommon.h" #include "router.h" #include "routerlist.h" #include "routerparse.h" static edge_connection_t *relay_lookup_conn(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction, crypt_path_t *layer_hint); static int connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ, edge_connection_t *conn, crypt_path_t *layer_hint); static void circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint); static void circuit_resume_edge_reading(circuit_t *circ, crypt_path_t *layer_hint); static int circuit_resume_edge_reading_helper(edge_connection_t *conn, circuit_t *circ, crypt_path_t *layer_hint); static int circuit_consider_stop_edge_reading(circuit_t *circ, crypt_path_t *layer_hint); static int circuit_queue_streams_are_blocked(circuit_t *circ); static void adjust_exit_policy_from_exitpolicy_failure(origin_circuit_t *circ, entry_connection_t *conn, node_t *node, const tor_addr_t *addr); /** Stop reading on edge connections when we have this many cells * waiting on the appropriate queue. */ #define CELL_QUEUE_HIGHWATER_SIZE 256 /** Start reading from edge connections again when we get down to this many * cells. */ #define CELL_QUEUE_LOWWATER_SIZE 64 /** Stats: how many relay cells have originated at this hop, or have * been relayed onward (not recognized at this hop)? */ uint64_t stats_n_relay_cells_relayed = 0; /** Stats: how many relay cells have been delivered to streams at this * hop? */ uint64_t stats_n_relay_cells_delivered = 0; /** Used to tell which stream to read from first on a circuit. */ static tor_weak_rng_t stream_choice_rng = TOR_WEAK_RNG_INIT; /** Update digest from the payload of cell. Assign integrity part to * cell. */ static void relay_set_digest(crypto_digest_t *digest, cell_t *cell) { char integrity[4]; relay_header_t rh; crypto_digest_add_bytes(digest, (char*)cell->payload, CELL_PAYLOAD_SIZE); crypto_digest_get_digest(digest, integrity, 4); // log_fn(LOG_DEBUG,"Putting digest of %u %u %u %u into relay cell.", // integrity[0], integrity[1], integrity[2], integrity[3]); relay_header_unpack(&rh, cell->payload); memcpy(rh.integrity, integrity, 4); relay_header_pack(cell->payload, &rh); } /** Does the digest for this circuit indicate that this cell is for us? * * Update digest from the payload of cell (with the integrity part set * to 0). If the integrity part is valid, return 1, else restore digest * and cell to their original state and return 0. 
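 *
 * Note that the running digest is stateful: the payload of every cell
 * seen so far contributes to it. A failed check must therefore restore
 * both the digest (from the backup copy) and the integrity field that
 * was zeroed for the computation, so that cells which are merely being
 * relayed onward leave the stream digest untouched.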
*/ static int relay_digest_matches(crypto_digest_t *digest, cell_t *cell) { char received_integrity[4], calculated_integrity[4]; relay_header_t rh; crypto_digest_t *backup_digest=NULL; backup_digest = crypto_digest_dup(digest); relay_header_unpack(&rh, cell->payload); memcpy(received_integrity, rh.integrity, 4); memset(rh.integrity, 0, 4); relay_header_pack(cell->payload, &rh); // log_fn(LOG_DEBUG,"Reading digest of %u %u %u %u from relay cell.", // received_integrity[0], received_integrity[1], // received_integrity[2], received_integrity[3]); crypto_digest_add_bytes(digest, (char*) cell->payload, CELL_PAYLOAD_SIZE); crypto_digest_get_digest(digest, calculated_integrity, 4); if (tor_memneq(received_integrity, calculated_integrity, 4)) { // log_fn(LOG_INFO,"Recognized=0 but bad digest. Not recognizing."); // (%d vs %d).", received_integrity, calculated_integrity); /* restore digest to its old form */ crypto_digest_assign(digest, backup_digest); /* restore the relay header */ memcpy(rh.integrity, received_integrity, 4); relay_header_pack(cell->payload, &rh); crypto_digest_free(backup_digest); return 0; } crypto_digest_free(backup_digest); return 1; } /** Apply <b>cipher</b> to CELL_PAYLOAD_SIZE bytes of <b>in</b> * (in place). * * If <b>encrypt_mode</b> is 1 then encrypt, else decrypt. * * Return -1 if the crypto fails, else return 0. */ static int relay_crypt_one_payload(crypto_cipher_t *cipher, uint8_t *in, int encrypt_mode) { int r; (void)encrypt_mode; r = crypto_cipher_crypt_inplace(cipher, (char*) in, CELL_PAYLOAD_SIZE); if (r) { log_warn(LD_BUG,"Error during relay encryption"); return -1; } return 0; } /** Receive a relay cell: * - Crypt it (encrypt if headed toward the origin or if we <b>are</b> the * origin; decrypt if we're headed toward the exit). * - Check if recognized (if exitward). * - If recognized and the digest checks out, then find if there's a stream * that the cell is intended for, and deliver it to the right * connection_edge. * - If not recognized, then we need to relay it: append it to the appropriate * cell_queue on <b>circ</b>. * * Return -<b>reason</b> on failure. */ int circuit_receive_relay_cell(cell_t *cell, circuit_t *circ, cell_direction_t cell_direction) { channel_t *chan = NULL; crypt_path_t *layer_hint=NULL; char recognized=0; int reason; tor_assert(cell); tor_assert(circ); tor_assert(cell_direction == CELL_DIRECTION_OUT || cell_direction == CELL_DIRECTION_IN); if (circ->marked_for_close) return 0; if (relay_crypt(circ, cell, cell_direction, &layer_hint, &recognized) < 0) { log_warn(LD_BUG,"relay crypt failed. Dropping connection."); return -END_CIRC_REASON_INTERNAL; } if (recognized) { edge_connection_t *conn = NULL; if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) { pathbias_check_probe_response(circ, cell); /* We need to drop this cell no matter what to avoid code that expects * a certain purpose (such as the hidserv code). 
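 * The probe response has already been examined by
 * pathbias_check_probe_response() above, so nothing is lost by
 * swallowing the cell here.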
*/ return 0; } conn = relay_lookup_conn(circ, cell, cell_direction, layer_hint); if (cell_direction == CELL_DIRECTION_OUT) { ++stats_n_relay_cells_delivered; log_debug(LD_OR,"Sending away from origin."); if ((reason=connection_edge_process_relay_cell(cell, circ, conn, NULL)) < 0) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "connection_edge_process_relay_cell (away from origin) " "failed."); return reason; } } if (cell_direction == CELL_DIRECTION_IN) { ++stats_n_relay_cells_delivered; log_debug(LD_OR,"Sending to origin."); if ((reason = connection_edge_process_relay_cell(cell, circ, conn, layer_hint)) < 0) { log_warn(LD_OR, "connection_edge_process_relay_cell (at origin) failed."); return reason; } } return 0; } /* not recognized. pass it on. */ if (cell_direction == CELL_DIRECTION_OUT) { cell->circ_id = circ->n_circ_id; /* switch it */ chan = circ->n_chan; } else if (! CIRCUIT_IS_ORIGIN(circ)) { cell->circ_id = TO_OR_CIRCUIT(circ)->p_circ_id; /* switch it */ chan = TO_OR_CIRCUIT(circ)->p_chan; } else { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Dropping unrecognized inbound cell on origin circuit."); /* If we see unrecognized cells on path bias testing circs, * it's bad mojo. Those circuits need to die. * XXX: Shouldn't they always die? */ if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) { TO_ORIGIN_CIRCUIT(circ)->path_state = PATH_STATE_USE_FAILED; return -END_CIRC_REASON_TORPROTOCOL; } else { return 0; } } if (!chan) { // XXXX Can this splice stuff be done more cleanly? if (! CIRCUIT_IS_ORIGIN(circ) && TO_OR_CIRCUIT(circ)->rend_splice && cell_direction == CELL_DIRECTION_OUT) { or_circuit_t *splice = TO_OR_CIRCUIT(circ)->rend_splice; tor_assert(circ->purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED); tor_assert(splice->base_.purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED); cell->circ_id = splice->p_circ_id; cell->command = CELL_RELAY; /* can't be relay_early anyway */ if ((reason = circuit_receive_relay_cell(cell, TO_CIRCUIT(splice), CELL_DIRECTION_IN)) < 0) { log_warn(LD_REND, "Error relaying cell across rendezvous; closing " "circuits"); /* XXXX Do this here, or just return -1? */ circuit_mark_for_close(circ, -reason); return reason; } return 0; } log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Didn't recognize cell, but circ stops here! Closing circ."); return -END_CIRC_REASON_TORPROTOCOL; } log_debug(LD_OR,"Passing on unrecognized cell."); ++stats_n_relay_cells_relayed; /* XXXX no longer quite accurate {cells} * we might kill the circ before we relay * the cells. */ append_cell_to_circuit_queue(circ, chan, cell, cell_direction, 0); return 0; } /** Do the appropriate en/decryptions for <b>cell</b> arriving on * <b>circ</b> in direction <b>cell_direction</b>. * * If cell_direction == CELL_DIRECTION_IN: * - If we're at the origin (we're the OP), for hops 1..N, * decrypt cell. If recognized, stop. * - Else (we're not the OP), encrypt one hop. Cell is not recognized. * * If cell_direction == CELL_DIRECTION_OUT: * - decrypt one hop. Check if recognized. * * If cell is recognized, set *recognized to 1, and set * *layer_hint to the hop that recognized it. * * Return -1 to indicate that we should mark the circuit for close, * else return 0. 
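 *
 * For a three-hop origin circuit receiving an inbound cell, the layered
 * decryption below amounts to:
 *
 *   payload = D1(payload);  check hop 1's digest
 *   payload = D2(payload);  check hop 2's digest
 *   payload = D3(payload);  check hop 3's (the exit's) digest
 *
 * where Di is hop i's backward cipher (b_crypto), and the first hop
 * whose b_digest matches the cell's integrity field is reported via
 * layer_hint.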
*/ int relay_crypt(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction, crypt_path_t **layer_hint, char *recognized) { relay_header_t rh; tor_assert(circ); tor_assert(cell); tor_assert(recognized); tor_assert(cell_direction == CELL_DIRECTION_IN || cell_direction == CELL_DIRECTION_OUT); if (cell_direction == CELL_DIRECTION_IN) { if (CIRCUIT_IS_ORIGIN(circ)) { /* We're at the beginning of the circuit. * We'll want to do layered decrypts. */ crypt_path_t *thishop, *cpath = TO_ORIGIN_CIRCUIT(circ)->cpath; thishop = cpath; if (thishop->state != CPATH_STATE_OPEN) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay cell before first created cell? Closing."); return -1; } do { /* Remember: cpath is in forward order, that is, first hop first. */ tor_assert(thishop); if (relay_crypt_one_payload(thishop->b_crypto, cell->payload, 0) < 0) return -1; relay_header_unpack(&rh, cell->payload); if (rh.recognized == 0) { /* it's possibly recognized. have to check digest to be sure. */ if (relay_digest_matches(thishop->b_digest, cell)) { *recognized = 1; *layer_hint = thishop; return 0; } } thishop = thishop->next; } while (thishop != cpath && thishop->state == CPATH_STATE_OPEN); log_fn(LOG_PROTOCOL_WARN, LD_OR, "Incoming cell at client not recognized. Closing."); return -1; } else { /* we're in the middle. Just one crypt. */ if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->p_crypto, cell->payload, 1) < 0) return -1; // log_fn(LOG_DEBUG,"Skipping recognized check, because we're not " // "the client."); } } else /* cell_direction == CELL_DIRECTION_OUT */ { /* we're in the middle. Just one crypt. */ if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->n_crypto, cell->payload, 0) < 0) return -1; relay_header_unpack(&rh, cell->payload); if (rh.recognized == 0) { /* it's possibly recognized. have to check digest to be sure. */ if (relay_digest_matches(TO_OR_CIRCUIT(circ)->n_digest, cell)) { *recognized = 1; return 0; } } } return 0; } /** Package a relay cell from an edge: * - Encrypt it to the right layer * - Append it to the appropriate cell_queue on <b>circ</b>. */ static int circuit_package_relay_cell(cell_t *cell, circuit_t *circ, cell_direction_t cell_direction, crypt_path_t *layer_hint, streamid_t on_stream, const char *filename, int lineno) { channel_t *chan; /* where to send the cell */ if (cell_direction == CELL_DIRECTION_OUT) { crypt_path_t *thishop; /* counter for repeated crypts */ chan = circ->n_chan; if (!chan) { log_warn(LD_BUG,"outgoing relay cell sent from %s:%d has n_chan==NULL." " Dropping.", filename, lineno); return 0; /* just drop it */ } if (!CIRCUIT_IS_ORIGIN(circ)) { log_warn(LD_BUG,"outgoing relay cell sent from %s:%d on non-origin " "circ. Dropping.", filename, lineno); return 0; /* just drop it */ } relay_set_digest(layer_hint->f_digest, cell); thishop = layer_hint; /* moving from farthest to nearest hop */ do { tor_assert(thishop); /* XXXX RD This is a bug, right? */ log_debug(LD_OR,"crypting a layer of the relay cell."); if (relay_crypt_one_payload(thishop->f_crypto, cell->payload, 1) < 0) { return -1; } thishop = thishop->prev; } while (thishop != TO_ORIGIN_CIRCUIT(circ)->cpath->prev); } else { /* incoming cell */ or_circuit_t *or_circ; if (CIRCUIT_IS_ORIGIN(circ)) { /* We should never package an _incoming_ cell from the circuit * origin; that means we messed up somewhere. */ log_warn(LD_BUG,"incoming relay cell at origin circuit. 
Dropping."); assert_circuit_ok(circ); return 0; /* just drop it */ } or_circ = TO_OR_CIRCUIT(circ); chan = or_circ->p_chan; relay_set_digest(or_circ->p_digest, cell); if (relay_crypt_one_payload(or_circ->p_crypto, cell->payload, 1) < 0) return -1; } ++stats_n_relay_cells_relayed; append_cell_to_circuit_queue(circ, chan, cell, cell_direction, on_stream); return 0; } /** If cell's stream_id matches the stream_id of any conn that's * attached to circ, return that conn, else return NULL. */ static edge_connection_t * relay_lookup_conn(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction, crypt_path_t *layer_hint) { edge_connection_t *tmpconn; relay_header_t rh; relay_header_unpack(&rh, cell->payload); if (!rh.stream_id) return NULL; /* IN or OUT cells could have come from either direction, now * that we allow rendezvous *to* an OP. */ if (CIRCUIT_IS_ORIGIN(circ)) { for (tmpconn = TO_ORIGIN_CIRCUIT(circ)->p_streams; tmpconn; tmpconn=tmpconn->next_stream) { if (rh.stream_id == tmpconn->stream_id && !tmpconn->base_.marked_for_close && tmpconn->cpath_layer == layer_hint) { log_debug(LD_APP,"found conn for stream %d.", rh.stream_id); return tmpconn; } } } else { for (tmpconn = TO_OR_CIRCUIT(circ)->n_streams; tmpconn; tmpconn=tmpconn->next_stream) { if (rh.stream_id == tmpconn->stream_id && !tmpconn->base_.marked_for_close) { log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id); if (cell_direction == CELL_DIRECTION_OUT || connection_edge_is_rendezvous_stream(tmpconn)) return tmpconn; } } for (tmpconn = TO_OR_CIRCUIT(circ)->resolving_streams; tmpconn; tmpconn=tmpconn->next_stream) { if (rh.stream_id == tmpconn->stream_id && !tmpconn->base_.marked_for_close) { log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id); return tmpconn; } } } return NULL; /* probably a begin relay cell */ } /** Pack the relay_header_t host-order structure <b>src</b> into * network-order in the buffer <b>dest</b>. See tor-spec.txt for details * about the wire format. */ void relay_header_pack(uint8_t *dest, const relay_header_t *src) { set_uint8(dest, src->command); set_uint16(dest+1, htons(src->recognized)); set_uint16(dest+3, htons(src->stream_id)); memcpy(dest+5, src->integrity, 4); set_uint16(dest+9, htons(src->length)); } /** Unpack the network-order buffer <b>src</b> into a host-order * relay_header_t structure <b>dest</b>. */ void relay_header_unpack(relay_header_t *dest, const uint8_t *src) { dest->command = get_uint8(src); dest->recognized = ntohs(get_uint16(src+1)); dest->stream_id = ntohs(get_uint16(src+3)); memcpy(dest->integrity, src+5, 4); dest->length = ntohs(get_uint16(src+9)); } /** Convert the relay <b>command</b> into a human-readable string. 
*/ static const char * relay_command_to_string(uint8_t command) { switch (command) { case RELAY_COMMAND_BEGIN: return "BEGIN"; case RELAY_COMMAND_DATA: return "DATA"; case RELAY_COMMAND_END: return "END"; case RELAY_COMMAND_CONNECTED: return "CONNECTED"; case RELAY_COMMAND_SENDME: return "SENDME"; case RELAY_COMMAND_EXTEND: return "EXTEND"; case RELAY_COMMAND_EXTENDED: return "EXTENDED"; case RELAY_COMMAND_TRUNCATE: return "TRUNCATE"; case RELAY_COMMAND_TRUNCATED: return "TRUNCATED"; case RELAY_COMMAND_DROP: return "DROP"; case RELAY_COMMAND_RESOLVE: return "RESOLVE"; case RELAY_COMMAND_RESOLVED: return "RESOLVED"; case RELAY_COMMAND_BEGIN_DIR: return "BEGIN_DIR"; case RELAY_COMMAND_ESTABLISH_INTRO: return "ESTABLISH_INTRO"; case RELAY_COMMAND_ESTABLISH_RENDEZVOUS: return "ESTABLISH_RENDEZVOUS"; case RELAY_COMMAND_INTRODUCE1: return "INTRODUCE1"; case RELAY_COMMAND_INTRODUCE2: return "INTRODUCE2"; case RELAY_COMMAND_RENDEZVOUS1: return "RENDEZVOUS1"; case RELAY_COMMAND_RENDEZVOUS2: return "RENDEZVOUS2"; case RELAY_COMMAND_INTRO_ESTABLISHED: return "INTRO_ESTABLISHED"; case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED: return "RENDEZVOUS_ESTABLISHED"; case RELAY_COMMAND_INTRODUCE_ACK: return "INTRODUCE_ACK"; default: return "(unrecognized)"; } } /** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and send * it onto the open circuit <b>circ</b>. <b>stream_id</b> is the ID on * <b>circ</b> for the stream that's sending the relay cell, or 0 if it's a * control cell. <b>cpath_layer</b> is NULL for OR->OP cells, or the * destination hop for OP->OR cells. * * If you can't send the cell, mark the circuit for close and return -1. Else * return 0. */ int relay_send_command_from_edge_(streamid_t stream_id, circuit_t *circ, uint8_t relay_command, const char *payload, size_t payload_len, crypt_path_t *cpath_layer, const char *filename, int lineno) { cell_t cell; relay_header_t rh; cell_direction_t cell_direction; /* XXXX NM Split this function into a separate versions per circuit type? */ tor_assert(circ); tor_assert(payload_len <= RELAY_PAYLOAD_SIZE); memset(&cell, 0, sizeof(cell_t)); cell.command = CELL_RELAY; if (cpath_layer) { cell.circ_id = circ->n_circ_id; cell_direction = CELL_DIRECTION_OUT; } else if (! CIRCUIT_IS_ORIGIN(circ)) { cell.circ_id = TO_OR_CIRCUIT(circ)->p_circ_id; cell_direction = CELL_DIRECTION_IN; } else { return -1; } memset(&rh, 0, sizeof(rh)); rh.command = relay_command; rh.stream_id = stream_id; rh.length = payload_len; relay_header_pack(cell.payload, &rh); if (payload_len) memcpy(cell.payload+RELAY_HEADER_SIZE, payload, payload_len); log_debug(LD_OR,"delivering %d cell %s.", relay_command, cell_direction == CELL_DIRECTION_OUT ? "forward" : "backward"); /* If we are sending an END cell and this circuit is used for a tunneled * directory request, advance its state. 
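 * Tunneled directory streams carry a non-zero dirreq_id (assigned when
 * the BEGIN_DIR cell was handled); flagging DIRREQ_END_CELL_SENT lets
 * the geoip dirreq statistics record that the END cell for the request
 * went out.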
*/ if (relay_command == RELAY_COMMAND_END && circ->dirreq_id) geoip_change_dirreq_state(circ->dirreq_id, DIRREQ_TUNNELED, DIRREQ_END_CELL_SENT); if (cell_direction == CELL_DIRECTION_OUT && circ->n_chan) { /* if we're using relaybandwidthrate, this conn wants priority */ channel_timestamp_client(circ->n_chan); } if (cell_direction == CELL_DIRECTION_OUT) { origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ); if (origin_circ->remaining_relay_early_cells > 0 && (relay_command == RELAY_COMMAND_EXTEND || relay_command == RELAY_COMMAND_EXTEND2 || cpath_layer != origin_circ->cpath)) { /* If we've got any relay_early cells left and (we're sending * an extend cell or we're not talking to the first hop), use * one of them. Don't worry about the conn protocol version: * append_cell_to_circuit_queue will fix it up. */ cell.command = CELL_RELAY_EARLY; --origin_circ->remaining_relay_early_cells; log_debug(LD_OR, "Sending a RELAY_EARLY cell; %d remaining.", (int)origin_circ->remaining_relay_early_cells); /* Memorize the command that is sent as RELAY_EARLY cell; helps debug * task 878. */ origin_circ->relay_early_commands[ origin_circ->relay_early_cells_sent++] = relay_command; } else if (relay_command == RELAY_COMMAND_EXTEND || relay_command == RELAY_COMMAND_EXTEND2) { /* If no RELAY_EARLY cells can be sent over this circuit, log which * commands have been sent as RELAY_EARLY cells before; helps debug * task 878. */ smartlist_t *commands_list = smartlist_new(); int i = 0; char *commands = NULL; for (; i < origin_circ->relay_early_cells_sent; i++) smartlist_add(commands_list, (char *) relay_command_to_string(origin_circ->relay_early_commands[i])); commands = smartlist_join_strings(commands_list, ",", 0, NULL); log_warn(LD_BUG, "Uh-oh. We're sending a RELAY_COMMAND_EXTEND cell, " "but we have run out of RELAY_EARLY cells on that circuit. " "Commands sent before: %s", commands); tor_free(commands); smartlist_free(commands_list); } } if (circuit_package_relay_cell(&cell, circ, cell_direction, cpath_layer, stream_id, filename, lineno) < 0) { log_warn(LD_BUG,"circuit_package_relay_cell failed. Closing."); circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL); return -1; } return 0; } /** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and * send it onto the open circuit <b>circ</b>. <b>fromconn</b> is the stream * that's sending the relay cell, or NULL if it's a control cell. * <b>cpath_layer</b> is NULL for OR->OP cells, or the destination hop * for OP->OR cells. * * If you can't send the cell, mark the circuit for close and * return -1. Else return 0. */ int connection_edge_send_command(edge_connection_t *fromconn, uint8_t relay_command, const char *payload, size_t payload_len) { /* XXXX NM Split this function into a separate versions per circuit type? */ circuit_t *circ; crypt_path_t *cpath_layer = fromconn->cpath_layer; tor_assert(fromconn); circ = fromconn->on_circuit; if (fromconn->base_.marked_for_close) { log_warn(LD_BUG, "called on conn that's already marked for close at %s:%d.", fromconn->base_.marked_for_close_file, fromconn->base_.marked_for_close); return 0; } if (!circ) { if (fromconn->base_.type == CONN_TYPE_AP) { log_info(LD_APP,"no circ. Closing conn."); connection_mark_unattached_ap(EDGE_TO_ENTRY_CONN(fromconn), END_STREAM_REASON_INTERNAL); } else { log_info(LD_EXIT,"no circ. 
Closing conn."); fromconn->edge_has_sent_end = 1; /* no circ to send to */ fromconn->end_reason = END_STREAM_REASON_INTERNAL; connection_mark_for_close(TO_CONN(fromconn)); } return -1; } return relay_send_command_from_edge(fromconn->stream_id, circ, relay_command, payload, payload_len, cpath_layer); } /** How many times will I retry a stream that fails due to DNS * resolve failure or misc error? */ #define MAX_RESOLVE_FAILURES 3 /** Return 1 if reason is something that you should retry if you * get the end cell before you've connected; else return 0. */ static int edge_reason_is_retriable(int reason) { return reason == END_STREAM_REASON_HIBERNATING || reason == END_STREAM_REASON_RESOURCELIMIT || reason == END_STREAM_REASON_EXITPOLICY || reason == END_STREAM_REASON_RESOLVEFAILED || reason == END_STREAM_REASON_MISC || reason == END_STREAM_REASON_NOROUTE; } /** Called when we receive an END cell on a stream that isn't open yet, * from the client side. * Arguments are as for connection_edge_process_relay_cell(). */ static int connection_ap_process_end_not_open( relay_header_t *rh, cell_t *cell, origin_circuit_t *circ, entry_connection_t *conn, crypt_path_t *layer_hint) { node_t *exitrouter; int reason = *(cell->payload+RELAY_HEADER_SIZE); int control_reason; edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn); (void) layer_hint; /* unused */ if (rh->length > 0) { if (reason == END_STREAM_REASON_TORPROTOCOL || reason == END_STREAM_REASON_DESTROY) { /* Both of these reasons could mean a failed tag * hit the exit and it complained. Do not probe. * Fail the circuit. */ circ->path_state = PATH_STATE_USE_FAILED; return -END_CIRC_REASON_TORPROTOCOL; } else if (reason == END_STREAM_REASON_INTERNAL) { /* We can't infer success or failure, since older Tors report * ENETUNREACH as END_STREAM_REASON_INTERNAL. */ } else { /* Path bias: If we get a valid reason code from the exit, * it wasn't due to tagging. * * We rely on recognized+digest being strong enough to make * tags unlikely to allow us to get tagged, yet 'recognized' * reason codes here. */ pathbias_mark_use_success(circ); } } if (rh->length == 0) { reason = END_STREAM_REASON_MISC; } control_reason = reason | END_STREAM_REASON_FLAG_REMOTE; if (edge_reason_is_retriable(reason) && /* avoid retry if rend */ !connection_edge_is_rendezvous_stream(edge_conn)) { const char *chosen_exit_digest = circ->build_state->chosen_exit->identity_digest; log_info(LD_APP,"Address '%s' refused due to '%s'. Considering retrying.", safe_str(conn->socks_request->address), stream_end_reason_to_string(reason)); exitrouter = node_get_mutable_by_id(chosen_exit_digest); switch (reason) { case END_STREAM_REASON_EXITPOLICY: { tor_addr_t addr; tor_addr_make_unspec(&addr); if (rh->length >= 5) { int ttl = -1; tor_addr_make_unspec(&addr); if (rh->length == 5 || rh->length == 9) { tor_addr_from_ipv4n(&addr, get_uint32(cell->payload+RELAY_HEADER_SIZE+1)); if (rh->length == 9) ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+5)); } else if (rh->length == 17 || rh->length == 21) { tor_addr_from_ipv6_bytes(&addr, (char*)(cell->payload+RELAY_HEADER_SIZE+1)); if (rh->length == 21) ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+17)); } if (tor_addr_is_null(&addr)) { log_info(LD_APP,"Address '%s' resolved to 0.0.0.0. 
Closing,", safe_str(conn->socks_request->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return 0; } if ((tor_addr_family(&addr) == AF_INET && !conn->ipv4_traffic_ok) || (tor_addr_family(&addr) == AF_INET6 && !conn->ipv6_traffic_ok)) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got an EXITPOLICY failure on a connection with a " "mismatched family. Closing."); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return 0; } if (get_options()->ClientDNSRejectInternalAddresses && tor_addr_is_internal(&addr, 0)) { log_info(LD_APP,"Address '%s' resolved to internal. Closing,", safe_str(conn->socks_request->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return 0; } client_dns_set_addressmap(conn, conn->socks_request->address, &addr, conn->chosen_exit_name, ttl); { char new_addr[TOR_ADDR_BUF_LEN]; tor_addr_to_str(new_addr, &addr, sizeof(new_addr), 1); if (strcmp(conn->socks_request->address, new_addr)) { strlcpy(conn->socks_request->address, new_addr, sizeof(conn->socks_request->address)); control_event_stream_status(conn, STREAM_EVENT_REMAP, 0); } } } /* check if he *ought* to have allowed it */ adjust_exit_policy_from_exitpolicy_failure(circ, conn, exitrouter, &addr); if (conn->chosen_exit_optional || conn->chosen_exit_retries) { /* stop wanting a specific exit */ conn->chosen_exit_optional = 0; /* A non-zero chosen_exit_retries can happen if we set a * TrackHostExits for this address under a port that the exit * relay allows, but then try the same address with a different * port that it doesn't allow to exit. We shouldn't unregister * the mapping, since it is probably still wanted on the * original port. But now we give away to the exit relay that * we probably have a TrackHostExits on it. So be it. */ conn->chosen_exit_retries = 0; tor_free(conn->chosen_exit_name); /* clears it */ } if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0) return 0; /* else, conn will get closed below */ break; } case END_STREAM_REASON_CONNECTREFUSED: if (!conn->chosen_exit_optional) break; /* break means it'll close, below */ /* Else fall through: expire this circuit, clear the * chosen_exit_name field, and try again. */ case END_STREAM_REASON_RESOLVEFAILED: case END_STREAM_REASON_TIMEOUT: case END_STREAM_REASON_MISC: case END_STREAM_REASON_NOROUTE: if (client_dns_incr_failures(conn->socks_request->address) < MAX_RESOLVE_FAILURES) { /* We haven't retried too many times; reattach the connection. */ circuit_log_path(LOG_INFO,LD_APP,circ); /* Mark this circuit "unusable for new streams". */ mark_circuit_unusable_for_new_conns(circ); if (conn->chosen_exit_optional) { /* stop wanting a specific exit */ conn->chosen_exit_optional = 0; tor_free(conn->chosen_exit_name); /* clears it */ } if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0) return 0; /* else, conn will get closed below */ } else { log_notice(LD_APP, "Have tried resolving or connecting to address '%s' " "at %d different places. 
Giving up.", safe_str(conn->socks_request->address), MAX_RESOLVE_FAILURES); /* clear the failures, so it will have a full try next time */ client_dns_clear_failures(conn->socks_request->address); } break; case END_STREAM_REASON_HIBERNATING: case END_STREAM_REASON_RESOURCELIMIT: if (exitrouter) { policies_set_node_exitpolicy_to_reject_all(exitrouter); } if (conn->chosen_exit_optional) { /* stop wanting a specific exit */ conn->chosen_exit_optional = 0; tor_free(conn->chosen_exit_name); /* clears it */ } if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0) return 0; /* else, will close below */ break; } /* end switch */ log_info(LD_APP,"Giving up on retrying; conn can't be handled."); } log_info(LD_APP, "Edge got end (%s) before we're connected. Marking for close.", stream_end_reason_to_string(rh->length > 0 ? reason : -1)); circuit_log_path(LOG_INFO,LD_APP,circ); /* need to test because of detach_retriable */ if (!ENTRY_TO_CONN(conn)->marked_for_close) connection_mark_unattached_ap(conn, control_reason); return 0; } /** Called when we have gotten an END_REASON_EXITPOLICY failure on <b>circ</b> * for <b>conn</b>, while attempting to connect via <b>node</b>. If the node * told us which address it rejected, then <b>addr</b> is that address; * otherwise it is AF_UNSPEC. * * If we are sure the node should have allowed this address, mark the node as * having a reject *:* exit policy. Otherwise, mark the circuit as unusable * for this particular address. **/ static void adjust_exit_policy_from_exitpolicy_failure(origin_circuit_t *circ, entry_connection_t *conn, node_t *node, const tor_addr_t *addr) { int make_reject_all = 0; const sa_family_t family = tor_addr_family(addr); if (node) { tor_addr_t tmp; int asked_for_family = tor_addr_parse(&tmp, conn->socks_request->address); if (family == AF_UNSPEC) { make_reject_all = 1; } else if (node_exit_policy_is_exact(node, family) && asked_for_family != -1 && !conn->chosen_exit_name) { make_reject_all = 1; } if (make_reject_all) { log_info(LD_APP, "Exitrouter %s seems to be more restrictive than its exit " "policy. Not using this router as exit for now.", node_describe(node)); policies_set_node_exitpolicy_to_reject_all(node); } } if (family != AF_UNSPEC) addr_policy_append_reject_addr(&circ->prepend_policy, addr); } /** Helper: change the socks_request-&gt;address field on conn to the * dotted-quad representation of <b>new_addr</b>, * and send an appropriate REMAP event. */ static void remap_event_helper(entry_connection_t *conn, const tor_addr_t *new_addr) { tor_addr_to_str(conn->socks_request->address, new_addr, sizeof(conn->socks_request->address), 1); control_event_stream_status(conn, STREAM_EVENT_REMAP, REMAP_STREAM_SOURCE_EXIT); } /** Extract the contents of a connected cell in <b>cell</b>, whose relay * header has already been parsed into <b>rh</b>. On success, set * <b>addr_out</b> to the address we're connected to, and <b>ttl_out</b> to * the ttl of that address, in seconds, and return 0. On failure, return * -1. */ int connected_cell_parse(const relay_header_t *rh, const cell_t *cell, tor_addr_t *addr_out, int *ttl_out) { uint32_t bytes; const uint8_t *payload = cell->payload + RELAY_HEADER_SIZE; tor_addr_make_unspec(addr_out); *ttl_out = -1; if (rh->length == 0) return 0; if (rh->length < 4) return -1; bytes = ntohl(get_uint32(payload)); /* If bytes is 0, this is maybe a v6 address. 
Otherwise it's a v4 address */ if (bytes != 0) { /* v4 address */ tor_addr_from_ipv4h(addr_out, bytes); if (rh->length >= 8) { bytes = ntohl(get_uint32(payload + 4)); if (bytes <= INT32_MAX) *ttl_out = bytes; } } else { if (rh->length < 25) /* 4 bytes of 0s, 1 addr, 16 ipv4, 4 ttl. */ return -1; if (get_uint8(payload + 4) != 6) return -1; tor_addr_from_ipv6_bytes(addr_out, (char*)(payload + 5)); bytes = ntohl(get_uint32(payload + 21)); if (bytes <= INT32_MAX) *ttl_out = (int) bytes; } return 0; } /** An incoming relay cell has arrived from circuit <b>circ</b> to * stream <b>conn</b>. * * The arguments here are the same as in * connection_edge_process_relay_cell() below; this function is called * from there when <b>conn</b> is defined and not in an open state. */ static int connection_edge_process_relay_cell_not_open( relay_header_t *rh, cell_t *cell, circuit_t *circ, edge_connection_t *conn, crypt_path_t *layer_hint) { if (rh->command == RELAY_COMMAND_END) { if (CIRCUIT_IS_ORIGIN(circ) && conn->base_.type == CONN_TYPE_AP) { return connection_ap_process_end_not_open(rh, cell, TO_ORIGIN_CIRCUIT(circ), EDGE_TO_ENTRY_CONN(conn), layer_hint); } else { /* we just got an 'end', don't need to send one */ conn->edge_has_sent_end = 1; conn->end_reason = *(cell->payload+RELAY_HEADER_SIZE) | END_STREAM_REASON_FLAG_REMOTE; connection_mark_for_close(TO_CONN(conn)); return 0; } } if (conn->base_.type == CONN_TYPE_AP && rh->command == RELAY_COMMAND_CONNECTED) { tor_addr_t addr; int ttl; entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn); tor_assert(CIRCUIT_IS_ORIGIN(circ)); if (conn->base_.state != AP_CONN_STATE_CONNECT_WAIT) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got 'connected' while not in state connect_wait. Dropping."); return 0; } conn->base_.state = AP_CONN_STATE_OPEN; log_info(LD_APP,"'connected' received after %d seconds.", (int)(time(NULL) - conn->base_.timestamp_lastread)); if (connected_cell_parse(rh, cell, &addr, &ttl) < 0) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a badly formatted connected cell. Closing."); connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); } if (tor_addr_family(&addr) != AF_UNSPEC) { const sa_family_t family = tor_addr_family(&addr); if (tor_addr_is_null(&addr) || (get_options()->ClientDNSRejectInternalAddresses && tor_addr_is_internal(&addr, 0))) { log_info(LD_APP, "...but it claims the IP address was %s. Closing.", fmt_addr(&addr)); connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); return 0; } if ((family == AF_INET && ! entry_conn->ipv4_traffic_ok) || (family == AF_INET6 && ! entry_conn->ipv6_traffic_ok)) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a connected cell to %s with unsupported address family." " Closing.", fmt_addr(&addr)); connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); return 0; } client_dns_set_addressmap(entry_conn, entry_conn->socks_request->address, &addr, entry_conn->chosen_exit_name, ttl); remap_event_helper(entry_conn, &addr); } circuit_log_path(LOG_INFO,LD_APP,TO_ORIGIN_CIRCUIT(circ)); /* don't send a socks reply to transparent conns */ tor_assert(entry_conn->socks_request != NULL); if (!entry_conn->socks_request->has_finished) connection_ap_handshake_socks_reply(entry_conn, NULL, 0, 0); /* Was it a linked dir conn? 
If so, a dir request just started to * fetch something; this could be a bootstrap status milestone. */ log_debug(LD_APP, "considering"); if (TO_CONN(conn)->linked_conn && TO_CONN(conn)->linked_conn->type == CONN_TYPE_DIR) { connection_t *dirconn = TO_CONN(conn)->linked_conn; log_debug(LD_APP, "it is! %d", dirconn->purpose); switch (dirconn->purpose) { case DIR_PURPOSE_FETCH_CERTIFICATE: if (consensus_is_waiting_for_certs()) control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_KEYS, 0); break; case DIR_PURPOSE_FETCH_CONSENSUS: control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_STATUS, 0); break; case DIR_PURPOSE_FETCH_SERVERDESC: case DIR_PURPOSE_FETCH_MICRODESC: control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS, count_loading_descriptors_progress()); break; } } /* This is definitely a success, so forget about any pending data we * had sent. */ if (entry_conn->pending_optimistic_data) { generic_buffer_free(entry_conn->pending_optimistic_data); entry_conn->pending_optimistic_data = NULL; } /* handle anything that might have queued */ if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) { /* (We already sent an end cell if possible) */ connection_mark_for_close(TO_CONN(conn)); return 0; } return 0; } if (conn->base_.type == CONN_TYPE_AP && rh->command == RELAY_COMMAND_RESOLVED) { int ttl; int answer_len; uint8_t answer_type; entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn); if (conn->base_.state != AP_CONN_STATE_RESOLVE_WAIT) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a 'resolved' cell while " "not in state resolve_wait. Dropping."); return 0; } tor_assert(SOCKS_COMMAND_IS_RESOLVE(entry_conn->socks_request->command)); answer_len = cell->payload[RELAY_HEADER_SIZE+1]; if (rh->length < 2 || answer_len+2>rh->length) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Dropping malformed 'resolved' cell"); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); return 0; } answer_type = cell->payload[RELAY_HEADER_SIZE]; if (rh->length >= answer_len+6) ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+ 2+answer_len)); else ttl = -1; if (answer_type == RESOLVED_TYPE_IPV4 || answer_type == RESOLVED_TYPE_IPV6) { tor_addr_t addr; if (decode_address_from_payload(&addr, cell->payload+RELAY_HEADER_SIZE, rh->length) && tor_addr_is_internal(&addr, 0) && get_options()->ClientDNSRejectInternalAddresses) { log_info(LD_APP,"Got a resolve with answer %s. Rejecting.", fmt_addr(&addr)); connection_ap_handshake_socks_resolved(entry_conn, RESOLVED_TYPE_ERROR_TRANSIENT, 0, NULL, 0, TIME_MAX); connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL); return 0; } } connection_ap_handshake_socks_resolved(entry_conn, answer_type, cell->payload[RELAY_HEADER_SIZE+1], /*answer_len*/ cell->payload+RELAY_HEADER_SIZE+2, /*answer*/ ttl, -1); if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) { tor_addr_t addr; tor_addr_from_ipv4n(&addr, get_uint32(cell->payload+RELAY_HEADER_SIZE+2)); remap_event_helper(entry_conn, &addr); } else if (answer_type == RESOLVED_TYPE_IPV6 && answer_len == 16) { tor_addr_t addr; tor_addr_from_ipv6_bytes(&addr, (char*)(cell->payload+RELAY_HEADER_SIZE+2)); remap_event_helper(entry_conn, &addr); } connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_DONE | END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED); return 0; } log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Got an unexpected relay command %d, in state %d (%s). 
Dropping.", rh->command, conn->base_.state, conn_state_to_string(conn->base_.type, conn->base_.state)); return 0; /* for forward compatibility, don't kill the circuit */ // connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); // connection_mark_for_close(conn); // return -1; } /** An incoming relay cell has arrived on circuit <b>circ</b>. If * <b>conn</b> is NULL this is a control cell, else <b>cell</b> is * destined for <b>conn</b>. * * If <b>layer_hint</b> is defined, then we're the origin of the * circuit, and it specifies the hop that packaged <b>cell</b>. * * Return -reason if you want to warn and tear down the circuit, else 0. */ static int connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ, edge_connection_t *conn, crypt_path_t *layer_hint) { static int num_seen=0; relay_header_t rh; unsigned domain = layer_hint?LD_APP:LD_EXIT; int reason; int optimistic_data = 0; /* Set to 1 if we receive data on a stream * that's in the EXIT_CONN_STATE_RESOLVING * or EXIT_CONN_STATE_CONNECTING states. */ tor_assert(cell); tor_assert(circ); relay_header_unpack(&rh, cell->payload); // log_fn(LOG_DEBUG,"command %d stream %d", rh.command, rh.stream_id); num_seen++; log_debug(domain, "Now seen %d relay cells here (command %d, stream %d).", num_seen, rh.command, rh.stream_id); if (rh.length > RELAY_PAYLOAD_SIZE) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay cell length field too long. Closing circuit."); return - END_CIRC_REASON_TORPROTOCOL; } if (rh.stream_id == 0) { switch (rh.command) { case RELAY_COMMAND_BEGIN: case RELAY_COMMAND_CONNECTED: case RELAY_COMMAND_DATA: case RELAY_COMMAND_END: case RELAY_COMMAND_RESOLVE: case RELAY_COMMAND_RESOLVED: case RELAY_COMMAND_BEGIN_DIR: log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay command %d with zero " "stream_id. Dropping.", (int)rh.command); return 0; default: ; } } /* either conn is NULL, in which case we've got a control cell, or else * conn points to the recognized stream. */ if (conn && !connection_state_is_open(TO_CONN(conn))) { if (conn->base_.type == CONN_TYPE_EXIT && (conn->base_.state == EXIT_CONN_STATE_CONNECTING || conn->base_.state == EXIT_CONN_STATE_RESOLVING) && rh.command == RELAY_COMMAND_DATA) { /* Allow DATA cells to be delivered to an exit node in state * EXIT_CONN_STATE_CONNECTING or EXIT_CONN_STATE_RESOLVING. * This speeds up HTTP, for example. */ optimistic_data = 1; } else { return connection_edge_process_relay_cell_not_open( &rh, cell, circ, conn, layer_hint); } } switch (rh.command) { case RELAY_COMMAND_DROP: // log_info(domain,"Got a relay-level padding cell. Dropping."); return 0; case RELAY_COMMAND_BEGIN: case RELAY_COMMAND_BEGIN_DIR: if (layer_hint && circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Relay begin request unsupported at AP. Dropping."); return 0; } if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED && layer_hint != TO_ORIGIN_CIRCUIT(circ)->cpath->prev) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Relay begin request to Hidden Service " "from intermediary node. Dropping."); return 0; } if (conn) { log_fn(LOG_PROTOCOL_WARN, domain, "Begin cell for known stream. Dropping."); return 0; } if (rh.command == RELAY_COMMAND_BEGIN_DIR) { /* Assign this circuit and its app-ward OR connection a unique ID, * so that we can measure download times. The local edge and dir * connection will be assigned the same ID when they are created * and linked. 
*/ static uint64_t next_id = 0; circ->dirreq_id = ++next_id; TO_OR_CIRCUIT(circ)->p_chan->dirreq_id = circ->dirreq_id; } return connection_exit_begin_conn(cell, circ); case RELAY_COMMAND_DATA: ++stats_n_data_cells_received; if (( layer_hint && --layer_hint->deliver_window < 0) || (!layer_hint && --circ->deliver_window < 0)) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "(relay data) circ deliver_window below 0. Killing."); if (conn) { /* XXXX Do we actually need to do this? Will killing the circuit * not send an END and mark the stream for close as appropriate? */ connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL); connection_mark_for_close(TO_CONN(conn)); } return -END_CIRC_REASON_TORPROTOCOL; } log_debug(domain,"circ deliver_window now %d.", layer_hint ? layer_hint->deliver_window : circ->deliver_window); circuit_consider_sending_sendme(circ, layer_hint); if (!conn) { log_info(domain,"data cell dropped, unknown stream (streamid %d).", rh.stream_id); return 0; } if (--conn->deliver_window < 0) { /* is it below 0 after decrement? */ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "(relay data) conn deliver_window below 0. Killing."); return -END_CIRC_REASON_TORPROTOCOL; } stats_n_data_bytes_received += rh.length; connection_write_to_buf((char*)(cell->payload + RELAY_HEADER_SIZE), rh.length, TO_CONN(conn)); if (!optimistic_data) { /* Only send a SENDME if we're not getting optimistic data; otherwise * a SENDME could arrive before the CONNECTED. */ connection_edge_consider_sending_sendme(conn); } return 0; case RELAY_COMMAND_END: reason = rh.length > 0 ? get_uint8(cell->payload+RELAY_HEADER_SIZE) : END_STREAM_REASON_MISC; if (!conn) { log_info(domain,"end cell (%s) dropped, unknown stream.", stream_end_reason_to_string(reason)); return 0; } /* XXX add to this log_fn the exit node's nickname? */ log_info(domain,TOR_SOCKET_T_FORMAT": end cell (%s) for stream %d. " "Removing stream.", conn->base_.s, stream_end_reason_to_string(reason), conn->stream_id); if (conn->base_.type == CONN_TYPE_AP) { entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn); if (entry_conn->socks_request && !entry_conn->socks_request->has_finished) log_warn(LD_BUG, "open stream hasn't sent socks answer yet? Closing."); } /* We just *got* an end; no reason to send one. */ conn->edge_has_sent_end = 1; if (!conn->end_reason) conn->end_reason = reason | END_STREAM_REASON_FLAG_REMOTE; if (!conn->base_.marked_for_close) { /* only mark it if not already marked. it's possible to * get the 'end' right around when the client hangs up on us. */ connection_mark_and_flush(TO_CONN(conn)); } return 0; case RELAY_COMMAND_EXTEND: case RELAY_COMMAND_EXTEND2: { static uint64_t total_n_extend=0, total_nonearly=0; total_n_extend++; if (rh.stream_id) { log_fn(LOG_PROTOCOL_WARN, domain, "'extend' cell received for non-zero stream. Dropping."); return 0; } if (cell->command != CELL_RELAY_EARLY && !networkstatus_get_param(NULL,"AllowNonearlyExtend",0,0,1)) { #define EARLY_WARNING_INTERVAL 3600 static ratelim_t early_warning_limit = RATELIM_INIT(EARLY_WARNING_INTERVAL); char *m; if (cell->command == CELL_RELAY) { ++total_nonearly; if ((m = rate_limit_log(&early_warning_limit, approx_time()))) { double percentage = ((double)total_nonearly)/total_n_extend; percentage *= 100; log_fn(LOG_PROTOCOL_WARN, domain, "EXTEND cell received, " "but not via RELAY_EARLY. 
Dropping.%s", m); log_fn(LOG_PROTOCOL_WARN, domain, " (We have dropped %.02f%% of " "all EXTEND cells for this reason)", percentage); tor_free(m); } } else { log_fn(LOG_WARN, domain, "EXTEND cell received, in a cell with type %d! Dropping.", cell->command); } return 0; } return circuit_extend(cell, circ); } case RELAY_COMMAND_EXTENDED: case RELAY_COMMAND_EXTENDED2: if (!layer_hint) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "'extended' unsupported at non-origin. Dropping."); return 0; } log_debug(domain,"Got an extended cell! Yay."); { extended_cell_t extended_cell; if (extended_cell_parse(&extended_cell, rh.command, (const uint8_t*)cell->payload+RELAY_HEADER_SIZE, rh.length)<0) { log_warn(LD_PROTOCOL, "Can't parse EXTENDED cell; killing circuit."); return -END_CIRC_REASON_TORPROTOCOL; } if ((reason = circuit_finish_handshake(TO_ORIGIN_CIRCUIT(circ), &extended_cell.created_cell)) < 0) { log_warn(domain,"circuit_finish_handshake failed."); return reason; } } if ((reason=circuit_send_next_onion_skin(TO_ORIGIN_CIRCUIT(circ)))<0) { log_info(domain,"circuit_send_next_onion_skin() failed."); return reason; } return 0; case RELAY_COMMAND_TRUNCATE: if (layer_hint) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "'truncate' unsupported at origin. Dropping."); return 0; } if (circ->n_hop) { if (circ->n_chan) log_warn(LD_BUG, "n_chan and n_hop set on the same circuit!"); extend_info_free(circ->n_hop); circ->n_hop = NULL; tor_free(circ->n_chan_create_cell); circuit_set_state(circ, CIRCUIT_STATE_OPEN); } if (circ->n_chan) { uint8_t trunc_reason = get_uint8(cell->payload + RELAY_HEADER_SIZE); circuit_clear_cell_queue(circ, circ->n_chan); channel_send_destroy(circ->n_circ_id, circ->n_chan, trunc_reason); circuit_set_n_circid_chan(circ, 0, NULL); } log_debug(LD_EXIT, "Processed 'truncate', replying."); { char payload[1]; payload[0] = (char)END_CIRC_REASON_REQUESTED; relay_send_command_from_edge(0, circ, RELAY_COMMAND_TRUNCATED, payload, sizeof(payload), NULL); } return 0; case RELAY_COMMAND_TRUNCATED: if (!layer_hint) { log_fn(LOG_PROTOCOL_WARN, LD_EXIT, "'truncated' unsupported at non-origin. Dropping."); return 0; } circuit_truncated(TO_ORIGIN_CIRCUIT(circ), layer_hint, get_uint8(cell->payload + RELAY_HEADER_SIZE)); return 0; case RELAY_COMMAND_CONNECTED: if (conn) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "'connected' unsupported while open. Closing circ."); return -END_CIRC_REASON_TORPROTOCOL; } log_info(domain, "'connected' received, no conn attached anymore. Ignoring."); return 0; case RELAY_COMMAND_SENDME: if (!rh.stream_id) { if (layer_hint) { if (layer_hint->package_window + CIRCWINDOW_INCREMENT > CIRCWINDOW_START_MAX) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Unexpected sendme cell from exit relay. " "Closing circ."); return -END_CIRC_REASON_TORPROTOCOL; } layer_hint->package_window += CIRCWINDOW_INCREMENT; log_debug(LD_APP,"circ-level sendme at origin, packagewindow %d.", layer_hint->package_window); circuit_resume_edge_reading(circ, layer_hint); } else { if (circ->package_window + CIRCWINDOW_INCREMENT > CIRCWINDOW_START_MAX) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Unexpected sendme cell from client. 
" "Closing circ (window %d).", circ->package_window); return -END_CIRC_REASON_TORPROTOCOL; } circ->package_window += CIRCWINDOW_INCREMENT; log_debug(LD_APP, "circ-level sendme at non-origin, packagewindow %d.", circ->package_window); circuit_resume_edge_reading(circ, layer_hint); } return 0; } if (!conn) { log_info(domain,"sendme cell dropped, unknown stream (streamid %d).", rh.stream_id); return 0; } conn->package_window += STREAMWINDOW_INCREMENT; log_debug(domain,"stream-level sendme, packagewindow now %d.", conn->package_window); if (circuit_queue_streams_are_blocked(circ)) { /* Still waiting for queue to flush; don't touch conn */ return 0; } connection_start_reading(TO_CONN(conn)); /* handle whatever might still be on the inbuf */ if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) { /* (We already sent an end cell if possible) */ connection_mark_for_close(TO_CONN(conn)); return 0; } return 0; case RELAY_COMMAND_RESOLVE: if (layer_hint) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "resolve request unsupported at AP; dropping."); return 0; } else if (conn) { log_fn(LOG_PROTOCOL_WARN, domain, "resolve request for known stream; dropping."); return 0; } else if (circ->purpose != CIRCUIT_PURPOSE_OR) { log_fn(LOG_PROTOCOL_WARN, domain, "resolve request on circ with purpose %d; dropping", circ->purpose); return 0; } connection_exit_begin_resolve(cell, TO_OR_CIRCUIT(circ)); return 0; case RELAY_COMMAND_RESOLVED: if (conn) { log_fn(LOG_PROTOCOL_WARN, domain, "'resolved' unsupported while open. Closing circ."); return -END_CIRC_REASON_TORPROTOCOL; } log_info(domain, "'resolved' received, no conn attached anymore. Ignoring."); return 0; case RELAY_COMMAND_ESTABLISH_INTRO: case RELAY_COMMAND_ESTABLISH_RENDEZVOUS: case RELAY_COMMAND_INTRODUCE1: case RELAY_COMMAND_INTRODUCE2: case RELAY_COMMAND_INTRODUCE_ACK: case RELAY_COMMAND_RENDEZVOUS1: case RELAY_COMMAND_RENDEZVOUS2: case RELAY_COMMAND_INTRO_ESTABLISHED: case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED: rend_process_relay_cell(circ, layer_hint, rh.command, rh.length, cell->payload+RELAY_HEADER_SIZE); return 0; } log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Received unknown relay command %d. Perhaps the other side is using " "a newer version of Tor? Dropping.", rh.command); return 0; /* for forward compatibility, don't kill the circuit */ } /** How many relay_data cells have we built, ever? */ uint64_t stats_n_data_cells_packaged = 0; /** How many bytes of data have we put in relay_data cells have we built, * ever? This would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_packaged if * every relay cell we ever sent were completely full of data. */ uint64_t stats_n_data_bytes_packaged = 0; /** How many relay_data cells have we received, ever? */ uint64_t stats_n_data_cells_received = 0; /** How many bytes of data have we received relay_data cells, ever? This would * be RELAY_PAYLOAD_SIZE*stats_n_data_cells_packaged if every relay cell we * ever received were completely full of data. */ uint64_t stats_n_data_bytes_received = 0; /** If <b>conn</b> has an entire relay payload of bytes on its inbuf (or * <b>package_partial</b> is true), and the appropriate package windows aren't * empty, grab a cell and send it down the circuit. * * If *<b>max_cells</b> is given, package no more than max_cells. Decrement * *<b>max_cells</b> by the number of cells packaged. * * Return -1 (and send a RELAY_COMMAND_END cell if necessary) if conn should * be marked for close, else return 0. 
*/ int connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial, int *max_cells) { size_t bytes_to_process, length; char payload[CELL_PAYLOAD_SIZE]; circuit_t *circ; const unsigned domain = conn->base_.type == CONN_TYPE_AP ? LD_APP : LD_EXIT; int sending_from_optimistic = 0; entry_connection_t *entry_conn = conn->base_.type == CONN_TYPE_AP ? EDGE_TO_ENTRY_CONN(conn) : NULL; const int sending_optimistically = entry_conn && conn->base_.type == CONN_TYPE_AP && conn->base_.state != AP_CONN_STATE_OPEN; crypt_path_t *cpath_layer = conn->cpath_layer; tor_assert(conn); if (conn->base_.marked_for_close) { log_warn(LD_BUG, "called on conn that's already marked for close at %s:%d.", conn->base_.marked_for_close_file, conn->base_.marked_for_close); return 0; } if (max_cells && *max_cells <= 0) return 0; repeat_connection_edge_package_raw_inbuf: circ = circuit_get_by_edge_conn(conn); if (!circ) { log_info(domain,"conn has no circuit! Closing."); conn->end_reason = END_STREAM_REASON_CANT_ATTACH; return -1; } if (circuit_consider_stop_edge_reading(circ, cpath_layer)) return 0; if (conn->package_window <= 0) { log_info(domain,"called with package_window %d. Skipping.", conn->package_window); connection_stop_reading(TO_CONN(conn)); return 0; } sending_from_optimistic = entry_conn && entry_conn->sending_optimistic_data != NULL; if (PREDICT_UNLIKELY(sending_from_optimistic)) { bytes_to_process = generic_buffer_len(entry_conn->sending_optimistic_data); if (PREDICT_UNLIKELY(!bytes_to_process)) { log_warn(LD_BUG, "sending_optimistic_data was non-NULL but empty"); bytes_to_process = connection_get_inbuf_len(TO_CONN(conn)); sending_from_optimistic = 0; } } else { bytes_to_process = connection_get_inbuf_len(TO_CONN(conn)); } if (!bytes_to_process) return 0; if (!package_partial && bytes_to_process < RELAY_PAYLOAD_SIZE) return 0; if (bytes_to_process > RELAY_PAYLOAD_SIZE) { length = RELAY_PAYLOAD_SIZE; } else { length = bytes_to_process; } stats_n_data_bytes_packaged += length; stats_n_data_cells_packaged += 1; if (PREDICT_UNLIKELY(sending_from_optimistic)) { /* XXXX We could be more efficient here by sometimes packing * previously-sent optimistic data in the same cell with data * from the inbuf. */ generic_buffer_get(entry_conn->sending_optimistic_data, payload, length); if (!generic_buffer_len(entry_conn->sending_optimistic_data)) { generic_buffer_free(entry_conn->sending_optimistic_data); entry_conn->sending_optimistic_data = NULL; } } else { connection_fetch_from_buf(payload, length, TO_CONN(conn)); } log_debug(domain,TOR_SOCKET_T_FORMAT": Packaging %d bytes (%d waiting).", conn->base_.s, (int)length, (int)connection_get_inbuf_len(TO_CONN(conn))); if (sending_optimistically && !sending_from_optimistic) { /* This is new optimistic data; remember it in case we need to detach and retry */ if (!entry_conn->pending_optimistic_data) entry_conn->pending_optimistic_data = generic_buffer_new(); generic_buffer_add(entry_conn->pending_optimistic_data, payload, length); } if (connection_edge_send_command(conn, RELAY_COMMAND_DATA, payload, length) < 0 ) /* circuit got marked for close, don't continue, don't need to mark conn */ return 0; if (!cpath_layer) { /* non-rendezvous exit */ tor_assert(circ->package_window > 0); circ->package_window--; } else { /* we're an AP, or an exit on a rendezvous circ */ tor_assert(cpath_layer->package_window > 0); cpath_layer->package_window--; } if (--conn->package_window <= 0) { /* is it 0 after decrement? 
*/ connection_stop_reading(TO_CONN(conn)); log_debug(domain,"conn->package_window reached 0."); circuit_consider_stop_edge_reading(circ, cpath_layer); return 0; /* don't process the inbuf any more */ } log_debug(domain,"conn->package_window is now %d",conn->package_window); if (max_cells) { *max_cells -= 1; if (*max_cells <= 0) return 0; } /* handle more if there's more, or return 0 if there isn't */ goto repeat_connection_edge_package_raw_inbuf; } /** Called when we've just received a relay data cell, when * we've just finished flushing all bytes to stream <b>conn</b>, * or when we've flushed *some* bytes to the stream <b>conn</b>. * * If conn->outbuf is not too full, and our deliver window is * low, send back a suitable number of stream-level sendme cells. */ void connection_edge_consider_sending_sendme(edge_connection_t *conn) { circuit_t *circ; if (connection_outbuf_too_full(TO_CONN(conn))) return; circ = circuit_get_by_edge_conn(conn); if (!circ) { /* this can legitimately happen if the destroy has already * arrived and torn down the circuit */ log_info(LD_APP,"No circuit associated with conn. Skipping."); return; } while (conn->deliver_window <= STREAMWINDOW_START - STREAMWINDOW_INCREMENT) { log_debug(conn->base_.type == CONN_TYPE_AP ?LD_APP:LD_EXIT, "Outbuf %d, Queuing stream sendme.", (int)conn->base_.outbuf_flushlen); conn->deliver_window += STREAMWINDOW_INCREMENT; if (connection_edge_send_command(conn, RELAY_COMMAND_SENDME, NULL, 0) < 0) { log_warn(LD_APP,"connection_edge_send_command failed. Skipping."); return; /* the circuit's closed, don't continue */ } } } /** The circuit <b>circ</b> has received a circuit-level sendme * (on hop <b>layer_hint</b>, if we're the OP). Go through all the * attached streams and let them resume reading and packaging, if * their stream windows allow it. */ static void circuit_resume_edge_reading(circuit_t *circ, crypt_path_t *layer_hint) { if (circuit_queue_streams_are_blocked(circ)) { log_debug(layer_hint?LD_APP:LD_EXIT,"Too big queue, no resuming"); return; } log_debug(layer_hint?LD_APP:LD_EXIT,"resuming"); if (CIRCUIT_IS_ORIGIN(circ)) circuit_resume_edge_reading_helper(TO_ORIGIN_CIRCUIT(circ)->p_streams, circ, layer_hint); else circuit_resume_edge_reading_helper(TO_OR_CIRCUIT(circ)->n_streams, circ, layer_hint); } void stream_choice_seed_weak_rng(void) { crypto_seed_weak_rng(&stream_choice_rng); } /** A helper function for circuit_resume_edge_reading() above. * The arguments are the same, except that <b>conn</b> is the head * of a linked list of edge streams that should each be considered. */ static int circuit_resume_edge_reading_helper(edge_connection_t *first_conn, circuit_t *circ, crypt_path_t *layer_hint) { edge_connection_t *conn; int n_packaging_streams, n_streams_left; int packaged_this_round; int cells_on_queue; int cells_per_conn; edge_connection_t *chosen_stream = NULL; int max_to_package; if (first_conn == NULL) { /* Don't bother to try to do the rest of this if there are no connections * to resume. */ return 0; } /* How many cells do we have space for? It will be the minimum of * the number needed to exhaust the package window, and the minimum * needed to fill the cell queue. 
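* Roughly: max_to_package = MIN(circ->package_window, CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue), computed in two steps just below.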
*/ max_to_package = circ->package_window; if (CIRCUIT_IS_ORIGIN(circ)) { cells_on_queue = circ->n_chan_cells.n; } else { or_circuit_t *or_circ = TO_OR_CIRCUIT(circ); cells_on_queue = or_circ->p_chan_cells.n; } if (CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue < max_to_package) max_to_package = CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue; /* Once we used to start listening on the streams in the order they * appeared in the linked list. That leads to starvation on the * streams that appeared later on the list, since the first streams * would always get to read first. Instead, we just pick a random * stream on the list, and enable reading for streams starting at that * point (and wrapping around as if the list were circular). It would * probably be better to actually remember which streams we've * serviced in the past, but this is simple and effective. */ /* Select a stream uniformly at random from the linked list. We * don't need cryptographic randomness here. */ { int num_streams = 0; for (conn = first_conn; conn; conn = conn->next_stream) { num_streams++; if (tor_weak_random_one_in_n(&stream_choice_rng, num_streams)) { chosen_stream = conn; } /* Invariant: chosen_stream has been chosen uniformly at random from * among the first num_streams streams on first_conn. * * (Note that we iterate over every stream on the circuit, so that after * we've considered the first stream, we've chosen it with P=1; and * after we consider the second stream, we've switched to it with P=1/2 * and stayed with the first stream with P=1/2; and after we've * considered the third stream, we've switched to it with P=1/3 and * remained with one of the first two streams with P=(2/3), giving each * one P=(1/2)(2/3) )=(1/3).) */ } } /* Count how many non-marked streams there are that have anything on * their inbuf, and enable reading on all of the connections. */ n_packaging_streams = 0; /* Activate reading starting from the chosen stream */ for (conn=chosen_stream; conn; conn = conn->next_stream) { /* Start reading for the streams starting from here */ if (conn->base_.marked_for_close || conn->package_window <= 0) continue; if (!layer_hint || conn->cpath_layer == layer_hint) { connection_start_reading(TO_CONN(conn)); if (connection_get_inbuf_len(TO_CONN(conn)) > 0) ++n_packaging_streams; } } /* Go back and do the ones we skipped, circular-style */ for (conn = first_conn; conn != chosen_stream; conn = conn->next_stream) { if (conn->base_.marked_for_close || conn->package_window <= 0) continue; if (!layer_hint || conn->cpath_layer == layer_hint) { connection_start_reading(TO_CONN(conn)); if (connection_get_inbuf_len(TO_CONN(conn)) > 0) ++n_packaging_streams; } } if (n_packaging_streams == 0) /* avoid divide-by-zero */ return 0; again: cells_per_conn = CEIL_DIV(max_to_package, n_packaging_streams); packaged_this_round = 0; n_streams_left = 0; /* Iterate over all connections. Package up to cells_per_conn cells on * each. Update packaged_this_round with the total number of cells * packaged, and n_streams_left with the number that still have data to * package. */ for (conn=first_conn; conn; conn=conn->next_stream) { if (conn->base_.marked_for_close || conn->package_window <= 0) continue; if (!layer_hint || conn->cpath_layer == layer_hint) { int n = cells_per_conn, r; /* handle whatever might still be on the inbuf */ r = connection_edge_package_raw_inbuf(conn, 1, &n); /* Note how many we packaged */ packaged_this_round += (cells_per_conn-n); if (r<0) { /* Problem while packaging. 
(We already sent an end cell if * possible) */ connection_mark_for_close(TO_CONN(conn)); continue; } /* If there's still data to read, we'll be coming back to this stream. */ if (connection_get_inbuf_len(TO_CONN(conn))) ++n_streams_left; /* If the circuit won't accept any more data, return without looking * at any more of the streams. Any connections that should be stopped * have already been stopped by connection_edge_package_raw_inbuf. */ if (circuit_consider_stop_edge_reading(circ, layer_hint)) return -1; /* XXXX should we also stop immediately if we fill up the cell queue? * Probably. */ } } /* If we made progress, and we are willing to package more, and there are * any streams left that want to package stuff... try again! */ if (packaged_this_round && packaged_this_round < max_to_package && n_streams_left) { max_to_package -= packaged_this_round; n_packaging_streams = n_streams_left; goto again; } return 0; } /** Check if the package window for <b>circ</b> is empty (at * hop <b>layer_hint</b> if it's defined). * * If yes, tell edge streams to stop reading and return 1. * Else return 0. */ static int circuit_consider_stop_edge_reading(circuit_t *circ, crypt_path_t *layer_hint) { edge_connection_t *conn = NULL; unsigned domain = layer_hint ? LD_APP : LD_EXIT; if (!layer_hint) { or_circuit_t *or_circ = TO_OR_CIRCUIT(circ); log_debug(domain,"considering circ->package_window %d", circ->package_window); if (circ->package_window <= 0) { log_debug(domain,"yes, not-at-origin. stopped."); for (conn = or_circ->n_streams; conn; conn=conn->next_stream) connection_stop_reading(TO_CONN(conn)); return 1; } return 0; } /* else, layer hint is defined, use it */ log_debug(domain,"considering layer_hint->package_window %d", layer_hint->package_window); if (layer_hint->package_window <= 0) { log_debug(domain,"yes, at-origin. stopped."); for (conn = TO_ORIGIN_CIRCUIT(circ)->p_streams; conn; conn=conn->next_stream) { if (conn->cpath_layer == layer_hint) connection_stop_reading(TO_CONN(conn)); } return 1; } return 0; } /** Check if the deliver_window for circuit <b>circ</b> (at hop * <b>layer_hint</b> if it's defined) is low enough that we should * send a circuit-level sendme back down the circuit. If so, send * enough sendmes that the window would be overfull if we sent any * more. */ static void circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint) { // log_fn(LOG_INFO,"Considering: layer_hint is %s", // layer_hint ? "defined" : "null"); while ((layer_hint ? layer_hint->deliver_window : circ->deliver_window) <= CIRCWINDOW_START - CIRCWINDOW_INCREMENT) { log_debug(LD_CIRC,"Queuing circuit sendme."); if (layer_hint) layer_hint->deliver_window += CIRCWINDOW_INCREMENT; else circ->deliver_window += CIRCWINDOW_INCREMENT; if (relay_send_command_from_edge(0, circ, RELAY_COMMAND_SENDME, NULL, 0, layer_hint) < 0) { log_warn(LD_CIRC, "relay_send_command_from_edge failed. Circuit's closed."); return; /* the circuit's closed, don't continue */ } } } #ifdef ACTIVE_CIRCUITS_PARANOIA #define assert_cmux_ok_paranoid(chan) \ assert_circuit_mux_okay(chan) #else #define assert_cmux_ok_paranoid(chan) #endif /** The total number of cells we have allocated from the memory pool. */ static size_t total_cells_allocated = 0; /** A memory pool to allocate packed_cell_t objects. */ static mp_pool_t *cell_pool = NULL; /** Allocate structures to hold cells. 
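* (Judging by the mp_pool_new() call below, the pool hands out packed_cell_t items carved from chunks of about 128*1024 bytes.)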
*/ void init_cell_pool(void) { tor_assert(!cell_pool); cell_pool = mp_pool_new(sizeof(packed_cell_t), 128*1024); } /** Free all storage used to hold cells (and insertion times if we measure * cell statistics). */ void free_cell_pool(void) { /* Maybe we haven't called init_cell_pool yet; need to check for it. */ if (cell_pool) { mp_pool_destroy(cell_pool); cell_pool = NULL; } } /** Free excess storage in cell pool. */ void clean_cell_pool(void) { tor_assert(cell_pool); mp_pool_clean(cell_pool, 0, 1); } /** Release storage held by <b>cell</b>. */ static INLINE void packed_cell_free_unchecked(packed_cell_t *cell) { --total_cells_allocated; mp_pool_release(cell); } /** Allocate and return a new packed_cell_t. */ static INLINE packed_cell_t * packed_cell_new(void) { ++total_cells_allocated; return mp_pool_get(cell_pool); } /** Return a packed cell used outside by channel_t lower layer */ void packed_cell_free(packed_cell_t *cell) { packed_cell_free_unchecked(cell); } /** Log current statistics for cell pool allocation at log level * <b>severity</b>. */ void dump_cell_pool_usage(int severity) { circuit_t *c; int n_circs = 0; int n_cells = 0; for (c = circuit_get_global_list_(); c; c = c->next) { n_cells += c->n_chan_cells.n; if (!CIRCUIT_IS_ORIGIN(c)) n_cells += TO_OR_CIRCUIT(c)->p_chan_cells.n; ++n_circs; } tor_log(severity, LD_MM, "%d cells allocated on %d circuits. %d cells leaked.", n_cells, n_circs, (int)total_cells_allocated - n_cells); mp_pool_log_status(cell_pool, severity); } /** Allocate a new copy of packed <b>cell</b>. */ static INLINE packed_cell_t * packed_cell_copy(const cell_t *cell, int wide_circ_ids) { packed_cell_t *c = packed_cell_new(); cell_pack(c, cell, wide_circ_ids); c->next = NULL; return c; } /** Append <b>cell</b> to the end of <b>queue</b>. */ void cell_queue_append(cell_queue_t *queue, packed_cell_t *cell) { if (queue->tail) { tor_assert(!queue->tail->next); queue->tail->next = cell; } else { queue->head = cell; } queue->tail = cell; cell->next = NULL; ++queue->n; } /** Append a newly allocated copy of <b>cell</b> to the end of <b>queue</b> */ void cell_queue_append_packed_copy(cell_queue_t *queue, const cell_t *cell, int wide_circ_ids) { struct timeval now; packed_cell_t *copy = packed_cell_copy(cell, wide_circ_ids); tor_gettimeofday_cached(&now); copy->inserted_time = (uint32_t)tv_to_msec(&now); cell_queue_append(queue, copy); } /** Remove and free every cell in <b>queue</b>. */ void cell_queue_clear(cell_queue_t *queue) { packed_cell_t *cell, *next; cell = queue->head; while (cell) { next = cell->next; packed_cell_free_unchecked(cell); cell = next; } queue->head = queue->tail = NULL; queue->n = 0; } /** Extract and return the cell at the head of <b>queue</b>; return NULL if * <b>queue</b> is empty. */ static INLINE packed_cell_t * cell_queue_pop(cell_queue_t *queue) { packed_cell_t *cell = queue->head; if (!cell) return NULL; queue->head = cell->next; if (cell == queue->tail) { tor_assert(!queue->head); queue->tail = NULL; } --queue->n; return cell; } /** Return the total number of bytes used for each packed_cell in a queue. * Approximate. */ size_t packed_cell_mem_cost(void) { return sizeof(packed_cell_t) + MP_POOL_ITEM_OVERHEAD; } /** Check whether we've got too much space used for cells. If so, * call the OOM handler and return 1. Otherwise, return 0. 
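* The estimate is total_cells_allocated * packed_cell_mem_cost(), where packed_cell_mem_cost() (just above) is sizeof(packed_cell_t) plus the pool's per-item overhead; it is compared against the MaxMemInCellQueues option.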
*/ static int cell_queues_check_size(void) { size_t alloc = total_cells_allocated * packed_cell_mem_cost(); if (alloc >= get_options()->MaxMemInCellQueues) { circuits_handle_oom(alloc); return 1; } return 0; } /** * Update the number of cells available on the circuit's n_chan or p_chan's * circuit mux. */ void update_circuit_on_cmux_(circuit_t *circ, cell_direction_t direction, const char *file, int lineno) { channel_t *chan = NULL; or_circuit_t *or_circ = NULL; circuitmux_t *cmux = NULL; tor_assert(circ); /* Okay, get the channel */ if (direction == CELL_DIRECTION_OUT) { chan = circ->n_chan; } else { or_circ = TO_OR_CIRCUIT(circ); chan = or_circ->p_chan; } tor_assert(chan); tor_assert(chan->cmux); /* Now get the cmux */ cmux = chan->cmux; /* Cmux sanity check */ if (! circuitmux_is_circuit_attached(cmux, circ)) { log_warn(LD_BUG, "called on non-attachd circuit from %s:%d", file, lineno); return; } tor_assert(circuitmux_attached_circuit_direction(cmux, circ) == direction); assert_cmux_ok_paranoid(chan); /* Update the number of cells we have for the circuit mux */ if (direction == CELL_DIRECTION_OUT) { circuitmux_set_num_cells(cmux, circ, circ->n_chan_cells.n); } else { circuitmux_set_num_cells(cmux, circ, or_circ->p_chan_cells.n); } assert_cmux_ok_paranoid(chan); } /** Remove all circuits from the cmux on <b>chan</b>. */ void channel_unlink_all_circuits(channel_t *chan) { tor_assert(chan); tor_assert(chan->cmux); circuitmux_detach_all_circuits(chan->cmux); chan->num_n_circuits = 0; chan->num_p_circuits = 0; } /** Block (if <b>block</b> is true) or unblock (if <b>block</b> is false) * every edge connection that is using <b>circ</b> to write to <b>chan</b>, * and start or stop reading as appropriate. * * If <b>stream_id</b> is nonzero, block only the edge connection whose * stream_id matches it. * * Returns the number of streams whose status we changed. */ static int set_streams_blocked_on_circ(circuit_t *circ, channel_t *chan, int block, streamid_t stream_id) { edge_connection_t *edge = NULL; int n = 0; if (circ->n_chan == chan) { circ->streams_blocked_on_n_chan = block; if (CIRCUIT_IS_ORIGIN(circ)) edge = TO_ORIGIN_CIRCUIT(circ)->p_streams; } else { circ->streams_blocked_on_p_chan = block; tor_assert(!CIRCUIT_IS_ORIGIN(circ)); edge = TO_OR_CIRCUIT(circ)->n_streams; } for (; edge; edge = edge->next_stream) { connection_t *conn = TO_CONN(edge); if (stream_id && edge->stream_id != stream_id) continue; if (edge->edge_blocked_on_circ != block) { ++n; edge->edge_blocked_on_circ = block; } if (!conn->read_event && !HAS_BUFFEREVENT(conn)) { /* This connection is a placeholder for something; probably a DNS * request. It can't actually stop or start reading.*/ continue; } if (block) { if (connection_is_reading(conn)) connection_stop_reading(conn); } else { /* Is this right? */ if (!connection_is_reading(conn)) connection_start_reading(conn); } } return n; } /** Pull as many cells as possible (but no more than <b>max</b>) from the * queue of the first active circuit on <b>chan</b>, and write them to * <b>chan</b>-&gt;outbuf. Return the number of cells written. Advance * the active circuit pointer to the next active circuit in the ring. 
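* (Callers that only want to prime an empty outbuf pass max==1; see append_cell_to_circuit_queue() below. Note that the loop pops a single cell per iteration, since each transmission may change which circuit the circuitmux picks next.)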
*/ int channel_flush_from_first_active_circuit(channel_t *chan, int max) { circuitmux_t *cmux = NULL; int n_flushed = 0; cell_queue_t *queue; circuit_t *circ; or_circuit_t *or_circ; int streams_blocked; packed_cell_t *cell; /* Get the cmux */ tor_assert(chan); tor_assert(chan->cmux); cmux = chan->cmux; /* Main loop: pick a circuit, send a cell, update the cmux */ while (n_flushed < max) { circ = circuitmux_get_first_active_circuit(cmux); /* If it returns NULL, no cells left to send */ if (!circ) break; assert_cmux_ok_paranoid(chan); if (circ->n_chan == chan) { queue = &circ->n_chan_cells; streams_blocked = circ->streams_blocked_on_n_chan; } else { or_circ = TO_OR_CIRCUIT(circ); tor_assert(or_circ->p_chan == chan); queue = &TO_OR_CIRCUIT(circ)->p_chan_cells; streams_blocked = circ->streams_blocked_on_p_chan; } /* Circuitmux told us this was active, so it should have cells */ tor_assert(queue->n > 0); /* * Get just one cell here; once we've sent it, that can change the circuit * selection, so we have to loop around for another even if this circuit * has more than one. */ cell = cell_queue_pop(queue); /* Calculate the exact time that this cell has spent in the queue. */ if (get_options()->CellStatistics && !CIRCUIT_IS_ORIGIN(circ)) { uint32_t msec_waiting; struct timeval tvnow; or_circ = TO_OR_CIRCUIT(circ); tor_gettimeofday_cached(&tvnow); msec_waiting = ((uint32_t)tv_to_msec(&tvnow)) - cell->inserted_time; or_circ->total_cell_waiting_time += msec_waiting; or_circ->processed_cells++; } /* If we just flushed our queue and this circuit is used for a * tunneled directory request, possibly advance its state. */ if (queue->n == 0 && chan->dirreq_id) geoip_change_dirreq_state(chan->dirreq_id, DIRREQ_TUNNELED, DIRREQ_CIRC_QUEUE_FLUSHED); /* Now send the cell */ channel_write_packed_cell(chan, cell); cell = NULL; /* * Don't packed_cell_free_unchecked(cell) here because the channel will * do so when it gets out of the channel queue (probably already did, in * which case that was an immediate double-free bug). */ /* Update the counter */ ++n_flushed; /* * Now update the cmux; tell it we've just sent a cell, and how many * we have left. */ circuitmux_notify_xmit_cells(cmux, circ, 1); circuitmux_set_num_cells(cmux, circ, queue->n); if (queue->n == 0) log_debug(LD_GENERAL, "Made a circuit inactive."); /* Is the cell queue low enough to unblock all the streams that are waiting * to write to this circuit? */ if (streams_blocked && queue->n <= CELL_QUEUE_LOWWATER_SIZE) set_streams_blocked_on_circ(circ, chan, 0, 0); /* unblock streams */ /* If n_flushed < max still, loop around and pick another circuit */ } /* Okay, we're done sending now */ assert_cmux_ok_paranoid(chan); return n_flushed; } /** Add <b>cell</b> to the queue of <b>circ</b> writing to <b>chan</b> * transmitting in <b>direction</b>. */ void append_cell_to_circuit_queue(circuit_t *circ, channel_t *chan, cell_t *cell, cell_direction_t direction, streamid_t fromstream) { or_circuit_t *orcirc = NULL; cell_queue_t *queue; int streams_blocked; if (circ->marked_for_close) return; if (direction == CELL_DIRECTION_OUT) { queue = &circ->n_chan_cells; streams_blocked = circ->streams_blocked_on_n_chan; } else { orcirc = TO_OR_CIRCUIT(circ); queue = &orcirc->p_chan_cells; streams_blocked = circ->streams_blocked_on_p_chan; } /* * Disabling this for now because of a possible guard discovery attack */ #if 0 /* Are we a middle circuit about to exceed ORCIRC_MAX_MIDDLE_CELLS? 
*/ if ((circ->n_chan != NULL) && CIRCUIT_IS_ORCIRC(circ)) { orcirc = TO_OR_CIRCUIT(circ); if (orcirc->p_chan) { if (queue->n + 1 >= ORCIRC_MAX_MIDDLE_CELLS) { /* Queueing this cell would put queue over the cap */ log_warn(LD_CIRC, "Got a cell exceeding the cap of %u in the %s direction " "on middle circ ID %u on chan ID " U64_FORMAT "; killing the circuit.", ORCIRC_MAX_MIDDLE_CELLS, (direction == CELL_DIRECTION_OUT) ? "n" : "p", (direction == CELL_DIRECTION_OUT) ? circ->n_circ_id : orcirc->p_circ_id, U64_PRINTF_ARG( (direction == CELL_DIRECTION_OUT) ? circ->n_chan->global_identifier : orcirc->p_chan->global_identifier)); circuit_mark_for_close(circ, END_CIRC_REASON_RESOURCELIMIT); return; } } } #endif cell_queue_append_packed_copy(queue, cell, chan->wide_circ_ids); if (PREDICT_UNLIKELY(cell_queues_check_size())) { /* We ran the OOM handler */ if (circ->marked_for_close) return; } /* If we have too many cells on the circuit, we should stop reading from * the edge streams for a while. */ if (!streams_blocked && queue->n >= CELL_QUEUE_HIGHWATER_SIZE) set_streams_blocked_on_circ(circ, chan, 1, 0); /* block streams */ if (streams_blocked && fromstream) { /* This edge connection is apparently not blocked; block it. */ set_streams_blocked_on_circ(circ, chan, 1, fromstream); } update_circuit_on_cmux(circ, direction); if (queue->n == 1) { /* This was the first cell added to the queue. We just made this * circuit active. */ log_debug(LD_GENERAL, "Made a circuit active."); } if (!channel_has_queued_writes(chan)) { /* There is no data at all waiting to be sent on the outbuf. Add a * cell, so that we can notice when it gets flushed, flushed_some can * get called, and we can start putting more data onto the buffer then. */ log_debug(LD_GENERAL, "Primed a buffer."); channel_flush_from_first_active_circuit(chan, 1); } } /** Append an encoded value of <b>addr</b> to <b>payload_out</b>, which must * have at least 18 bytes of free space. The encoding is, as specified in * tor-spec.txt: * RESOLVED_TYPE_IPV4 or RESOLVED_TYPE_IPV6 [1 byte] * LENGTH [1 byte] * ADDRESS [length bytes] * Return the number of bytes added, or -1 on error */ int append_address_to_payload(uint8_t *payload_out, const tor_addr_t *addr) { uint32_t a; switch (tor_addr_family(addr)) { case AF_INET: payload_out[0] = RESOLVED_TYPE_IPV4; payload_out[1] = 4; a = tor_addr_to_ipv4n(addr); memcpy(payload_out+2, &a, 4); return 6; case AF_INET6: payload_out[0] = RESOLVED_TYPE_IPV6; payload_out[1] = 16; memcpy(payload_out+2, tor_addr_to_in6_addr8(addr), 16); return 18; case AF_UNSPEC: default: return -1; } } /** Given <b>payload_len</b> bytes at <b>payload</b>, starting with an address * encoded as by append_address_to_payload(), try to decode the address into * *<b>addr_out</b>. Return the next byte in the payload after the address on * success, or NULL on failure. */ const uint8_t * decode_address_from_payload(tor_addr_t *addr_out, const uint8_t *payload, int payload_len) { if (payload_len < 2) return NULL; if (payload_len < 2+payload[1]) return NULL; switch (payload[0]) { case RESOLVED_TYPE_IPV4: if (payload[1] != 4) return NULL; tor_addr_from_ipv4n(addr_out, get_uint32(payload+2)); break; case RESOLVED_TYPE_IPV6: if (payload[1] != 16) return NULL; tor_addr_from_ipv6_bytes(addr_out, (char*)(payload+2)); break; default: tor_addr_make_unspec(addr_out); break; } return payload + 2 + payload[1]; } /** Remove all the cells queued on <b>circ</b> for <b>chan</b>. 
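* (Used, for instance, by the RELAY_COMMAND_TRUNCATE handler above, so that cells queued for the old next hop are dropped before the DESTROY is sent.)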
*/ void circuit_clear_cell_queue(circuit_t *circ, channel_t *chan) { cell_queue_t *queue; cell_direction_t direction; if (circ->n_chan == chan) { queue = &circ->n_chan_cells; direction = CELL_DIRECTION_OUT; } else { or_circuit_t *orcirc = TO_OR_CIRCUIT(circ); tor_assert(orcirc->p_chan == chan); queue = &orcirc->p_chan_cells; direction = CELL_DIRECTION_IN; } /* Clear the queue */ cell_queue_clear(queue); /* Update the cell counter in the cmux */ if (chan->cmux && circuitmux_is_circuit_attached(chan->cmux, circ)) update_circuit_on_cmux(circ, direction); } /** Fail with an assert if the circuit mux on chan is corrupt */ void assert_circuit_mux_okay(channel_t *chan) { tor_assert(chan); tor_assert(chan->cmux); circuitmux_assert_okay(chan->cmux); } /** Return 1 if we shouldn't restart reading on this circuit, even if * we get a SENDME. Else return 0. */ static int circuit_queue_streams_are_blocked(circuit_t *circ) { if (CIRCUIT_IS_ORIGIN(circ)) { return circ->streams_blocked_on_n_chan; } else { return circ->streams_blocked_on_p_chan; } }
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2003 Intel Corp. * Copyright (c) 2001-2002 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * This file is part of the SCTP kernel implementation * * These functions interface with the sockets layer to implement the * SCTP Extensions for the Sockets API. * * Note that the descriptions from the specification are USER level * functions--this file is the functions which populate the struct proto * for SCTP which is the BOTTOM of the sockets interface. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Narasimha Budihal <narsi@refcode.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Daisy Chang <daisyc@us.ibm.com> * Sridhar Samudrala <samudrala@us.ibm.com> * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> * Ardelle Fan <ardelle.fan@intel.com> * Ryan Layer <rmlayer@us.ibm.com> * Anup Pemmaiah <pemmaiah@cc.usu.edu> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/hash.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/ip.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/compat.h> #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <net/busy_poll.h> #include <linux/socket.h> /* for sa_family_t */ #include <linux/export.h> #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Forward declarations for internal helper functions. 
*/ static int sctp_writeable(struct sock *sk); static void sctp_wfree(struct sk_buff *skb); static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, size_t msg_len); static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static int sctp_wait_for_accept(struct sock *sk, long timeo); static void sctp_wait_for_close(struct sock *sk, long timeo); static void sctp_destruct_sock(struct sock *sk); static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len); static int sctp_bindx_add(struct sock *, struct sockaddr *, int); static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk); static int sctp_do_bind(struct sock *, union sctp_addr *, int); static int sctp_autobind(struct sock *sk); static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t); static int sctp_memory_pressure; static atomic_long_t sctp_memory_allocated; struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) { sctp_memory_pressure = 1; } /* Get the sndbuf space available at the time on the association. */ static inline int sctp_wspace(struct sctp_association *asoc) { int amt; if (asoc->ep->sndbuf_policy) amt = asoc->sndbuf_used; else amt = sk_wmem_alloc_get(asoc->base.sk); if (amt >= asoc->base.sk->sk_sndbuf) { if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) amt = 0; else { amt = sk_stream_wspace(asoc->base.sk); if (amt < 0) amt = 0; } } else { amt = asoc->base.sk->sk_sndbuf - amt; } return amt; } /* Increment the used sndbuf space count of the corresponding association by * the size of the outgoing data chunk. * Also, set the skb destructor for sndbuf accounting later. * * Since it is always 1-1 between chunk and skb, and also a new skb is always * allocated for chunk bundling in sctp_packet_transmit(), we can use the * destructor in the data chunk skb for the purpose of the sndbuf space * tracking. */ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) { struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; /* The sndbuf space is tracked per association. */ sctp_association_hold(asoc); skb_set_owner_w(chunk->skb, sk); chunk->skb->destructor = sctp_wfree; /* Save the chunk pointer in skb for sctp_wfree to use later. */ skb_shinfo(chunk->skb)->destructor_arg = chunk; asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); sk->sk_wmem_queued += chunk->skb->truesize; sk_mem_charge(sk, chunk->skb->truesize); } /* Verify that this is a valid address. */ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, int len) { struct sctp_af *af; /* Verify basic sockaddr. */ af = sctp_sockaddr_af(sctp_sk(sk), addr, len); if (!af) return -EINVAL; /* Is this a valid SCTP address? */ if (!af->addr_valid(addr, sctp_sk(sk), NULL)) return -EINVAL; if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) return -EINVAL; return 0; } /* Look up the association by its id. If this is not a UDP-style * socket, the ID field is always ignored. 
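* For a TCP-style socket the id is ignored and the lone association, if any, is returned; for a UDP-style socket the id is looked up in the assoc idr, with 0 and (sctp_assoc_t)-1 rejected outright.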
*/ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) { struct sctp_association *asoc = NULL; /* If this is not a UDP-style socket, assoc id should be ignored. */ if (!sctp_style(sk, UDP)) { /* Return NULL if the socket state is not ESTABLISHED. It * could be a TCP-style listening socket or a socket which * hasn't yet called connect() to establish an association. */ if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING)) return NULL; /* Get the first and the only association from the list. */ if (!list_empty(&sctp_sk(sk)->ep->asocs)) asoc = list_entry(sctp_sk(sk)->ep->asocs.next, struct sctp_association, asocs); return asoc; } /* Otherwise this is a UDP-style socket. */ if (!id || (id == (sctp_assoc_t)-1)) return NULL; spin_lock_bh(&sctp_assocs_id_lock); asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); spin_unlock_bh(&sctp_assocs_id_lock); if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) return NULL; return asoc; } /* Look up the transport from an address and an assoc id. If both address and * id are specified, the associations matching the address and the id should be * the same. */ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, struct sockaddr_storage *addr, sctp_assoc_t id) { struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; struct sctp_af *af = sctp_get_af_specific(addr->ss_family); union sctp_addr *laddr = (union sctp_addr *)addr; struct sctp_transport *transport; if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) return NULL; addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, laddr, &transport); if (!addr_asoc) return NULL; id_asoc = sctp_id2assoc(sk, id); if (id_asoc && (id_asoc != addr_asoc)) return NULL; sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), (union sctp_addr *)addr); return transport; } /* API 3.1.2 bind() - UDP Style Syntax * The syntax of bind() is, * * ret = bind(int sd, struct sockaddr *addr, int addrlen); * * sd - the socket descriptor returned by socket(). * addr - the address structure (struct sockaddr_in or struct * sockaddr_in6 [RFC 2553]), * addr_len - the size of the address structure. */ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) { int retval = 0; lock_sock(sk); pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Disallow binding twice. */ if (!sctp_sk(sk)->ep->base.bind_addr.port) retval = sctp_do_bind(sk, (union sctp_addr *)addr, addr_len); else retval = -EINVAL; release_sock(sk); return retval; } static long sctp_get_port_local(struct sock *, union sctp_addr *); /* Verify this is a valid sockaddr. */ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, union sctp_addr *addr, int len) { struct sctp_af *af; /* Check minimum size. */ if (len < sizeof (struct sockaddr)) return NULL; /* V4 mapped address are really of AF_INET family */ if (addr->sa.sa_family == AF_INET6 && ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { if (!opt->pf->af_supported(AF_INET, opt)) return NULL; } else { /* Does this PF support this AF? */ if (!opt->pf->af_supported(addr->sa.sa_family, opt)) return NULL; } /* If we get this far, af is valid. */ af = sctp_get_af_specific(addr->sa.sa_family); if (len < af->sockaddr_len) return NULL; return af; } /* Bind a local address either to an endpoint or to an association. 
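* Note the port-inheritance rule implemented below: binding with port 0 on an endpoint that already holds a port silently keeps that port, while an explicit port that differs from the bound one fails with -EINVAL.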
*/ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { struct net *net = sock_net(sk); struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; struct sctp_bind_addr *bp = &ep->base.bind_addr; struct sctp_af *af; unsigned short snum; int ret = 0; /* Common sockaddr verification. */ af = sctp_sockaddr_af(sp, addr, len); if (!af) { pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", __func__, sk, addr, len); return -EINVAL; } snum = ntohs(addr->v4.sin_port); pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", __func__, sk, &addr->sa, bp->port, snum, len); /* PF specific bind() address verification. */ if (!sp->pf->bind_verify(sp, addr)) return -EADDRNOTAVAIL; /* We must either be unbound, or bind to the same port. * It's OK to allow 0 ports if we are already bound. * We'll just inhert an already bound port in this case */ if (bp->port) { if (!snum) snum = bp->port; else if (snum != bp->port) { pr_debug("%s: new port %d doesn't match existing port " "%d\n", __func__, snum, bp->port); return -EINVAL; } } if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; /* See if the address matches any of the addresses we may have * already bound before checking against other endpoints. */ if (sctp_bind_addr_match(bp, addr, sp)) return -EINVAL; /* Make sure we are allowed to bind here. * The function sctp_get_port_local() does duplicate address * detection. */ addr->v4.sin_port = htons(snum); if ((ret = sctp_get_port_local(sk, addr))) { return -EADDRINUSE; } /* Refresh ephemeral port. */ if (!bp->port) bp->port = inet_sk(sk)->inet_num; /* Add the address to the bind address list. * Use GFP_ATOMIC since BHs will be disabled. */ ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len, SCTP_ADDR_SRC, GFP_ATOMIC); /* Copy back into socket for getsockname() use. */ if (!ret) { inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); sp->pf->to_sk_saddr(addr, sk); } return ret; } /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks * * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged * at any one time. If a sender, after sending an ASCONF chunk, decides * it needs to transfer another ASCONF Chunk, it MUST wait until the * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a * subsequent ASCONF. Note this restriction binds each side, so at any * time two ASCONF may be in-transit on any given association (one sent * from each endpoint). */ static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk) { struct net *net = sock_net(asoc->base.sk); int retval = 0; /* If there is an outstanding ASCONF chunk, queue it for later * transmission. */ if (asoc->addip_last_asconf) { list_add_tail(&chunk->list, &asoc->addip_chunk_list); goto out; } /* Hold the chunk until an ASCONF_ACK is received. */ sctp_chunk_hold(chunk); retval = sctp_primitive_ASCONF(net, asoc, chunk); if (retval) sctp_chunk_free(chunk); else asoc->addip_last_asconf = chunk; out: return retval; } /* Add a list of addresses as bind addresses to local endpoint or * association. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_do_bind() on it. * * If any of them fails, then the operation will be reversed and the * ones that were added will be removed. * * Only sctp_setsockopt_bindx() is supposed to call this function. 
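* The addrs argument is a packed array, e.g. (layout illustrative) a struct sockaddr_in immediately followed by a struct sockaddr_in6, with addr_buf advanced by af->sockaddr_len after each entry.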
*/ static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) { int cnt; int retval = 0; void *addr_buf; struct sockaddr *sa_addr; struct sctp_af *af; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* The list may contain either IPv4 or IPv6 address; * determine the address length for walking thru the list. */ sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); if (!af) { retval = -EINVAL; goto err_bindx_add; } retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, af->sockaddr_len); addr_buf += af->sockaddr_len; err_bindx_add: if (retval < 0) { /* Failed. Cleanup the ones that have been added */ if (cnt > 0) sctp_bindx_rem(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Add IP address parameters to all the peers of the * associations that are part of the endpoint indicating that a list of local * addresses are added to the endpoint. * * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_send_asconf_add_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; struct sctp_sockaddr_entry *laddr; union sctp_addr *addr; union sctp_addr saveaddr; void *addr_buf; struct sctp_af *af; struct list_head *p; int i; int retval = 0; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * in the bind address list of the association. If so, * do not send the asconf chunk to its peer, but continue with * other associations. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (sctp_assoc_lookup_laddr(asoc, addr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Use the first valid address in bind addr list of * association as Address Parameter of ASCONF CHUNK. */ bp = &asoc->base.bind_addr; p = bp->address_list.next; laddr = list_entry(p, struct sctp_sockaddr_entry, list); chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, addrcnt, SCTP_PARAM_ADD_IP); if (!chunk) { retval = -ENOMEM; goto out; } /* Add the new addresses to the bind address list with * use_as_src set to 0. 
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); memcpy(&saveaddr, addr, af->sockaddr_len); retval = sctp_add_bind_addr(bp, &saveaddr, sizeof(saveaddr), SCTP_ADDR_NEW, GFP_ATOMIC); addr_buf += af->sockaddr_len; } if (asoc->src_out_of_asoc_ok) { struct sctp_transport *trans; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { /* Clear the source and route cache */ dst_release(trans->dst); trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); trans->ssthresh = asoc->peer.i.a_rwnd; trans->rto = asoc->rto_initial; sctp_max_rto(asoc, trans); trans->rtt = trans->srtt = trans->rttvar = 0; sctp_transport_route(trans, NULL, sctp_sk(asoc->base.sk)); } } retval = sctp_send_asconf(asoc, chunk); } out: return retval; } /* Remove a list of addresses from bind addresses list. Do not remove the * last address. * * Basically run through each address specified in the addrs/addrcnt * array/length pair, determine if it is IPv6 or IPv4 and call * sctp_del_bind() on it. * * If any of them fails, then the operation will be reversed and the * ones that were removed will be added back. * * At least one address has to be left; if only one address is * available, the operation will return -EBUSY. * * Only sctp_setsockopt_bindx() is supposed to call this function. */ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; int cnt; struct sctp_bind_addr *bp = &ep->base.bind_addr; int retval = 0; void *addr_buf; union sctp_addr *sa_addr; struct sctp_af *af; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); addr_buf = addrs; for (cnt = 0; cnt < addrcnt; cnt++) { /* If the bind address list is empty or if there is only one * bind address, there is nothing more to be removed (we need * at least one address here). */ if (list_empty(&bp->address_list) || (sctp_list_single_entry(&bp->address_list))) { retval = -EBUSY; goto err_bindx_rem; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); if (!af) { retval = -EINVAL; goto err_bindx_rem; } if (!af->addr_valid(sa_addr, sp, NULL)) { retval = -EADDRNOTAVAIL; goto err_bindx_rem; } if (sa_addr->v4.sin_port && sa_addr->v4.sin_port != htons(bp->port)) { retval = -EINVAL; goto err_bindx_rem; } if (!sa_addr->v4.sin_port) sa_addr->v4.sin_port = htons(bp->port); /* FIXME - There is probably a need to check if sk->sk_saddr and * sk->sk_rcv_addr are currently set to one of the addresses to * be removed. This is something which needs to be looked into * when we are fixing the outstanding issues with multi-homing * socket routing and failover schemes. Refer to comments in * sctp_do_bind(). -daisy */ retval = sctp_del_bind_addr(bp, sa_addr); addr_buf += af->sockaddr_len; err_bindx_rem: if (retval < 0) { /* Failed. Add the ones that has been removed back */ if (cnt > 0) sctp_bindx_add(sk, addrs, cnt); return retval; } } return retval; } /* Send an ASCONF chunk with Delete IP address parameters to all the peers of * the associations that are part of the endpoint indicating that a list of * local addresses are removed from the endpoint. * * If any of the addresses is already in the bind address list of the * association, we do not send the chunk for that association. But it will not * affect other associations. * * Only sctp_setsockopt_bindx() is supposed to call this function. 
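 *
 * For orientation only (a hypothetical userspace sketch, not part of
 * this file), the path that ends here typically starts with the
 * lksctp-tools wrapper:
 *
 *	sctp_bindx(fd, (struct sockaddr *)&sa, 1, SCTP_BINDX_REM_ADDR);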
*/ static int sctp_send_asconf_del_ip(struct sock *sk, struct sockaddr *addrs, int addrcnt) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc; struct sctp_transport *transport; struct sctp_bind_addr *bp; struct sctp_chunk *chunk; union sctp_addr *laddr; void *addr_buf; struct sctp_af *af; struct sctp_sockaddr_entry *saddr; int i; int retval = 0; int stored = 0; chunk = NULL; if (!net->sctp.addip_enable) return retval; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, addrs, addrcnt); list_for_each_entry(asoc, &ep->asocs, asocs) { if (!asoc->peer.asconf_capable) continue; if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) continue; if (!sctp_state(asoc, ESTABLISHED)) continue; /* Check if any address in the packed array of addresses is * not present in the bind address list of the association. * If so, do not send the asconf chunk to its peer, but * continue with other associations. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); if (!af) { retval = -EINVAL; goto out; } if (!sctp_assoc_lookup_laddr(asoc, laddr)) break; addr_buf += af->sockaddr_len; } if (i < addrcnt) continue; /* Find one address in the association's bind address list * that is not in the packed array of addresses. This is to * make sure that we do not delete all the addresses in the * association. */ bp = &asoc->base.bind_addr; laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, addrcnt, sp); if ((laddr == NULL) && (addrcnt == 1)) { if (asoc->asconf_addr_del_pending) continue; asoc->asconf_addr_del_pending = kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); if (asoc->asconf_addr_del_pending == NULL) { retval = -ENOMEM; goto out; } asoc->asconf_addr_del_pending->sa.sa_family = addrs->sa_family; asoc->asconf_addr_del_pending->v4.sin_port = htons(bp->port); if (addrs->sa_family == AF_INET) { struct sockaddr_in *sin; sin = (struct sockaddr_in *)addrs; asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; } else if (addrs->sa_family == AF_INET6) { struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addrs; asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; } pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", __func__, asoc, &asoc->asconf_addr_del_pending->sa, asoc->asconf_addr_del_pending); asoc->src_out_of_asoc_ok = 1; stored = 1; goto skip_mkasconf; } if (laddr == NULL) return -EINVAL; /* We do not need RCU protection throughout this loop * because this is done under a socket lock from the * setsockopt call. */ chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, SCTP_PARAM_DEL_IP); if (!chunk) { retval = -ENOMEM; goto out; } skip_mkasconf: /* Reset use_as_src flag for the addresses in the bind address * list that are to be deleted. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { laddr = addr_buf; af = sctp_get_af_specific(laddr->v4.sin_family); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, laddr)) saddr->state = SCTP_ADDR_DEL; } addr_buf += af->sockaddr_len; } /* Update the route and saddr entries for all the transports * as some of the addresses in the bind address list are * about to be deleted and cannot be used as source addresses. 
	 */
		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
				    transports) {
			dst_release(transport->dst);
			sctp_transport_route(transport, NULL,
					     sctp_sk(asoc->base.sk));
		}

		if (stored)
			/* We don't need to transmit ASCONF */
			continue;
		retval = sctp_send_asconf(asoc, chunk);
	}
out:
	return retval;
}

/* set addr events to assocs in the endpoint.  ep and addr_wq must be locked */
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);
	union sctp_addr *addr;
	struct sctp_af *af;

	/* It is safe to write port space in caller. */
	addr = &addrw->a;
	addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
	af = sctp_get_af_specific(addr->sa.sa_family);
	if (!af)
		return -EINVAL;
	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
		return -EINVAL;

	if (addrw->state == SCTP_ADDR_NEW)
		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
	else
		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
}

/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
 *
 * API 8.1
 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
 *                int flags);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses.  Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses).  The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_bindx() returns 0.  On failure, sctp_bindx() returns
 * -1, and sets errno to the appropriate error code.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_bindx() will fail, setting errno to EINVAL.
 *
 * The flags parameter is formed from the bitwise OR of zero or more of
 * the following currently defined flags:
 *
 * SCTP_BINDX_ADD_ADDR
 *
 * SCTP_BINDX_REM_ADDR
 *
 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
 * addresses from the association.  The two flags are mutually exclusive;
 * if both are given, sctp_bindx() will fail with EINVAL.  A caller may
 * not remove all addresses from an association; sctp_bindx() will
 * reject such an attempt with EINVAL.
 *
 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
 * additional addresses with an endpoint after calling bind().  Or use
 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
 * socket is associated with, so that no newly accepted association will
 * be associated with those addresses.  If the endpoint supports dynamic
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
 * may cause an endpoint to send the appropriate message to the peer to
 * change the peer's address lists.
 *
 * Adding and removing addresses from a connected association is
 * optional functionality.  Implementations that do not support this
 * functionality should return EOPNOTSUPP.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
 * This is used for tunneling the sctp_bindx() request through
 * sctp_setsockopt() from userspace.
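 *
 * A hypothetical userspace sketch (illustration only; sctp_bindx() is
 * the lksctp-tools wrapper that tunnels the request down to here):
 *
 *	struct sockaddr_in sa[2];
 *
 *	memset(sa, 0, sizeof(sa));
 *	sa[0].sin_family = AF_INET;
 *	sa[0].sin_port = htons(5000);
 *	inet_pton(AF_INET, "192.0.2.1", &sa[0].sin_addr);
 *	sa[1] = sa[0];
 *	inet_pton(AF_INET, "192.0.2.2", &sa[1].sin_addr);
 *	if (sctp_bindx(fd, (struct sockaddr *)sa, 2, SCTP_BINDX_ADD_ADDR))
 *		perror("sctp_bindx");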
* * We don't use copy_from_user() for optimization: we first do the * sanity checks (buffer size -fast- and access check-healthy * pointer); if all of those succeed, then we can alloc the memory * (expensive operation) needed to copy the data to kernel. Then we do * the copying without checking the user space area * (__copy_from_user()). * * On exit there is no need to do sockfd_put(), sys_setsockopt() does * it. * * sk The sk of the socket * addrs The pointer to the addresses in user land * addrssize Size of the addrs buffer * op Operation to perform (add or remove, see the flags of * sctp_bindx) * * Returns 0 if ok, <0 errno code on error. */ static int sctp_setsockopt_bindx(struct sock *sk, struct sockaddr __user *addrs, int addrs_size, int op) { struct sockaddr *kaddrs; int err; int addrcnt = 0; int walk_size = 0; struct sockaddr *sa_addr; void *addr_buf; struct sctp_af *af; pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", __func__, sk, addrs, addrs_size, op); if (unlikely(addrs_size <= 0)) return -EINVAL; /* Check the user passed a healthy pointer. */ if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) return -EFAULT; /* Alloc space for the address array in kernel memory. */ kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN); if (unlikely(!kaddrs)) return -ENOMEM; if (__copy_from_user(kaddrs, addrs, addrs_size)) { kfree(kaddrs); return -EFAULT; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { if (walk_size + sizeof(sa_family_t) > addrs_size) { kfree(kaddrs); return -EINVAL; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { kfree(kaddrs); return -EINVAL; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* Do the work. */ switch (op) { case SCTP_BINDX_ADD_ADDR: err = sctp_bindx_add(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); break; case SCTP_BINDX_REM_ADDR: err = sctp_bindx_rem(sk, kaddrs, addrcnt); if (err) goto out; err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); break; default: err = -EINVAL; break; } out: kfree(kaddrs); return err; } /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) * * Common routine for handling connect() and sctp_connectx(). * Connect will come in with just a single address. */ static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size, sctp_assoc_t *assoc_id) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc = NULL; struct sctp_association *asoc2; struct sctp_transport *transport; union sctp_addr to; sctp_scope_t scope; long timeo; int err = 0; int addrcnt = 0; int walk_size = 0; union sctp_addr *sa_addr = NULL; void *addr_buf; unsigned short port; unsigned int f_flags = 0; sp = sctp_sk(sk); ep = sp->ep; /* connect() cannot be done on a socket that is already in ESTABLISHED * state - UDP-style peeled off socket or a TCP-style socket that * is already connected. * It cannot be done even on a TCP-style listening socket. */ if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) || (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { err = -EISCONN; goto out_free; } /* Walk through the addrs buffer and count the number of addresses. 
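 *
 * (Offsets for a hypothetical mixed v4/v6 packed array, as a sketch:
 *  entry 0 at addr_buf + 0; entry 1 at addr_buf + 16 when entry 0 is a
 *  struct sockaddr_in; entry 2 at addr_buf + 44 when entry 1 is a
 *  struct sockaddr_in6.  Each step of the walk adds af->sockaddr_len.)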
*/ addr_buf = kaddrs; while (walk_size < addrs_size) { struct sctp_af *af; if (walk_size + sizeof(sa_family_t) > addrs_size) { err = -EINVAL; goto out_free; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { err = -EINVAL; goto out_free; } port = ntohs(sa_addr->v4.sin_port); /* Save current address so we can work with it */ memcpy(&to, sa_addr, af->sockaddr_len); err = sctp_verify_addr(sk, &to, af->sockaddr_len); if (err) goto out_free; /* Make sure the destination port is correctly set * in all addresses. */ if (asoc && asoc->peer.port && asoc->peer.port != port) { err = -EINVAL; goto out_free; } /* Check if there already is a matching association on the * endpoint (other than the one created here). */ asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); if (asoc2 && asoc2 != asoc) { if (asoc2->state >= SCTP_STATE_ESTABLISHED) err = -EISCONN; else err = -EALREADY; goto out_free; } /* If we could not find a matching association on the endpoint, * make sure that there is no peeled-off association matching * the peer address even on another socket. */ if (sctp_endpoint_is_peeled_off(ep, &to)) { err = -EADDRNOTAVAIL; goto out_free; } if (!asoc) { /* If a bind() or sctp_bindx() is not called prior to * an sctp_connectx() call, the system picks an * ephemeral port and will choose an address set * equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_free; } } else { /* * If an unprivileged user inherits a 1-many * style socket with open associations on a * privileged port, it MAY be permitted to * accept new associations, but it SHOULD NOT * be permitted to open new associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_free; } } scope = sctp_scope(&to); asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!asoc) { err = -ENOMEM; goto out_free; } err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { goto out_free; } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* In case the user of sctp_connectx() wants an association * id back, assign one now. */ if (assoc_id) { err = sctp_assoc_set_id(asoc, GFP_KERNEL); if (err < 0) goto out_free; } err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) { goto out_free; } /* Initialize sk's dport and daddr for getpeername() */ inet_sk(sk)->inet_dport = htons(asoc->peer.port); sp->pf->to_sk_daddr(sa_addr, sk); sk->sk_err = 0; /* in-kernel sockets don't generally have a file allocated to them * if all they do is call sock_create_kern(). */ if (sk->sk_socket->file) f_flags = sk->sk_socket->file->f_flags; timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); if (assoc_id) *assoc_id = asoc->assoc_id; err = sctp_wait_for_connect(asoc, &timeo); /* Note: the asoc may be freed after the return of * sctp_wait_for_connect. */ /* Don't free association on exit. 
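 *
 * (A nonblocking caller typically gets -EINPROGRESS back from the wait
 *  above and finishes association setup asynchronously; hypothetical
 *  userspace sketch, illustration only:
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (sctp_connectx(fd, addrs, cnt, &id) < 0 && errno == EINPROGRESS)
 *		poll for POLLOUT, then query SCTP_STATUS for the assoc.)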
	 */
	asoc = NULL;

out_free:
	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
		 __func__, asoc, kaddrs, err);

	if (asoc) {
		/* sctp_primitive_ASSOCIATE may have added this association
		 * to the hash table; try to unhash it, just in case.  It's
		 * a no-op if it wasn't hashed, so we're safe.
		 */
		sctp_association_free(asoc);
	}
	return err;
}

/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
 *
 * API 8.9
 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
 * 			sctp_assoc_t *asoc);
 *
 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
 * or IPv6 addresses.
 *
 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
 * Section 3.1.2 for this usage.
 *
 * addrs is a pointer to an array of one or more socket addresses.  Each
 * address is contained in its appropriate structure (i.e. struct
 * sockaddr_in or struct sockaddr_in6); the family of the address type
 * must be used to distinguish the address length (note that this
 * representation is termed a "packed array" of addresses).  The caller
 * specifies the number of addresses in the array with addrcnt.
 *
 * On success, sctp_connectx() returns 0.  It also sets the assoc_id to
 * the association id of the new association.  On failure, sctp_connectx()
 * returns -1, and sets errno to the appropriate error code.  The assoc_id
 * is not touched by the kernel.
 *
 * For SCTP, the port given in each socket address must be the same, or
 * sctp_connectx() will fail, setting errno to EINVAL.
 *
 * An application can use sctp_connectx to initiate an association with
 * an endpoint that is multi-homed.  Much like sctp_bindx() this call
 * allows a caller to specify multiple addresses at which a peer can be
 * reached.  The way the SCTP stack uses the list of addresses to set up
 * the association is implementation dependent.  This function only
 * specifies that the stack will try to make use of all the addresses in
 * the list when needed.
 *
 * Note that the list of addresses passed in is only used for setting up
 * the association.  It does not necessarily equal the set of addresses
 * the peer uses for the resulting association.  If the caller wants to
 * find out the set of peer addresses, it must use sctp_getpaddrs() to
 * retrieve them after the association has been set up.
 *
 * Basically do nothing but copying the addresses from user to kernel
 * land and invoking sctp_connectx().  This is used for tunneling
 * the sctp_connectx() request through sctp_setsockopt() from userspace.
 *
 * We don't use copy_from_user() for optimization: we first do the
 * sanity checks (buffer size -fast- and access check-healthy
 * pointer); if all of those succeed, then we can alloc the memory
 * (expensive operation) needed to copy the data to kernel.  Then we do
 * the copying without checking the user space area
 * (__copy_from_user()).
 *
 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
 * it.
 *
 * sk          The sk of the socket
 * addrs       The pointer to the addresses in user land
 * addrssize   Size of the addrs buffer
 *
 * Returns >=0 if ok, <0 errno code on error.
 */
static int __sctp_setsockopt_connectx(struct sock *sk,
				      struct sockaddr __user *addrs,
				      int addrs_size,
				      sctp_assoc_t *assoc_id)
{
	struct sockaddr *kaddrs;
	gfp_t gfp = GFP_KERNEL;
	int err = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

	if (unlikely(addrs_size <= 0))
		return -EINVAL;

	/* Check the user passed a healthy pointer.
	 */
	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
		return -EFAULT;

	/* Alloc space for the address array in kernel memory. */
	if (sk->sk_socket->file)
		gfp = GFP_USER | __GFP_NOWARN;
	kaddrs = kmalloc(addrs_size, gfp);
	if (unlikely(!kaddrs))
		return -ENOMEM;

	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
		err = -EFAULT;
	} else {
		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
	}

	kfree(kaddrs);

	return err;
}

/*
 * This is an older interface.  It's kept for backward compatibility
 * to the option that doesn't provide association id.
 */
static int sctp_setsockopt_connectx_old(struct sock *sk,
					struct sockaddr __user *addrs,
					int addrs_size)
{
	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
}

/*
 * New interface for the API.  Since the API is done with a socket
 * option, to make it simple we feed back the association id as a return
 * indication to the call.  Error is always negative and association id
 * is always positive.
 */
static int sctp_setsockopt_connectx(struct sock *sk,
				    struct sockaddr __user *addrs,
				    int addrs_size)
{
	sctp_assoc_t assoc_id = 0;
	int err = 0;

	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);

	if (err)
		return err;
	else
		return assoc_id;
}

/*
 * New (hopefully final) interface for the API.
 * We use the sctp_getaddrs_old structure so that the user-space library
 * can avoid any unnecessary allocations.  The only difference is that
 * we store the actual length of the address buffer into the addrs_num
 * structure member.  That way we can re-use the existing code.
 */
#ifdef CONFIG_COMPAT
struct compat_sctp_getaddrs_old {
	sctp_assoc_t	assoc_id;
	s32		addr_num;
	compat_uptr_t	addrs;		/* struct sockaddr * */
};
#endif

static int sctp_getsockopt_connectx3(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_getaddrs_old param;
	sctp_assoc_t assoc_id = 0;
	int err = 0;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sctp_getaddrs_old param32;

		if (len < sizeof(param32))
			return -EINVAL;
		if (copy_from_user(&param32, optval, sizeof(param32)))
			return -EFAULT;

		param.assoc_id = param32.assoc_id;
		param.addr_num = param32.addr_num;
		param.addrs = compat_ptr(param32.addrs);
	} else
#endif
	{
		if (len < sizeof(param))
			return -EINVAL;
		if (copy_from_user(&param, optval, sizeof(param)))
			return -EFAULT;
	}

	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
					 param.addrs, param.addr_num,
					 &assoc_id);
	if (err == 0 || err == -EINPROGRESS) {
		if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
			return -EFAULT;
		if (put_user(sizeof(assoc_id), optlen))
			return -EFAULT;
	}

	return err;
}

/* API 3.1.4 close() - UDP Style Syntax
 * Applications use close() to perform graceful shutdown (as described in
 * Section 10.1 of [SCTP]) on ALL the associations currently represented
 * by a UDP-style socket.
 *
 * The syntax is
 *
 *   ret = close(int sd);
 *
 *   sd      - the socket descriptor of the associations to be closed.
 *
 * To gracefully shutdown a specific association represented by the
 * UDP-style socket, an application should use the sendmsg() call,
 * passing no user data, but including the appropriate flag in the
 * ancillary data (see Section xxxx).
 *
 * If sd in the close() call is a branched-off socket representing only
 * one association, the shutdown is performed on that association only.
 *
 * 4.1.6 close() - TCP Style Syntax
 *
 * Applications use close() to gracefully close down an association.
 *
 * The syntax is:
 *
 *    int close(int sd);
 *
 *      sd      - the socket descriptor of the association to be closed.
* * After an application calls close() on a socket descriptor, no further * socket operations will succeed on that descriptor. * * API 7.1.4 SO_LINGER * * An application using the TCP-style socket can use this option to * perform the SCTP ABORT primitive. The linger option structure is: * * struct linger { * int l_onoff; // option on/off * int l_linger; // linger time * }; * * To enable the option, set l_onoff to 1. If the l_linger value is set * to 0, calling close() is the same as the ABORT primitive. If the * value is set to a negative value, the setsockopt() call will return * an error. If the value is set to a positive value linger_time, the * close() can be blocked for at most linger_time ms. If the graceful * shutdown phase does not finish during this period, close() will * return but the graceful shutdown phase continues in the system. */ static void sctp_close(struct sock *sk, long timeout) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; struct sctp_association *asoc; struct list_head *pos, *temp; unsigned int data_was_unread; pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_state = SCTP_SS_CLOSING; ep = sctp_sk(sk)->ep; /* Clean up any skbs sitting on the receive queue. */ data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); /* Walk all associations on an endpoint. */ list_for_each_safe(pos, temp, &ep->asocs) { asoc = list_entry(pos, struct sctp_association, asocs); if (sctp_style(sk, TCP)) { /* A closed association can still be in the list if * it belongs to a TCP-style listening socket that is * not yet accepted. If so, free it. If not, send an * ABORT or SHUTDOWN based on the linger options. */ if (sctp_state(asoc, CLOSED)) { sctp_association_free(asoc); continue; } } if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || !skb_queue_empty(&asoc->ulpq.reasm) || (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { struct sctp_chunk *chunk; chunk = sctp_make_abort_user(asoc, NULL, 0); sctp_primitive_ABORT(net, asoc, chunk); } else sctp_primitive_SHUTDOWN(net, asoc, NULL); } /* On a TCP-style socket, block for at most linger_time if set. */ if (sctp_style(sk, TCP) && timeout) sctp_wait_for_close(sk, timeout); /* This will run the backlog queue. */ release_sock(sk); /* Supposedly, no process has access to the socket, but * the net layers still may. * Also, sctp_destroy_sock() needs to be called with addr_wq_lock * held and that should be grabbed before socket lock. */ spin_lock_bh(&net->sctp.addr_wq_lock); bh_lock_sock(sk); /* Hold the sock, since sk_common_release() will put sock_put() * and we have just a little more cleanup. */ sock_hold(sk); sk_common_release(sk); bh_unlock_sock(sk); spin_unlock_bh(&net->sctp.addr_wq_lock); sock_put(sk); SCTP_DBG_OBJCNT_DEC(sock); } /* Handle EPIPE error. */ static int sctp_error(struct sock *sk, int flags, int err) { if (err == -EPIPE) err = sock_error(sk) ? : -EPIPE; if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); return err; } /* API 3.1.3 sendmsg() - UDP Style Syntax * * An application uses sendmsg() and recvmsg() calls to transmit data to * and receive data from its peer. * * ssize_t sendmsg(int socket, const struct msghdr *message, * int flags); * * socket - the socket descriptor of the endpoint. * message - pointer to the msghdr structure which contains a single * user message and possibly some ancillary data. 
* * See Section 5 for complete description of the data * structures. * * flags - flags sent or received with the user message, see Section * 5 for complete description of the flags. * * Note: This function could use a rewrite especially when explicit * connect support comes in. */ /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */ static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *new_asoc = NULL, *asoc = NULL; struct sctp_transport *transport, *chunk_tp; struct sctp_chunk *chunk; union sctp_addr to; struct sockaddr *msg_name = NULL; struct sctp_sndrcvinfo default_sinfo; struct sctp_sndrcvinfo *sinfo; struct sctp_initmsg *sinit; sctp_assoc_t associd = 0; sctp_cmsgs_t cmsgs = { NULL }; sctp_scope_t scope; bool fill_sinfo_ttl = false, wait_connect = false; struct sctp_datamsg *datamsg; int msg_flags = msg->msg_flags; __u16 sinfo_flags = 0; long timeo; int err; err = 0; sp = sctp_sk(sk); ep = sp->ep; pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, msg, msg_len, ep); /* We cannot send a message over a TCP-style listening socket. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { err = -EPIPE; goto out_nounlock; } /* Parse out the SCTP CMSGs. */ err = sctp_msghdr_parse(msg, &cmsgs); if (err) { pr_debug("%s: msghdr parse err:%x\n", __func__, err); goto out_nounlock; } /* Fetch the destination address for this packet. This * address only selects the association--it is not necessarily * the address we will send to. * For a peeled-off socket, msg_name is ignored. */ if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { int msg_namelen = msg->msg_namelen; err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, msg_namelen); if (err) return err; if (msg_namelen > sizeof(to)) msg_namelen = sizeof(to); memcpy(&to, msg->msg_name, msg_namelen); msg_name = msg->msg_name; } sinit = cmsgs.init; if (cmsgs.sinfo != NULL) { memset(&default_sinfo, 0, sizeof(default_sinfo)); default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid; default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags; default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid; default_sinfo.sinfo_context = cmsgs.sinfo->snd_context; default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id; sinfo = &default_sinfo; fill_sinfo_ttl = true; } else { sinfo = cmsgs.srinfo; } /* Did the user specify SNDINFO/SNDRCVINFO? */ if (sinfo) { sinfo_flags = sinfo->sinfo_flags; associd = sinfo->sinfo_assoc_id; } pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, msg_len, sinfo_flags); /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { err = -EINVAL; goto out_nounlock; } /* If SCTP_EOF is set, no data can be sent. Disallow sending zero * length messages when SCTP_EOF|SCTP_ABORT is not set. * If SCTP_ABORT is set, the message length could be non zero with * the msg_iov set to the user abort reason. */ if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { err = -EINVAL; goto out_nounlock; } /* If SCTP_ADDR_OVER is set, there must be an address * specified in msg_name. 
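 *
 * (Hypothetical userspace sketch, illustration only: the lksctp-tools
 *  wrapper sets this flag together with a destination address, e.g.
 *
 *	sctp_sendmsg(fd, buf, len, (struct sockaddr *)&dst, sizeof(dst),
 *		     ppid, SCTP_ADDR_OVER, stream, ttl, context);
 *  )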
*/ if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { err = -EINVAL; goto out_nounlock; } transport = NULL; pr_debug("%s: about to look up association\n", __func__); lock_sock(sk); /* If a msg_name has been specified, assume this is to be used. */ if (msg_name) { /* Look for a matching association on the endpoint. */ asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); /* If we could not find a matching association on the * endpoint, make sure that it is not a TCP-style * socket that already has an association or there is * no peeled-off association on another socket. */ if (!asoc && ((sctp_style(sk, TCP) && (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING))) || sctp_endpoint_is_peeled_off(ep, &to))) { err = -EADDRNOTAVAIL; goto out_unlock; } } else { asoc = sctp_id2assoc(sk, associd); if (!asoc) { err = -EPIPE; goto out_unlock; } } if (asoc) { pr_debug("%s: just looked up association:%p\n", __func__, asoc); /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED * socket that has an association in CLOSED state. This can * happen when an accepted socket has an association that is * already CLOSED. */ if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { err = -EPIPE; goto out_unlock; } if (sinfo_flags & SCTP_EOF) { pr_debug("%s: shutting down association:%p\n", __func__, asoc); sctp_primitive_SHUTDOWN(net, asoc, NULL); err = 0; goto out_unlock; } if (sinfo_flags & SCTP_ABORT) { chunk = sctp_make_abort_user(asoc, msg, msg_len); if (!chunk) { err = -ENOMEM; goto out_unlock; } pr_debug("%s: aborting association:%p\n", __func__, asoc); sctp_primitive_ABORT(net, asoc, chunk); err = 0; goto out_unlock; } } /* Do we need to create the association? */ if (!asoc) { pr_debug("%s: there is no association yet\n", __func__); if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { err = -EINVAL; goto out_unlock; } /* Check for invalid stream against the stream counts, * either the default or the user specified stream counts. */ if (sinfo) { if (!sinit || !sinit->sinit_num_ostreams) { /* Check against the defaults. */ if (sinfo->sinfo_stream >= sp->initmsg.sinit_num_ostreams) { err = -EINVAL; goto out_unlock; } } else { /* Check against the requested. */ if (sinfo->sinfo_stream >= sinit->sinit_num_ostreams) { err = -EINVAL; goto out_unlock; } } } /* * API 3.1.2 bind() - UDP Style Syntax * If a bind() or sctp_bindx() is not called prior to a * sendmsg() call that initiates a new association, the * system picks an ephemeral port and will choose an address * set equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_unlock; } } else { /* * If an unprivileged user inherits a one-to-many * style socket with open associations on a privileged * port, it MAY be permitted to accept new associations, * but it SHOULD NOT be permitted to open new * associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_unlock; } } scope = sctp_scope(&to); new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!new_asoc) { err = -ENOMEM; goto out_unlock; } asoc = new_asoc; err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { err = -ENOMEM; goto out_free; } /* If the SCTP_INIT ancillary data is specified, set all * the association init values accordingly. 
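 *
 * (Hypothetical userspace sketch, illustration only, of how such a
 *  cmsg could be built for sendmsg():
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_initmsg))];
 *	struct cmsghdr *c = (struct cmsghdr *)cbuf;
 *	struct sctp_initmsg *init;
 *
 *	c->cmsg_level = IPPROTO_SCTP;
 *	c->cmsg_type = SCTP_INIT;
 *	c->cmsg_len = CMSG_LEN(sizeof(*init));
 *	init = (struct sctp_initmsg *)CMSG_DATA(c);
 *	init->sinit_num_ostreams = 5;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = c->cmsg_len;
 *  )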
*/ if (sinit) { if (sinit->sinit_num_ostreams) { asoc->c.sinit_num_ostreams = sinit->sinit_num_ostreams; } if (sinit->sinit_max_instreams) { asoc->c.sinit_max_instreams = sinit->sinit_max_instreams; } if (sinit->sinit_max_attempts) { asoc->max_init_attempts = sinit->sinit_max_attempts; } if (sinit->sinit_max_init_timeo) { asoc->max_init_timeo = msecs_to_jiffies(sinit->sinit_max_init_timeo); } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } } /* ASSERT: we have a valid association at this point. */ pr_debug("%s: we have a valid association\n", __func__); if (!sinfo) { /* If the user didn't specify SNDINFO/SNDRCVINFO, make up * one with some defaults. */ memset(&default_sinfo, 0, sizeof(default_sinfo)); default_sinfo.sinfo_stream = asoc->default_stream; default_sinfo.sinfo_flags = asoc->default_flags; default_sinfo.sinfo_ppid = asoc->default_ppid; default_sinfo.sinfo_context = asoc->default_context; default_sinfo.sinfo_timetolive = asoc->default_timetolive; default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); sinfo = &default_sinfo; } else if (fill_sinfo_ttl) { /* In case SNDINFO was specified, we still need to fill * it with a default ttl from the assoc here. */ sinfo->sinfo_timetolive = asoc->default_timetolive; } /* API 7.1.7, the sndbuf size per association bounds the * maximum size of data that can be sent in a single send call. */ if (msg_len > sk->sk_sndbuf) { err = -EMSGSIZE; goto out_free; } if (asoc->pmtu_pending) sctp_assoc_pending_pmtu(sk, asoc); /* If fragmentation is disabled and the message length exceeds the * association fragmentation point, return EMSGSIZE. The I-D * does not specify what this error is, but this looks like * a great fit. */ if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { err = -EMSGSIZE; goto out_free; } /* Check for invalid stream. */ if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { err = -EINVAL; goto out_free; } if (sctp_wspace(asoc) < msg_len) sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc)); timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); if (!sctp_wspace(asoc)) { err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); if (err) goto out_free; } /* If an address is passed with the sendto/sendmsg call, it is used * to override the primary destination address in the TCP model, or * when SCTP_ADDR_OVER flag is set in the UDP model. */ if ((sctp_style(sk, TCP) && msg_name) || (sinfo_flags & SCTP_ADDR_OVER)) { chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); if (!chunk_tp) { err = -EINVAL; goto out_free; } } else chunk_tp = NULL; /* Auto-connect, if we aren't connected already. */ if (sctp_state(asoc, CLOSED)) { err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) goto out_free; wait_connect = true; pr_debug("%s: we associated primitively\n", __func__); } /* Break the message into multiple chunks of maximum size. */ datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); if (IS_ERR(datamsg)) { err = PTR_ERR(datamsg); goto out_free; } /* Now send the (possibly) fragmented message. */ list_for_each_entry(chunk, &datamsg->chunks, frag_list) { sctp_chunk_hold(chunk); /* Do accounting for the write space. */ sctp_set_owner_w(chunk); chunk->transport = chunk_tp; } /* Send it to the lower layers. Note: all chunks * must either fail or succeed. The lower layer * works that way today. Keep it that way or this * breaks. 
*/ err = sctp_primitive_SEND(net, asoc, datamsg); /* Did the lower layer accept the chunk? */ if (err) { sctp_datamsg_free(datamsg); goto out_free; } pr_debug("%s: we sent primitively\n", __func__); sctp_datamsg_put(datamsg); err = msg_len; if (unlikely(wait_connect)) { timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); sctp_wait_for_connect(asoc, &timeo); } /* If we are already past ASSOCIATE, the lower * layers are responsible for association cleanup. */ goto out_unlock; out_free: if (new_asoc) sctp_association_free(asoc); out_unlock: release_sock(sk); out_nounlock: return sctp_error(sk, msg_flags, err); #if 0 do_sock_err: if (msg_len) err = msg_len; else err = sock_error(sk); goto out; do_interrupted: if (msg_len) err = msg_len; goto out; #endif /* 0 */ } /* This is an extended version of skb_pull() that removes the data from the * start of a skb even when data is spread across the list of skb's in the * frag_list. len specifies the total amount of data that needs to be removed. * when 'len' bytes could be removed from the skb, it returns 0. * If 'len' exceeds the total skb length, it returns the no. of bytes that * could not be removed. */ static int sctp_skb_pull(struct sk_buff *skb, int len) { struct sk_buff *list; int skb_len = skb_headlen(skb); int rlen; if (len <= skb_len) { __skb_pull(skb, len); return 0; } len -= skb_len; __skb_pull(skb, skb_len); skb_walk_frags(skb, list) { rlen = sctp_skb_pull(list, len); skb->len -= (len-rlen); skb->data_len -= (len-rlen); if (!rlen) return 0; len = rlen; } return len; } /* API 3.1.3 recvmsg() - UDP Style Syntax * * ssize_t recvmsg(int socket, struct msghdr *message, * int flags); * * socket - the socket descriptor of the endpoint. * message - pointer to the msghdr structure which contains a single * user message and possibly some ancillary data. * * See Section 5 for complete description of the data * structures. * * flags - flags sent or received with the user message, see Section * 5 for complete description of the flags. */ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct sctp_ulpevent *event = NULL; struct sctp_sock *sp = sctp_sk(sk); struct sk_buff *skb, *head_skb; int copied; int err = 0; int skb_len; pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, addr_len); lock_sock(sk); if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) { err = -ENOTCONN; goto out; } skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; /* Get the total length of the skb including any skb's in the * frag_list. */ skb_len = skb->len; copied = skb_len; if (copied > len) copied = len; err = skb_copy_datagram_msg(skb, 0, msg, copied); event = sctp_skb2event(skb); if (err) goto out_free; if (event->chunk && event->chunk->head_skb) head_skb = event->chunk->head_skb; else head_skb = skb; sock_recv_ts_and_drops(msg, sk, head_skb); if (sctp_ulpevent_is_notification(event)) { msg->msg_flags |= MSG_NOTIFICATION; sp->pf->event_msgname(event, msg->msg_name, addr_len); } else { sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len); } /* Check if we allow SCTP_NXTINFO. */ if (sp->recvnxtinfo) sctp_ulpevent_read_nxtinfo(event, msg, sk); /* Check if we allow SCTP_RCVINFO. */ if (sp->recvrcvinfo) sctp_ulpevent_read_rcvinfo(event, msg); /* Check if we allow SCTP_SNDRCVINFO. 
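 *
 * (Delivery of SCTP_SNDRCVINFO is gated on the sctp_data_io_event bit
 *  that userspace sets via the SCTP_EVENTS option; hypothetical sketch,
 *  illustration only:
 *
 *	struct sctp_event_subscribe ev = { .sctp_data_io_event = 1 };
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
 *  )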
	 */
	if (sp->subscribe.sctp_data_io_event)
		sctp_ulpevent_read_sndrcvinfo(event, msg);

	err = copied;

	/* If skb's length exceeds the user's buffer, update the skb and
	 * push it back to the receive_queue so that the next call to
	 * recvmsg() will return the remaining data.  Don't set MSG_EOR.
	 */
	if (skb_len > copied) {
		msg->msg_flags &= ~MSG_EOR;
		if (flags & MSG_PEEK)
			goto out_free;
		sctp_skb_pull(skb, copied);
		skb_queue_head(&sk->sk_receive_queue, skb);

		/* When only partial message is copied to the user, increase
		 * rwnd by that amount.  If all the data in the skb is read,
		 * rwnd is updated when the event is freed.
		 */
		if (!sctp_ulpevent_is_notification(event))
			sctp_assoc_rwnd_increase(event->asoc, copied);
		goto out;
	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
		   (event->msg_flags & MSG_EOR))
		msg->msg_flags |= MSG_EOR;
	else
		msg->msg_flags &= ~MSG_EOR;

out_free:
	if (flags & MSG_PEEK) {
		/* Release the skb reference acquired after peeking the skb in
		 * sctp_skb_recv_datagram().
		 */
		kfree_skb(skb);
	} else {
		/* Free the event which includes releasing the reference to
		 * the owner of the skb, freeing the skb and updating the
		 * rwnd.
		 */
		sctp_ulpevent_free(event);
	}
out:
	release_sock(sk);
	return err;
}

/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag.  If enabled, no SCTP message
 * fragmentation will be performed.  Instead, if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * instead an error will be indicated to the user.
 */
static int sctp_setsockopt_disable_fragments(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;

	return 0;
}

static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
				  unsigned int optlen)
{
	struct sctp_association *asoc;
	struct sctp_ulpevent *event;

	if (optlen > sizeof(struct sctp_event_subscribe))
		return -EINVAL;
	if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
		return -EFAULT;

	/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
	 * if there is no data to be sent or retransmit, the stack will
	 * immediately send up this notification.
	 */
	if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
				       &sctp_sk(sk)->subscribe)) {
		asoc = sctp_id2assoc(sk, 0);

		if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
			event = sctp_ulpevent_make_sender_dry_event(asoc,
								    GFP_ATOMIC);
			if (!event)
				return -ENOMEM;

			sctp_ulpq_tail_event(&asoc->ulpq, event);
		}
	}

	return 0;
}

/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set, it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
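 *
 * Hypothetical userspace sketch (illustration only): auto-close idle
 * associations after two minutes on a one-to-many socket:
 *
 *	int secs = 120;
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));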
 */
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
				     unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct net *net = sock_net(sk);

	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (optlen != sizeof(int))
		return -EINVAL;
	if (copy_from_user(&sp->autoclose, optval, optlen))
		return -EFAULT;

	if (sp->autoclose > net->sctp.max_autoclose)
		sp->autoclose = net->sctp.max_autoclose;

	return 0;
}

/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 * };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for.  This value will apply to all addresses of an
 *                     association if the spp_address field is empty.  Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association.  The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE  - Enable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled.  Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified.  Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_HB_TIME_IS_ZERO - Specifies that the time for
 *                     heartbeat delay is to be set to the value of 0
 *                     milliseconds.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address.
Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.  Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive.  Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack.  The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address.  Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack.  If the spp_address field is blank then
 *                     delayed sack is disabled for the entire association.  Note
 *                     also that this field is mutually exclusive to
 *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                     results.
 */
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
				       struct sctp_transport   *trans,
				       struct sctp_association *asoc,
				       struct sctp_sock        *sp,
				       int                      hb_change,
				       int                      pmtud_change,
				       int                      sackdelay_change)
{
	int error;

	if (params->spp_flags & SPP_HB_DEMAND && trans) {
		struct net *net = sock_net(trans->asoc->base.sk);

		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
		if (error)
			return error;
	}

	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
	 * this field is ignored.  Note also that a value of zero indicates
	 * the current setting should be left unchanged.
	 */
	if (params->spp_flags & SPP_HB_ENABLE) {

		/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
		 * set.  This lets us use 0 value when this flag
		 * is set.
		 */
		if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
			params->spp_hbinterval = 0;

		if (params->spp_hbinterval ||
		    (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
			if (trans) {
				trans->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else if (asoc) {
				asoc->hbinterval =
				    msecs_to_jiffies(params->spp_hbinterval);
			} else {
				sp->hbinterval = params->spp_hbinterval;
			}
		}
	}

	if (hb_change) {
		if (trans) {
			trans->param_flags =
				(trans->param_flags & ~SPP_HB) | hb_change;
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_HB) | hb_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_HB) | hb_change;
		}
	}

	/* When Path MTU discovery is disabled the value specified here will
	 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
	 * include the flag SPP_PMTUD_DISABLE for this field to have any
	 * effect).
	 */
	if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
		if (trans) {
			trans->pathmtu = params->spp_pathmtu;
			sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
		} else if (asoc) {
			asoc->pathmtu = params->spp_pathmtu;
			sctp_frag_point(asoc, params->spp_pathmtu);
		} else {
			sp->pathmtu = params->spp_pathmtu;
		}
	}

	if (pmtud_change) {
		if (trans) {
			int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
				(params->spp_flags & SPP_PMTUD_ENABLE);
			trans->param_flags =
				(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
			if (update) {
				sctp_transport_pmtu(trans, sctp_opt2sk(sp));
				sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
			}
		} else if (asoc) {
			asoc->param_flags =
				(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
		} else {
			sp->param_flags =
				(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
		}
	}

	/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
	 * value of this field is ignored.  Note also that a value of zero
	 * indicates the current setting should be left unchanged.
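	 *
	 * For example (a hypothetical sketch, not taken from this file),
	 * a caller enabling a 200 ms sack delay association-wide would
	 * pass:
	 *
	 *	params.spp_sackdelay = 200;
	 *	params.spp_flags     = SPP_SACKDELAY_ENABLE;
	 *
	 * with spp_address left as the wildcard (empty) address.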
*/ if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { if (trans) { trans->sackdelay = msecs_to_jiffies(params->spp_sackdelay); } else if (asoc) { asoc->sackdelay = msecs_to_jiffies(params->spp_sackdelay); } else { sp->sackdelay = params->spp_sackdelay; } } if (sackdelay_change) { if (trans) { trans->param_flags = (trans->param_flags & ~SPP_SACKDELAY) | sackdelay_change; } else if (asoc) { asoc->param_flags = (asoc->param_flags & ~SPP_SACKDELAY) | sackdelay_change; } else { sp->param_flags = (sp->param_flags & ~SPP_SACKDELAY) | sackdelay_change; } } /* Note that a value of zero indicates the current setting should be left unchanged. */ if (params->spp_pathmaxrxt) { if (trans) { trans->pathmaxrxt = params->spp_pathmaxrxt; } else if (asoc) { asoc->pathmaxrxt = params->spp_pathmaxrxt; } else { sp->pathmaxrxt = params->spp_pathmaxrxt; } } return 0; } static int sctp_setsockopt_peer_addr_params(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_paddrparams params; struct sctp_transport *trans = NULL; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); int error; int hb_change, pmtud_change, sackdelay_change; if (optlen != sizeof(struct sctp_paddrparams)) return -EINVAL; if (copy_from_user(&params, optval, optlen)) return -EFAULT; /* Validate flags and value parameters. */ hb_change = params.spp_flags & SPP_HB; pmtud_change = params.spp_flags & SPP_PMTUD; sackdelay_change = params.spp_flags & SPP_SACKDELAY; if (hb_change == SPP_HB || pmtud_change == SPP_PMTUD || sackdelay_change == SPP_SACKDELAY || params.spp_sackdelay > 500 || (params.spp_pathmtu && params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) return -EINVAL; /* If an address other than INADDR_ANY is specified, and * no transport is found, then the request is invalid. */ if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { trans = sctp_addr_id2transport(sk, &params.spp_address, params.spp_assoc_id); if (!trans) return -EINVAL; } /* Get association, if assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.spp_assoc_id); if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Heartbeat demand can only be sent on a transport or * association, but not a socket. */ if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) return -EINVAL; /* Process parameters. */ error = sctp_apply_peer_addr_params(&params, trans, asoc, sp, hb_change, pmtud_change, sackdelay_change); if (error) return error; /* If changes are for association, also apply parameters to each * transport. */ if (!trans && asoc) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { sctp_apply_peer_addr_params(&params, trans, asoc, sp, hb_change, pmtud_change, sackdelay_change); } } return 0; } static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) { return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; } static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) { return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; } /* * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) * * This option will effect the way delayed acks are performed. This * option allows you to get or set the delayed ack time, in * milliseconds. It also allows changing the delayed ack frequency. * Changing the frequency to 1 disables the delayed sack algorithm. If * the assoc_id is 0, then this sets or gets the endpoints default * values. 
If the assoc_id field is non-zero, then the set or get
 * affects the specified association for the one to many model (the
 * assoc_id field is ignored by the one to one model).  Note that if
 * sack_delay or sack_freq are 0 when setting this option, then the
 * current values will remain unchanged.
 *
 * struct sctp_sack_info {
 *     sctp_assoc_t            sack_assoc_id;
 *     uint32_t                sack_delay;
 *     uint32_t                sack_freq;
 * };
 *
 * sack_assoc_id -  This parameter indicates which association the user
 *    is performing an action upon.  Note that if this field's value is
 *    zero then the endpoint's default value is changed (affecting future
 *    associations only).
 *
 * sack_delay -  This parameter contains the number of milliseconds that
 *    the user is requesting the delayed ACK timer be set to.  Note that
 *    this value is defined in the standard to be between 200 and 500
 *    milliseconds.
 *
 * sack_freq -  This parameter contains the number of packets that must
 *    be received before a sack is sent without waiting for the delay
 *    timer to expire.  The default value for this is 2, setting this
 *    value to 1 will disable the delayed sack algorithm.
 */
static int sctp_setsockopt_delayed_ack(struct sock *sk,
				       char __user *optval, unsigned int optlen)
{
	struct sctp_sack_info    params;
	struct sctp_transport   *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock        *sp = sctp_sk(sk);

	if (optlen == sizeof(struct sctp_sack_info)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;

		if (params.sack_delay == 0 && params.sack_freq == 0)
			return 0;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
				    "Use struct sctp_sack_info instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;

		if (params.sack_delay == 0)
			params.sack_freq = 1;
		else
			params.sack_freq = 0;
	} else
		return -EINVAL;

	/* Validate value parameter. */
	if (params.sack_delay > 500)
		return -EINVAL;

	/* Get association, if sack_assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
	if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (params.sack_delay) {
		if (asoc) {
			asoc->sackdelay =
				msecs_to_jiffies(params.sack_delay);
			asoc->param_flags =
				sctp_spp_sackdelay_enable(asoc->param_flags);
		} else {
			sp->sackdelay = params.sack_delay;
			sp->param_flags =
				sctp_spp_sackdelay_enable(sp->param_flags);
		}
	}

	if (params.sack_freq == 1) {
		if (asoc) {
			asoc->param_flags =
				sctp_spp_sackdelay_disable(asoc->param_flags);
		} else {
			sp->param_flags =
				sctp_spp_sackdelay_disable(sp->param_flags);
		}
	} else if (params.sack_freq > 1) {
		if (asoc) {
			asoc->sackfreq = params.sack_freq;
			asoc->param_flags =
				sctp_spp_sackdelay_enable(asoc->param_flags);
		} else {
			sp->sackfreq = params.sack_freq;
			sp->param_flags =
				sctp_spp_sackdelay_enable(sp->param_flags);
		}
	}

	/* If change is for association, also apply to each transport.
*/ if (asoc) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { if (params.sack_delay) { trans->sackdelay = msecs_to_jiffies(params.sack_delay); trans->param_flags = sctp_spp_sackdelay_enable(trans->param_flags); } if (params.sack_freq == 1) { trans->param_flags = sctp_spp_sackdelay_disable(trans->param_flags); } else if (params.sack_freq > 1) { trans->sackfreq = params.sack_freq; trans->param_flags = sctp_spp_sackdelay_enable(trans->param_flags); } } } return 0; } /* 7.1.3 Initialization Parameters (SCTP_INITMSG) * * Applications can specify protocol parameters for the default association * initialization. The option name argument to setsockopt() and getsockopt() * is SCTP_INITMSG. * * Setting initialization parameters is effective only on an unconnected * socket (for UDP-style sockets only future associations are effected * by the change). With TCP-style sockets, this option is inherited by * sockets derived from a listener socket. */ static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_initmsg sinit; struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof(struct sctp_initmsg)) return -EINVAL; if (copy_from_user(&sinit, optval, optlen)) return -EFAULT; if (sinit.sinit_num_ostreams) sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; if (sinit.sinit_max_instreams) sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; if (sinit.sinit_max_attempts) sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; if (sinit.sinit_max_init_timeo) sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; return 0; } /* * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) * * Applications that wish to use the sendto() system call may wish to * specify a default set of parameters that would normally be supplied * through the inclusion of ancillary data. This socket option allows * such an application to set the default sctp_sndrcvinfo structure. * The application that wishes to use this socket option simply passes * in to this call the sctp_sndrcvinfo structure defined in Section * 5.2.2) The input parameters accepted by this call include * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, * sinfo_timetolive. The user must provide the sinfo_assoc_id field in * to this call if the caller is using the UDP model. */ static int sctp_setsockopt_default_send_param(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndrcvinfo info; if (optlen != sizeof(info)) return -EINVAL; if (copy_from_user(&info, optval, optlen)) return -EFAULT; if (info.sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { asoc->default_stream = info.sinfo_stream; asoc->default_flags = info.sinfo_flags; asoc->default_ppid = info.sinfo_ppid; asoc->default_context = info.sinfo_context; asoc->default_timetolive = info.sinfo_timetolive; } else { sp->default_stream = info.sinfo_stream; sp->default_flags = info.sinfo_flags; sp->default_ppid = info.sinfo_ppid; sp->default_context = info.sinfo_context; sp->default_timetolive = info.sinfo_timetolive; } return 0; } /* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters * (SCTP_DEFAULT_SNDINFO) */ static int sctp_setsockopt_default_sndinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndinfo info; if (optlen != sizeof(info)) return -EINVAL; if (copy_from_user(&info, optval, optlen)) return -EFAULT; if (info.snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; asoc = sctp_id2assoc(sk, info.snd_assoc_id); if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { asoc->default_stream = info.snd_sid; asoc->default_flags = info.snd_flags; asoc->default_ppid = info.snd_ppid; asoc->default_context = info.snd_context; } else { sp->default_stream = info.snd_sid; sp->default_flags = info.snd_flags; sp->default_ppid = info.snd_ppid; sp->default_context = info.snd_context; } return 0; } /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) * * Requests that the local SCTP stack use the enclosed peer address as * the association primary. The enclosed address must be one of the * association peer's addresses. */ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_prim prim; struct sctp_transport *trans; if (optlen != sizeof(struct sctp_prim)) return -EINVAL; if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) return -EFAULT; trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); if (!trans) return -EINVAL; sctp_assoc_set_primary(trans->asoc, trans); return 0; } /* * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. */ static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. * */ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; unsigned long rto_min, rto_max; struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof (struct sctp_rtoinfo)) return -EINVAL; if (copy_from_user(&rtoinfo, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); /* Set the values to the specific association */ if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; rto_max = rtoinfo.srto_max; rto_min = rtoinfo.srto_min; if (rto_max) rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; else rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; if (rto_min) rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; else rto_min = asoc ? 
asoc->rto_min : sp->rtoinfo.srto_min; if (rto_min > rto_max) return -EINVAL; if (asoc) { if (rtoinfo.srto_initial != 0) asoc->rto_initial = msecs_to_jiffies(rtoinfo.srto_initial); asoc->rto_max = rto_max; asoc->rto_min = rto_min; } else { /* If there is no association or the association-id = 0 * set the values to the endpoint. */ if (rtoinfo.srto_initial != 0) sp->rtoinfo.srto_initial = rtoinfo.srto_initial; sp->rtoinfo.srto_max = rto_max; sp->rtoinfo.srto_min = rto_min; } return 0; } /* * * 7.1.2 SCTP_ASSOCINFO * * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. * See [SCTP] for more information. * */ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assocparams assocparams; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assocparams)) return -EINVAL; if (copy_from_user(&assocparams, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Set the values to the specific association */ if (asoc) { if (assocparams.sasoc_asocmaxrxt != 0) { __u32 path_sum = 0; int paths = 0; struct sctp_transport *peer_addr; list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, transports) { path_sum += peer_addr->pathmaxrxt; paths++; } /* Only validate asocmaxrxt if we have more than * one path/transport. We do this because path * retransmissions are only counted when we have more * then one path. */ if (paths > 1 && assocparams.sasoc_asocmaxrxt > path_sum) return -EINVAL; asoc->max_retrans = assocparams.sasoc_asocmaxrxt; } if (assocparams.sasoc_cookie_life != 0) asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); } else { /* Set the values to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); if (assocparams.sasoc_asocmaxrxt != 0) sp->assocparams.sasoc_asocmaxrxt = assocparams.sasoc_asocmaxrxt; if (assocparams.sasoc_cookie_life != 0) sp->assocparams.sasoc_cookie_life = assocparams.sasoc_cookie_life; } return 0; } /* * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) * * This socket option is a boolean flag which turns on or off mapped V4 * addresses. If this option is turned on and the socket is type * PF_INET6, then IPv4 addresses will be mapped to V6 representation. * If this option is turned off, then no mapping will be done of V4 * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. */ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (val) sp->v4mapped = 1; else sp->v4mapped = 0; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will effect * SCTP's choice of DATA chunk size. 
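 *
 * A minimal user-space sketch (the 1200 byte limit is an example only):
 * cap outgoing DATA chunks for all future associations on the socket.
 *
 *	struct sctp_assoc_value av = { 0 };
 *	av.assoc_value = 1200;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 *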
Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). * * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoints default value is * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. */ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); int val; if (optlen == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&val, optval, optlen)) return -EFAULT; params.assoc_id = 0; } else if (optlen == sizeof(struct sctp_assoc_value)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; val = params.assoc_value; } else return -EINVAL; if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { if (val == 0) { val = asoc->pathmtu; val -= sp->pf->af->net_header_len; val -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk); } asoc->user_frag = val; asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); } else { sp->user_frag = val; } return 0; } /* * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) * * Requests that the peer mark the enclosed address as the association * primary. The enclosed address must be one of the association's * locally bound addresses. 
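 * The request is carried to the peer in an ASCONF chunk, so it is
 * refused (-EPERM) unless both endpoints negotiated the ASCONF
 * extension.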
 * The following structure is used to make a set primary request:
 */
static int sctp_setsockopt_peer_primary_addr(struct sock *sk,
					     char __user *optval,
					     unsigned int optlen)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_association *asoc = NULL;
	struct sctp_setpeerprim prim;
	struct sctp_chunk *chunk;
	struct sctp_af *af;
	int err;

	sp = sctp_sk(sk);

	if (!net->sctp.addip_enable)
		return -EPERM;

	if (optlen != sizeof(struct sctp_setpeerprim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.asconf_capable)
		return -EPERM;

	if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
		return -EPERM;

	if (!sctp_state(asoc, ESTABLISHED))
		return -ENOTCONN;

	af = sctp_get_af_specific(prim.sspp_addr.ss_family);
	if (!af)
		return -EINVAL;

	if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
		return -EADDRNOTAVAIL;

	if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
		return -EADDRNOTAVAIL;

	/* Create an ASCONF chunk with SET_PRIMARY parameter */
	chunk = sctp_make_asconf_set_prim(asoc,
					  (union sctp_addr *)&prim.sspp_addr);
	if (!chunk)
		return -ENOMEM;

	err = sctp_send_asconf(asoc, chunk);

	pr_debug("%s: we set peer primary addr primitively\n", __func__);

	return err;
}

static int sctp_setsockopt_adaptation_layer(struct sock *sk,
					    char __user *optval,
					    unsigned int optlen)
{
	struct sctp_setadaptation adaptation;

	if (optlen != sizeof(struct sctp_setadaptation))
		return -EINVAL;
	if (copy_from_user(&adaptation, optval, optlen))
		return -EFAULT;

	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;

	return 0;
}

/*
 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
 *
 * The context field in the sctp_sndrcvinfo structure is normally only
 * used when a failed message is retrieved holding the value that was
 * sent down on the actual send call. This option allows the setting of
 * a default context on an association basis that will be received on
 * reading messages from the peer. This is especially helpful in the
 * one-2-many model for an application to keep some reference to an
 * internal state machine that is processing messages on the
 * association. Note that the setting of this value only affects
 * received messages from the peer and does not affect the value that is
 * saved with outbound messages.
 */
static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	struct sctp_assoc_value params;
	struct sctp_sock *sp;
	struct sctp_association *asoc;

	if (optlen != sizeof(struct sctp_assoc_value))
		return -EINVAL;
	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	sp = sctp_sk(sk);

	if (params.assoc_id != 0) {
		asoc = sctp_id2assoc(sk, params.assoc_id);
		if (!asoc)
			return -EINVAL;
		asoc->default_rcv_context = params.assoc_value;
	} else {
		sp->default_rcv_context = params.assoc_value;
	}

	return 0;
}

/*
 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 *
 * This option will at a minimum specify if the implementation is doing
 * fragmented interleave. Fragmented interleave, for a one to many
 * socket, is when subsequent calls to receive a message may return
 * parts of messages from different associations. Some implementations
 * may allow you to turn this value on or off. If so, when turned off,
 * no fragment interleave will occur (which will cause head-of-line
 * blocking amongst multiple associations sharing the same one to many
 * socket).
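 *
 * A hypothetical sketch enabling it (a plain int boolean):
 *
 *	int on = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
 *		   &on, sizeof(on));
 *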
When this option is turned on, then each receive call may * come from a different association (thus the user must receive data * with the extended calls (e.g. sctp_recvmsg) to keep track of which * association each receive belongs to. * * This option takes a boolean value. A non-zero value indicates that * fragmented interleave is on. A value of zero indicates that * fragmented interleave is off. * * Note that it is important that an implementation that allows this * option to be turned on, have it off by default. Otherwise an unaware * application using the one to many model may become confused and act * incorrectly. */ static int sctp_setsockopt_fragment_interleave(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen != sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; return 0; } /* * 8.1.21. Set or Get the SCTP Partial Delivery Point * (SCTP_PARTIAL_DELIVERY_POINT) * * This option will set or get the SCTP partial delivery point. This * point is the size of a message where the partial delivery API will be * invoked to help free up rwnd space for the peer. Setting this to a * lower value will cause partial deliveries to happen more often. The * calls argument is an integer that sets or gets the partial delivery * point. Note also that the call will fail if the user attempts to set * this value larger than the socket receive buffer size. * * Note that any single message having a length smaller than or equal to * the SCTP partial delivery point will be delivered in one single read * call as long as the user provided buffer is large enough to hold the * message. */ static int sctp_setsockopt_partial_delivery_point(struct sock *sk, char __user *optval, unsigned int optlen) { u32 val; if (optlen != sizeof(u32)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; /* Note: We double the receive buffer from what the user sets * it to be, also initial rwnd is based on rcvbuf/2. */ if (val > (sk->sk_rcvbuf >> 1)) return -EINVAL; sctp_sk(sk)->pd_point = val; return 0; /* is this the right error code? */ } /* * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) * * This option will allow a user to change the maximum burst of packets * that can be emitted by this association. Note that the default value * is 4, and some implementations may restrict this setting so that it * can only be lowered. * * NOTE: This text doesn't seem right. Do this on a socket basis with * future associations inheriting the socket value. */ static int sctp_setsockopt_maxburst(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; int val; int assoc_id = 0; if (optlen == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in max_burst socket option deprecated.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&val, optval, optlen)) return -EFAULT; } else if (optlen == sizeof(struct sctp_assoc_value)) { if (copy_from_user(&params, optval, optlen)) return -EFAULT; val = params.assoc_value; assoc_id = params.assoc_id; } else return -EINVAL; sp = sctp_sk(sk); if (assoc_id != 0) { asoc = sctp_id2assoc(sk, assoc_id); if (!asoc) return -EINVAL; asoc->max_burst = val; } else sp->max_burst = val; return 0; } /* * 7.1.18. 
Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) * * This set option adds a chunk type that the user is requesting to be * received only in an authenticated way. Changes to the list of chunks * will only effect future associations on the socket. */ static int sctp_setsockopt_auth_chunk(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunk val; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authchunk)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; switch (val.sauth_chunk) { case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: case SCTP_CID_SHUTDOWN_COMPLETE: case SCTP_CID_AUTH: return -EINVAL; } /* add this chunk id to the endpoint */ return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); } /* * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) * * This option gets or sets the list of HMAC algorithms that the local * endpoint requires the peer to use. */ static int sctp_setsockopt_hmac_ident(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_hmacalgo *hmacs; u32 idents; int err; if (!ep->auth_enable) return -EACCES; if (optlen < sizeof(struct sctp_hmacalgo)) return -EINVAL; hmacs = memdup_user(optval, optlen); if (IS_ERR(hmacs)) return PTR_ERR(hmacs); idents = hmacs->shmac_num_idents; if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { err = -EINVAL; goto out; } err = sctp_auth_ep_set_hmacs(ep, hmacs); out: kfree(hmacs); return err; } /* * 7.1.20. Set a shared key (SCTP_AUTH_KEY) * * This option will set a shared secret key which is used to build an * association shared key. */ static int sctp_setsockopt_auth_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkey *authkey; struct sctp_association *asoc; int ret; if (!ep->auth_enable) return -EACCES; if (optlen <= sizeof(struct sctp_authkey)) return -EINVAL; authkey = memdup_user(optval, optlen); if (IS_ERR(authkey)) return PTR_ERR(authkey); if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { ret = -EINVAL; goto out; } asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { ret = -EINVAL; goto out; } ret = sctp_auth_set_key(ep, asoc, authkey); out: kzfree(authkey); return ret; } /* * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) * * This option will get or set the active shared key to be used to build * the association shared key. */ static int sctp_setsockopt_active_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); } /* * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) * * This set option will delete a shared secret key from use. 
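 *
 * A hypothetical sketch, assuming SCTP AUTH is enabled on the endpoint
 * (the key number is an example only): delete shared key number 1 from
 * the endpoint defaults.
 *
 *	struct sctp_authkeyid kid = { 0 };
 *	kid.scact_keynumber = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_DELETE_KEY,
 *		   &kid, sizeof(kid));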
*/ static int sctp_setsockopt_del_key(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, optlen)) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); } /* * 8.1.23 SCTP_AUTO_ASCONF * * This option will enable or disable the use of the automatic generation of * ASCONF chunks to add and delete addresses to an existing association. Note * that this option has two caveats namely: a) it only affects sockets that * are bound to all addresses available to the SCTP stack, and b) the system * administrator may have an overriding control that turns the ASCONF feature * off no matter what setting the socket option may have. * This option expects an integer boolean flag, where a non-zero value turns on * the option, and a zero value turns off the option. * Note. In this implementation, socket operation overrides default parameter * being set by sysctl as well as FreeBSD implementation */ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, unsigned int optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (!sctp_is_ep_boundall(sk) && val) return -EINVAL; if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) return 0; spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); if (val == 0 && sp->do_auto_asconf) { list_del(&sp->auto_asconf_list); sp->do_auto_asconf = 0; } else if (val && !sp->do_auto_asconf) { list_add_tail(&sp->auto_asconf_list, &sock_net(sk)->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; } spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to alter the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_setsockopt_paddr_thresholds(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (optlen < sizeof(struct sctp_paddrthlds)) return -EINVAL; if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, sizeof(struct sctp_paddrthlds))) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { if (val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } if (val.spt_pathmaxrxt) asoc->pathmaxrxt = val.spt_pathmaxrxt; asoc->pf_retrans = val.spt_pathpfthld; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; if (val.spt_pathmaxrxt) trans->pathmaxrxt = val.spt_pathmaxrxt; trans->pf_retrans = val.spt_pathpfthld; } return 0; } static int sctp_setsockopt_recvrcvinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvrcvinfo = (val == 0) ? 
0 : 1; return 0; } static int sctp_setsockopt_recvnxtinfo(struct sock *sk, char __user *optval, unsigned int optlen) { int val; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; return 0; } static int sctp_setsockopt_pr_supported(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(params)) goto out; if (copy_from_user(&params, optval, optlen)) { retval = -EFAULT; goto out; } asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { asoc->prsctp_enable = !!params.assoc_value; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); sp->ep->prsctp_enable = !!params.assoc_value; } else { goto out; } retval = 0; out: return retval; } static int sctp_setsockopt_default_prinfo(struct sock *sk, char __user *optval, unsigned int optlen) { struct sctp_default_prinfo info; struct sctp_association *asoc; int retval = -EINVAL; if (optlen != sizeof(info)) goto out; if (copy_from_user(&info, optval, sizeof(info))) { retval = -EFAULT; goto out; } if (info.pr_policy & ~SCTP_PR_SCTP_MASK) goto out; if (info.pr_policy == SCTP_PR_SCTP_NONE) info.pr_value = 0; asoc = sctp_id2assoc(sk, info.pr_assoc_id); if (asoc) { SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); asoc->default_timetolive = info.pr_value; } else if (!info.pr_assoc_id) { struct sctp_sock *sp = sctp_sk(sk); SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); sp->default_timetolive = info.pr_value; } else { goto out; } retval = 0; out: return retval; } /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve * socket options. Socket options are used to change the default * behavior of sockets calls. They are described in Section 7. * * The syntax is: * * ret = getsockopt(int sd, int level, int optname, void __user *optval, * int __user *optlen); * ret = setsockopt(int sd, int level, int optname, const void __user *optval, * int optlen); * * sd - the socket descript. * level - set to IPPROTO_SCTP for all SCTP options. * optname - the option name. * optval - the buffer to store the value of the option. * optlen - the size of the buffer. */ static int sctp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { int retval = 0; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... I am not convinced that the * semantics of setsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->setsockopt(sk, level, optname, optval, optlen); goto out_nounlock; } lock_sock(sk); switch (optname) { case SCTP_SOCKOPT_BINDX_ADD: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_ADD_ADDR); break; case SCTP_SOCKOPT_BINDX_REM: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, optlen, SCTP_BINDX_REM_ADDR); break; case SCTP_SOCKOPT_CONNECTX_OLD: /* 'optlen' is the size of the addresses buffer. 
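 * (a packed array of struct sockaddr entries, not one fixed-size
 * option struct)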
*/ retval = sctp_setsockopt_connectx_old(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_SOCKOPT_CONNECTX: /* 'optlen' is the size of the addresses buffer. */ retval = sctp_setsockopt_connectx(sk, (struct sockaddr __user *)optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); break; case SCTP_EVENTS: retval = sctp_setsockopt_events(sk, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_setsockopt_autoclose(sk, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); break; case SCTP_INITMSG: retval = sctp_setsockopt_initmsg(sk, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_setsockopt_default_send_param(sk, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_setsockopt_primary_addr(sk, optval, optlen); break; case SCTP_SET_PEER_PRIMARY_ADDR: retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); break; case SCTP_NODELAY: retval = sctp_setsockopt_nodelay(sk, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_setsockopt_associnfo(sk, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_setsockopt_mappedv4(sk, optval, optlen); break; case SCTP_MAXSEG: retval = sctp_setsockopt_maxseg(sk, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_setsockopt_context(sk, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_setsockopt_maxburst(sk, optval, optlen); break; case SCTP_AUTH_CHUNK: retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); break; case SCTP_HMAC_IDENT: retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); break; case SCTP_AUTH_KEY: retval = sctp_setsockopt_auth_key(sk, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_setsockopt_active_key(sk, optval, optlen); break; case SCTP_AUTH_DELETE_KEY: retval = sctp_setsockopt_del_key(sk, optval, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); break; case SCTP_RECVRCVINFO: retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); break; case SCTP_RECVNXTINFO: retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); break; case SCTP_PR_SUPPORTED: retval = sctp_setsockopt_pr_supported(sk, optval, optlen); break; case SCTP_DEFAULT_PRINFO: retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); break; default: retval = -ENOPROTOOPT; break; } release_sock(sk); out_nounlock: return retval; } /* API 3.1.6 connect() - UDP Style Syntax * * An application may use the connect() call in the UDP model to initiate an * association without sending data. * * The syntax is: * * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); * * sd: the socket descriptor to have a new association added to. 
* * nam: the address structure (either struct sockaddr_in or struct * sockaddr_in6 defined in RFC2553 [7]). * * len: the size of the address. */ static int sctp_connect(struct sock *sk, struct sockaddr *addr, int addr_len) { int err = 0; struct sctp_af *af; lock_sock(sk); pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, addr, addr_len); /* Validate addr_len before calling common connect/connectx routine. */ af = sctp_get_af_specific(addr->sa_family); if (!af || addr_len < af->sockaddr_len) { err = -EINVAL; } else { /* Pass correct addr len to common routine (so it knows there * is only one address being passed. */ err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); } release_sock(sk); return err; } /* FIXME: Write comments. */ static int sctp_disconnect(struct sock *sk, int flags) { return -EOPNOTSUPP; /* STUB */ } /* 4.1.4 accept() - TCP Style Syntax * * Applications use accept() call to remove an established SCTP * association from the accept queue of the endpoint. A new socket * descriptor will be returned from accept() to represent the newly * formed association. */ static struct sock *sctp_accept(struct sock *sk, int flags, int *err) { struct sctp_sock *sp; struct sctp_endpoint *ep; struct sock *newsk = NULL; struct sctp_association *asoc; long timeo; int error = 0; lock_sock(sk); sp = sctp_sk(sk); ep = sp->ep; if (!sctp_style(sk, TCP)) { error = -EOPNOTSUPP; goto out; } if (!sctp_sstate(sk, LISTENING)) { error = -EINVAL; goto out; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); error = sctp_wait_for_accept(sk, timeo); if (error) goto out; /* We treat the list of associations on the endpoint as the accept * queue and pick the first association on the list. */ asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); newsk = sp->pf->create_accept_sk(sk, asoc); if (!newsk) { error = -ENOMEM; goto out; } /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); out: release_sock(sk); *err = error; return newsk; } /* The SCTP ioctl handler. */ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) { int rc = -ENOTCONN; lock_sock(sk); /* * SEQPACKET-style sockets in LISTENING state are valid, for * SCTP, so only discard TCP-style sockets in LISTENING state. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) goto out; switch (cmd) { case SIOCINQ: { struct sk_buff *skb; unsigned int amount = 0; skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { /* * We will only return the amount of this packet since * that is all that will be read. */ amount = skb->len; } rc = put_user(amount, (int __user *)arg); break; } default: rc = -ENOIOCTLCMD; break; } out: release_sock(sk); return rc; } /* This is the function which gets called during socket creation to * initialized the SCTP-specific portion of the sock. * The sock structure should already be zero-filled memory. */ static int sctp_init_sock(struct sock *sk) { struct net *net = sock_net(sk); struct sctp_sock *sp; pr_debug("%s: sk:%p\n", __func__, sk); sp = sctp_sk(sk); /* Initialize the SCTP per socket area. */ switch (sk->sk_type) { case SOCK_SEQPACKET: sp->type = SCTP_SOCKET_UDP; break; case SOCK_STREAM: sp->type = SCTP_SOCKET_TCP; break; default: return -ESOCKTNOSUPPORT; } sk->sk_gso_type = SKB_GSO_SCTP; /* Initialize default send parameters. These parameters can be * modified with the SCTP_DEFAULT_SEND_PARAM socket option. 
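 * New associations inherit these values as their per-association
 * defaults.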
*/ sp->default_stream = 0; sp->default_ppid = 0; sp->default_flags = 0; sp->default_context = 0; sp->default_timetolive = 0; sp->default_rcv_context = 0; sp->max_burst = net->sctp.max_burst; sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; /* Initialize default setup parameters. These parameters * can be modified with the SCTP_INITMSG socket option or * overridden by the SCTP_INIT CMSG. */ sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; sp->initmsg.sinit_max_instreams = sctp_max_instreams; sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; /* Initialize default RTO related parameters. These parameters can * be modified for with the SCTP_RTOINFO socket option. */ sp->rtoinfo.srto_initial = net->sctp.rto_initial; sp->rtoinfo.srto_max = net->sctp.rto_max; sp->rtoinfo.srto_min = net->sctp.rto_min; /* Initialize default association related parameters. These parameters * can be modified with the SCTP_ASSOCINFO socket option. */ sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; sp->assocparams.sasoc_number_peer_destinations = 0; sp->assocparams.sasoc_peer_rwnd = 0; sp->assocparams.sasoc_local_rwnd = 0; sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; /* Initialize default event subscriptions. By default, all the * options are off. */ memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); /* Default Peer Address Parameters. These defaults can * be modified via SCTP_PEER_ADDR_PARAMS */ sp->hbinterval = net->sctp.hb_interval; sp->pathmaxrxt = net->sctp.max_retrans_path; sp->pathmtu = 0; /* allow default discovery */ sp->sackdelay = net->sctp.sack_timeout; sp->sackfreq = 2; sp->param_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE | SPP_SACKDELAY_ENABLE; /* If enabled no SCTP message fragmentation will be performed. * Configure through SCTP_DISABLE_FRAGMENTS socket option. */ sp->disable_fragments = 0; /* Enable Nagle algorithm by default. */ sp->nodelay = 0; sp->recvrcvinfo = 0; sp->recvnxtinfo = 0; /* Enable by default. */ sp->v4mapped = 1; /* Auto-close idle associations after the configured * number of seconds. A value of 0 disables this * feature. Configure through the SCTP_AUTOCLOSE socket option, * for UDP-style sockets only. */ sp->autoclose = 0; /* User specified fragmentation limit. */ sp->user_frag = 0; sp->adaptation_ind = 0; sp->pf = sctp_get_pf_specific(sk->sk_family); /* Control variables for partial data delivery. */ atomic_set(&sp->pd_mode, 0); skb_queue_head_init(&sp->pd_lobby); sp->frag_interleave = 0; /* Create a per socket endpoint structure. Even if we * change the data structure relationships, this may still * be useful for storing pre-connect address information. */ sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); if (!sp->ep) return -ENOMEM; sp->hmac = NULL; sk->sk_destruct = sctp_destruct_sock; SCTP_DBG_OBJCNT_INC(sock); local_bh_disable(); percpu_counter_inc(&sctp_sockets_allocated); sock_prot_inuse_add(net, sk->sk_prot, 1); /* Nothing can fail after this block, otherwise * sctp_destroy_sock() will be called without addr_wq_lock held */ if (net->sctp.default_auto_asconf) { spin_lock(&sock_net(sk)->sctp.addr_wq_lock); list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); } else { sp->do_auto_asconf = 0; } local_bh_enable(); return 0; } /* Cleanup any SCTP per socket resources. 
Must be called with * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true */ static void sctp_destroy_sock(struct sock *sk) { struct sctp_sock *sp; pr_debug("%s: sk:%p\n", __func__, sk); /* Release our hold on the endpoint. */ sp = sctp_sk(sk); /* This could happen during socket init, thus we bail out * early, since the rest of the below is not setup either. */ if (sp->ep == NULL) return; if (sp->do_auto_asconf) { sp->do_auto_asconf = 0; list_del(&sp->auto_asconf_list); } sctp_endpoint_free(sp->ep); local_bh_disable(); percpu_counter_dec(&sctp_sockets_allocated); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); } /* Triggered when there are no references on the socket anymore */ static void sctp_destruct_sock(struct sock *sk) { struct sctp_sock *sp = sctp_sk(sk); /* Free up the HMAC transform. */ crypto_free_shash(sp->hmac); inet_sock_destruct(sk); } /* API 4.1.7 shutdown() - TCP Style Syntax * int shutdown(int socket, int how); * * sd - the socket descriptor of the association to be closed. * how - Specifies the type of shutdown. The values are * as follows: * SHUT_RD * Disables further receive operations. No SCTP * protocol action is taken. * SHUT_WR * Disables further send operations, and initiates * the SCTP shutdown sequence. * SHUT_RDWR * Disables further send and receive operations * and initiates the SCTP shutdown sequence. */ static void sctp_shutdown(struct sock *sk, int how) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; if (!sctp_style(sk, TCP)) return; ep = sctp_sk(sk)->ep; if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { struct sctp_association *asoc; sk->sk_state = SCTP_SS_CLOSING; asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); sctp_primitive_SHUTDOWN(net, asoc, NULL); } } int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, struct sctp_info *info) { struct sctp_transport *prim; struct list_head *pos; int mask; memset(info, 0, sizeof(*info)); if (!asoc) { struct sctp_sock *sp = sctp_sk(sk); info->sctpi_s_autoclose = sp->autoclose; info->sctpi_s_adaptation_ind = sp->adaptation_ind; info->sctpi_s_pd_point = sp->pd_point; info->sctpi_s_nodelay = sp->nodelay; info->sctpi_s_disable_fragments = sp->disable_fragments; info->sctpi_s_v4mapped = sp->v4mapped; info->sctpi_s_frag_interleave = sp->frag_interleave; info->sctpi_s_type = sp->type; return 0; } info->sctpi_tag = asoc->c.my_vtag; info->sctpi_state = asoc->state; info->sctpi_rwnd = asoc->a_rwnd; info->sctpi_unackdata = asoc->unack_data; info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); info->sctpi_instrms = asoc->c.sinit_max_instreams; info->sctpi_outstrms = asoc->c.sinit_num_ostreams; list_for_each(pos, &asoc->base.inqueue.in_chunk_list) info->sctpi_inqueue++; list_for_each(pos, &asoc->outqueue.out_chunk_list) info->sctpi_outqueue++; info->sctpi_overall_error = asoc->overall_error_count; info->sctpi_max_burst = asoc->max_burst; info->sctpi_maxseg = asoc->frag_point; info->sctpi_peer_rwnd = asoc->peer.rwnd; info->sctpi_peer_tag = asoc->c.peer_vtag; mask = asoc->peer.ecn_capable << 1; mask = (mask | asoc->peer.ipv4_address) << 1; mask = (mask | asoc->peer.ipv6_address) << 1; mask = (mask | asoc->peer.hostname_address) << 1; mask = (mask | asoc->peer.asconf_capable) << 1; mask = (mask | asoc->peer.prsctp_capable) << 1; mask = (mask | asoc->peer.auth_capable); info->sctpi_peer_capable = mask; mask = asoc->peer.sack_needed << 1; mask = (mask | asoc->peer.sack_generation) << 1; mask = (mask | asoc->peer.zero_window_announced); 
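	/* As with sctpi_peer_capable above, the shifts pack the flags
	 * into the low bits: sack_needed (bit 2), sack_generation
	 * (bit 1) and zero_window_announced (bit 0).
	 */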
info->sctpi_peer_sack = mask; info->sctpi_isacks = asoc->stats.isacks; info->sctpi_osacks = asoc->stats.osacks; info->sctpi_opackets = asoc->stats.opackets; info->sctpi_ipackets = asoc->stats.ipackets; info->sctpi_rtxchunks = asoc->stats.rtxchunks; info->sctpi_outofseqtsns = asoc->stats.outofseqtsns; info->sctpi_idupchunks = asoc->stats.idupchunks; info->sctpi_gapcnt = asoc->stats.gapcnt; info->sctpi_ouodchunks = asoc->stats.ouodchunks; info->sctpi_iuodchunks = asoc->stats.iuodchunks; info->sctpi_oodchunks = asoc->stats.oodchunks; info->sctpi_iodchunks = asoc->stats.iodchunks; info->sctpi_octrlchunks = asoc->stats.octrlchunks; info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; prim = asoc->peer.primary_path; memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(struct sockaddr_storage)); info->sctpi_p_state = prim->state; info->sctpi_p_cwnd = prim->cwnd; info->sctpi_p_srtt = prim->srtt; info->sctpi_p_rto = jiffies_to_msecs(prim->rto); info->sctpi_p_hbinterval = prim->hbinterval; info->sctpi_p_pathmaxrxt = prim->pathmaxrxt; info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay); info->sctpi_p_ssthresh = prim->ssthresh; info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked; info->sctpi_p_flight_size = prim->flight_size; info->sctpi_p_error = prim->error_count; return 0; } EXPORT_SYMBOL_GPL(sctp_get_sctp_info); /* use callback to avoid exporting the core structure */ int sctp_transport_walk_start(struct rhashtable_iter *iter) { int err; rhltable_walk_enter(&sctp_transport_hashtable, iter); err = rhashtable_walk_start(iter); if (err && err != -EAGAIN) { rhashtable_walk_stop(iter); rhashtable_walk_exit(iter); return err; } return 0; } void sctp_transport_walk_stop(struct rhashtable_iter *iter) { rhashtable_walk_stop(iter); rhashtable_walk_exit(iter); } struct sctp_transport *sctp_transport_get_next(struct net *net, struct rhashtable_iter *iter) { struct sctp_transport *t; t = rhashtable_walk_next(iter); for (; t; t = rhashtable_walk_next(iter)) { if (IS_ERR(t)) { if (PTR_ERR(t) == -EAGAIN) continue; break; } if (net_eq(sock_net(t->asoc->base.sk), net) && t->asoc->peer.primary_path == t) break; } return t; } struct sctp_transport *sctp_transport_get_idx(struct net *net, struct rhashtable_iter *iter, int pos) { void *obj = SEQ_START_TOKEN; while (pos && (obj = sctp_transport_get_next(net, iter)) && !IS_ERR(obj)) pos--; return obj; } int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p) { int err = 0; int hash = 0; struct sctp_ep_common *epb; struct sctp_hashbucket *head; for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; hash++, head++) { read_lock(&head->lock); sctp_for_each_hentry(epb, &head->chain) { err = cb(sctp_ep(epb), p); if (err) break; } read_unlock(&head->lock); } return err; } EXPORT_SYMBOL_GPL(sctp_for_each_endpoint); int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr, void *p) { struct sctp_transport *transport; int err; rcu_read_lock(); transport = sctp_addrs_lookup_transport(net, laddr, paddr); rcu_read_unlock(); if (!transport) return -ENOENT; err = cb(transport, p); sctp_transport_put(transport); return err; } EXPORT_SYMBOL_GPL(sctp_transport_lookup_process); int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), struct net *net, int pos, void *p) { struct rhashtable_iter hti; void *obj; int err; err = sctp_transport_walk_start(&hti); if (err) return err; sctp_transport_get_idx(net, &hti, pos); obj = 
sctp_transport_get_next(net, &hti); for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { struct sctp_transport *transport = obj; if (!sctp_transport_hold(transport)) continue; err = cb(transport, p); sctp_transport_put(transport); if (err) break; } sctp_transport_walk_stop(&hti); return err; } EXPORT_SYMBOL_GPL(sctp_for_each_transport); /* 7.2.1 Association Status (SCTP_STATUS) * Applications can retrieve current status information about an * association, including association state, peer receiver window size, * number of unacked data chunks, and number of data chunks pending * receipt. This information is read-only. */ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_status status; struct sctp_association *asoc = NULL; struct sctp_transport *transport; sctp_assoc_t associd; int retval = 0; if (len < sizeof(status)) { retval = -EINVAL; goto out; } len = sizeof(status); if (copy_from_user(&status, optval, len)) { retval = -EFAULT; goto out; } associd = status.sstat_assoc_id; asoc = sctp_id2assoc(sk, associd); if (!asoc) { retval = -EINVAL; goto out; } transport = asoc->peer.primary_path; status.sstat_assoc_id = sctp_assoc2id(asoc); status.sstat_state = sctp_assoc_to_state(asoc); status.sstat_rwnd = asoc->peer.rwnd; status.sstat_unackdata = asoc->unack_data; status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); status.sstat_instrms = asoc->c.sinit_max_instreams; status.sstat_outstrms = asoc->c.sinit_num_ostreams; status.sstat_fragmentation_point = asoc->frag_point; status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, transport->af_specific->sockaddr_len); /* Map ipv4 address into v4-mapped-on-v6 address. */ sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), (union sctp_addr *)&status.sstat_primary.spinfo_address); status.sstat_primary.spinfo_state = transport->state; status.sstat_primary.spinfo_cwnd = transport->cwnd; status.sstat_primary.spinfo_srtt = transport->srtt; status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); status.sstat_primary.spinfo_mtu = transport->pathmtu; if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) status.sstat_primary.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", __func__, len, status.sstat_state, status.sstat_rwnd, status.sstat_assoc_id); if (copy_to_user(optval, &status, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) * * Applications can retrieve information about a specific peer address * of an association, including its reachability state, congestion * window, and retransmission timer values. This information is * read-only. 
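 *
 * A minimal user-space sketch (peer_addr, peer_addr_len and assoc_id
 * come from the application and are assumptions here):
 *
 *	struct sctp_paddrinfo pi = { 0 };
 *	socklen_t len = sizeof(pi);
 *	pi.spinfo_assoc_id = assoc_id;
 *	memcpy(&pi.spinfo_address, &peer_addr, peer_addr_len);
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
 *		       &pi, &len) == 0)
 *		printf("cwnd %u rto %u ms\n", pi.spinfo_cwnd,
 *		       pi.spinfo_rto);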
*/ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_paddrinfo pinfo; struct sctp_transport *transport; int retval = 0; if (len < sizeof(pinfo)) { retval = -EINVAL; goto out; } len = sizeof(pinfo); if (copy_from_user(&pinfo, optval, len)) { retval = -EFAULT; goto out; } transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, pinfo.spinfo_assoc_id); if (!transport) return -EINVAL; pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); pinfo.spinfo_state = transport->state; pinfo.spinfo_cwnd = transport->cwnd; pinfo.spinfo_srtt = transport->srtt; pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); pinfo.spinfo_mtu = transport->pathmtu; if (pinfo.spinfo_state == SCTP_UNKNOWN) pinfo.spinfo_state = SCTP_ACTIVE; if (put_user(len, optlen)) { retval = -EFAULT; goto out; } if (copy_to_user(optval, &pinfo, len)) { retval = -EFAULT; goto out; } out: return retval; } /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) * * This option is a on/off flag. If enabled no SCTP message * fragmentation will be performed. Instead if a message being sent * exceeds the current PMTU size, the message will NOT be sent and * instead a error will be indicated to the user. */ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->disable_fragments == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) * * This socket option is used to specify various notifications and * ancillary data the user wishes to receive. */ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, int __user *optlen) { if (len == 0) return -EINVAL; if (len > sizeof(struct sctp_event_subscribe)) len = sizeof(struct sctp_event_subscribe); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) return -EFAULT; return 0; } /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) * * This socket option is applicable to the UDP-style socket only. When * set it will cause associations that are idle for more than the * specified number of seconds to automatically close. An association * being idle is defined an association that has NOT sent or received * user data. The special value of '0' indicates that no automatic * close of any associations should be performed. The option expects an * integer defining the number of seconds of idle time before an * association is closed. */ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) { /* Applicable to UDP-style socket only */ if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) return -EFAULT; return 0; } /* Helper routine to branch off an association to a new socket. */ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) { struct sctp_association *asoc = sctp_id2assoc(sk, id); struct sctp_sock *sp = sctp_sk(sk); struct socket *sock; int err = 0; if (!asoc) return -EINVAL; /* An association cannot be branched off from an already peeled-off * socket, nor is this supported for tcp style sockets. 
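 * User space reaches this through the SCTP_SOCKOPT_PEELOFF getsockopt
 * (wrapped as sctp_peeloff(3) by lksctp-tools).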
*/ if (!sctp_style(sk, UDP)) return -EINVAL; /* Create a new socket. */ err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); if (err < 0) return err; sctp_copy_sock(sock->sk, sk, asoc); /* Make peeled-off sockets more like 1-1 accepted sockets. * Set the daddr and initialize id to something more random */ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); /* Populate the fields of the newsk from the oldsk and migrate the * asoc to the newsk. */ sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); *sockp = sock; return err; } EXPORT_SYMBOL(sctp_do_peeloff); static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) { sctp_peeloff_arg_t peeloff; struct socket *newsock; struct file *newfile; int retval = 0; if (len < sizeof(sctp_peeloff_arg_t)) return -EINVAL; len = sizeof(sctp_peeloff_arg_t); if (copy_from_user(&peeloff, optval, len)) return -EFAULT; retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); if (retval < 0) goto out; /* Map the socket to an unused fd that can be returned to the user. */ retval = get_unused_fd_flags(0); if (retval < 0) { sock_release(newsock); goto out; } newfile = sock_alloc_file(newsock, 0, NULL); if (IS_ERR(newfile)) { put_unused_fd(retval); sock_release(newsock); return PTR_ERR(newfile); } pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, retval); /* Return the fd mapped to the new socket. */ if (put_user(len, optlen)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } peeloff.sd = retval; if (copy_to_user(optval, &peeloff, len)) { fput(newfile); put_unused_fd(retval); return -EFAULT; } fd_install(retval, newfile); out: return retval; } /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) * * Applications can enable or disable heartbeats for any peer address of * an association, modify an address's heartbeat interval, force a * heartbeat to be sent immediately, and adjust the address's maximum * number of retransmissions sent before an address is considered * unreachable. The following structure is used to access and modify an * address's parameters: * * struct sctp_paddrparams { * sctp_assoc_t spp_assoc_id; * struct sockaddr_storage spp_address; * uint32_t spp_hbinterval; * uint16_t spp_pathmaxrxt; * uint32_t spp_pathmtu; * uint32_t spp_sackdelay; * uint32_t spp_flags; * }; * * spp_assoc_id - (one-to-many style socket) This is filled in the * application, and identifies the association for * this query. * spp_address - This specifies which address is of interest. * spp_hbinterval - This contains the value of the heartbeat interval, * in milliseconds. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmaxrxt - This contains the maximum number of * retransmissions before this address shall be * considered unreachable. If a value of zero * is present in this field then no changes are to * be made to this parameter. * spp_pathmtu - When Path MTU discovery is disabled the value * specified here will be the "fixed" path mtu. * Note that if the spp_address field is empty * then all associations on this address will * have this fixed path mtu set upon them. * * spp_sackdelay - When delayed sack is enabled, this value specifies * the number of milliseconds that sacks will be delayed * for. This value will apply to all addresses of an * association if the spp_address field is empty. 
Note
 *		     also that if delayed sack is enabled and this
 *		     value is set to 0, no change is made to the last
 *		     recorded delayed sack timer value.
 *
 * spp_flags	   - These flags are used to control various features
 *		     on an association. The flag field may contain
 *		     zero or more of the following options.
 *
 *		     SPP_HB_ENABLE - Enable heartbeats on the
 *		     specified address. Note that if the address
 *		     field is empty all addresses for the association
 *		     have heartbeats enabled upon them.
 *
 *		     SPP_HB_DISABLE - Disable heartbeats on the
 *		     specified address. Note that if the address
 *		     field is empty all addresses for the association
 *		     will have their heartbeats disabled. Note also
 *		     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *		     mutually exclusive, only one of these two should
 *		     be specified. Enabling both fields will have
 *		     undetermined results.
 *
 *		     SPP_HB_DEMAND - Request a user initiated heartbeat
 *		     to be made immediately.
 *
 *		     SPP_PMTUD_ENABLE - This field will enable PMTU
 *		     discovery upon the specified address. Note that
 *		     if the address field is empty then all addresses
 *		     on the association are affected.
 *
 *		     SPP_PMTUD_DISABLE - This field will disable PMTU
 *		     discovery upon the specified address. Note that
 *		     if the address field is empty then all addresses
 *		     on the association are affected. Note also that
 *		     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *		     exclusive. Enabling both will have undetermined
 *		     results.
 *
 *		     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *		     on delayed sack. The time specified in spp_sackdelay
 *		     is used to specify the sack delay for this address. Note
 *		     that if spp_address is empty then all addresses will
 *		     enable delayed sack and take on the sack delay
 *		     value specified in spp_sackdelay.
 *
 *		     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *		     off delayed sack. If the spp_address field is blank then
 *		     delayed sack is disabled for the entire association. Note
 *		     also that this field is mutually exclusive with
 *		     SPP_SACKDELAY_ENABLE; setting both will have undefined
 *		     results.
 */
static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
					    char __user *optval,
					    int __user *optlen)
{
	struct sctp_paddrparams params;
	struct sctp_transport *trans = NULL;
	struct sctp_association *asoc = NULL;
	struct sctp_sock *sp = sctp_sk(sk);

	if (len < sizeof(struct sctp_paddrparams))
		return -EINVAL;

	len = sizeof(struct sctp_paddrparams);
	if (copy_from_user(&params, optval, len))
		return -EFAULT;

	/* If an address other than INADDR_ANY is specified, and
	 * no transport is found, then the request is invalid.
	 */
	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
		trans = sctp_addr_id2transport(sk, &params.spp_address,
					       params.spp_assoc_id);
		if (!trans) {
			pr_debug("%s: failed no transport\n", __func__);
			return -EINVAL;
		}
	}

	/* Get association, if assoc_id != 0 and the socket is a one
	 * to many style socket, and an association was not found, then
	 * the id was invalid.
	 */
	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
	if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
		pr_debug("%s: failed no association\n", __func__);
		return -EINVAL;
	}

	if (trans) {
		/* Fetch transport values. */
		params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
		params.spp_pathmtu = trans->pathmtu;
		params.spp_pathmaxrxt = trans->pathmaxrxt;
		params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);

		/* draft-11 doesn't say what to return in spp_flags */
		params.spp_flags = trans->param_flags;
	} else if (asoc) {
		/* Fetch association values.
*/ params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); params.spp_pathmtu = asoc->pathmtu; params.spp_pathmaxrxt = asoc->pathmaxrxt; params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = asoc->param_flags; } else { /* Fetch socket values. */ params.spp_hbinterval = sp->hbinterval; params.spp_pathmtu = sp->pathmtu; params.spp_sackdelay = sp->sackdelay; params.spp_pathmaxrxt = sp->pathmaxrxt; /*draft-11 doesn't say what to return in spp_flags*/ params.spp_flags = sp->param_flags; } if (copy_to_user(optval, &params, len)) return -EFAULT; if (put_user(len, optlen)) return -EFAULT; return 0; } /* * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) * * This option will affect the way delayed acks are performed. This * option allows you to get or set the delayed ack time, in * milliseconds. It also allows changing the delayed ack frequency. * Changing the frequency to 1 disables the delayed sack algorithm. If * the assoc_id is 0, then this sets or gets the endpoint's default * values. If the assoc_id field is non-zero, then the set or get * affects the specified association for the one to many model (the * assoc_id field is ignored by the one to one model). Note that if * sack_delay or sack_freq are 0 when setting this option, then the * current values will remain unchanged. * * struct sctp_sack_info { * sctp_assoc_t sack_assoc_id; * uint32_t sack_delay; * uint32_t sack_freq; * }; * * sack_assoc_id - This parameter indicates which association the user * is performing an action upon. Note that if this field's value is * zero then the endpoint's default value is changed (affecting future * associations only). * * sack_delay - This parameter contains the number of milliseconds that * the user is requesting the delayed ACK timer be set to. Note that * this value is defined in the standard to be between 200 and 500 * milliseconds. * * sack_freq - This parameter contains the number of packets that must * be received before a sack is sent without waiting for the delay * timer to expire. The default value for this is 2, setting this * value to 1 will disable the delayed sack algorithm. */ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sack_info params; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); if (len >= sizeof(struct sctp_sack_info)) { len = sizeof(struct sctp_sack_info); if (copy_from_user(&params, optval, len)) return -EFAULT; } else if (len == sizeof(struct sctp_assoc_value)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of struct sctp_assoc_value in delayed_ack socket option.\n" "Use struct sctp_sack_info instead\n", current->comm, task_pid_nr(current)); if (copy_from_user(&params, optval, len)) return -EFAULT; } else return -EINVAL; /* Get association, if sack_assoc_id != 0 and the socket is a one * to many style socket, and an association was not found, then * the id was invalid. */ asoc = sctp_id2assoc(sk, params.sack_assoc_id); if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { /* Fetch association values. */ if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { params.sack_delay = jiffies_to_msecs( asoc->sackdelay); params.sack_freq = asoc->sackfreq; } else { params.sack_delay = 0; params.sack_freq = 1; } } else { /* Fetch socket values.
*/ if (sp->param_flags & SPP_SACKDELAY_ENABLE) { params.sack_delay = sp->sackdelay; params.sack_freq = sp->sackfreq; } else { params.sack_delay = 0; params.sack_freq = 1; } } if (copy_to_user(optval, &params, len)) return -EFAULT; if (put_user(len, optlen)) return -EFAULT; return 0; } /* 7.1.3 Initialization Parameters (SCTP_INITMSG) * * Applications can specify protocol parameters for the default association * initialization. The option name argument to setsockopt() and getsockopt() * is SCTP_INITMSG. * * Setting initialization parameters is effective only on an unconnected * socket (for UDP-style sockets only future associations are affected * by the change). With TCP-style sockets, this option is inherited by * sockets derived from a listener socket. */ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) { if (len < sizeof(struct sctp_initmsg)) return -EINVAL; len = sizeof(struct sctp_initmsg); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) return -EFAULT; return 0; } static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_association *asoc; int cnt = 0; struct sctp_getaddrs getaddrs; struct sctp_transport *from; void __user *to; union sctp_addr temp; struct sctp_sock *sp = sctp_sk(sk); int addrlen; size_t space_left; int bytes_copied; if (len < sizeof(struct sctp_getaddrs)) return -EINVAL; if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) return -EFAULT; /* For UDP-style sockets, id specifies the association to query. */ asoc = sctp_id2assoc(sk, getaddrs.assoc_id); if (!asoc) return -EINVAL; to = optval + offsetof(struct sctp_getaddrs, addrs); space_left = len - offsetof(struct sctp_getaddrs, addrs); list_for_each_entry(from, &asoc->peer.transport_addr_list, transports) { memcpy(&temp, &from->ipaddr, sizeof(temp)); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sp, &temp); if (space_left < addrlen) return -ENOMEM; if (copy_to_user(to, &temp, addrlen)) return -EFAULT; to += addrlen; cnt++; space_left -= addrlen; } if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) return -EFAULT; bytes_copied = ((char __user *)to) - optval; if (put_user(bytes_copied, optlen)) return -EFAULT; return 0; } static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, size_t space_left, int *bytes_copied) { struct sctp_sockaddr_entry *addr; union sctp_addr temp; int cnt = 0; int addrlen; struct net *net = sock_net(sk); rcu_read_lock(); list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { if (!addr->valid) continue; if ((PF_INET == sk->sk_family) && (AF_INET6 == addr->a.sa.sa_family)) continue; if ((PF_INET6 == sk->sk_family) && inet_v6_ipv6only(sk) && (AF_INET == addr->a.sa.sa_family)) continue; memcpy(&temp, &addr->a, sizeof(temp)); if (!temp.v4.sin_port) temp.v4.sin_port = htons(port); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sctp_sk(sk), &temp); if (space_left < addrlen) { cnt = -ENOMEM; break; } memcpy(to, &temp, addrlen); to += addrlen; cnt++; space_left -= addrlen; *bytes_copied += addrlen; } rcu_read_unlock(); return cnt; } static int sctp_getsockopt_local_addrs(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_bind_addr *bp; struct sctp_association *asoc; int cnt = 0; struct sctp_getaddrs getaddrs; struct sctp_sockaddr_entry *addr; void __user *to; union sctp_addr temp; struct sctp_sock *sp = sctp_sk(sk); int addrlen;
int err = 0; size_t space_left; int bytes_copied = 0; void *addrs; void *buf; if (len < sizeof(struct sctp_getaddrs)) return -EINVAL; if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) return -EFAULT; /* * For UDP-style sockets, id specifies the association to query. * If the id field is set to the value '0' then the locally bound * addresses are returned without regard to any particular * association. */ if (0 == getaddrs.assoc_id) { bp = &sctp_sk(sk)->ep->base.bind_addr; } else { asoc = sctp_id2assoc(sk, getaddrs.assoc_id); if (!asoc) return -EINVAL; bp = &asoc->base.bind_addr; } to = optval + offsetof(struct sctp_getaddrs, addrs); space_left = len - offsetof(struct sctp_getaddrs, addrs); addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN); if (!addrs) return -ENOMEM; /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid * addresses from the global local address list. */ if (sctp_list_single_entry(&bp->address_list)) { addr = list_entry(bp->address_list.next, struct sctp_sockaddr_entry, list); if (sctp_is_any(sk, &addr->a)) { cnt = sctp_copy_laddrs(sk, bp->port, addrs, space_left, &bytes_copied); if (cnt < 0) { err = cnt; goto out; } goto copy_getaddrs; } } buf = addrs; /* Protection on the bound address list is not needed since * in the socket option context we hold a socket lock and * thus the bound address list can't change. */ list_for_each_entry(addr, &bp->address_list, list) { memcpy(&temp, &addr->a, sizeof(temp)); addrlen = sctp_get_pf_specific(sk->sk_family) ->addr_to_user(sp, &temp); if (space_left < addrlen) { err = -ENOMEM; /*fixme: right error?*/ goto out; } memcpy(buf, &temp, addrlen); buf += addrlen; bytes_copied += addrlen; cnt++; space_left -= addrlen; } copy_getaddrs: if (copy_to_user(to, addrs, bytes_copied)) { err = -EFAULT; goto out; } if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { err = -EFAULT; goto out; } if (put_user(bytes_copied, optlen)) err = -EFAULT; out: kfree(addrs); return err; } /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) * * Requests that the local SCTP stack use the enclosed peer address as * the association primary. The enclosed address must be one of the * association peer's addresses. */ static int sctp_getsockopt_primary_addr(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_prim prim; struct sctp_association *asoc; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(struct sctp_prim)) return -EINVAL; len = sizeof(struct sctp_prim); if (copy_from_user(&prim, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); if (!asoc) return -EINVAL; if (!asoc->peer.primary_path) return -ENOTCONN; memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, asoc->peer.primary_path->af_specific->sockaddr_len); sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, (union sctp_addr *)&prim.ssp_addr); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &prim, len)) return -EFAULT; return 0; } /* * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) * * Requests that the local endpoint set the specified Adaptation Layer * Indication parameter for all future INIT and INIT-ACK exchanges. 
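 *
 * As an illustrative user-space sketch only (not part of this file;
 * "fd" is assumed to be an open SCTP socket descriptor), the current
 * indication could be read back with:
 *
 *	struct sctp_setadaptation sa;
 *	socklen_t optlen = sizeof(sa);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_ADAPTATION_LAYER,
 *		       &sa, &optlen) == 0)
 *		printf("adaptation ind: 0x%x\n", sa.ssb_adaptation_ind);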
*/ static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_setadaptation adaptation; if (len < sizeof(struct sctp_setadaptation)) return -EINVAL; len = sizeof(struct sctp_setadaptation); adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &adaptation, len)) return -EFAULT; return 0; } /* * * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) * * Applications that wish to use the sendto() system call may wish to * specify a default set of parameters that would normally be supplied * through the inclusion of ancillary data. This socket option allows * such an application to set the default sctp_sndrcvinfo structure. * The application that wishes to use this socket option simply passes * in to this call the sctp_sndrcvinfo structure defined in Section * 5.2.2. The input parameters accepted by this call include * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, * sinfo_timetolive. The user must provide the sinfo_assoc_id field * to this call if the caller is using the UDP model. * * For getsockopt, it gets the default sctp_sndrcvinfo structure. */ static int sctp_getsockopt_default_send_param(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndrcvinfo info; if (len < sizeof(info)) return -EINVAL; len = sizeof(info); if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { info.sinfo_stream = asoc->default_stream; info.sinfo_flags = asoc->default_flags; info.sinfo_ppid = asoc->default_ppid; info.sinfo_context = asoc->default_context; info.sinfo_timetolive = asoc->default_timetolive; } else { info.sinfo_stream = sp->default_stream; info.sinfo_flags = sp->default_flags; info.sinfo_ppid = sp->default_ppid; info.sinfo_context = sp->default_context; info.sinfo_timetolive = sp->default_timetolive; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } /* RFC6458, Section 8.1.31. Set/get Default Send Parameters * (SCTP_DEFAULT_SNDINFO) */ static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_sndinfo info; if (len < sizeof(info)) return -EINVAL; len = sizeof(info); if (copy_from_user(&info, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, info.snd_assoc_id); if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { info.snd_sid = asoc->default_stream; info.snd_flags = asoc->default_flags; info.snd_ppid = asoc->default_ppid; info.snd_context = asoc->default_context; } else { info.snd_sid = sp->default_stream; info.snd_flags = sp->default_flags; info.snd_ppid = sp->default_ppid; info.snd_context = sp->default_context; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &info, len)) return -EFAULT; return 0; } /* * * 7.1.5 SCTP_NODELAY * * Turn on/off any Nagle-like algorithm. This means that packets are * generally sent as soon as possible and no unnecessary delays are * introduced, at the cost of more packets in the network. Expects an * integer boolean flag.
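 *
 * A minimal user-space sketch (illustrative only; "fd" is assumed to
 * be an open SCTP socket descriptor):
 *
 *	int on = 1;
 *
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
 *		perror("SCTP_NODELAY");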
*/ static int sctp_getsockopt_nodelay(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = (sctp_sk(sk)->nodelay == 1); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * * 7.1.1 SCTP_RTOINFO * * The protocol parameters used to initialize and bound retransmission * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access * and modify these parameters. * All parameters are time values, in milliseconds. A value of 0, when * modifying the parameters, indicates that the current value should not * be changed. * */ static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; if (len < sizeof (struct sctp_rtoinfo)) return -EINVAL; len = sizeof(struct sctp_rtoinfo); if (copy_from_user(&rtoinfo, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values corresponding to the specific association. */ if (asoc) { rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); } else { /* Values corresponding to the endpoint. */ struct sctp_sock *sp = sctp_sk(sk); rtoinfo.srto_initial = sp->rtoinfo.srto_initial; rtoinfo.srto_max = sp->rtoinfo.srto_max; rtoinfo.srto_min = sp->rtoinfo.srto_min; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &rtoinfo, len)) return -EFAULT; return 0; } /* * * 7.1.2 SCTP_ASSOCINFO * * This option is used to tune the maximum retransmission attempts * of the association. * Returns an error if the new association retransmission value is * greater than the sum of the retransmission value of the peer. * See [SCTP] for more information. * */ static int sctp_getsockopt_associnfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assocparams assocparams; struct sctp_association *asoc; struct list_head *pos; int cnt = 0; if (len < sizeof (struct sctp_assocparams)) return -EINVAL; len = sizeof(struct sctp_assocparams); if (copy_from_user(&assocparams, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) return -EINVAL; /* Values corresponding to the specific association */ if (asoc) { assocparams.sasoc_asocmaxrxt = asoc->max_retrans; assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; assocparams.sasoc_local_rwnd = asoc->a_rwnd; assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life); list_for_each(pos, &asoc->peer.transport_addr_list) { cnt++; } assocparams.sasoc_number_peer_destinations = cnt; } else { /* Values corresponding to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; assocparams.sasoc_cookie_life = sp->assocparams.sasoc_cookie_life; assocparams.sasoc_number_peer_destinations = sp->assocparams.
sasoc_number_peer_destinations; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &assocparams, len)) return -EFAULT; return 0; } /* * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) * * This socket option is a boolean flag which turns on or off mapped V4 * addresses. If this option is turned on and the socket is type * PF_INET6, then IPv4 addresses will be mapped to V6 representation. * If this option is turned off, then no mapping will be done of V4 * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. */ static int sctp_getsockopt_mappedv4(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; struct sctp_sock *sp = sctp_sk(sk); if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sp->v4mapped; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.29. Set or Get the default context (SCTP_CONTEXT) * (chapter and verse is quoted at sctp_setsockopt_context()) */ static int sctp_getsockopt_context(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (len < sizeof(struct sctp_assoc_value)) return -EINVAL; len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, len)) return -EFAULT; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; params.assoc_value = asoc->default_rcv_context; } else { params.assoc_value = sp->default_rcv_context; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &params, len)) return -EFAULT; return 0; } /* * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) * This option will get or set the maximum size to put in any outgoing * SCTP DATA chunk. If a message is larger than this size, it will be * fragmented by SCTP into the specified size. Note that the underlying * SCTP implementation may fragment into smaller sized chunks when the * PMTU of the underlying association is smaller than the value set by * the user. The default value for this option is '0' which indicates * the user is NOT limiting fragmentation and only the PMTU will affect * SCTP's choice of DATA chunk size. Note also that values set larger * than the maximum size of an IP datagram will effectively let SCTP * control fragmentation (i.e. the same as setting this option to 0). * * The following structure is used to access and modify this parameter: * * struct sctp_assoc_value { * sctp_assoc_t assoc_id; * uint32_t assoc_value; * }; * * assoc_id: This parameter is ignored for one-to-one style sockets. * For one-to-many style sockets this parameter indicates which * association the user is performing an action upon. Note that if * this field's value is zero then the endpoint's default value is * changed (affecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes.
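 *
 * For illustration, a hypothetical user-space sketch (not part of this
 * file; "fd" is assumed to be an open SCTP socket) querying the
 * endpoint default:
 *
 *	struct sctp_assoc_value av = { .assoc_id = 0 };
 *	socklen_t optlen = sizeof(av);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &optlen) == 0)
 *		printf("maxseg: %u bytes\n", av.assoc_value);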
*/ static int sctp_getsockopt_maxseg(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, sizeof(params))) return -EFAULT; } else return -EINVAL; asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc && params.assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) params.assoc_value = asoc->frag_point; else params.assoc_value = sctp_sk(sk)->user_frag; if (put_user(len, optlen)) return -EFAULT; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } /* * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave()) */ static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = sctp_sk(sk)->frag_interleave; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.25. Set or Get the sctp partial delivery point * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point()) */ static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len, char __user *optval, int __user *optlen) { u32 val; if (len < sizeof(u32)) return -EINVAL; len = sizeof(u32); val = sctp_sk(sk)->pd_point; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 7.1.28. 
Set or Get the maximum burst (SCTP_MAX_BURST) * (chapter and verse is quoted at sctp_setsockopt_maxburst()) */ static int sctp_getsockopt_maxburst(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (len == sizeof(int)) { pr_warn_ratelimited(DEPRECATED "%s (pid %d) " "Use of int in max_burst socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); params.assoc_id = 0; } else if (len >= sizeof(struct sctp_assoc_value)) { len = sizeof(struct sctp_assoc_value); if (copy_from_user(&params, optval, len)) return -EFAULT; } else return -EINVAL; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; params.assoc_value = asoc->max_burst; } else params.assoc_value = sp->max_burst; if (len == sizeof(int)) { if (copy_to_user(optval, &params.assoc_value, len)) return -EFAULT; } else { if (copy_to_user(optval, &params, len)) return -EFAULT; } return 0; } static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_hmacalgo __user *p = (void __user *)optval; struct sctp_hmac_algo_param *hmacs; __u16 data_len = 0; u32 num_idents; int i; if (!ep->auth_enable) return -EACCES; hmacs = ep->auth_hmacs_list; data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < sizeof(struct sctp_hmacalgo) + data_len) return -EINVAL; len = sizeof(struct sctp_hmacalgo) + data_len; num_idents = data_len / sizeof(u16); if (put_user(len, optlen)) return -EFAULT; if (put_user(num_idents, &p->shmac_num_idents)) return -EFAULT; for (i = 0; i < num_idents; i++) { __u16 hmacid = ntohs(hmacs->hmac_ids[i]); if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16))) return -EFAULT; } return 0; } static int sctp_getsockopt_active_key(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authkeyid val; struct sctp_association *asoc; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authkeyid)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) return -EFAULT; asoc = sctp_id2assoc(sk, val.scact_assoc_id); if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) val.scact_keynumber = asoc->active_key_id; else val.scact_keynumber = ep->active_key_id; len = sizeof(struct sctp_authkeyid); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; u32 num_chunks = 0; char __user *to; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authchunks)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; asoc = sctp_id2assoc(sk, val.gauth_assoc_id); if (!asoc) return -EINVAL; ch = asoc->peer.peer_chunks; if (!ch) goto num; /* See if the user provided enough room for all the data */ num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < num_chunks) return -EINVAL; if (copy_to_user(to, ch->chunks, num_chunks)) return -EFAULT; num: len 
= sizeof(struct sctp_authchunks) + num_chunks; if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, &p->gauth_number_of_chunks)) return -EFAULT; return 0; } static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_authchunks __user *p = (void __user *)optval; struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; u32 num_chunks = 0; char __user *to; if (!ep->auth_enable) return -EACCES; if (len < sizeof(struct sctp_authchunks)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) return -EFAULT; to = p->gauth_chunks; asoc = sctp_id2assoc(sk, val.gauth_assoc_id); if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) return -EINVAL; if (asoc) ch = (struct sctp_chunks_param *)asoc->c.auth_chunks; else ch = ep->auth_chunk_list; if (!ch) goto num; num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); if (len < sizeof(struct sctp_authchunks) + num_chunks) return -EINVAL; if (copy_to_user(to, ch->chunks, num_chunks)) return -EFAULT; num: len = sizeof(struct sctp_authchunks) + num_chunks; if (put_user(len, optlen)) return -EFAULT; if (put_user(num_chunks, &p->gauth_number_of_chunks)) return -EFAULT; return 0; } /* * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER) * This option gets the current number of associations that are attached * to a one-to-many style socket. The option value is a uint32_t. */ static int sctp_getsockopt_assoc_number(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; u32 val = 0; if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(u32)) return -EINVAL; len = sizeof(u32); list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { val++; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 8.1.23 SCTP_AUTO_ASCONF * See the corresponding setsockopt entry for a description */ static int sctp_getsockopt_auto_asconf(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk)) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * 8.2.6. Get the Current Identifiers of Associations * (SCTP_GET_ASSOC_ID_LIST) * * This option gets the current list of SCTP association identifiers of * the SCTP associations handled by a one-to-many style socket.
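 *
 * A user-space sketch (illustrative only; "fd" is assumed to be a
 * one-to-many style SCTP socket, and the bound of 64 ids is an
 * arbitrary choice for the example):
 *
 *	char buf[sizeof(struct sctp_assoc_ids) + 64 * sizeof(sctp_assoc_t)];
 *	struct sctp_assoc_ids *ids = (struct sctp_assoc_ids *)buf;
 *	socklen_t optlen = sizeof(buf);
 *	uint32_t i;
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *		       ids, &optlen) == 0)
 *		for (i = 0; i < ids->gaids_number_of_ids; i++)
 *			printf("assoc id %d\n", ids->gaids_assoc_id[i]);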
*/ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; struct sctp_assoc_ids *ids; u32 num = 0; if (sctp_style(sk, TCP)) return -EOPNOTSUPP; if (len < sizeof(struct sctp_assoc_ids)) return -EINVAL; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { num++; } if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) return -EINVAL; len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; ids = kmalloc(len, GFP_USER | __GFP_NOWARN); if (unlikely(!ids)) return -ENOMEM; ids->gaids_number_of_ids = num; num = 0; list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { ids->gaids_assoc_id[num++] = asoc->assoc_id; } if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { kfree(ids); return -EFAULT; } kfree(ids); return 0; } /* * SCTP_PEER_ADDR_THLDS * * This option allows us to fetch the partially failed threshold for one or all * transports in an association. See Section 6.1 of: * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_getsockopt_paddr_thresholds(struct sock *sk, char __user *optval, int len, int __user *optlen) { struct sctp_paddrthlds val; struct sctp_transport *trans; struct sctp_association *asoc; if (len < sizeof(struct sctp_paddrthlds)) return -EINVAL; len = sizeof(struct sctp_paddrthlds); if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) return -EFAULT; if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { asoc = sctp_id2assoc(sk, val.spt_assoc_id); if (!asoc) return -ENOENT; val.spt_pathpfthld = asoc->pf_retrans; val.spt_pathmaxrxt = asoc->pathmaxrxt; } else { trans = sctp_addr_id2transport(sk, &val.spt_address, val.spt_assoc_id); if (!trans) return -ENOENT; val.spt_pathmaxrxt = trans->pathmaxrxt; val.spt_pathpfthld = trans->pf_retrans; } if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; return 0; } /* * SCTP_GET_ASSOC_STATS * * This option retrieves local per endpoint statistics. It is modeled * after OpenSolaris' implementation */ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_stats sas; struct sctp_association *asoc = NULL; /* User must provide at least the assoc id */ if (len < sizeof(sctp_assoc_t)) return -EINVAL; /* Allow the struct to grow and fill in as much as possible */ len = min_t(size_t, len, sizeof(sas)); if (copy_from_user(&sas, optval, len)) return -EFAULT; asoc = sctp_id2assoc(sk, sas.sas_assoc_id); if (!asoc) return -EINVAL; sas.sas_rtxchunks = asoc->stats.rtxchunks; sas.sas_gapcnt = asoc->stats.gapcnt; sas.sas_outofseqtsns = asoc->stats.outofseqtsns; sas.sas_osacks = asoc->stats.osacks; sas.sas_isacks = asoc->stats.isacks; sas.sas_octrlchunks = asoc->stats.octrlchunks; sas.sas_ictrlchunks = asoc->stats.ictrlchunks; sas.sas_oodchunks = asoc->stats.oodchunks; sas.sas_iodchunks = asoc->stats.iodchunks; sas.sas_ouodchunks = asoc->stats.ouodchunks; sas.sas_iuodchunks = asoc->stats.iuodchunks; sas.sas_idupchunks = asoc->stats.idupchunks; sas.sas_opackets = asoc->stats.opackets; sas.sas_ipackets = asoc->stats.ipackets; /* New high max rto observed, will return 0 if not a single * RTO update took place. 
obs_rto_ipaddr will be bogus * in such a case */ sas.sas_maxrto = asoc->stats.max_obs_rto; memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, sizeof(struct sockaddr_storage)); /* Mark beginning of a new observation period */ asoc->stats.max_obs_rto = asoc->rto_min; if (put_user(len, optlen)) return -EFAULT; pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); if (copy_to_user(optval, &sas, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvrcvinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { int val = 0; if (len < sizeof(int)) return -EINVAL; len = sizeof(int); if (sctp_sk(sk)->recvnxtinfo) val = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } static int sctp_getsockopt_pr_supported(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EFAULT; if (len < sizeof(params)) { retval = -EINVAL; goto out; } len = sizeof(params); if (copy_from_user(&params, optval, len)) goto out; asoc = sctp_id2assoc(sk, params.assoc_id); if (asoc) { params.assoc_value = asoc->prsctp_enable; } else if (!params.assoc_id) { struct sctp_sock *sp = sctp_sk(sk); params.assoc_value = sp->ep->prsctp_enable; } else { retval = -EINVAL; goto out; } if (put_user(len, optlen)) goto out; if (copy_to_user(optval, &params, len)) goto out; retval = 0; out: return retval; } static int sctp_getsockopt_default_prinfo(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_default_prinfo info; struct sctp_association *asoc; int retval = -EFAULT; if (len < sizeof(info)) { retval = -EINVAL; goto out; } len = sizeof(info); if (copy_from_user(&info, optval, len)) goto out; asoc = sctp_id2assoc(sk, info.pr_assoc_id); if (asoc) { info.pr_policy = SCTP_PR_POLICY(asoc->default_flags); info.pr_value = asoc->default_timetolive; } else if (!info.pr_assoc_id) { struct sctp_sock *sp = sctp_sk(sk); info.pr_policy = SCTP_PR_POLICY(sp->default_flags); info.pr_value = sp->default_timetolive; } else { retval = -EINVAL; goto out; } if (put_user(len, optlen)) goto out; if (copy_to_user(optval, &info, len)) goto out; retval = 0; out: return retval; } static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len, char __user *optval, int __user *optlen) { struct sctp_prstatus params; struct sctp_association *asoc; int policy; int retval = -EINVAL; if (len < sizeof(params)) goto out; len = sizeof(params); if (copy_from_user(&params, optval, len)) { retval = -EFAULT; goto out; } policy = params.sprstat_policy; if (policy & ~SCTP_PR_SCTP_MASK) goto out; asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); if (!asoc) goto out; if (policy == SCTP_PR_SCTP_NONE) { params.sprstat_abandoned_unsent = 0; params.sprstat_abandoned_sent = 0; for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { params.sprstat_abandoned_unsent += asoc->abandoned_unsent[policy]; params.sprstat_abandoned_sent += asoc->abandoned_sent[policy]; } } else { params.sprstat_abandoned_unsent = asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)]; params.sprstat_abandoned_sent = asoc->abandoned_sent[__SCTP_PR_INDEX(policy)]; } if 
(put_user(len, optlen)) { retval = -EFAULT; goto out; } if (copy_to_user(optval, &params, len)) { retval = -EFAULT; goto out; } retval = 0; out: return retval; } static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int retval = 0; int len; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); /* I can hardly begin to describe how wrong this is. This is * so broken as to be worse than useless. The API draft * REALLY is NOT helpful here... I am not convinced that the * semantics of getsockopt() with a level OTHER THAN SOL_SCTP * are at all well-founded. */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; retval = af->getsockopt(sk, level, optname, optval, optlen); return retval; } if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; lock_sock(sk); switch (optname) { case SCTP_STATUS: retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); break; case SCTP_DISABLE_FRAGMENTS: retval = sctp_getsockopt_disable_fragments(sk, len, optval, optlen); break; case SCTP_EVENTS: retval = sctp_getsockopt_events(sk, len, optval, optlen); break; case SCTP_AUTOCLOSE: retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); break; case SCTP_SOCKOPT_PEELOFF: retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); break; case SCTP_PEER_ADDR_PARAMS: retval = sctp_getsockopt_peer_addr_params(sk, len, optval, optlen); break; case SCTP_DELAYED_SACK: retval = sctp_getsockopt_delayed_ack(sk, len, optval, optlen); break; case SCTP_INITMSG: retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); break; case SCTP_GET_PEER_ADDRS: retval = sctp_getsockopt_peer_addrs(sk, len, optval, optlen); break; case SCTP_GET_LOCAL_ADDRS: retval = sctp_getsockopt_local_addrs(sk, len, optval, optlen); break; case SCTP_SOCKOPT_CONNECTX3: retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_getsockopt_default_send_param(sk, len, optval, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_getsockopt_default_sndinfo(sk, len, optval, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); break; case SCTP_NODELAY: retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); break; case SCTP_RTOINFO: retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); break; case SCTP_ASSOCINFO: retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); break; case SCTP_MAXSEG: retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); break; case SCTP_GET_PEER_ADDR_INFO: retval = sctp_getsockopt_peer_addr_info(sk, len, optval, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_getsockopt_adaptation_layer(sk, len, optval, optlen); break; case SCTP_CONTEXT: retval = sctp_getsockopt_context(sk, len, optval, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_getsockopt_fragment_interleave(sk, len, optval, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, optlen); break; case SCTP_MAX_BURST: retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); break; case SCTP_AUTH_KEY: case SCTP_AUTH_CHUNK: case SCTP_AUTH_DELETE_KEY: retval = -EOPNOTSUPP; break; case SCTP_HMAC_IDENT: retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_getsockopt_active_key(sk, len, optval, optlen); break; case 
SCTP_PEER_AUTH_CHUNKS: retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, optlen); break; case SCTP_LOCAL_AUTH_CHUNKS: retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, optlen); break; case SCTP_GET_ASSOC_NUMBER: retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); break; case SCTP_GET_ASSOC_ID_LIST: retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); break; case SCTP_GET_ASSOC_STATS: retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); break; case SCTP_RECVRCVINFO: retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); break; case SCTP_RECVNXTINFO: retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); break; case SCTP_PR_SUPPORTED: retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen); break; case SCTP_DEFAULT_PRINFO: retval = sctp_getsockopt_default_prinfo(sk, len, optval, optlen); break; case SCTP_PR_ASSOC_STATUS: retval = sctp_getsockopt_pr_assocstatus(sk, len, optval, optlen); break; default: retval = -ENOPROTOOPT; break; } release_sock(sk); return retval; } static int sctp_hash(struct sock *sk) { /* STUB */ return 0; } static void sctp_unhash(struct sock *sk) { /* STUB */ } /* Check if port is acceptable. Possibly find first available port. * * The port hash table is contained in the 'global' SCTP protocol storage * (returned by struct sctp_protocol *sctp_get_protocol()). The hash * table is an array of 4096 lists (sctp_bind_hashbucket). Each * list (the list number is the port number hashed out, so as you * would expect from a hash function, all the ports in a given list have * such a number that hashes out to the same list number; you were * expecting that, right?); so each list has a set of ports, with a * link to the socket (struct sock) that uses it, the port number and * a fastreuse flag (FIXME: NPI ipg). */ static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) { struct sctp_bind_hashbucket *head; /* hash list */ struct sctp_bind_bucket *pp; unsigned short snum; int ret; snum = ntohs(addr->v4.sin_port); pr_debug("%s: begins, snum:%d\n", __func__, snum); local_bh_disable(); if (snum == 0) { /* Search for an available port. */ int low, high, remaining, index; unsigned int rover; struct net *net = sock_net(sk); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; do { rover++; if ((rover < low) || (rover > high)) rover = low; if (inet_is_local_reserved_port(net, rover)) continue; index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; spin_lock(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(sock_net(sk), pp->net)) goto next; break; next: spin_unlock(&head->lock); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) goto fail; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold its * mutex. */ snum = rover; } else { /* We are given a specific port number; we verify * that it is not being used. If it is used, we will * exhaust the search in the hash list corresponding * to the port number (snum) - we detect that with the * port iterator, pp being NULL.
*/ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; spin_lock(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; } } pp = NULL; goto pp_not_found; pp_found: if (!hlist_empty(&pp->owner)) { /* We had a port hash table hit - there is an * available port (pp != NULL) and it is being * used by another socket (pp->owner not empty); that other * socket is going to be sk2. */ int reuse = sk->sk_reuse; struct sock *sk2; pr_debug("%s: found a possible match\n", __func__); if (pp->fastreuse && sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) goto success; /* Run through the list of sockets bound to the port * (pp->port) [via the pointers bind_next and * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one, * we get the endpoint they describe and run through * the endpoint's list of IP (v4 or v6) addresses, * comparing each of the addresses with the address of * the socket sk. If we find a match, then that means * that this port/socket (sk) combination is already * in an endpoint. */ sk_for_each_bound(sk2, &pp->owner) { struct sctp_endpoint *ep2; ep2 = sctp_sk(sk2)->ep; if (sk == sk2 || (reuse && sk2->sk_reuse && sk2->sk_state != SCTP_SS_LISTENING)) continue; if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr, sctp_sk(sk2), sctp_sk(sk))) { ret = (long)sk2; goto fail_unlock; } } pr_debug("%s: found a match\n", __func__); } pp_not_found: /* If there was a hash table miss, create a new port. */ ret = 1; if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) goto fail_unlock; /* In either case (hit or miss), make sure fastreuse is 1 only * if sk->sk_reuse is too (that is, if the caller requested * SO_REUSEADDR on this socket -sk-). */ if (hlist_empty(&pp->owner)) { if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) pp->fastreuse = 1; else pp->fastreuse = 0; } else if (pp->fastreuse && (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) pp->fastreuse = 0; /* We are set, so fill up all the data in the hash table * entry, tie the socket list information with the rest of the * sockets FIXME: Blurry, NPI (ipg). */ success: if (!sctp_sk(sk)->bind_hash) { inet_sk(sk)->inet_num = snum; sk_add_bind_node(sk, &pp->owner); sctp_sk(sk)->bind_hash = pp; } ret = 0; fail_unlock: spin_unlock(&head->lock); fail: local_bh_enable(); return ret; } /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral * port is requested. */ static int sctp_get_port(struct sock *sk, unsigned short snum) { union sctp_addr addr; struct sctp_af *af = sctp_sk(sk)->pf->af; /* Set up a dummy address struct from the sk. */ af->from_sk(&addr, sk); addr.v4.sin_port = htons(snum); /* Note: sk->sk_num gets filled in if an ephemeral port is requested. */ return !!sctp_get_port_local(sk, &addr); } /* * Move a socket to LISTENING state. */ static int sctp_listen_start(struct sock *sk, int backlog) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; struct crypto_shash *tfm = NULL; char alg[32]; /* Allocate HMAC for generating cookie.
*/ if (!sp->hmac && sp->sctp_hmac_alg) { sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); tfm = crypto_alloc_shash(alg, 0, 0); if (IS_ERR(tfm)) { net_info_ratelimited("failed to load transform for %s: %ld\n", sp->sctp_hmac_alg, PTR_ERR(tfm)); return -ENOSYS; } sctp_sk(sk)->hmac = tfm; } /* * If a bind() or sctp_bindx() is not called prior to a listen() * call that allows new associations to be accepted, the system * picks an ephemeral port and will choose an address set equivalent * to binding with a wildcard address. * * This is not currently spelled out in the SCTP sockets * extensions draft, but follows the practice as seen in TCP * sockets. * */ sk->sk_state = SCTP_SS_LISTENING; if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) return -EAGAIN; } else { if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { sk->sk_state = SCTP_SS_CLOSED; return -EADDRINUSE; } } sk->sk_max_ack_backlog = backlog; sctp_hash_endpoint(ep); return 0; } /* * 4.1.3 / 5.1.3 listen() * * By default, new associations are not accepted for UDP style sockets. * An application uses listen() to mark a socket as being able to * accept new associations. * * On TCP style sockets, applications use listen() to ready the SCTP * endpoint for accepting inbound associations. * * On both types of endpoints a backlog of '0' disables listening. * * Move a socket to LISTENING state. */ int sctp_inet_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct sctp_endpoint *ep = sctp_sk(sk)->ep; int err = -EINVAL; if (unlikely(backlog < 0)) return err; lock_sock(sk); /* Peeled-off sockets are not allowed to listen(). */ if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) goto out; if (sock->state != SS_UNCONNECTED) goto out; /* If backlog is zero, disable listening. */ if (!backlog) { if (sctp_sstate(sk, CLOSED)) goto out; err = 0; sctp_unhash_endpoint(ep); sk->sk_state = SCTP_SS_CLOSED; if (sk->sk_reuse) sctp_sk(sk)->bind_hash->fastreuse = 1; goto out; } /* If we are already listening, just update the backlog */ if (sctp_sstate(sk, LISTENING)) sk->sk_max_ack_backlog = backlog; else { err = sctp_listen_start(sk, backlog); if (err) goto out; } err = 0; out: release_sock(sk); return err; } /* * This function is modeled on the current datagram_poll() and * tcp_poll(). Note that, based on these implementations, we don't * lock the socket in this function, even though it seems that, * ideally, locking or some other mechanisms can be used to ensure * the integrity of the counters (sndbuf and wmem_alloc) used * in this place. We assume that we don't need locks either until proven * otherwise. * * Another thing to note is that we include the Async I/O support * here, again, by modeling the current TCP/UDP code. We don't have * a good way to test with it yet. */ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct sctp_sock *sp = sctp_sk(sk); unsigned int mask; poll_wait(file, sk_sleep(sk), wait); sock_rps_record_flow(sk); /* A TCP-style listening socket becomes readable when the accept queue * is not empty. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) return (!list_empty(&sp->ep->asocs)) ? (POLLIN | POLLRDNORM) : 0; mask = 0; /* Are there any exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP | POLLIN | POLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; /* Is it readable?
Reconsider this code with TCP-style support. */ if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= POLLIN | POLLRDNORM; /* The association is either gone or not ready. */ if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) return mask; /* Is it writable? */ if (sctp_writeable(sk)) { mask |= POLLOUT | POLLWRNORM; } else { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); /* * Since the socket is not locked, the buffer * might be made available after the writeable check and * before the bit is set. This could cause a lost I/O * signal. tcp_poll() has a race breaker for this race * condition. Based on their implementation, we put * in the following code to cover it as well. */ if (sctp_writeable(sk)) mask |= POLLOUT | POLLWRNORM; } return mask; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ static struct sctp_bind_bucket *sctp_bucket_create( struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) { struct sctp_bind_bucket *pp; pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC); if (pp) { SCTP_DBG_OBJCNT_INC(bind_bucket); pp->port = snum; pp->fastreuse = 0; INIT_HLIST_HEAD(&pp->owner); pp->net = net; hlist_add_head(&pp->node, &head->chain); } return pp; } /* Caller must hold hashbucket lock for this tb with local BH disabled */ static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) { if (pp && hlist_empty(&pp->owner)) { __hlist_del(&pp->node); kmem_cache_free(sctp_bucket_cachep, pp); SCTP_DBG_OBJCNT_DEC(bind_bucket); } } /* Release this socket's reference to a local port. */ static inline void __sctp_put_port(struct sock *sk) { struct sctp_bind_hashbucket *head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), inet_sk(sk)->inet_num)]; struct sctp_bind_bucket *pp; spin_lock(&head->lock); pp = sctp_sk(sk)->bind_hash; __sk_del_bind_node(sk); sctp_sk(sk)->bind_hash = NULL; inet_sk(sk)->inet_num = 0; sctp_bucket_destroy(pp); spin_unlock(&head->lock); } void sctp_put_port(struct sock *sk) { local_bh_disable(); __sctp_put_port(sk); local_bh_enable(); } /* * The system picks an ephemeral port and chooses an address set equivalent * to binding with a wildcard address. * One of those addresses will be the primary address for the association. * This automatically enables the multihoming capability of SCTP. */ static int sctp_autobind(struct sock *sk) { union sctp_addr autoaddr; struct sctp_af *af; __be16 port; /* Initialize a local sockaddr structure to INADDR_ANY. */ af = sctp_sk(sk)->pf->af; port = htons(inet_sk(sk)->inet_num); af->inaddr_any(&autoaddr, port); return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); } /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. * * From RFC 2292 * 4.2 The cmsghdr Structure * * * When ancillary data is sent or received, any number of ancillary data * objects can be specified by the msg_control and msg_controllen members of * the msghdr structure, because each object is preceded by * a cmsghdr structure defining the object's length (the cmsg_len member). * Historically Berkeley-derived implementations have passed only one object * at a time, but this API allows multiple objects to be * passed in a single call to sendmsg() or recvmsg(). The following example * shows two ancillary data objects in a control buffer.
* * |<--------------------------- msg_controllen -------------------------->| * | | * * |<----- ancillary data object ----->|<----- ancillary data object ----->| * * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| * | | | * * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | * * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | * | | | | | * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| * * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| * * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ * ^ * | * * msg_control * points here */ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) { struct cmsghdr *cmsg; struct msghdr *my_msg = (struct msghdr *)msg; for_each_cmsghdr(cmsg, my_msg) { if (!CMSG_OK(my_msg, cmsg)) return -EINVAL; /* Should we parse this header or ignore it? */ if (cmsg->cmsg_level != IPPROTO_SCTP) continue; /* Strictly check lengths following example in SCM code. */ switch (cmsg->cmsg_type) { case SCTP_INIT: /* SCTP Socket API Extension * 5.3.1 SCTP Initiation Structure (SCTP_INIT) * * This cmsghdr structure provides information for * initializing new SCTP associations with sendmsg(). * The SCTP_INITMSG socket option uses this same data * structure. This structure is not used for * recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg))) return -EINVAL; cmsgs->init = CMSG_DATA(cmsg); break; case SCTP_SNDRCV: /* SCTP Socket API Extension * 5.3.2 SCTP Header Information Structure (SCTP_SNDRCV) * * This cmsghdr structure specifies SCTP options for * sendmsg() and describes SCTP header information * about a received message through recvmsg(). * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ ---------------------- * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) return -EINVAL; cmsgs->srinfo = CMSG_DATA(cmsg); if (cmsgs->srinfo->sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; case SCTP_SNDINFO: /* SCTP Socket API Extension * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO) * * This cmsghdr structure specifies SCTP options for * sendmsg(). This structure and SCTP_RCVINFO replace * SCTP_SNDRCV, which has been deprecated. * * cmsg_level cmsg_type cmsg_data[] * ------------ ------------ --------------------- * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo */ if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo))) return -EINVAL; cmsgs->sinfo = CMSG_DATA(cmsg); if (cmsgs->sinfo->snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; default: return -EINVAL; } } return 0; } /* * Wait for a packet. * Note: This function is the same function as in core/datagram.c * with a few modifications to make lksctp work. */ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) { int error; DEFINE_WAIT(wait); prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); /* Socket errors? */ error = sock_error(sk); if (error) goto out; if (!skb_queue_empty(&sk->sk_receive_queue)) goto ready; /* Socket shut down?
*/ if (sk->sk_shutdown & RCV_SHUTDOWN) goto out; /* Sequenced packets can come disconnected. If so we report the * problem. */ error = -ENOTCONN; /* Is there a good reason to think that we may receive some data? */ if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) goto out; /* Handle signals. */ if (signal_pending(current)) goto interrupted; /* Let another process have a go. Since we are going to sleep * anyway. Note: This may cause odd behaviors if the message * does not fit in the user's buffer, but this seems to be the * only way to honor MSG_DONTWAIT realistically. */ release_sock(sk); *timeo_p = schedule_timeout(*timeo_p); lock_sock(sk); ready: finish_wait(sk_sleep(sk), &wait); return 0; interrupted: error = sock_intr_errno(*timeo_p); out: finish_wait(sk_sleep(sk), &wait); *err = error; return error; } /* Receive a datagram. * Note: This is pretty much the same routine as in core/datagram.c * with a few changes to make lksctp work. */ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, int noblock, int *err) { int error; struct sk_buff *skb; long timeo; timeo = sock_rcvtimeo(sk, noblock); pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, MAX_SCHEDULE_TIMEOUT); do { /* Again only user level code calls this function, * so nothing interrupt level * will suddenly eat the receive_queue. * * Look at current nfs client by the way... * However, this function was correct in any case. 8) */ if (flags & MSG_PEEK) { skb = skb_peek(&sk->sk_receive_queue); if (skb) atomic_inc(&skb->users); } else { skb = __skb_dequeue(&sk->sk_receive_queue); } if (skb) return skb; /* Caller is allowed not to check sk->sk_err before calling. */ error = sock_error(sk); if (error) goto no_packet; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk_can_busy_loop(sk) && sk_busy_loop(sk, noblock)) continue; /* User doesn't want to wait. */ error = -EAGAIN; if (!timeo) goto no_packet; } while (sctp_wait_for_packet(sk, err, &timeo) == 0); return NULL; no_packet: *err = error; return NULL; } /* If sndbuf has changed, wake up per association sndbuf waiters. */ static void __sctp_write_space(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; if (sctp_wspace(asoc) <= 0) return; if (waitqueue_active(&asoc->wait)) wake_up_interruptible(&asoc->wait); if (sctp_writeable(sk)) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq) { if (waitqueue_active(&wq->wait)) wake_up_interruptible(&wq->wait); /* Note that we try to include the Async I/O support * here by modeling from the current TCP/UDP code. * We have not tested with it yet. */ if (!(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } } static void sctp_wake_up_waiters(struct sock *sk, struct sctp_association *asoc) { struct sctp_association *tmp = asoc; /* We do accounting for the sndbuf space per association, * so we only need to wake our own association. */ if (asoc->ep->sndbuf_policy) return __sctp_write_space(asoc); /* If association goes down and is just flushing its * outq, then just normally notify others. */ if (asoc->base.dead) return sctp_write_space(sk); /* Accounting for the sndbuf space is per socket, so we * need to wake up others, try to be fair and in case of * other associations, let them have a go first instead * of just doing a sctp_write_space() call. 
* * Note that we reach sctp_wake_up_waiters() only when * associations free up queued chunks, thus we are under * lock and the list of associations on a socket is * guaranteed not to change. */ for (tmp = list_next_entry(tmp, asocs); 1; tmp = list_next_entry(tmp, asocs)) { /* Manually skip the head element. */ if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) continue; /* Wake up association. */ __sctp_write_space(tmp); /* We've reached the end. */ if (tmp == asoc) break; } } /* Do accounting for the sndbuf space. * Decrement the used sndbuf space of the corresponding association by the * data size which was just transmitted(freed). */ static void sctp_wfree(struct sk_buff *skb) { struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; struct sctp_association *asoc = chunk->asoc; struct sock *sk = asoc->base.sk; asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); /* * This undoes what is done via sctp_set_owner_w and sk_mem_charge */ sk->sk_wmem_queued -= skb->truesize; sk_mem_uncharge(sk, skb->truesize); sock_wfree(skb); sctp_wake_up_waiters(sk, asoc); sctp_association_put(asoc); } /* Do accounting for the receive space on the socket. * Accounting for the association is done in ulpevent.c * We set this as a destructor for the cloned data skbs so that * accounting is done at the correct time. */ void sctp_sock_rfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct sctp_ulpevent *event = sctp_skb2event(skb); atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); /* * Mimic the behavior of sock_rfree */ sk_mem_uncharge(sk, event->rmem_len); } /* Helper function to wait for space in the sndbuf. */ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, size_t msg_len) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, *timeo_p, msg_len); /* Increment the association's refcnt. */ sctp_association_hold(asoc); /* Wait on the association specific sndbuf space. */ for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (msg_len <= sctp_wspace(asoc)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); BUG_ON(sk != asoc->base.sk); lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. */ sctp_association_put(asoc); return err; do_error: err = -EPIPE; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EAGAIN; goto out; } void sctp_data_ready(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLRDNORM | POLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } /* If socket sndbuf has changed, wake up all per association waiters. */ void sctp_write_space(struct sock *sk) { struct sctp_association *asoc; /* Wake up the tasks in each wait queue. */ list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { __sctp_write_space(asoc); } } /* Is there any sndbuf space available on the socket? 
* * Note that sk_wmem_alloc is the sum of the send buffers on all of the * associations on the same socket. For a UDP-style socket with * multiple associations, it is possible for it to be "unwriteable" * prematurely. I assume that this is acceptable because * a premature "unwriteable" is better than an accidental "writeable" which * would cause an unwanted block under certain circumstances. For the 1-1 * UDP-style sockets or TCP-style sockets, this code should work. * - Daisy */ static int sctp_writeable(struct sock *sk) { int amt = 0; amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amt < 0) amt = 0; return amt; } /* Wait for an association to go into ESTABLISHED state. If timeout is 0, * returns immediately with EINPROGRESS. */ static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) { struct sock *sk = asoc->base.sk; int err = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); /* Increment the association's refcnt. */ sctp_association_hold(asoc); for (;;) { prepare_to_wait_exclusive(&asoc->wait, &wait, TASK_INTERRUPTIBLE); if (!*timeo_p) goto do_nonblock; if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || asoc->base.dead) goto do_error; if (signal_pending(current)) goto do_interrupted; if (sctp_state(asoc, ESTABLISHED)) break; /* Let another process have a go. Since we are going * to sleep anyway. */ release_sock(sk); current_timeo = schedule_timeout(current_timeo); lock_sock(sk); *timeo_p = current_timeo; } out: finish_wait(&asoc->wait, &wait); /* Release the association's refcnt. */ sctp_association_put(asoc); return err; do_error: if (asoc->init_err_counter + 1 > asoc->max_init_attempts) err = -ETIMEDOUT; else err = -ECONNREFUSED; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; do_nonblock: err = -EINPROGRESS; goto out; } static int sctp_wait_for_accept(struct sock *sk, long timeo) { struct sctp_endpoint *ep; int err = 0; DEFINE_WAIT(wait); ep = sctp_sk(sk)->ep; for (;;) { prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&ep->asocs)) { release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); } err = -EINVAL; if (!sctp_sstate(sk, LISTENING)) break; err = 0; if (!list_empty(&ep->asocs)) break; err = sock_intr_errno(timeo); if (signal_pending(current)) break; err = -EAGAIN; if (!timeo) break; } finish_wait(sk_sleep(sk), &wait); return err; } static void sctp_wait_for_close(struct sock *sk, long timeout) { DEFINE_WAIT(wait); do { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (list_empty(&sctp_sk(sk)->ep->asocs)) break; release_sock(sk); timeout = schedule_timeout(timeout); lock_sock(sk); } while (!signal_pending(current) && timeout); finish_wait(sk_sleep(sk), &wait); } static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) { struct sk_buff *frag; if (!skb->data_len) goto done; /* Don't forget the fragments. 
*/ skb_walk_frags(skb, frag) sctp_skb_set_owner_r_frag(frag, sk); done: sctp_skb_set_owner_r(skb, sk); } void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc) { struct inet_sock *inet = inet_sk(sk); struct inet_sock *newinet; newsk->sk_type = sk->sk_type; newsk->sk_bound_dev_if = sk->sk_bound_dev_if; newsk->sk_flags = sk->sk_flags; newsk->sk_tsflags = sk->sk_tsflags; newsk->sk_no_check_tx = sk->sk_no_check_tx; newsk->sk_no_check_rx = sk->sk_no_check_rx; newsk->sk_reuse = sk->sk_reuse; newsk->sk_shutdown = sk->sk_shutdown; newsk->sk_destruct = sctp_destruct_sock; newsk->sk_family = sk->sk_family; newsk->sk_protocol = IPPROTO_SCTP; newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; newsk->sk_sndbuf = sk->sk_sndbuf; newsk->sk_rcvbuf = sk->sk_rcvbuf; newsk->sk_lingertime = sk->sk_lingertime; newsk->sk_rcvtimeo = sk->sk_rcvtimeo; newsk->sk_sndtimeo = sk->sk_sndtimeo; newsk->sk_rxhash = sk->sk_rxhash; newinet = inet_sk(newsk); /* Initialize sk's sport, dport, rcv_saddr and daddr for * getsockname() and getpeername() */ newinet->inet_sport = inet->inet_sport; newinet->inet_saddr = inet->inet_saddr; newinet->inet_rcv_saddr = inet->inet_rcv_saddr; newinet->inet_dport = htons(asoc->peer.port); newinet->pmtudisc = inet->pmtudisc; newinet->inet_id = asoc->next_tsn ^ jiffies; newinet->uc_ttl = inet->uc_ttl; newinet->mc_loop = 1; newinet->mc_ttl = 1; newinet->mc_index = 0; newinet->mc_list = NULL; if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) net_enable_timestamp(); security_sk_clone(sk, newsk); } static inline void sctp_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { int ancestor_size = sizeof(struct inet_sock) + sizeof(struct sctp_sock) - offsetof(struct sctp_sock, auto_asconf_list); if (sk_from->sk_family == PF_INET6) ancestor_size += sizeof(struct ipv6_pinfo); __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); } /* Populate the fields of the newsk from the oldsk and migrate the assoc * and its messages to the newsk. */ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, struct sctp_association *assoc, sctp_socket_type_t type) { struct sctp_sock *oldsp = sctp_sk(oldsk); struct sctp_sock *newsp = sctp_sk(newsk); struct sctp_bind_bucket *pp; /* hash list port iterator */ struct sctp_endpoint *newep = newsp->ep; struct sk_buff *skb, *tmp; struct sctp_ulpevent *event; struct sctp_bind_hashbucket *head; /* Migrate socket buffer sizes and all the socket level options to the * new socket. */ newsk->sk_sndbuf = oldsk->sk_sndbuf; newsk->sk_rcvbuf = oldsk->sk_rcvbuf; /* Brute force copy old sctp opt. */ sctp_copy_descendant(newsk, oldsk); /* Restore the ep value that was overwritten with the above structure * copy. */ newsp->ep = newep; newsp->hmac = NULL; /* Hook this new socket in to the bind_hash list. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), inet_sk(oldsk)->inet_num)]; spin_lock_bh(&head->lock); pp = sctp_sk(oldsk)->bind_hash; sk_add_bind_node(newsk, &pp->owner); sctp_sk(newsk)->bind_hash = pp; inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; spin_unlock_bh(&head->lock); /* Copy the bind_addr list from the original endpoint to the new * endpoint so that we can handle restarts properly */ sctp_bind_addr_dup(&newsp->ep->base.bind_addr, &oldsp->ep->base.bind_addr, GFP_KERNEL); /* Move any messages in the old socket's receive queue that are for the * peeled off association to the new socket's receive queue. 
*/ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsk->sk_receive_queue); __skb_queue_tail(&newsk->sk_receive_queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clean up any messages pending delivery due to partial * delivery. Three cases: * 1) No partial deliver; no work. * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. */ skb_queue_head_init(&newsp->pd_lobby); atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { struct sk_buff_head *queue; /* Decide which queue to move pd_lobby skbs to. */ if (assoc->ulpq.pd_mode) { queue = &newsp->pd_lobby; } else queue = &newsk->sk_receive_queue; /* Walk through the pd_lobby, looking for skbs that * need moved to the new socket. */ sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsp->pd_lobby); __skb_queue_tail(queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clear up any skbs waiting for the partial * delivery to finish. */ if (assoc->ulpq.pd_mode) sctp_clear_pd(oldsk, NULL); } sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) sctp_skb_set_owner_r_frag(skb, newsk); sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) sctp_skb_set_owner_r_frag(skb, newsk); /* Set the type of socket to indicate that it is peeled off from the * original UDP-style socket or created with the accept() call on a * TCP-style socket.. */ newsp->type = type; /* Mark the new socket "in-use" by the user so that any packets * that may arrive on the association after we've moved it are * queued to the backlog. This prevents a potential race between * backlog processing on the old socket and new-packet processing * on the new socket. * * The caller has just allocated newsk so we can guarantee that other * paths won't try to lock it and then oldsk. */ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); sctp_assoc_migrate(assoc, newsk); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. */ if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) { newsk->sk_state = SCTP_SS_CLOSED; newsk->sk_shutdown |= RCV_SHUTDOWN; } else { newsk->sk_state = SCTP_SS_ESTABLISHED; } release_sock(newsk); } /* This proto struct describes the ULP interface for SCTP. 
*/ struct proto sctp_prot = { .name = "SCTP", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #if IS_ENABLED(CONFIG_IPV6) #include <net/transp_v6.h> static void sctp_v6_destroy_sock(struct sock *sk) { sctp_destroy_sock(sk); inet6_destroy_sock(sk); } struct proto sctpv6_prot = { .name = "SCTPv6", .owner = THIS_MODULE, .close = sctp_close, .connect = sctp_connect, .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, .init = sctp_init_sock, .destroy = sctp_v6_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, .backlog_rcv = sctp_backlog_rcv, .hash = sctp_hash, .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp6_sock), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, .sockets_allocated = &sctp_sockets_allocated, }; #endif /* IS_ENABLED(CONFIG_IPV6) */
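/*
 * Minimal user-space sketch (illustration only, not kernel code): the
 * strict length checks in sctp_msghdr_parse() above require the sender
 * to build the ancillary object with cmsg_len equal to exactly
 * CMSG_LEN(sizeof(struct sctp_sndinfo)).  This sketch assumes struct
 * sctp_sndinfo and SCTP_SNDINFO from <netinet/sctp.h> (lksctp-tools),
 * an already-connected SCTP socket "fd", and an invented helper name;
 * it is guarded out so this file is unaffected.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static ssize_t send_with_sndinfo(int fd, const void *data, size_t len,
				 uint16_t stream_id, uint32_t ppid)
{
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	struct sctp_sndinfo sinfo;
	struct msghdr msg;
	struct cmsghdr *cmsg;
	union {
		/* Aligned buffer sized per the CMSG_SPACE() diagram above. */
		char buf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
		struct cmsghdr align;
	} u;

	memset(&u, 0, sizeof(u));
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDINFO;
	/* Must match the kernel's CMSG_LEN(sizeof(struct sctp_sndinfo))
	 * comparison exactly, or sendmsg() fails with -EINVAL. */
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));

	memset(&sinfo, 0, sizeof(sinfo));
	sinfo.snd_sid = stream_id;	/* outgoing SCTP stream number */
	sinfo.snd_ppid = ppid;		/* opaque, delivered to the peer */
	/* snd_flags stays 0: any bit outside the mask validated in
	 * sctp_msghdr_parse() is rejected with -EINVAL. */
	memcpy(CMSG_DATA(cmsg), &sinfo, sizeof(sinfo));

	return sendmsg(fd, &msg, 0);
}
#endif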
./CrossVul/dataset_final_sorted/CWE-617/c/bad_3156_0
crossvul-cpp_data_bad_3936_4
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2014 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "nghttp2_option.h" #include "nghttp2_session.h" int nghttp2_option_new(nghttp2_option **option_ptr) { *option_ptr = calloc(1, sizeof(nghttp2_option)); if (*option_ptr == NULL) { return NGHTTP2_ERR_NOMEM; } return 0; } void nghttp2_option_del(nghttp2_option *option) { free(option); } void nghttp2_option_set_no_auto_window_update(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_AUTO_WINDOW_UPDATE; option->no_auto_window_update = val; } void nghttp2_option_set_peer_max_concurrent_streams(nghttp2_option *option, uint32_t val) { option->opt_set_mask |= NGHTTP2_OPT_PEER_MAX_CONCURRENT_STREAMS; option->peer_max_concurrent_streams = val; } void nghttp2_option_set_no_recv_client_magic(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_RECV_CLIENT_MAGIC; option->no_recv_client_magic = val; } void nghttp2_option_set_no_http_messaging(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_HTTP_MESSAGING; option->no_http_messaging = val; } void nghttp2_option_set_max_reserved_remote_streams(nghttp2_option *option, uint32_t val) { option->opt_set_mask |= NGHTTP2_OPT_MAX_RESERVED_REMOTE_STREAMS; option->max_reserved_remote_streams = val; } static void set_ext_type(uint8_t *ext_types, uint8_t type) { ext_types[type / 8] = (uint8_t)(ext_types[type / 8] | (1 << (type & 0x7))); } void nghttp2_option_set_user_recv_extension_type(nghttp2_option *option, uint8_t type) { if (type < 10) { return; } option->opt_set_mask |= NGHTTP2_OPT_USER_RECV_EXT_TYPES; set_ext_type(option->user_recv_ext_types, type); } void nghttp2_option_set_builtin_recv_extension_type(nghttp2_option *option, uint8_t type) { switch (type) { case NGHTTP2_ALTSVC: option->opt_set_mask |= NGHTTP2_OPT_BUILTIN_RECV_EXT_TYPES; option->builtin_recv_ext_types |= NGHTTP2_TYPEMASK_ALTSVC; return; case NGHTTP2_ORIGIN: option->opt_set_mask |= NGHTTP2_OPT_BUILTIN_RECV_EXT_TYPES; option->builtin_recv_ext_types |= NGHTTP2_TYPEMASK_ORIGIN; return; default: return; } } void nghttp2_option_set_no_auto_ping_ack(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_AUTO_PING_ACK; option->no_auto_ping_ack = val; } void nghttp2_option_set_max_send_header_block_length(nghttp2_option *option, size_t val) { option->opt_set_mask |= NGHTTP2_OPT_MAX_SEND_HEADER_BLOCK_LENGTH; option->max_send_header_block_length = val; } void 
nghttp2_option_set_max_deflate_dynamic_table_size(nghttp2_option *option, size_t val) { option->opt_set_mask |= NGHTTP2_OPT_MAX_DEFLATE_DYNAMIC_TABLE_SIZE; option->max_deflate_dynamic_table_size = val; } void nghttp2_option_set_no_closed_streams(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_CLOSED_STREAMS; option->no_closed_streams = val; } void nghttp2_option_set_max_outbound_ack(nghttp2_option *option, size_t val) { option->opt_set_mask |= NGHTTP2_OPT_MAX_OUTBOUND_ACK; option->max_outbound_ack = val; }
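/*
 * Minimal usage sketch (illustration only, not part of this file): each
 * setter above merely records a value and a bit in opt_set_mask, and
 * session_new() later copies whatever was set into the session, so the
 * option object can be freed immediately after session creation.
 * "make_server_session" is an invented example name, and the callbacks
 * object is assumed to be configured elsewhere; the block is guarded
 * out so this translation unit is unaffected.
 */
#if 0
#include <nghttp2/nghttp2.h>

static int make_server_session(nghttp2_session **session_ptr,
                               const nghttp2_session_callbacks *callbacks,
                               void *user_data) {
  nghttp2_option *option;
  int rv;

  rv = nghttp2_option_new(&option);
  if (rv != 0) {
    return rv; /* NGHTTP2_ERR_NOMEM */
  }

  /* The application takes over WINDOW_UPDATE transmission itself. */
  nghttp2_option_set_no_auto_window_update(option, 1);
  /* Cap queued outbound ACKs (SETTINGS/PING) to bound memory use;
     the default comes from NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM. */
  nghttp2_option_set_max_outbound_ack(option, 1000);

  rv = nghttp2_session_server_new2(session_ptr, callbacks, user_data, option);

  /* Safe even on success: the session keeps its own copies. */
  nghttp2_option_del(option);
  return rv;
}
#endif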
./CrossVul/dataset_final_sorted/CWE-707/c/bad_3936_4
crossvul-cpp_data_good_3937_0
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2012 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "nghttp2_session.h" #include <string.h> #include <stddef.h> #include <stdio.h> #include <assert.h> #include <stdarg.h> #include "nghttp2_helper.h" #include "nghttp2_net.h" #include "nghttp2_priority_spec.h" #include "nghttp2_option.h" #include "nghttp2_http.h" #include "nghttp2_pq.h" #include "nghttp2_debug.h" /* * Returns non-zero if the number of outgoing opened streams is larger * than or equal to * remote_settings.max_concurrent_streams. */ static int session_is_outgoing_concurrent_streams_max(nghttp2_session *session) { return session->remote_settings.max_concurrent_streams <= session->num_outgoing_streams; } /* * Returns non-zero if the number of incoming opened streams is larger * than or equal to * local_settings.max_concurrent_streams. */ static int session_is_incoming_concurrent_streams_max(nghttp2_session *session) { return session->local_settings.max_concurrent_streams <= session->num_incoming_streams; } /* * Returns non-zero if the number of incoming opened streams is larger * than or equal to * session->pending_local_max_concurrent_stream. */ static int session_is_incoming_concurrent_streams_pending_max(nghttp2_session *session) { return session->pending_local_max_concurrent_stream <= session->num_incoming_streams; } /* * Returns non-zero if |lib_error| is non-fatal error. */ static int is_non_fatal(int lib_error_code) { return lib_error_code < 0 && lib_error_code > NGHTTP2_ERR_FATAL; } int nghttp2_is_fatal(int lib_error_code) { return lib_error_code < NGHTTP2_ERR_FATAL; } static int session_enforce_http_messaging(nghttp2_session *session) { return (session->opt_flags & NGHTTP2_OPTMASK_NO_HTTP_MESSAGING) == 0; } /* * Returns nonzero if |frame| is trailer headers. 
*/ static int session_trailer_headers(nghttp2_session *session, nghttp2_stream *stream, nghttp2_frame *frame) { if (!stream || frame->hd.type != NGHTTP2_HEADERS) { return 0; } if (session->server) { return frame->headers.cat == NGHTTP2_HCAT_HEADERS; } return frame->headers.cat == NGHTTP2_HCAT_HEADERS && (stream->http_flags & NGHTTP2_HTTP_FLAG_EXPECT_FINAL_RESPONSE) == 0; } /* Returns nonzero if the |stream| is in reserved(remote) state */ static int state_reserved_remote(nghttp2_session *session, nghttp2_stream *stream) { return stream->state == NGHTTP2_STREAM_RESERVED && !nghttp2_session_is_my_stream_id(session, stream->stream_id); } /* Returns nonzero if the |stream| is in reserved(local) state */ static int state_reserved_local(nghttp2_session *session, nghttp2_stream *stream) { return stream->state == NGHTTP2_STREAM_RESERVED && nghttp2_session_is_my_stream_id(session, stream->stream_id); } /* * Checks whether received stream_id is valid. This function returns * 1 if it succeeds, or 0. */ static int session_is_new_peer_stream_id(nghttp2_session *session, int32_t stream_id) { return stream_id != 0 && !nghttp2_session_is_my_stream_id(session, stream_id) && session->last_recv_stream_id < stream_id; } static int session_detect_idle_stream(nghttp2_session *session, int32_t stream_id) { /* Assume that stream object with stream_id does not exist */ if (nghttp2_session_is_my_stream_id(session, stream_id)) { if (session->last_sent_stream_id < stream_id) { return 1; } return 0; } if (session_is_new_peer_stream_id(session, stream_id)) { return 1; } return 0; } static int check_ext_type_set(const uint8_t *ext_types, uint8_t type) { return (ext_types[type / 8] & (1 << (type & 0x7))) > 0; } static int session_call_error_callback(nghttp2_session *session, int lib_error_code, const char *fmt, ...) { size_t bufsize; va_list ap; char *buf; int rv; nghttp2_mem *mem; if (!session->callbacks.error_callback && !session->callbacks.error_callback2) { return 0; } mem = &session->mem; va_start(ap, fmt); rv = vsnprintf(NULL, 0, fmt, ap); va_end(ap); if (rv < 0) { return NGHTTP2_ERR_NOMEM; } bufsize = (size_t)(rv + 1); buf = nghttp2_mem_malloc(mem, bufsize); if (buf == NULL) { return NGHTTP2_ERR_NOMEM; } va_start(ap, fmt); rv = vsnprintf(buf, bufsize, fmt, ap); va_end(ap); if (rv < 0) { nghttp2_mem_free(mem, buf); /* vsnprintf may return error because of various things we can imagine, but typically we don't want to drop session just for debug callback. */ DEBUGF("error_callback: vsnprintf failed. The template was %s\n", fmt); return 0; } if (session->callbacks.error_callback2) { rv = session->callbacks.error_callback2(session, lib_error_code, buf, (size_t)rv, session->user_data); } else { rv = session->callbacks.error_callback(session, buf, (size_t)rv, session->user_data); } nghttp2_mem_free(mem, buf); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_terminate_session(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code, const char *reason) { int rv; const uint8_t *debug_data; size_t debug_datalen; if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) { return 0; } /* Ignore all incoming frames because we are going to tear down the session. 
*/ session->iframe.state = NGHTTP2_IB_IGN_ALL; if (reason == NULL) { debug_data = NULL; debug_datalen = 0; } else { debug_data = (const uint8_t *)reason; debug_datalen = strlen(reason); } rv = nghttp2_session_add_goaway(session, last_stream_id, error_code, debug_data, debug_datalen, NGHTTP2_GOAWAY_AUX_TERM_ON_SEND); if (rv != 0) { return rv; } session->goaway_flags |= NGHTTP2_GOAWAY_TERM_ON_SEND; return 0; } int nghttp2_session_terminate_session(nghttp2_session *session, uint32_t error_code) { return session_terminate_session(session, session->last_proc_stream_id, error_code, NULL); } int nghttp2_session_terminate_session2(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code) { return session_terminate_session(session, last_stream_id, error_code, NULL); } int nghttp2_session_terminate_session_with_reason(nghttp2_session *session, uint32_t error_code, const char *reason) { return session_terminate_session(session, session->last_proc_stream_id, error_code, reason); } int nghttp2_session_is_my_stream_id(nghttp2_session *session, int32_t stream_id) { int rem; if (stream_id == 0) { return 0; } rem = stream_id & 0x1; if (session->server) { return rem == 0; } return rem == 1; } nghttp2_stream *nghttp2_session_get_stream(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = (nghttp2_stream *)nghttp2_map_find(&session->streams, stream_id); if (stream == NULL || (stream->flags & NGHTTP2_STREAM_FLAG_CLOSED) || stream->state == NGHTTP2_STREAM_IDLE) { return NULL; } return stream; } nghttp2_stream *nghttp2_session_get_stream_raw(nghttp2_session *session, int32_t stream_id) { return (nghttp2_stream *)nghttp2_map_find(&session->streams, stream_id); } static void session_inbound_frame_reset(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_mem *mem = &session->mem; /* A bit risky code, since if this function is called from nghttp2_session_new(), we rely on the fact that iframe->frame.hd.type is 0, so that no free is performed. 
*/ switch (iframe->frame.hd.type) { case NGHTTP2_DATA: break; case NGHTTP2_HEADERS: nghttp2_frame_headers_free(&iframe->frame.headers, mem); break; case NGHTTP2_PRIORITY: nghttp2_frame_priority_free(&iframe->frame.priority); break; case NGHTTP2_RST_STREAM: nghttp2_frame_rst_stream_free(&iframe->frame.rst_stream); break; case NGHTTP2_SETTINGS: nghttp2_frame_settings_free(&iframe->frame.settings, mem); nghttp2_mem_free(mem, iframe->iv); iframe->iv = NULL; iframe->niv = 0; iframe->max_niv = 0; break; case NGHTTP2_PUSH_PROMISE: nghttp2_frame_push_promise_free(&iframe->frame.push_promise, mem); break; case NGHTTP2_PING: nghttp2_frame_ping_free(&iframe->frame.ping); break; case NGHTTP2_GOAWAY: nghttp2_frame_goaway_free(&iframe->frame.goaway, mem); break; case NGHTTP2_WINDOW_UPDATE: nghttp2_frame_window_update_free(&iframe->frame.window_update); break; default: /* extension frame */ if (check_ext_type_set(session->user_recv_ext_types, iframe->frame.hd.type)) { nghttp2_frame_extension_free(&iframe->frame.ext); } else { switch (iframe->frame.hd.type) { case NGHTTP2_ALTSVC: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ALTSVC) == 0) { break; } nghttp2_frame_altsvc_free(&iframe->frame.ext, mem); break; case NGHTTP2_ORIGIN: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ORIGIN) == 0) { break; } nghttp2_frame_origin_free(&iframe->frame.ext, mem); break; } } break; } memset(&iframe->frame, 0, sizeof(nghttp2_frame)); memset(&iframe->ext_frame_payload, 0, sizeof(nghttp2_ext_frame_payload)); iframe->state = NGHTTP2_IB_READ_HEAD; nghttp2_buf_wrap_init(&iframe->sbuf, iframe->raw_sbuf, sizeof(iframe->raw_sbuf)); iframe->sbuf.mark += NGHTTP2_FRAME_HDLEN; nghttp2_buf_free(&iframe->lbuf, mem); nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); iframe->raw_lbuf = NULL; iframe->payloadleft = 0; iframe->padlen = 0; } static void init_settings(nghttp2_settings_storage *settings) { settings->header_table_size = NGHTTP2_HD_DEFAULT_MAX_BUFFER_SIZE; settings->enable_push = 1; settings->max_concurrent_streams = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; settings->initial_window_size = NGHTTP2_INITIAL_WINDOW_SIZE; settings->max_frame_size = NGHTTP2_MAX_FRAME_SIZE_MIN; settings->max_header_list_size = UINT32_MAX; } static void active_outbound_item_reset(nghttp2_active_outbound_item *aob, nghttp2_mem *mem) { DEBUGF("send: reset nghttp2_active_outbound_item\n"); DEBUGF("send: aob->item = %p\n", aob->item); nghttp2_outbound_item_free(aob->item, mem); nghttp2_mem_free(mem, aob->item); aob->item = NULL; nghttp2_bufs_reset(&aob->framebufs); aob->state = NGHTTP2_OB_POP_ITEM; } int nghttp2_enable_strict_preface = 1; static int session_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, int server, const nghttp2_option *option, nghttp2_mem *mem) { int rv; size_t nbuffer; size_t max_deflate_dynamic_table_size = NGHTTP2_HD_DEFAULT_MAX_DEFLATE_BUFFER_SIZE; if (mem == NULL) { mem = nghttp2_mem_default(); } *session_ptr = nghttp2_mem_calloc(mem, 1, sizeof(nghttp2_session)); if (*session_ptr == NULL) { rv = NGHTTP2_ERR_NOMEM; goto fail_session; } (*session_ptr)->mem = *mem; mem = &(*session_ptr)->mem; /* next_stream_id is initialized in either nghttp2_session_client_new2 or nghttp2_session_server_new2 */ nghttp2_stream_init(&(*session_ptr)->root, 0, NGHTTP2_STREAM_FLAG_NONE, NGHTTP2_STREAM_IDLE, NGHTTP2_DEFAULT_WEIGHT, 0, 0, NULL, mem); (*session_ptr)->remote_window_size = NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE; (*session_ptr)->recv_window_size = 0; (*session_ptr)->consumed_size = 
0; (*session_ptr)->recv_reduction = 0; (*session_ptr)->local_window_size = NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE; (*session_ptr)->goaway_flags = NGHTTP2_GOAWAY_NONE; (*session_ptr)->local_last_stream_id = (1u << 31) - 1; (*session_ptr)->remote_last_stream_id = (1u << 31) - 1; (*session_ptr)->pending_local_max_concurrent_stream = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; (*session_ptr)->pending_enable_push = 1; if (server) { (*session_ptr)->server = 1; } init_settings(&(*session_ptr)->remote_settings); init_settings(&(*session_ptr)->local_settings); (*session_ptr)->max_incoming_reserved_streams = NGHTTP2_MAX_INCOMING_RESERVED_STREAMS; /* Limit max outgoing concurrent streams to sensible value */ (*session_ptr)->remote_settings.max_concurrent_streams = 100; (*session_ptr)->max_send_header_block_length = NGHTTP2_MAX_HEADERSLEN; (*session_ptr)->max_outbound_ack = NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM; (*session_ptr)->max_settings = NGHTTP2_DEFAULT_MAX_SETTINGS; if (option) { if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_WINDOW_UPDATE) && option->no_auto_window_update) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE; } if (option->opt_set_mask & NGHTTP2_OPT_PEER_MAX_CONCURRENT_STREAMS) { (*session_ptr)->remote_settings.max_concurrent_streams = option->peer_max_concurrent_streams; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_RESERVED_REMOTE_STREAMS) { (*session_ptr)->max_incoming_reserved_streams = option->max_reserved_remote_streams; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_RECV_CLIENT_MAGIC) && option->no_recv_client_magic) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_HTTP_MESSAGING) && option->no_http_messaging) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_HTTP_MESSAGING; } if (option->opt_set_mask & NGHTTP2_OPT_USER_RECV_EXT_TYPES) { memcpy((*session_ptr)->user_recv_ext_types, option->user_recv_ext_types, sizeof((*session_ptr)->user_recv_ext_types)); } if (option->opt_set_mask & NGHTTP2_OPT_BUILTIN_RECV_EXT_TYPES) { (*session_ptr)->builtin_recv_ext_types = option->builtin_recv_ext_types; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_PING_ACK) && option->no_auto_ping_ack) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_AUTO_PING_ACK; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_SEND_HEADER_BLOCK_LENGTH) { (*session_ptr)->max_send_header_block_length = option->max_send_header_block_length; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_DEFLATE_DYNAMIC_TABLE_SIZE) { max_deflate_dynamic_table_size = option->max_deflate_dynamic_table_size; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_CLOSED_STREAMS) && option->no_closed_streams) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_CLOSED_STREAMS; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_OUTBOUND_ACK) { (*session_ptr)->max_outbound_ack = option->max_outbound_ack; } if ((option->opt_set_mask & NGHTTP2_OPT_MAX_SETTINGS) && option->max_settings) { (*session_ptr)->max_settings = option->max_settings; } } rv = nghttp2_hd_deflate_init2(&(*session_ptr)->hd_deflater, max_deflate_dynamic_table_size, mem); if (rv != 0) { goto fail_hd_deflater; } rv = nghttp2_hd_inflate_init(&(*session_ptr)->hd_inflater, mem); if (rv != 0) { goto fail_hd_inflater; } rv = nghttp2_map_init(&(*session_ptr)->streams, mem); if (rv != 0) { goto fail_map; } nbuffer = ((*session_ptr)->max_send_header_block_length + NGHTTP2_FRAMEBUF_CHUNKLEN - 1) / NGHTTP2_FRAMEBUF_CHUNKLEN; if (nbuffer == 0) { nbuffer = 1; } /* 1 for Pad Field. 
*/ rv = nghttp2_bufs_init3(&(*session_ptr)->aob.framebufs, NGHTTP2_FRAMEBUF_CHUNKLEN, nbuffer, 1, NGHTTP2_FRAME_HDLEN + 1, mem); if (rv != 0) { goto fail_aob_framebuf; } active_outbound_item_reset(&(*session_ptr)->aob, mem); (*session_ptr)->callbacks = *callbacks; (*session_ptr)->user_data = user_data; session_inbound_frame_reset(*session_ptr); if (nghttp2_enable_strict_preface) { nghttp2_inbound_frame *iframe = &(*session_ptr)->iframe; if (server && ((*session_ptr)->opt_flags & NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC) == 0) { iframe->state = NGHTTP2_IB_READ_CLIENT_MAGIC; iframe->payloadleft = NGHTTP2_CLIENT_MAGIC_LEN; } else { iframe->state = NGHTTP2_IB_READ_FIRST_SETTINGS; } if (!server) { (*session_ptr)->aob.state = NGHTTP2_OB_SEND_CLIENT_MAGIC; nghttp2_bufs_add(&(*session_ptr)->aob.framebufs, NGHTTP2_CLIENT_MAGIC, NGHTTP2_CLIENT_MAGIC_LEN); } } return 0; fail_aob_framebuf: nghttp2_map_free(&(*session_ptr)->streams); fail_map: nghttp2_hd_inflate_free(&(*session_ptr)->hd_inflater); fail_hd_inflater: nghttp2_hd_deflate_free(&(*session_ptr)->hd_deflater); fail_hd_deflater: nghttp2_mem_free(mem, *session_ptr); fail_session: return rv; } int nghttp2_session_client_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data) { return nghttp2_session_client_new3(session_ptr, callbacks, user_data, NULL, NULL); } int nghttp2_session_client_new2(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option) { return nghttp2_session_client_new3(session_ptr, callbacks, user_data, option, NULL); } int nghttp2_session_client_new3(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option, nghttp2_mem *mem) { int rv; nghttp2_session *session; rv = session_new(&session, callbacks, user_data, 0, option, mem); if (rv != 0) { return rv; } /* IDs for use in client */ session->next_stream_id = 1; *session_ptr = session; return 0; } int nghttp2_session_server_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data) { return nghttp2_session_server_new3(session_ptr, callbacks, user_data, NULL, NULL); } int nghttp2_session_server_new2(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option) { return nghttp2_session_server_new3(session_ptr, callbacks, user_data, option, NULL); } int nghttp2_session_server_new3(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option, nghttp2_mem *mem) { int rv; nghttp2_session *session; rv = session_new(&session, callbacks, user_data, 1, option, mem); if (rv != 0) { return rv; } /* IDs for use in client */ session->next_stream_id = 2; *session_ptr = session; return 0; } static int free_streams(nghttp2_map_entry *entry, void *ptr) { nghttp2_session *session; nghttp2_stream *stream; nghttp2_outbound_item *item; nghttp2_mem *mem; session = (nghttp2_session *)ptr; mem = &session->mem; stream = (nghttp2_stream *)entry; item = stream->item; if (item && !item->queued && item != session->aob.item) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); } nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return 0; } static void ob_q_free(nghttp2_outbound_queue *q, nghttp2_mem *mem) { nghttp2_outbound_item *item, *next; for (item = q->head; item;) { next = item->qnext; nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); item = next; } 
} static int inflight_settings_new(nghttp2_inflight_settings **settings_ptr, const nghttp2_settings_entry *iv, size_t niv, nghttp2_mem *mem) { *settings_ptr = nghttp2_mem_malloc(mem, sizeof(nghttp2_inflight_settings)); if (!*settings_ptr) { return NGHTTP2_ERR_NOMEM; } if (niv > 0) { (*settings_ptr)->iv = nghttp2_frame_iv_copy(iv, niv, mem); if (!(*settings_ptr)->iv) { nghttp2_mem_free(mem, *settings_ptr); return NGHTTP2_ERR_NOMEM; } } else { (*settings_ptr)->iv = NULL; } (*settings_ptr)->niv = niv; (*settings_ptr)->next = NULL; return 0; } static void inflight_settings_del(nghttp2_inflight_settings *settings, nghttp2_mem *mem) { if (!settings) { return; } nghttp2_mem_free(mem, settings->iv); nghttp2_mem_free(mem, settings); } void nghttp2_session_del(nghttp2_session *session) { nghttp2_mem *mem; nghttp2_inflight_settings *settings; if (session == NULL) { return; } mem = &session->mem; for (settings = session->inflight_settings_head; settings;) { nghttp2_inflight_settings *next = settings->next; inflight_settings_del(settings, mem); settings = next; } nghttp2_stream_free(&session->root); /* Have to free streams first, so that we can check stream->item->queued */ nghttp2_map_each_free(&session->streams, free_streams, session); nghttp2_map_free(&session->streams); ob_q_free(&session->ob_urgent, mem); ob_q_free(&session->ob_reg, mem); ob_q_free(&session->ob_syn, mem); active_outbound_item_reset(&session->aob, mem); session_inbound_frame_reset(session); nghttp2_hd_deflate_free(&session->hd_deflater); nghttp2_hd_inflate_free(&session->hd_inflater); nghttp2_bufs_free(&session->aob.framebufs); nghttp2_mem_free(mem, session); } int nghttp2_session_reprioritize_stream( nghttp2_session *session, nghttp2_stream *stream, const nghttp2_priority_spec *pri_spec_in) { int rv; nghttp2_stream *dep_stream = NULL; nghttp2_priority_spec pri_spec_default; const nghttp2_priority_spec *pri_spec = pri_spec_in; assert(pri_spec->stream_id != stream->stream_id); if (!nghttp2_stream_in_dep_tree(stream)) { return 0; } if (pri_spec->stream_id != 0) { dep_stream = nghttp2_session_get_stream_raw(session, pri_spec->stream_id); if (!dep_stream && session_detect_idle_stream(session, pri_spec->stream_id)) { nghttp2_priority_spec_default_init(&pri_spec_default); dep_stream = nghttp2_session_open_stream( session, pri_spec->stream_id, NGHTTP2_FLAG_NONE, &pri_spec_default, NGHTTP2_STREAM_IDLE, NULL); if (dep_stream == NULL) { return NGHTTP2_ERR_NOMEM; } } else if (!dep_stream || !nghttp2_stream_in_dep_tree(dep_stream)) { nghttp2_priority_spec_default_init(&pri_spec_default); pri_spec = &pri_spec_default; } } if (pri_spec->stream_id == 0) { dep_stream = &session->root; } else if (nghttp2_stream_dep_find_ancestor(dep_stream, stream)) { DEBUGF("stream: cycle detected, dep_stream(%p)=%d stream(%p)=%d\n", dep_stream, dep_stream->stream_id, stream, stream->stream_id); nghttp2_stream_dep_remove_subtree(dep_stream); rv = nghttp2_stream_dep_add_subtree(stream->dep_prev, dep_stream); if (rv != 0) { return rv; } } assert(dep_stream); if (dep_stream == stream->dep_prev && !pri_spec->exclusive) { /* This is minor optimization when just weight is changed. 
*/ nghttp2_stream_change_weight(stream, pri_spec->weight); return 0; } nghttp2_stream_dep_remove_subtree(stream); /* We have to update weight after removing stream from tree */ stream->weight = pri_spec->weight; if (pri_spec->exclusive) { rv = nghttp2_stream_dep_insert_subtree(dep_stream, stream); } else { rv = nghttp2_stream_dep_add_subtree(dep_stream, stream); } if (rv != 0) { return rv; } return 0; } int nghttp2_session_add_item(nghttp2_session *session, nghttp2_outbound_item *item) { /* TODO Return error if stream is not found for the frame requiring stream presence. */ int rv = 0; nghttp2_stream *stream; nghttp2_frame *frame; frame = &item->frame; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); switch (frame->hd.type) { case NGHTTP2_DATA: if (!stream) { return NGHTTP2_ERR_STREAM_CLOSED; } if (stream->item) { return NGHTTP2_ERR_DATA_EXIST; } rv = nghttp2_stream_attach_item(stream, item); if (rv != 0) { return rv; } return 0; case NGHTTP2_HEADERS: /* We push request HEADERS and push response HEADERS to dedicated queue because their transmission is affected by SETTINGS_MAX_CONCURRENT_STREAMS */ /* TODO If 2 HEADERS are submitted for reserved stream, then both of them are queued into ob_syn, which is not desirable. */ if (frame->headers.cat == NGHTTP2_HCAT_REQUEST || (stream && stream->state == NGHTTP2_STREAM_RESERVED)) { nghttp2_outbound_queue_push(&session->ob_syn, item); item->queued = 1; return 0; ; } nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; case NGHTTP2_SETTINGS: case NGHTTP2_PING: nghttp2_outbound_queue_push(&session->ob_urgent, item); item->queued = 1; return 0; case NGHTTP2_RST_STREAM: if (stream) { stream->state = NGHTTP2_STREAM_CLOSING; } nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; case NGHTTP2_PUSH_PROMISE: { nghttp2_headers_aux_data *aux_data; nghttp2_priority_spec pri_spec; aux_data = &item->aux_data.headers; if (!stream) { return NGHTTP2_ERR_STREAM_CLOSED; } nghttp2_priority_spec_init(&pri_spec, stream->stream_id, NGHTTP2_DEFAULT_WEIGHT, 0); if (!nghttp2_session_open_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_RESERVED, aux_data->stream_user_data)) { return NGHTTP2_ERR_NOMEM; } /* We don't have to call nghttp2_session_adjust_closed_stream() here, since stream->stream_id is local stream_id, and it does not affect closed stream count. */ nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; } case NGHTTP2_WINDOW_UPDATE: if (stream) { stream->window_update_queued = 1; } else if (frame->hd.stream_id == 0) { session->window_update_queued = 1; } nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; default: nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; } } int nghttp2_session_add_rst_stream(nghttp2_session *session, int32_t stream_id, uint32_t error_code) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_stream *stream; nghttp2_mem *mem; mem = &session->mem; stream = nghttp2_session_get_stream(session, stream_id); if (stream && stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } /* Cancel pending request HEADERS in ob_syn if this RST_STREAM refers to that stream. 
*/ if (!session->server && nghttp2_session_is_my_stream_id(session, stream_id) && nghttp2_outbound_queue_top(&session->ob_syn)) { nghttp2_headers_aux_data *aux_data; nghttp2_frame *headers_frame; headers_frame = &nghttp2_outbound_queue_top(&session->ob_syn)->frame; assert(headers_frame->hd.type == NGHTTP2_HEADERS); if (headers_frame->hd.stream_id <= stream_id && (uint32_t)stream_id < session->next_stream_id) { for (item = session->ob_syn.head; item; item = item->qnext) { aux_data = &item->aux_data.headers; if (item->frame.hd.stream_id < stream_id) { continue; } /* stream_id in ob_syn queue must be strictly increasing. If we found larger ID, then we can break here. */ if (item->frame.hd.stream_id > stream_id || aux_data->canceled) { break; } aux_data->error_code = error_code; aux_data->canceled = 1; return 0; } } } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_rst_stream_init(&frame->rst_stream, stream_id, error_code); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_rst_stream_free(&frame->rst_stream); nghttp2_mem_free(mem, item); return rv; } return 0; } nghttp2_stream *nghttp2_session_open_stream(nghttp2_session *session, int32_t stream_id, uint8_t flags, nghttp2_priority_spec *pri_spec_in, nghttp2_stream_state initial_state, void *stream_user_data) { int rv; nghttp2_stream *stream; nghttp2_stream *dep_stream = NULL; int stream_alloc = 0; nghttp2_priority_spec pri_spec_default; nghttp2_priority_spec *pri_spec = pri_spec_in; nghttp2_mem *mem; mem = &session->mem; stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream) { assert(stream->state == NGHTTP2_STREAM_IDLE); assert(nghttp2_stream_in_dep_tree(stream)); nghttp2_session_detach_idle_stream(session, stream); rv = nghttp2_stream_dep_remove(stream); if (rv != 0) { return NULL; } } else { stream = nghttp2_mem_malloc(mem, sizeof(nghttp2_stream)); if (stream == NULL) { return NULL; } stream_alloc = 1; } if (pri_spec->stream_id != 0) { dep_stream = nghttp2_session_get_stream_raw(session, pri_spec->stream_id); if (!dep_stream && session_detect_idle_stream(session, pri_spec->stream_id)) { /* Depends on idle stream, which does not exist in memory. Assign default priority for it. */ nghttp2_priority_spec_default_init(&pri_spec_default); dep_stream = nghttp2_session_open_stream( session, pri_spec->stream_id, NGHTTP2_FLAG_NONE, &pri_spec_default, NGHTTP2_STREAM_IDLE, NULL); if (dep_stream == NULL) { if (stream_alloc) { nghttp2_mem_free(mem, stream); } return NULL; } } else if (!dep_stream || !nghttp2_stream_in_dep_tree(dep_stream)) { /* If dep_stream is not part of dependency tree, stream will get default priority. This handles the case when pri_spec->stream_id == stream_id. This happens because we don't check pri_spec->stream_id against new stream ID in nghttp2_submit_request. This also handles the case when idle stream created by PRIORITY frame was opened. Somehow we first remove the idle stream from dependency tree. This is done to simplify code base, but ideally we should retain old dependency. But I'm not sure this adds values. 
*/ nghttp2_priority_spec_default_init(&pri_spec_default); pri_spec = &pri_spec_default; } } if (initial_state == NGHTTP2_STREAM_RESERVED) { flags |= NGHTTP2_STREAM_FLAG_PUSH; } if (stream_alloc) { nghttp2_stream_init(stream, stream_id, flags, initial_state, pri_spec->weight, (int32_t)session->remote_settings.initial_window_size, (int32_t)session->local_settings.initial_window_size, stream_user_data, mem); rv = nghttp2_map_insert(&session->streams, &stream->map_entry); if (rv != 0) { nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return NULL; } } else { stream->flags = flags; stream->state = initial_state; stream->weight = pri_spec->weight; stream->stream_user_data = stream_user_data; } switch (initial_state) { case NGHTTP2_STREAM_RESERVED: if (nghttp2_session_is_my_stream_id(session, stream_id)) { /* reserved (local) */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); } else { /* reserved (remote) */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); ++session->num_incoming_reserved_streams; } /* Reserved stream does not count in the concurrent streams limit. That is one of the DOS vector. */ break; case NGHTTP2_STREAM_IDLE: /* Idle stream does not count toward the concurrent streams limit. This is used as anchor node in dependency tree. */ nghttp2_session_keep_idle_stream(session, stream); break; default: if (nghttp2_session_is_my_stream_id(session, stream_id)) { ++session->num_outgoing_streams; } else { ++session->num_incoming_streams; } } if (pri_spec->stream_id == 0) { dep_stream = &session->root; } assert(dep_stream); if (pri_spec->exclusive) { rv = nghttp2_stream_dep_insert(dep_stream, stream); if (rv != 0) { return NULL; } } else { nghttp2_stream_dep_add(dep_stream, stream); } return stream; } int nghttp2_session_close_stream(nghttp2_session *session, int32_t stream_id, uint32_t error_code) { int rv; nghttp2_stream *stream; nghttp2_mem *mem; int is_my_stream_id; mem = &session->mem; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } DEBUGF("stream: stream(%p)=%d close\n", stream, stream->stream_id); if (stream->item) { nghttp2_outbound_item *item; item = stream->item; rv = nghttp2_stream_detach_item(stream); if (rv != 0) { return rv; } /* If item is queued, it will be deleted when it is popped (nghttp2_session_prep_frame() will fail). If session->aob.item points to this item, let active_outbound_item_reset() free the item. */ if (!item->queued && item != session->aob.item) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); } } /* We call on_stream_close_callback even if stream->state is NGHTTP2_STREAM_INITIAL. This will happen while sending request HEADERS, a local endpoint receives RST_STREAM for that stream. It may be PROTOCOL_ERROR, but without notifying stream closure will hang the stream in a local endpoint. 
*/ if (session->callbacks.on_stream_close_callback) { if (session->callbacks.on_stream_close_callback( session, stream_id, error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } is_my_stream_id = nghttp2_session_is_my_stream_id(session, stream_id); /* pushed streams which is not opened yet is not counted toward max concurrent limits */ if ((stream->flags & NGHTTP2_STREAM_FLAG_PUSH)) { if (!is_my_stream_id) { --session->num_incoming_reserved_streams; } } else { if (is_my_stream_id) { --session->num_outgoing_streams; } else { --session->num_incoming_streams; } } /* Closes both directions just in case they are not closed yet */ stream->flags |= NGHTTP2_STREAM_FLAG_CLOSED; if ((session->opt_flags & NGHTTP2_OPTMASK_NO_CLOSED_STREAMS) == 0 && session->server && !is_my_stream_id && nghttp2_stream_in_dep_tree(stream)) { /* On server side, retain stream at most MAX_CONCURRENT_STREAMS combined with the current active incoming streams to make dependency tree work better. */ nghttp2_session_keep_closed_stream(session, stream); } else { rv = nghttp2_session_destroy_stream(session, stream); if (rv != 0) { return rv; } } return 0; } int nghttp2_session_destroy_stream(nghttp2_session *session, nghttp2_stream *stream) { nghttp2_mem *mem; int rv; DEBUGF("stream: destroy closed stream(%p)=%d\n", stream, stream->stream_id); mem = &session->mem; if (nghttp2_stream_in_dep_tree(stream)) { rv = nghttp2_stream_dep_remove(stream); if (rv != 0) { return rv; } } nghttp2_map_remove(&session->streams, stream->stream_id); nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return 0; } void nghttp2_session_keep_closed_stream(nghttp2_session *session, nghttp2_stream *stream) { DEBUGF("stream: keep closed stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); if (session->closed_stream_tail) { session->closed_stream_tail->closed_next = stream; stream->closed_prev = session->closed_stream_tail; } else { session->closed_stream_head = stream; } session->closed_stream_tail = stream; ++session->num_closed_streams; } void nghttp2_session_keep_idle_stream(nghttp2_session *session, nghttp2_stream *stream) { DEBUGF("stream: keep idle stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); if (session->idle_stream_tail) { session->idle_stream_tail->closed_next = stream; stream->closed_prev = session->idle_stream_tail; } else { session->idle_stream_head = stream; } session->idle_stream_tail = stream; ++session->num_idle_streams; } void nghttp2_session_detach_idle_stream(nghttp2_session *session, nghttp2_stream *stream) { nghttp2_stream *prev_stream, *next_stream; DEBUGF("stream: detach idle stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); prev_stream = stream->closed_prev; next_stream = stream->closed_next; if (prev_stream) { prev_stream->closed_next = next_stream; } else { session->idle_stream_head = next_stream; } if (next_stream) { next_stream->closed_prev = prev_stream; } else { session->idle_stream_tail = prev_stream; } stream->closed_prev = NULL; stream->closed_next = NULL; --session->num_idle_streams; } int nghttp2_session_adjust_closed_stream(nghttp2_session *session) { size_t num_stream_max; int rv; if (session->local_settings.max_concurrent_streams == NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS) { num_stream_max = session->pending_local_max_concurrent_stream; } else { num_stream_max = session->local_settings.max_concurrent_streams; } DEBUGF("stream: adjusting kept closed streams num_closed_streams=%zu, " "num_incoming_streams=%zu, 
max_concurrent_streams=%zu\n", session->num_closed_streams, session->num_incoming_streams, num_stream_max); while (session->num_closed_streams > 0 && session->num_closed_streams + session->num_incoming_streams > num_stream_max) { nghttp2_stream *head_stream; nghttp2_stream *next; head_stream = session->closed_stream_head; assert(head_stream); next = head_stream->closed_next; rv = nghttp2_session_destroy_stream(session, head_stream); if (rv != 0) { return rv; } /* head_stream is now freed */ session->closed_stream_head = next; if (session->closed_stream_head) { session->closed_stream_head->closed_prev = NULL; } else { session->closed_stream_tail = NULL; } --session->num_closed_streams; } return 0; } int nghttp2_session_adjust_idle_stream(nghttp2_session *session) { size_t max; int rv; /* Make minimum number of idle streams 16, and maximum 100, which are arbitrary chosen numbers. */ max = nghttp2_min( 100, nghttp2_max( 16, nghttp2_min(session->local_settings.max_concurrent_streams, session->pending_local_max_concurrent_stream))); DEBUGF("stream: adjusting kept idle streams num_idle_streams=%zu, max=%zu\n", session->num_idle_streams, max); while (session->num_idle_streams > max) { nghttp2_stream *head; nghttp2_stream *next; head = session->idle_stream_head; assert(head); next = head->closed_next; rv = nghttp2_session_destroy_stream(session, head); if (rv != 0) { return rv; } /* head is now destroyed */ session->idle_stream_head = next; if (session->idle_stream_head) { session->idle_stream_head->closed_prev = NULL; } else { session->idle_stream_tail = NULL; } --session->num_idle_streams; } return 0; } /* * Closes stream with stream ID |stream_id| if both transmission and * reception of the stream were disallowed. The |error_code| indicates * the reason of the closure. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_INVALID_ARGUMENT * The stream is not found. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. */ int nghttp2_session_close_stream_if_shut_rdwr(nghttp2_session *session, nghttp2_stream *stream) { if ((stream->shut_flags & NGHTTP2_SHUT_RDWR) == NGHTTP2_SHUT_RDWR) { return nghttp2_session_close_stream(session, stream->stream_id, NGHTTP2_NO_ERROR); } return 0; } /* * Returns nonzero if local endpoint allows reception of new stream * from remote. */ static int session_allow_incoming_new_stream(nghttp2_session *session) { return (session->goaway_flags & (NGHTTP2_GOAWAY_TERM_ON_SEND | NGHTTP2_GOAWAY_SENT)) == 0; } /* * This function returns nonzero if session is closing. */ static int session_is_closing(nghttp2_session *session) { return (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) != 0 || (nghttp2_session_want_read(session) == 0 && nghttp2_session_want_write(session) == 0); } /* * Check that we can send a frame to the |stream|. This function * returns 0 if we can send a frame to the |frame|, or one of the * following negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed. * NGHTTP2_ERR_STREAM_SHUT_WR * The stream is half-closed for transmission. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. 
*/ static int session_predicate_for_stream_send(nghttp2_session *session, nghttp2_stream *stream) { if (stream == NULL) { return NGHTTP2_ERR_STREAM_CLOSED; } if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } if (stream->shut_flags & NGHTTP2_SHUT_WR) { return NGHTTP2_ERR_STREAM_SHUT_WR; } return 0; } int nghttp2_session_check_request_allowed(nghttp2_session *session) { return !session->server && session->next_stream_id <= INT32_MAX && (session->goaway_flags & NGHTTP2_GOAWAY_RECV) == 0 && !session_is_closing(session); } /* * This function checks request HEADERS frame, which opens stream, can * be sent at this time. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because of GOAWAY: session is * going down or received last_stream_id is strictly less than * frame->hd.stream_id. * NGHTTP2_ERR_STREAM_CLOSING * request HEADERS was canceled by RST_STREAM while it is in queue. */ static int session_predicate_request_headers_send(nghttp2_session *session, nghttp2_outbound_item *item) { if (item->aux_data.headers.canceled) { return NGHTTP2_ERR_STREAM_CLOSING; } /* If we are terminating session (NGHTTP2_GOAWAY_TERM_ON_SEND), GOAWAY was received from peer, or session is about to close, new request is not allowed. */ if ((session->goaway_flags & NGHTTP2_GOAWAY_RECV) || session_is_closing(session)) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks HEADERS, which is the first frame from the * server, with the |stream| can be sent at this time. The |stream| * can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_INVALID_STREAM_ID * The stream ID is invalid. * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. * NGHTTP2_ERR_PROTO * Client side attempted to send response. */ static int session_predicate_response_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (!session->server) { return NGHTTP2_ERR_PROTO; } if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { return NGHTTP2_ERR_INVALID_STREAM_ID; } switch (stream->state) { case NGHTTP2_STREAM_OPENING: return 0; case NGHTTP2_STREAM_CLOSING: return NGHTTP2_ERR_STREAM_CLOSING; default: return NGHTTP2_ERR_INVALID_STREAM_STATE; } } /* * This function checks HEADERS for reserved stream can be sent. The * |stream| must be reserved state and the |session| is server side. * The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed. * NGHTTP2_ERR_STREAM_SHUT_WR * The stream is half-closed for transmission. * NGHTTP2_ERR_PROTO * The stream is not reserved state * NGHTTP2_ERR_STREAM_CLOSED * RST_STREAM was queued for this stream. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because GOAWAY is already sent or * received. 
 * NGHTTP2_ERR_PROTO
 *     Client side attempted to send push response.
 */
static int
session_predicate_push_response_headers_send(nghttp2_session *session,
                                             nghttp2_stream *stream) {
  int rv;
  /* TODO Should disallow HEADERS if GOAWAY has already been issued? */
  rv = session_predicate_for_stream_send(session, stream);
  if (rv != 0) {
    return rv;
  }
  assert(stream);
  if (!session->server) {
    return NGHTTP2_ERR_PROTO;
  }
  if (stream->state != NGHTTP2_STREAM_RESERVED) {
    return NGHTTP2_ERR_PROTO;
  }
  if (session->goaway_flags & NGHTTP2_GOAWAY_RECV) {
    return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED;
  }
  return 0;
}

/*
 * This function checks whether HEADERS, which is neither
 * stream-opening nor the first response header, can be sent on the
 * |stream| at this time.  The |stream| can be NULL.
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_STREAM_CLOSED
 *     The stream is already closed or does not exist.
 * NGHTTP2_ERR_STREAM_SHUT_WR
 *     The transmission is not allowed for this stream (e.g., a frame
 *     with END_STREAM flag set has already been sent).
 * NGHTTP2_ERR_STREAM_CLOSING
 *     RST_STREAM was queued for this stream.
 * NGHTTP2_ERR_INVALID_STREAM_STATE
 *     The state of the stream is not valid.
 * NGHTTP2_ERR_SESSION_CLOSING
 *     This session is closing.
 */
static int session_predicate_headers_send(nghttp2_session *session,
                                          nghttp2_stream *stream) {
  int rv;
  rv = session_predicate_for_stream_send(session, stream);
  if (rv != 0) {
    return rv;
  }
  assert(stream);
  switch (stream->state) {
  case NGHTTP2_STREAM_OPENED:
    return 0;
  case NGHTTP2_STREAM_CLOSING:
    return NGHTTP2_ERR_STREAM_CLOSING;
  default:
    if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) {
      return 0;
    }
    return NGHTTP2_ERR_INVALID_STREAM_STATE;
  }
}

/*
 * This function checks whether the PUSH_PROMISE frame |frame| can be
 * sent on the |stream| at this time.  The |stream| can be NULL.
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED
 *     New stream cannot be created because GOAWAY is already sent or
 *     received.
 * NGHTTP2_ERR_PROTO
 *     The client side attempts to send PUSH_PROMISE, or the server
 *     sends PUSH_PROMISE for the stream not initiated by the client.
 * NGHTTP2_ERR_STREAM_CLOSED
 *     The stream is already closed or does not exist.
 * NGHTTP2_ERR_STREAM_CLOSING
 *     RST_STREAM was queued for this stream.
 * NGHTTP2_ERR_STREAM_SHUT_WR
 *     The transmission is not allowed for this stream (e.g., a frame
 *     with END_STREAM flag set has already been sent).
 * NGHTTP2_ERR_PUSH_DISABLED
 *     The remote peer disabled reception of PUSH_PROMISE.
 * NGHTTP2_ERR_SESSION_CLOSING
 *     This session is closing.
 */
static int session_predicate_push_promise_send(nghttp2_session *session,
                                               nghttp2_stream *stream) {
  int rv;

  if (!session->server) {
    return NGHTTP2_ERR_PROTO;
  }

  rv = session_predicate_for_stream_send(session, stream);
  if (rv != 0) {
    return rv;
  }

  assert(stream);

  if (session->remote_settings.enable_push == 0) {
    return NGHTTP2_ERR_PUSH_DISABLED;
  }
  if (stream->state == NGHTTP2_STREAM_CLOSING) {
    return NGHTTP2_ERR_STREAM_CLOSING;
  }
  if (session->goaway_flags & NGHTTP2_GOAWAY_RECV) {
    return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED;
  }
  return 0;
}

/*
 * This function checks whether WINDOW_UPDATE with the stream ID
 * |stream_id| can be sent at this time.  Note that the END_STREAM
 * flag of the previous frame does not affect the transmission of the
 * WINDOW_UPDATE frame.
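 *
 * Illustrative application-side sketch (an assumption, not code from
 * this file): frames guarded by this predicate are queued through the
 * public API, e.g. widening the connection-level window by 1 MiB:
 *
 *   // stream_id 0 selects the connection-level window
 *   rv = nghttp2_submit_window_update(session, NGHTTP2_FLAG_NONE, 0,
 *                                     1 << 20);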
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_STREAM_CLOSED
 *     The stream is already closed or does not exist.
 * NGHTTP2_ERR_STREAM_CLOSING
 *     RST_STREAM was queued for this stream.
 * NGHTTP2_ERR_INVALID_STREAM_STATE
 *     The state of the stream is not valid.
 * NGHTTP2_ERR_SESSION_CLOSING
 *     This session is closing.
 */
static int session_predicate_window_update_send(nghttp2_session *session,
                                                int32_t stream_id) {
  nghttp2_stream *stream;

  if (session_is_closing(session)) {
    return NGHTTP2_ERR_SESSION_CLOSING;
  }

  if (stream_id == 0) {
    /* Connection-level window update */
    return 0;
  }
  stream = nghttp2_session_get_stream(session, stream_id);
  if (stream == NULL) {
    return NGHTTP2_ERR_STREAM_CLOSED;
  }
  if (stream->state == NGHTTP2_STREAM_CLOSING) {
    return NGHTTP2_ERR_STREAM_CLOSING;
  }
  if (state_reserved_local(session, stream)) {
    return NGHTTP2_ERR_INVALID_STREAM_STATE;
  }
  return 0;
}

static int session_predicate_altsvc_send(nghttp2_session *session,
                                         int32_t stream_id) {
  nghttp2_stream *stream;

  if (session_is_closing(session)) {
    return NGHTTP2_ERR_SESSION_CLOSING;
  }

  if (stream_id == 0) {
    return 0;
  }

  stream = nghttp2_session_get_stream(session, stream_id);
  if (stream == NULL) {
    return NGHTTP2_ERR_STREAM_CLOSED;
  }
  if (stream->state == NGHTTP2_STREAM_CLOSING) {
    return NGHTTP2_ERR_STREAM_CLOSING;
  }

  return 0;
}

static int session_predicate_origin_send(nghttp2_session *session) {
  if (session_is_closing(session)) {
    return NGHTTP2_ERR_SESSION_CLOSING;
  }
  return 0;
}

/* Take into account the remote SETTINGS max frame size and both
   stream-level and connection-level flow control here. */
static ssize_t
nghttp2_session_enforce_flow_control_limits(nghttp2_session *session,
                                            nghttp2_stream *stream,
                                            ssize_t requested_window_size) {
  DEBUGF("send: remote windowsize connection=%d, remote maxframesize=%u, "
         "stream(id %d)=%d\n",
         session->remote_window_size, session->remote_settings.max_frame_size,
         stream->stream_id, stream->remote_window_size);

  return nghttp2_min(nghttp2_min(nghttp2_min(requested_window_size,
                                             stream->remote_window_size),
                                 session->remote_window_size),
                     (int32_t)session->remote_settings.max_frame_size);
}

/*
 * Returns the maximum length of next data read.  If the
 * connection-level and/or stream-wise flow control are enabled, the
 * return value takes into account those current window sizes.  The
 * remote setting for max frame size is also taken into account.
 */
static size_t nghttp2_session_next_data_read(nghttp2_session *session,
                                             nghttp2_stream *stream) {
  ssize_t window_size;

  window_size = nghttp2_session_enforce_flow_control_limits(
      session, stream, NGHTTP2_DATA_PAYLOADLEN);

  DEBUGF("send: available window=%zd\n", window_size);

  return window_size > 0 ? (size_t)window_size : 0;
}

/*
 * This function checks whether DATA can be sent on the |stream| at
 * this time.  The |stream| can be NULL.
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_STREAM_CLOSED
 *     The stream is already closed or does not exist.
 * NGHTTP2_ERR_STREAM_SHUT_WR
 *     The transmission is not allowed for this stream (e.g., a frame
 *     with END_STREAM flag set has already been sent).
 * NGHTTP2_ERR_STREAM_CLOSING
 *     RST_STREAM was queued for this stream.
 * NGHTTP2_ERR_INVALID_STREAM_STATE
 *     The state of the stream is not valid.
 * NGHTTP2_ERR_SESSION_CLOSING
 *     This session is closing.
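 *
 * Worked example for the window computation above (numbers are
 * illustrative only): with NGHTTP2_DATA_PAYLOADLEN (16384) requested,
 * a stream window of 1000, a connection window of 800, and a remote
 * max frame size of 16384,
 * nghttp2_session_enforce_flow_control_limits() yields
 * min(min(min(16384, 1000), 800), 16384) = 800, so the next DATA
 * frame carries at most 800 bytes of payload.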
*/ static int nghttp2_session_predicate_data_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { /* Request body data */ /* If stream->state is NGHTTP2_STREAM_CLOSING, RST_STREAM was queued but not yet sent. In this case, we won't send DATA frames. */ if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } if (stream->state == NGHTTP2_STREAM_RESERVED) { return NGHTTP2_ERR_INVALID_STREAM_STATE; } return 0; } /* Response body data */ if (stream->state == NGHTTP2_STREAM_OPENED) { return 0; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } return NGHTTP2_ERR_INVALID_STREAM_STATE; } static ssize_t session_call_select_padding(nghttp2_session *session, const nghttp2_frame *frame, size_t max_payloadlen) { ssize_t rv; if (frame->hd.length >= max_payloadlen) { return (ssize_t)frame->hd.length; } if (session->callbacks.select_padding_callback) { size_t max_paddedlen; max_paddedlen = nghttp2_min(frame->hd.length + NGHTTP2_MAX_PADLEN, max_payloadlen); rv = session->callbacks.select_padding_callback( session, frame, max_paddedlen, session->user_data); if (rv < (ssize_t)frame->hd.length || rv > (ssize_t)max_paddedlen) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return rv; } return (ssize_t)frame->hd.length; } /* Add padding to HEADERS or PUSH_PROMISE. We use frame->headers.padlen in this function to use the fact that frame->push_promise has also padlen in the same position. */ static int session_headers_add_pad(nghttp2_session *session, nghttp2_frame *frame) { int rv; ssize_t padded_payloadlen; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; size_t padlen; size_t max_payloadlen; aob = &session->aob; framebufs = &aob->framebufs; max_payloadlen = nghttp2_min(NGHTTP2_MAX_PAYLOADLEN, frame->hd.length + NGHTTP2_MAX_PADLEN); padded_payloadlen = session_call_select_padding(session, frame, max_payloadlen); if (nghttp2_is_fatal((int)padded_payloadlen)) { return (int)padded_payloadlen; } padlen = (size_t)padded_payloadlen - frame->hd.length; DEBUGF("send: padding selected: payloadlen=%zd, padlen=%zu\n", padded_payloadlen, padlen); rv = nghttp2_frame_add_pad(framebufs, &frame->hd, padlen, 0); if (rv != 0) { return rv; } frame->headers.padlen = padlen; return 0; } static size_t session_estimate_headers_payload(nghttp2_session *session, const nghttp2_nv *nva, size_t nvlen, size_t additional) { return nghttp2_hd_deflate_bound(&session->hd_deflater, nva, nvlen) + additional; } static int session_pack_extension(nghttp2_session *session, nghttp2_bufs *bufs, nghttp2_frame *frame) { ssize_t rv; nghttp2_buf *buf; size_t buflen; size_t framelen; assert(session->callbacks.pack_extension_callback); buf = &bufs->head->buf; buflen = nghttp2_min(nghttp2_buf_avail(buf), NGHTTP2_MAX_PAYLOADLEN); rv = session->callbacks.pack_extension_callback(session, buf->last, buflen, frame, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return (int)rv; } if (rv < 0 || (size_t)rv > buflen) { return NGHTTP2_ERR_CALLBACK_FAILURE; } framelen = (size_t)rv; frame->hd.length = framelen; assert(buf->pos == buf->last); buf->last += framelen; buf->pos -= NGHTTP2_FRAME_HDLEN; nghttp2_frame_pack_frame_hd(buf->pos, &frame->hd); return 0; } /* * This function serializes frame for transmission. 
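 *
 * For HEADERS and PUSH_PROMISE, serialization ends with
 * session_headers_add_pad(), which consults the application's
 * select_padding_callback.  A minimal hypothetical callback (an
 * assumption, not part of this file) padding to an 8-byte boundary:
 *
 *   static ssize_t pad_to_8(nghttp2_session *s, const nghttp2_frame *f,
 *                           size_t max_payloadlen, void *user_data) {
 *     size_t padded = (f->hd.length + 7) & ~(size_t)7;
 *     return (ssize_t)(padded < max_payloadlen ? padded
 *                                              : max_payloadlen);
 *   }
 *
 * Returning anything outside [frame->hd.length, max_payloadlen] is
 * rejected by session_call_select_padding() above as
 * NGHTTP2_ERR_CALLBACK_FAILURE.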
* * This function returns 0 if it succeeds, or one of negative error * codes, including both fatal and non-fatal ones. */ static int session_prep_frame(nghttp2_session *session, nghttp2_outbound_item *item) { int rv; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; frame = &item->frame; switch (frame->hd.type) { case NGHTTP2_DATA: { size_t next_readmax; nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream) { assert(stream->item == item); } rv = nghttp2_session_predicate_data_send(session, stream); if (rv != 0) { // If stream was already closed, nghttp2_session_get_stream() // returns NULL, but item is still attached to the stream. // Search stream including closed again. stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (stream) { int rv2; rv2 = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv2)) { return rv2; } } return rv; } /* Assuming stream is not NULL */ assert(stream); next_readmax = nghttp2_session_next_data_read(session, stream); if (next_readmax == 0) { /* This must be true since we only pop DATA frame item from queue when session->remote_window_size > 0 */ assert(session->remote_window_size > 0); rv = nghttp2_stream_defer_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } session->aob.item = NULL; active_outbound_item_reset(&session->aob, mem); return NGHTTP2_ERR_DEFERRED; } rv = nghttp2_session_pack_data(session, &session->aob.framebufs, next_readmax, frame, &item->aux_data.data, stream); if (rv == NGHTTP2_ERR_PAUSE) { return rv; } if (rv == NGHTTP2_ERR_DEFERRED) { rv = nghttp2_stream_defer_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_USER); if (nghttp2_is_fatal(rv)) { return rv; } session->aob.item = NULL; active_outbound_item_reset(&session->aob, mem); return NGHTTP2_ERR_DEFERRED; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_add_rst_stream(session, frame->hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } if (rv != 0) { int rv2; rv2 = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv2)) { return rv2; } return rv; } return 0; } case NGHTTP2_HEADERS: { nghttp2_headers_aux_data *aux_data; size_t estimated_payloadlen; aux_data = &item->aux_data.headers; if (frame->headers.cat == NGHTTP2_HCAT_REQUEST) { /* initial HEADERS, which opens stream */ nghttp2_stream *stream; stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->headers.pri_spec, NGHTTP2_STREAM_INITIAL, aux_data->stream_user_data); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream() here, since we don't keep closed stream in client side */ rv = session_predicate_request_headers_send(session, item); if (rv != 0) { return rv; } if (session_enforce_http_messaging(session)) { nghttp2_http_record_request_method(stream, frame); } } else { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream && stream->state == NGHTTP2_STREAM_RESERVED) { rv = session_predicate_push_response_headers_send(session, stream); if (rv == 0) { frame->headers.cat = NGHTTP2_HCAT_PUSH_RESPONSE; if (aux_data->stream_user_data) { stream->stream_user_data = aux_data->stream_user_data; } } } else if (session_predicate_response_headers_send(session, stream) == 0) { 
frame->headers.cat = NGHTTP2_HCAT_RESPONSE; rv = 0; } else { frame->headers.cat = NGHTTP2_HCAT_HEADERS; rv = session_predicate_headers_send(session, stream); } if (rv != 0) { return rv; } } estimated_payloadlen = session_estimate_headers_payload( session, frame->headers.nva, frame->headers.nvlen, NGHTTP2_PRIORITY_SPECLEN); if (estimated_payloadlen > session->max_send_header_block_length) { return NGHTTP2_ERR_FRAME_SIZE_ERROR; } rv = nghttp2_frame_pack_headers(&session->aob.framebufs, &frame->headers, &session->hd_deflater); if (rv != 0) { return rv; } DEBUGF("send: before padding, HEADERS serialized in %zd bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); rv = session_headers_add_pad(session, frame); if (rv != 0) { return rv; } DEBUGF("send: HEADERS finally serialized in %zd bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); if (frame->headers.cat == NGHTTP2_HCAT_REQUEST) { assert(session->last_sent_stream_id < frame->hd.stream_id); session->last_sent_stream_id = frame->hd.stream_id; } return 0; } case NGHTTP2_PRIORITY: { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } /* PRIORITY frame can be sent at any time and to any stream ID. */ nghttp2_frame_pack_priority(&session->aob.framebufs, &frame->priority); /* Peer can send PRIORITY frame against idle stream to create "anchor" in dependency tree. Only client can do this in nghttp2. In nghttp2, only server retains non-active (closed or idle) streams in memory, so we don't open stream here. */ return 0; } case NGHTTP2_RST_STREAM: if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } nghttp2_frame_pack_rst_stream(&session->aob.framebufs, &frame->rst_stream); return 0; case NGHTTP2_SETTINGS: { if (frame->hd.flags & NGHTTP2_FLAG_ACK) { assert(session->obq_flood_counter_ > 0); --session->obq_flood_counter_; /* When session is about to close, don't send SETTINGS ACK. We are required to send SETTINGS without ACK though; for example, we have to send SETTINGS as a part of connection preface. */ if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } } rv = nghttp2_frame_pack_settings(&session->aob.framebufs, &frame->settings); if (rv != 0) { return rv; } return 0; } case NGHTTP2_PUSH_PROMISE: { nghttp2_stream *stream; size_t estimated_payloadlen; /* stream could be NULL if associated stream was already closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* predicate should fail if stream is NULL. 
*/ rv = session_predicate_push_promise_send(session, stream); if (rv != 0) { return rv; } assert(stream); estimated_payloadlen = session_estimate_headers_payload( session, frame->push_promise.nva, frame->push_promise.nvlen, 0); if (estimated_payloadlen > session->max_send_header_block_length) { return NGHTTP2_ERR_FRAME_SIZE_ERROR; } rv = nghttp2_frame_pack_push_promise( &session->aob.framebufs, &frame->push_promise, &session->hd_deflater); if (rv != 0) { return rv; } rv = session_headers_add_pad(session, frame); if (rv != 0) { return rv; } assert(session->last_sent_stream_id + 2 <= frame->push_promise.promised_stream_id); session->last_sent_stream_id = frame->push_promise.promised_stream_id; return 0; } case NGHTTP2_PING: if (frame->hd.flags & NGHTTP2_FLAG_ACK) { assert(session->obq_flood_counter_ > 0); --session->obq_flood_counter_; } /* PING frame is allowed to be sent unless termination GOAWAY is sent */ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) { return NGHTTP2_ERR_SESSION_CLOSING; } nghttp2_frame_pack_ping(&session->aob.framebufs, &frame->ping); return 0; case NGHTTP2_GOAWAY: rv = nghttp2_frame_pack_goaway(&session->aob.framebufs, &frame->goaway); if (rv != 0) { return rv; } session->local_last_stream_id = frame->goaway.last_stream_id; return 0; case NGHTTP2_WINDOW_UPDATE: rv = session_predicate_window_update_send(session, frame->hd.stream_id); if (rv != 0) { return rv; } nghttp2_frame_pack_window_update(&session->aob.framebufs, &frame->window_update); return 0; case NGHTTP2_CONTINUATION: /* We never handle CONTINUATION here. */ assert(0); return 0; default: { nghttp2_ext_aux_data *aux_data; /* extension frame */ aux_data = &item->aux_data.ext; if (aux_data->builtin == 0) { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } return session_pack_extension(session, &session->aob.framebufs, frame); } switch (frame->hd.type) { case NGHTTP2_ALTSVC: rv = session_predicate_altsvc_send(session, frame->hd.stream_id); if (rv != 0) { return rv; } nghttp2_frame_pack_altsvc(&session->aob.framebufs, &frame->ext); return 0; case NGHTTP2_ORIGIN: rv = session_predicate_origin_send(session); if (rv != 0) { return rv; } rv = nghttp2_frame_pack_origin(&session->aob.framebufs, &frame->ext); if (rv != 0) { return rv; } return 0; default: /* Unreachable here */ assert(0); return 0; } } } } nghttp2_outbound_item * nghttp2_session_get_next_ob_item(nghttp2_session *session) { if (nghttp2_outbound_queue_top(&session->ob_urgent)) { return nghttp2_outbound_queue_top(&session->ob_urgent); } if (nghttp2_outbound_queue_top(&session->ob_reg)) { return nghttp2_outbound_queue_top(&session->ob_reg); } if (!session_is_outgoing_concurrent_streams_max(session)) { if (nghttp2_outbound_queue_top(&session->ob_syn)) { return nghttp2_outbound_queue_top(&session->ob_syn); } } if (session->remote_window_size > 0) { return nghttp2_stream_next_outbound_item(&session->root); } return NULL; } nghttp2_outbound_item * nghttp2_session_pop_next_ob_item(nghttp2_session *session) { nghttp2_outbound_item *item; item = nghttp2_outbound_queue_top(&session->ob_urgent); if (item) { nghttp2_outbound_queue_pop(&session->ob_urgent); item->queued = 0; return item; } item = nghttp2_outbound_queue_top(&session->ob_reg); if (item) { nghttp2_outbound_queue_pop(&session->ob_reg); item->queued = 0; return item; } if (!session_is_outgoing_concurrent_streams_max(session)) { item = nghttp2_outbound_queue_top(&session->ob_syn); if (item) { nghttp2_outbound_queue_pop(&session->ob_syn); item->queued = 0; return item; } } if 
(session->remote_window_size > 0) { return nghttp2_stream_next_outbound_item(&session->root); } return NULL; } static int session_call_before_frame_send(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.before_frame_send_callback) { rv = session->callbacks.before_frame_send_callback(session, frame, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_frame_send(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.on_frame_send_callback) { rv = session->callbacks.on_frame_send_callback(session, frame, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int find_stream_on_goaway_func(nghttp2_map_entry *entry, void *ptr) { nghttp2_close_stream_on_goaway_arg *arg; nghttp2_stream *stream; arg = (nghttp2_close_stream_on_goaway_arg *)ptr; stream = (nghttp2_stream *)entry; if (nghttp2_session_is_my_stream_id(arg->session, stream->stream_id)) { if (arg->incoming) { return 0; } } else if (!arg->incoming) { return 0; } if (stream->state != NGHTTP2_STREAM_IDLE && (stream->flags & NGHTTP2_STREAM_FLAG_CLOSED) == 0 && stream->stream_id > arg->last_stream_id) { /* We are collecting streams to close because we cannot call nghttp2_session_close_stream() inside nghttp2_map_each(). Reuse closed_next member.. bad choice? */ assert(stream->closed_next == NULL); assert(stream->closed_prev == NULL); if (arg->head) { stream->closed_next = arg->head; arg->head = stream; } else { arg->head = stream; } } return 0; } /* Closes non-idle and non-closed streams whose stream ID > last_stream_id. If incoming is nonzero, we are going to close incoming streams. Otherwise, close outgoing streams. */ static int session_close_stream_on_goaway(nghttp2_session *session, int32_t last_stream_id, int incoming) { int rv; nghttp2_stream *stream, *next_stream; nghttp2_close_stream_on_goaway_arg arg = {session, NULL, last_stream_id, incoming}; rv = nghttp2_map_each(&session->streams, find_stream_on_goaway_func, &arg); assert(rv == 0); stream = arg.head; while (stream) { next_stream = stream->closed_next; stream->closed_next = NULL; rv = nghttp2_session_close_stream(session, stream->stream_id, NGHTTP2_REFUSED_STREAM); /* stream may be deleted here */ stream = next_stream; if (nghttp2_is_fatal(rv)) { /* Clean up closed_next member just in case */ while (stream) { next_stream = stream->closed_next; stream->closed_next = NULL; stream = next_stream; } return rv; } } return 0; } static void reschedule_stream(nghttp2_stream *stream) { stream->last_writelen = stream->item->frame.hd.length; nghttp2_stream_reschedule(stream); } static int session_update_stream_consumed_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size); static int session_update_connection_consumed_size(nghttp2_session *session, size_t delta_size); /* * Called after a frame is sent. This function runs * on_frame_send_callback and handles stream closure upon END_STREAM * or RST_STREAM. This function does not reset session->aob. It is a * responsibility of session_after_frame_sent2. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. 
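 *
 * A hypothetical application callback (an illustration, not part of
 * this file) run from session_call_on_frame_send() above; returning
 * nonzero there surfaces as NGHTTP2_ERR_CALLBACK_FAILURE:
 *
 *   static int on_frame_send(nghttp2_session *s,
 *                            const nghttp2_frame *frame, void *ud) {
 *     fprintf(stderr, "sent type=%u stream=%d\n", frame->hd.type,
 *             frame->hd.stream_id);
 *     return 0;
 *   }
 *
 * registered with nghttp2_session_callbacks_set_on_frame_send_callback().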
*/ static int session_after_frame_sent1(nghttp2_session *session) { int rv; nghttp2_active_outbound_item *aob = &session->aob; nghttp2_outbound_item *item = aob->item; nghttp2_bufs *framebufs = &aob->framebufs; nghttp2_frame *frame; nghttp2_stream *stream; frame = &item->frame; if (frame->hd.type == NGHTTP2_DATA) { nghttp2_data_aux_data *aux_data; aux_data = &item->aux_data.data; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* We update flow control window after a frame was completely sent. This is possible because we choose payload length not to exceed the window */ session->remote_window_size -= (int32_t)frame->hd.length; if (stream) { stream->remote_window_size -= (int32_t)frame->hd.length; } if (stream && aux_data->eof) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } /* Call on_frame_send_callback after nghttp2_stream_detach_item(), so that application can issue nghttp2_submit_data() in the callback. */ if (session->callbacks.on_frame_send_callback) { rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } } if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { int stream_closed; stream_closed = (stream->shut_flags & NGHTTP2_SHUT_RDWR) == NGHTTP2_SHUT_RDWR; nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* stream may be NULL if it was closed */ if (stream_closed) { stream = NULL; } } return 0; } if (session->callbacks.on_frame_send_callback) { rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* non-DATA frame */ if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_PUSH_PROMISE) { if (nghttp2_bufs_next_present(framebufs)) { DEBUGF("send: CONTINUATION exists, just return\n"); return 0; } } rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } switch (frame->hd.type) { case NGHTTP2_HEADERS: { nghttp2_headers_aux_data *aux_data; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } switch (frame->headers.cat) { case NGHTTP2_HCAT_REQUEST: { stream->state = NGHTTP2_STREAM_OPENING; if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); } rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; if (aux_data->data_prd.read_callback) { /* nghttp2_submit_data() makes a copy of aux_data->data_prd */ rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, frame->hd.stream_id, &aux_data->data_prd); if (nghttp2_is_fatal(rv)) { return rv; } /* TODO nghttp2_submit_data() may fail if stream has already DATA frame item. We might have to handle it here. 
*/ } return 0; } case NGHTTP2_HCAT_PUSH_RESPONSE: stream->flags = (uint8_t)(stream->flags & ~NGHTTP2_STREAM_FLAG_PUSH); ++session->num_outgoing_streams; /* Fall through */ case NGHTTP2_HCAT_RESPONSE: stream->state = NGHTTP2_STREAM_OPENED; /* Fall through */ case NGHTTP2_HCAT_HEADERS: if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); } rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; if (aux_data->data_prd.read_callback) { rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, frame->hd.stream_id, &aux_data->data_prd); if (nghttp2_is_fatal(rv)) { return rv; } /* TODO nghttp2_submit_data() may fail if stream has already DATA frame item. We might have to handle it here. */ } return 0; default: /* Unreachable */ assert(0); return 0; } } case NGHTTP2_PRIORITY: if (session->server) { return 0; ; } stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (!stream) { if (!session_detect_idle_stream(session, frame->hd.stream_id)) { return 0; } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_FLAG_NONE, &frame->priority.pri_spec, NGHTTP2_STREAM_IDLE, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } } else { rv = nghttp2_session_reprioritize_stream(session, stream, &frame->priority.pri_spec); if (nghttp2_is_fatal(rv)) { return rv; } } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } return 0; case NGHTTP2_RST_STREAM: rv = nghttp2_session_close_stream(session, frame->hd.stream_id, frame->rst_stream.error_code); if (nghttp2_is_fatal(rv)) { return rv; } return 0; case NGHTTP2_GOAWAY: { nghttp2_goaway_aux_data *aux_data; aux_data = &item->aux_data.goaway; if ((aux_data->flags & NGHTTP2_GOAWAY_AUX_SHUTDOWN_NOTICE) == 0) { if (aux_data->flags & NGHTTP2_GOAWAY_AUX_TERM_ON_SEND) { session->goaway_flags |= NGHTTP2_GOAWAY_TERM_SENT; } session->goaway_flags |= NGHTTP2_GOAWAY_SENT; rv = session_close_stream_on_goaway(session, frame->goaway.last_stream_id, 1); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } case NGHTTP2_WINDOW_UPDATE: if (frame->hd.stream_id == 0) { session->window_update_queued = 0; if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { rv = session_update_connection_consumed_size(session, 0); } else { rv = nghttp2_session_update_recv_connection_window_size(session, 0); } if (nghttp2_is_fatal(rv)) { return rv; } return 0; } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } stream->window_update_queued = 0; /* We don't have to send WINDOW_UPDATE if END_STREAM from peer is seen. */ if (stream->shut_flags & NGHTTP2_SHUT_RD) { return 0; } if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { rv = session_update_stream_consumed_size(session, stream, 0); } else { rv = nghttp2_session_update_recv_stream_window_size(session, stream, 0, 1); } if (nghttp2_is_fatal(rv)) { return rv; } return 0; default: return 0; } } /* * Called after a frame is sent and session_after_frame_sent1. This * function is responsible to reset session->aob. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. 
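 *
 * When aux_data->no_copy is set, the DATA payload is not copied into
 * the framebufs; session_call_send_data() below hands the serialized
 * frame header to the application's send_data_callback instead.  A
 * hypothetical callback (struct conn, write_all and copy_from_fd are
 * assumptions for illustration; padding handling omitted):
 *
 *   static int send_data(nghttp2_session *s, nghttp2_frame *frame,
 *                        const uint8_t *framehd, size_t length,
 *                        nghttp2_data_source *source, void *ud) {
 *     struct conn *c = ud;
 *     write_all(c, framehd, 9);            // 9-byte frame header
 *     copy_from_fd(c, source->fd, length); // payload, zero copy
 *     return 0; // or NGHTTP2_ERR_WOULDBLOCK if c cannot accept it
 *   }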
*/ static int session_after_frame_sent2(nghttp2_session *session) { int rv; nghttp2_active_outbound_item *aob = &session->aob; nghttp2_outbound_item *item = aob->item; nghttp2_bufs *framebufs = &aob->framebufs; nghttp2_frame *frame; nghttp2_mem *mem; nghttp2_stream *stream; nghttp2_data_aux_data *aux_data; mem = &session->mem; frame = &item->frame; if (frame->hd.type != NGHTTP2_DATA) { if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_PUSH_PROMISE) { if (nghttp2_bufs_next_present(framebufs)) { framebufs->cur = framebufs->cur->next; DEBUGF("send: next CONTINUATION frame, %zu bytes\n", nghttp2_buf_len(&framebufs->cur->buf)); return 0; } } active_outbound_item_reset(&session->aob, mem); return 0; } /* DATA frame */ aux_data = &item->aux_data.data; /* On EOF, we have already detached data. Please note that application may issue nghttp2_submit_data() in on_frame_send_callback (call from session_after_frame_sent1), which attach data to stream. We don't want to detach it. */ if (aux_data->eof) { active_outbound_item_reset(aob, mem); return 0; } /* Reset no_copy here because next write may not use this. */ aux_data->no_copy = 0; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* If session is closed or RST_STREAM was queued, we won't send further data. */ if (nghttp2_session_predicate_data_send(session, stream) != 0) { if (stream) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } } active_outbound_item_reset(aob, mem); return 0; } aob->item = NULL; active_outbound_item_reset(&session->aob, mem); return 0; } static int session_call_send_data(nghttp2_session *session, nghttp2_outbound_item *item, nghttp2_bufs *framebufs) { int rv; nghttp2_buf *buf; size_t length; nghttp2_frame *frame; nghttp2_data_aux_data *aux_data; buf = &framebufs->cur->buf; frame = &item->frame; length = frame->hd.length - frame->data.padlen; aux_data = &item->aux_data.data; rv = session->callbacks.send_data_callback(session, frame, buf->pos, length, &aux_data->data_prd.source, session->user_data); switch (rv) { case 0: case NGHTTP2_ERR_WOULDBLOCK: case NGHTTP2_ERR_PAUSE: case NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE: return rv; default: return NGHTTP2_ERR_CALLBACK_FAILURE; } } static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, const uint8_t **data_ptr, int fast_cb) { int rv; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; nghttp2_mem *mem; mem = &session->mem; aob = &session->aob; framebufs = &aob->framebufs; /* We may have idle streams more than we expect (e.g., nghttp2_session_change_stream_priority() or nghttp2_session_create_idle_stream()). Adjust them here. */ rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } for (;;) { switch (aob->state) { case NGHTTP2_OB_POP_ITEM: { nghttp2_outbound_item *item; item = nghttp2_session_pop_next_ob_item(session); if (item == NULL) { return 0; } rv = session_prep_frame(session, item); if (rv == NGHTTP2_ERR_PAUSE) { return 0; } if (rv == NGHTTP2_ERR_DEFERRED) { DEBUGF("send: frame transmission deferred\n"); break; } if (rv < 0) { int32_t opened_stream_id = 0; uint32_t error_code = NGHTTP2_INTERNAL_ERROR; DEBUGF("send: frame preparation failed with %s\n", nghttp2_strerror(rv)); /* TODO If the error comes from compressor, the connection must be closed. 
 */
        if (item->frame.hd.type != NGHTTP2_DATA &&
            session->callbacks.on_frame_not_send_callback && is_non_fatal(rv)) {
          nghttp2_frame *frame = &item->frame;
          /* The library is responsible for the transmission of
             WINDOW_UPDATE frame, so we don't call error callback for
             it. */
          if (frame->hd.type != NGHTTP2_WINDOW_UPDATE &&
              session->callbacks.on_frame_not_send_callback(
                  session, frame, rv, session->user_data) != 0) {

            nghttp2_outbound_item_free(item, mem);
            nghttp2_mem_free(mem, item);

            return NGHTTP2_ERR_CALLBACK_FAILURE;
          }
        }
        /* We have to close stream opened by failed request HEADERS
           or PUSH_PROMISE. */
        switch (item->frame.hd.type) {
        case NGHTTP2_HEADERS:
          if (item->frame.headers.cat == NGHTTP2_HCAT_REQUEST) {
            opened_stream_id = item->frame.hd.stream_id;
            if (item->aux_data.headers.canceled) {
              error_code = item->aux_data.headers.error_code;
            } else {
              /* Set error_code to REFUSED_STREAM so that application
                 can send request again. */
              error_code = NGHTTP2_REFUSED_STREAM;
            }
          }
          break;
        case NGHTTP2_PUSH_PROMISE:
          opened_stream_id = item->frame.push_promise.promised_stream_id;
          break;
        }
        if (opened_stream_id) {
          /* careful not to override rv */
          int rv2;
          rv2 = nghttp2_session_close_stream(session, opened_stream_id,
                                             error_code);

          if (nghttp2_is_fatal(rv2)) {
            return rv2;
          }
        }

        nghttp2_outbound_item_free(item, mem);
        nghttp2_mem_free(mem, item);
        active_outbound_item_reset(aob, mem);

        if (rv == NGHTTP2_ERR_HEADER_COMP) {
          /* If a header compression error occurred, we should
             terminate the connection. */
          rv = nghttp2_session_terminate_session(session,
                                                 NGHTTP2_INTERNAL_ERROR);
        }
        if (nghttp2_is_fatal(rv)) {
          return rv;
        }
        break;
      }

      aob->item = item;

      nghttp2_bufs_rewind(framebufs);

      if (item->frame.hd.type != NGHTTP2_DATA) {
        nghttp2_frame *frame;

        frame = &item->frame;

        DEBUGF("send: next frame: payloadlen=%zu, type=%u, flags=0x%02x, "
               "stream_id=%d\n",
               frame->hd.length, frame->hd.type, frame->hd.flags,
               frame->hd.stream_id);

        rv = session_call_before_frame_send(session, frame);
        if (nghttp2_is_fatal(rv)) {
          return rv;
        }

        if (rv == NGHTTP2_ERR_CANCEL) {
          int32_t opened_stream_id = 0;
          uint32_t error_code = NGHTTP2_INTERNAL_ERROR;

          if (session->callbacks.on_frame_not_send_callback) {
            if (session->callbacks.on_frame_not_send_callback(
                    session, frame, rv, session->user_data) != 0) {
              return NGHTTP2_ERR_CALLBACK_FAILURE;
            }
          }

          /* We have to close stream opened by canceled request
             HEADERS or PUSH_PROMISE. */
          switch (item->frame.hd.type) {
          case NGHTTP2_HEADERS:
            if (item->frame.headers.cat == NGHTTP2_HCAT_REQUEST) {
              opened_stream_id = item->frame.hd.stream_id;
              /* We don't have to check
                 item->aux_data.headers.canceled since it has already
                 been checked. */
              /* Set error_code to REFUSED_STREAM so that application
                 can send request again.
*/ error_code = NGHTTP2_REFUSED_STREAM; } break; case NGHTTP2_PUSH_PROMISE: opened_stream_id = item->frame.push_promise.promised_stream_id; break; } if (opened_stream_id) { /* careful not to override rv */ int rv2; rv2 = nghttp2_session_close_stream(session, opened_stream_id, error_code); if (nghttp2_is_fatal(rv2)) { return rv2; } } active_outbound_item_reset(aob, mem); break; } } else { DEBUGF("send: next frame: DATA\n"); if (item->aux_data.data.no_copy) { aob->state = NGHTTP2_OB_SEND_NO_COPY; break; } } DEBUGF("send: start transmitting frame type=%u, length=%zd\n", framebufs->cur->buf.pos[3], framebufs->cur->buf.last - framebufs->cur->buf.pos); aob->state = NGHTTP2_OB_SEND_DATA; break; } case NGHTTP2_OB_SEND_DATA: { size_t datalen; nghttp2_buf *buf; buf = &framebufs->cur->buf; if (buf->pos == buf->last) { DEBUGF("send: end transmission of a frame\n"); /* Frame has completely sent */ if (fast_cb) { rv = session_after_frame_sent2(session); } else { rv = session_after_frame_sent1(session); if (rv < 0) { /* FATAL */ assert(nghttp2_is_fatal(rv)); return rv; } rv = session_after_frame_sent2(session); } if (rv < 0) { /* FATAL */ assert(nghttp2_is_fatal(rv)); return rv; } /* We have already adjusted the next state */ break; } *data_ptr = buf->pos; datalen = nghttp2_buf_len(buf); /* We increment the offset here. If send_callback does not send everything, we will adjust it. */ buf->pos += datalen; return (ssize_t)datalen; } case NGHTTP2_OB_SEND_NO_COPY: { nghttp2_stream *stream; nghttp2_frame *frame; int pause; DEBUGF("send: no copy DATA\n"); frame = &aob->item->frame; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream == NULL) { DEBUGF("send: no copy DATA cancelled because stream was closed\n"); active_outbound_item_reset(aob, mem); break; } rv = session_call_send_data(session, aob->item, framebufs); if (nghttp2_is_fatal(rv)) { return rv; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_add_rst_stream(session, frame->hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } active_outbound_item_reset(aob, mem); break; } if (rv == NGHTTP2_ERR_WOULDBLOCK) { return 0; } pause = (rv == NGHTTP2_ERR_PAUSE); rv = session_after_frame_sent1(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return rv; } rv = session_after_frame_sent2(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return rv; } /* We have already adjusted the next state */ if (pause) { return 0; } break; } case NGHTTP2_OB_SEND_CLIENT_MAGIC: { size_t datalen; nghttp2_buf *buf; buf = &framebufs->cur->buf; if (buf->pos == buf->last) { DEBUGF("send: end transmission of client magic\n"); active_outbound_item_reset(aob, mem); break; } *data_ptr = buf->pos; datalen = nghttp2_buf_len(buf); buf->pos += datalen; return (ssize_t)datalen; } } } } ssize_t nghttp2_session_mem_send(nghttp2_session *session, const uint8_t **data_ptr) { int rv; ssize_t len; *data_ptr = NULL; len = nghttp2_session_mem_send_internal(session, data_ptr, 1); if (len <= 0) { return len; } if (session->aob.item) { /* We have to call session_after_frame_sent1 here to handle stream closure upon transmission of frames. Otherwise, END_STREAM may be reached to client before we call nghttp2_session_mem_send again and we may get exceeding number of incoming streams. 
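 *
 * Typical application loop over this function (a sketch; write_all is
 * a hypothetical blocking writer, and all returned bytes must be sent
 * before the next call):
 *
 *   for (;;) {
 *     const uint8_t *data;
 *     ssize_t n = nghttp2_session_mem_send(session, &data);
 *     if (n <= 0) {
 *       break; // 0: nothing to send; < 0: fatal library error
 *     }
 *     write_all(fd, data, (size_t)n);
 *   }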
*/ rv = session_after_frame_sent1(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return (ssize_t)rv; } } return len; } int nghttp2_session_send(nghttp2_session *session) { const uint8_t *data = NULL; ssize_t datalen; ssize_t sentlen; nghttp2_bufs *framebufs; framebufs = &session->aob.framebufs; for (;;) { datalen = nghttp2_session_mem_send_internal(session, &data, 0); if (datalen <= 0) { return (int)datalen; } sentlen = session->callbacks.send_callback(session, data, (size_t)datalen, 0, session->user_data); if (sentlen < 0) { if (sentlen == NGHTTP2_ERR_WOULDBLOCK) { /* Transmission canceled. Rewind the offset */ framebufs->cur->buf.pos -= datalen; return 0; } return NGHTTP2_ERR_CALLBACK_FAILURE; } /* Rewind the offset to the amount of unsent bytes */ framebufs->cur->buf.pos -= datalen - sentlen; } } static ssize_t session_recv(nghttp2_session *session, uint8_t *buf, size_t len) { ssize_t rv; rv = session->callbacks.recv_callback(session, buf, len, 0, session->user_data); if (rv > 0) { if ((size_t)rv > len) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } else if (rv < 0 && rv != NGHTTP2_ERR_WOULDBLOCK && rv != NGHTTP2_ERR_EOF) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return rv; } static int session_call_on_begin_frame(nghttp2_session *session, const nghttp2_frame_hd *hd) { int rv; if (session->callbacks.on_begin_frame_callback) { rv = session->callbacks.on_begin_frame_callback(session, hd, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_frame_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.on_frame_recv_callback) { rv = session->callbacks.on_frame_recv_callback(session, frame, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_begin_headers(nghttp2_session *session, nghttp2_frame *frame) { int rv; DEBUGF("recv: call on_begin_headers callback stream_id=%d\n", frame->hd.stream_id); if (session->callbacks.on_begin_headers_callback) { rv = session->callbacks.on_begin_headers_callback(session, frame, session->user_data); if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_header(nghttp2_session *session, const nghttp2_frame *frame, const nghttp2_hd_nv *nv) { int rv = 0; if (session->callbacks.on_header_callback2) { rv = session->callbacks.on_header_callback2( session, frame, nv->name, nv->value, nv->flags, session->user_data); } else if (session->callbacks.on_header_callback) { rv = session->callbacks.on_header_callback( session, frame, nv->name->base, nv->name->len, nv->value->base, nv->value->len, nv->flags, session->user_data); } if (rv == NGHTTP2_ERR_PAUSE || rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_call_on_invalid_header(nghttp2_session *session, const nghttp2_frame *frame, const nghttp2_hd_nv *nv) { int rv; if (session->callbacks.on_invalid_header_callback2) { rv = session->callbacks.on_invalid_header_callback2( session, frame, nv->name, nv->value, nv->flags, session->user_data); } else if (session->callbacks.on_invalid_header_callback) { rv = session->callbacks.on_invalid_header_callback( session, frame, nv->name->base, nv->name->len, nv->value->base, nv->value->len, nv->flags, session->user_data); } else { return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } if (rv == NGHTTP2_ERR_PAUSE || rv == 
NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_call_on_extension_chunk_recv_callback(nghttp2_session *session, const uint8_t *data, size_t len) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; if (session->callbacks.on_extension_chunk_recv_callback) { rv = session->callbacks.on_extension_chunk_recv_callback( session, &frame->hd, data, len, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_unpack_extension_callback(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; void *payload = NULL; rv = session->callbacks.unpack_extension_callback( session, &payload, &frame->hd, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } frame->ext.payload = payload; return 0; } /* * Handles frame size error. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. */ static int session_handle_frame_size_error(nghttp2_session *session) { /* TODO Currently no callback is called for this error, because we call this callback before reading any payload */ return nghttp2_session_terminate_session(session, NGHTTP2_FRAME_SIZE_ERROR); } static uint32_t get_error_code_from_lib_error_code(int lib_error_code) { switch (lib_error_code) { case NGHTTP2_ERR_STREAM_CLOSED: return NGHTTP2_STREAM_CLOSED; case NGHTTP2_ERR_HEADER_COMP: return NGHTTP2_COMPRESSION_ERROR; case NGHTTP2_ERR_FRAME_SIZE_ERROR: return NGHTTP2_FRAME_SIZE_ERROR; case NGHTTP2_ERR_FLOW_CONTROL: return NGHTTP2_FLOW_CONTROL_ERROR; case NGHTTP2_ERR_REFUSED_STREAM: return NGHTTP2_REFUSED_STREAM; case NGHTTP2_ERR_PROTO: case NGHTTP2_ERR_HTTP_HEADER: case NGHTTP2_ERR_HTTP_MESSAGING: return NGHTTP2_PROTOCOL_ERROR; default: return NGHTTP2_INTERNAL_ERROR; } } /* * Calls on_invalid_frame_recv_callback if it is set to |session|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_CALLBACK_FAILURE * User defined callback function fails. 
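 *
 * Hypothetical application callback (an illustration, not part of
 * this file); nghttp2_strerror() renders the library error code:
 *
 *   static int on_invalid_frame(nghttp2_session *s,
 *                               const nghttp2_frame *frame,
 *                               int lib_error_code, void *ud) {
 *     fprintf(stderr, "invalid frame on stream %d: %s\n",
 *             frame->hd.stream_id, nghttp2_strerror(lib_error_code));
 *     return 0; // nonzero fails the session fatally
 *   }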
 */
static int
session_call_on_invalid_frame_recv_callback(nghttp2_session *session,
                                            nghttp2_frame *frame,
                                            int lib_error_code) {
  if (session->callbacks.on_invalid_frame_recv_callback) {
    if (session->callbacks.on_invalid_frame_recv_callback(
            session, frame, lib_error_code, session->user_data) != 0) {
      return NGHTTP2_ERR_CALLBACK_FAILURE;
    }
  }
  return 0;
}

static int session_handle_invalid_stream2(nghttp2_session *session,
                                          int32_t stream_id,
                                          nghttp2_frame *frame,
                                          int lib_error_code) {
  int rv;
  rv = nghttp2_session_add_rst_stream(
      session, stream_id, get_error_code_from_lib_error_code(lib_error_code));
  if (rv != 0) {
    return rv;
  }
  if (session->callbacks.on_invalid_frame_recv_callback) {
    if (session->callbacks.on_invalid_frame_recv_callback(
            session, frame, lib_error_code, session->user_data) != 0) {
      return NGHTTP2_ERR_CALLBACK_FAILURE;
    }
  }
  return 0;
}

static int session_handle_invalid_stream(nghttp2_session *session,
                                         nghttp2_frame *frame,
                                         int lib_error_code) {
  return session_handle_invalid_stream2(session, frame->hd.stream_id, frame,
                                        lib_error_code);
}

static int session_inflate_handle_invalid_stream(nghttp2_session *session,
                                                 nghttp2_frame *frame,
                                                 int lib_error_code) {
  int rv;
  rv = session_handle_invalid_stream(session, frame, lib_error_code);
  if (nghttp2_is_fatal(rv)) {
    return rv;
  }
  return NGHTTP2_ERR_IGN_HEADER_BLOCK;
}

/*
 * Handles invalid frame which causes connection error.
 */
static int session_handle_invalid_connection(nghttp2_session *session,
                                             nghttp2_frame *frame,
                                             int lib_error_code,
                                             const char *reason) {
  if (session->callbacks.on_invalid_frame_recv_callback) {
    if (session->callbacks.on_invalid_frame_recv_callback(
            session, frame, lib_error_code, session->user_data) != 0) {
      return NGHTTP2_ERR_CALLBACK_FAILURE;
    }
  }
  return nghttp2_session_terminate_session_with_reason(
      session, get_error_code_from_lib_error_code(lib_error_code), reason);
}

static int session_inflate_handle_invalid_connection(nghttp2_session *session,
                                                     nghttp2_frame *frame,
                                                     int lib_error_code,
                                                     const char *reason) {
  int rv;
  rv = session_handle_invalid_connection(session, frame, lib_error_code,
                                         reason);
  if (nghttp2_is_fatal(rv)) {
    return rv;
  }
  return NGHTTP2_ERR_IGN_HEADER_BLOCK;
}

/*
 * Inflates the header block in the memory pointed to by |in| with
 * |inlen| bytes.  If this function returns NGHTTP2_ERR_PAUSE, the
 * caller must call this function again, until it returns 0 or one of
 * the negative error codes.  If |call_header_cb| is zero, the
 * on_header callbacks are not invoked and the function never returns
 * NGHTTP2_ERR_PAUSE.  If the given |in| is the last chunk of the
 * header block, |final| must be nonzero.  If the header block is
 * successfully processed (which is indicated by the return value 0,
 * NGHTTP2_ERR_PAUSE or NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE), the
 * number of processed input bytes is assigned to |*readlen_ptr|.
 *
 * This function returns 0 if it succeeds, or one of the negative
 * error codes:
 *
 * NGHTTP2_ERR_CALLBACK_FAILURE
 *     The callback function failed.
 * NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE
 *     The callback returns this error code, indicating that this
 *     stream should be RST_STREAMed.
 * NGHTTP2_ERR_NOMEM
 *     Out of memory.
* NGHTTP2_ERR_PAUSE * The callback function returned NGHTTP2_ERR_PAUSE * NGHTTP2_ERR_HEADER_COMP * Header decompression failed */ static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame, size_t *readlen_ptr, uint8_t *in, size_t inlen, int final, int call_header_cb) { ssize_t proclen; int rv; int inflate_flags; nghttp2_hd_nv nv; nghttp2_stream *stream; nghttp2_stream *subject_stream; int trailer = 0; *readlen_ptr = 0; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { subject_stream = nghttp2_session_get_stream( session, frame->push_promise.promised_stream_id); } else { subject_stream = stream; trailer = session_trailer_headers(session, stream, frame); } DEBUGF("recv: decoding header block %zu bytes\n", inlen); for (;;) { inflate_flags = 0; proclen = nghttp2_hd_inflate_hd_nv(&session->hd_inflater, &nv, &inflate_flags, in, inlen, final); if (nghttp2_is_fatal((int)proclen)) { return (int)proclen; } if (proclen < 0) { if (session->iframe.state == NGHTTP2_IB_READ_HEADER_BLOCK) { if (subject_stream && subject_stream->state != NGHTTP2_STREAM_CLOSING) { /* Adding RST_STREAM here is very important. It prevents from invoking subsequent callbacks for the same stream ID. */ rv = nghttp2_session_add_rst_stream( session, subject_stream->stream_id, NGHTTP2_COMPRESSION_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } } } rv = nghttp2_session_terminate_session(session, NGHTTP2_COMPRESSION_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_HEADER_COMP; } in += proclen; inlen -= (size_t)proclen; *readlen_ptr += (size_t)proclen; DEBUGF("recv: proclen=%zd\n", proclen); if (call_header_cb && (inflate_flags & NGHTTP2_HD_INFLATE_EMIT)) { rv = 0; if (subject_stream) { if (session_enforce_http_messaging(session)) { rv = nghttp2_http_on_header(session, subject_stream, frame, &nv, trailer); if (rv == NGHTTP2_ERR_IGN_HTTP_HEADER) { /* Don't overwrite rv here */ int rv2; rv2 = session_call_on_invalid_header(session, frame, &nv); if (rv2 == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = NGHTTP2_ERR_HTTP_HEADER; } else { if (rv2 != 0) { return rv2; } /* header is ignored */ DEBUGF("recv: HTTP ignored: type=%u, id=%d, header %.*s: %.*s\n", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); rv2 = session_call_error_callback( session, NGHTTP2_ERR_HTTP_HEADER, "Ignoring received invalid HTTP header field: frame type: " "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); if (nghttp2_is_fatal(rv2)) { return rv2; } } } if (rv == NGHTTP2_ERR_HTTP_HEADER) { DEBUGF("recv: HTTP error: type=%u, id=%d, header %.*s: %.*s\n", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); rv = session_call_error_callback( session, NGHTTP2_ERR_HTTP_HEADER, "Invalid HTTP header field was received: frame type: " "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); if (nghttp2_is_fatal(rv)) { return rv; } rv = session_handle_invalid_stream2(session, subject_stream->stream_id, frame, NGHTTP2_ERR_HTTP_HEADER); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } } if (rv == 0) { rv = session_call_on_header(session, frame, &nv); /* This handles NGHTTP2_ERR_PAUSE and NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE as 
well */ if (rv != 0) { return rv; } } } } if (inflate_flags & NGHTTP2_HD_INFLATE_FINAL) { nghttp2_hd_inflate_end_headers(&session->hd_inflater); break; } if ((inflate_flags & NGHTTP2_HD_INFLATE_EMIT) == 0 && inlen == 0) { break; } } return 0; } /* * Call this function when HEADERS frame was completely received. * * This function returns 0 if it succeeds, or one of negative error * codes: * * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. * NGHTTP2_ERR_NOMEM * Out of memory. */ static int session_end_stream_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv; if ((frame->hd.flags & NGHTTP2_FLAG_END_STREAM) == 0) { return 0; } nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } static int session_after_header_block_received(nghttp2_session *session) { int rv = 0; nghttp2_frame *frame = &session->iframe.frame; nghttp2_stream *stream; /* We don't call on_frame_recv_callback if stream has been closed already or being closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } if (session_enforce_http_messaging(session)) { if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { nghttp2_stream *subject_stream; subject_stream = nghttp2_session_get_stream( session, frame->push_promise.promised_stream_id); if (subject_stream) { rv = nghttp2_http_on_request_headers(subject_stream, frame); } } else { assert(frame->hd.type == NGHTTP2_HEADERS); switch (frame->headers.cat) { case NGHTTP2_HCAT_REQUEST: rv = nghttp2_http_on_request_headers(stream, frame); break; case NGHTTP2_HCAT_RESPONSE: case NGHTTP2_HCAT_PUSH_RESPONSE: rv = nghttp2_http_on_response_headers(stream); break; case NGHTTP2_HCAT_HEADERS: if (stream->http_flags & NGHTTP2_HTTP_FLAG_EXPECT_FINAL_RESPONSE) { assert(!session->server); rv = nghttp2_http_on_response_headers(stream); } else { rv = nghttp2_http_on_trailer_headers(stream, frame); } break; default: assert(0); } if (rv == 0 && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { rv = nghttp2_http_on_remote_end_stream(stream); } } if (rv != 0) { int32_t stream_id; if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { stream_id = frame->push_promise.promised_stream_id; } else { stream_id = frame->hd.stream_id; } rv = session_handle_invalid_stream2(session, stream_id, frame, NGHTTP2_ERR_HTTP_MESSAGING); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.type == NGHTTP2_HEADERS && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); /* Don't call nghttp2_session_close_stream_if_shut_rdwr because RST_STREAM has been submitted. */ } return 0; } } rv = session_call_on_frame_received(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.type != NGHTTP2_HEADERS) { return 0; } return session_end_stream_headers_received(session, frame, stream); } int nghttp2_session_on_request_headers_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: stream_id == 0"); } /* If client receives idle stream from server, it is invalid regardless stream ID is even or odd. This is because client is not expected to receive request from server. 
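 *
 * Stream ID parity sketch (a restatement of the protocol rule, for
 * orientation): client-initiated streams carry odd IDs and
 * server-initiated ones even IDs, so for a client session a "request"
 * from the server is bogus no matter which parity it uses; the checks
 * below reject it.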
*/ if (!session->server) { if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: client received request"); } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } assert(session->server); if (!session_is_new_peer_stream_id(session, frame->hd.stream_id)) { if (frame->hd.stream_id == 0 || nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: invalid stream_id"); } /* RFC 7540 says if an endpoint receives a HEADERS with invalid * stream ID (e.g, numerically smaller than previous), it MUST * issue connection error with error code PROTOCOL_ERROR. It is a * bit hard to detect this, since we cannot remember all streams * we observed so far. * * You might imagine this is really easy. But no. HTTP/2 is * asynchronous protocol, and usually client and server do not * share the complete picture of open/closed stream status. For * example, after server sends RST_STREAM for a stream, client may * send trailer HEADERS for that stream. If naive server detects * that, and issued connection error, then it is a bug of server * implementation since client is not wrong if it did not get * RST_STREAM when it issued trailer HEADERS. * * At the moment, we are very conservative here. We only use * connection error if stream ID refers idle stream, or we are * sure that stream is half-closed(remote) or closed. Otherwise * we just ignore HEADERS for now. */ stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (stream && (stream->shut_flags & NGHTTP2_SHUT_RD)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } session->last_recv_stream_id = frame->hd.stream_id; if (session_is_incoming_concurrent_streams_max(session)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: max concurrent streams exceeded"); } if (!session_allow_incoming_new_stream(session)) { /* We just ignore stream after GOAWAY was sent */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (frame->headers.pri_spec.stream_id == frame->hd.stream_id) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: depend on itself"); } if (session_is_incoming_concurrent_streams_pending_max(session)) { return session_inflate_handle_invalid_stream(session, frame, NGHTTP2_ERR_REFUSED_STREAM); } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->headers.pri_spec, NGHTTP2_STREAM_OPENING, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } rv = nghttp2_session_adjust_closed_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } session->last_proc_stream_id = session->last_recv_stream_id; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_response_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv; /* This function is only called if stream->state == NGHTTP2_STREAM_OPENING and stream_id is local side initiated. 
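 *
 * Client-side life cycle covered by this path (sketch):
 *
 *   idle --(request HEADERS submitted)--> NGHTTP2_STREAM_OPENING
 *        --(response HEADERS received here)--> NGHTTP2_STREAM_OPENED
 *        --(END_STREAM in both directions)--> closed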
*/ assert(stream->state == NGHTTP2_STREAM_OPENING && nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)); if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "response HEADERS: stream_id == 0"); } if (stream->shut_flags & NGHTTP2_SHUT_RD) { /* half closed (remote): from the spec: If an endpoint receives additional frames for a stream that is in this state it MUST respond with a stream error (Section 5.4.2) of type STREAM_CLOSED. We go further, and make it connection error. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } stream->state = NGHTTP2_STREAM_OPENED; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_push_response_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv = 0; assert(stream->state == NGHTTP2_STREAM_RESERVED); if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "push response HEADERS: stream_id == 0"); } if (session->server) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "HEADERS: no HEADERS allowed from client in reserved state"); } if (session_is_incoming_concurrent_streams_max(session)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "push response HEADERS: max concurrent streams exceeded"); } if (!session_allow_incoming_new_stream(session)) { /* We don't accept new stream after GOAWAY was sent. */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (session_is_incoming_concurrent_streams_pending_max(session)) { return session_inflate_handle_invalid_stream(session, frame, NGHTTP2_ERR_REFUSED_STREAM); } nghttp2_stream_promise_fulfilled(stream); if (!nghttp2_session_is_my_stream_id(session, stream->stream_id)) { --session->num_incoming_reserved_streams; } ++session->num_incoming_streams; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv = 0; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "HEADERS: stream_id == 0"); } if ((stream->shut_flags & NGHTTP2_SHUT_RD)) { /* half closed (remote): from the spec: If an endpoint receives additional frames for a stream that is in this state it MUST respond with a stream error (Section 5.4.2) of type STREAM_CLOSED. we go further, and make it connection error. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } if (nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { if (stream->state == NGHTTP2_STREAM_OPENED) { rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } /* If this is remote peer initiated stream, it is OK unless it has sent END_STREAM frame already. But if stream is in NGHTTP2_STREAM_CLOSING, we discard the frame. This is a race condition. 
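
   For illustration, the race looks like this on the wire:

     local endpoint                     remote endpoint
     --------------                     ---------------
     submit RST_STREAM(id)   -------->
     stream enters CLOSING              (RST_STREAM still in flight)
                             <--------  trailer HEADERS(id)

   The trailer HEADERS is not a peer error, so it is silently discarded
   below instead of triggering another stream error.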
*/ if (stream->state != NGHTTP2_STREAM_CLOSING) { rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } static int session_process_headers_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_stream *stream; rv = nghttp2_frame_unpack_headers_payload(&frame->headers, iframe->sbuf.pos); if (rv != 0) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: could not unpack"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { frame->headers.cat = NGHTTP2_HCAT_REQUEST; return nghttp2_session_on_request_headers_received(session, frame); } if (stream->state == NGHTTP2_STREAM_RESERVED) { frame->headers.cat = NGHTTP2_HCAT_PUSH_RESPONSE; return nghttp2_session_on_push_response_headers_received(session, frame, stream); } if (stream->state == NGHTTP2_STREAM_OPENING && nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { frame->headers.cat = NGHTTP2_HCAT_RESPONSE; return nghttp2_session_on_response_headers_received(session, frame, stream); } frame->headers.cat = NGHTTP2_HCAT_HEADERS; return nghttp2_session_on_headers_received(session, frame, stream); } int nghttp2_session_on_priority_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "PRIORITY: stream_id == 0"); } if (frame->priority.pri_spec.stream_id == frame->hd.stream_id) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "depend on itself"); } if (!session->server) { /* Re-prioritization works only in server */ return session_call_on_frame_received(session, frame); } stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (!stream) { /* PRIORITY against idle stream can create anchor node in dependency tree. 
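
   For example, a client can deliberately create such an anchor through
   the public API (a sketch; error handling omitted, stream 3 chosen
   arbitrarily):

     nghttp2_priority_spec pri_spec;

     nghttp2_priority_spec_init(&pri_spec, 0, NGHTTP2_DEFAULT_WEIGHT, 0);
     nghttp2_submit_priority(session, NGHTTP2_FLAG_NONE, 3, &pri_spec);

   When the server receives the resulting PRIORITY frame while stream 3
   is still idle, the branch below opens it in NGHTTP2_STREAM_IDLE state
   so that later streams can depend on it.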
*/ if (!session_detect_idle_stream(session, frame->hd.stream_id)) { return 0; } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->priority.pri_spec, NGHTTP2_STREAM_IDLE, NULL); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } } else { rv = nghttp2_session_reprioritize_stream(session, stream, &frame->priority.pri_spec); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } } return session_call_on_frame_received(session, frame); } static int session_process_priority_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_priority_payload(&frame->priority, iframe->sbuf.pos); return nghttp2_session_on_priority_received(session, frame); } int nghttp2_session_on_rst_stream_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "RST_STREAM: stream_id == 0"); } if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "RST_STREAM: stream in idle"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream) { /* We may use stream->shut_flags for strict error checking. */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); } rv = session_call_on_frame_received(session, frame); if (rv != 0) { return rv; } rv = nghttp2_session_close_stream(session, frame->hd.stream_id, frame->rst_stream.error_code); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } static int session_process_rst_stream_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_rst_stream_payload(&frame->rst_stream, iframe->sbuf.pos); return nghttp2_session_on_rst_stream_received(session, frame); } static int update_remote_initial_window_size_func(nghttp2_map_entry *entry, void *ptr) { int rv; nghttp2_update_window_size_arg *arg; nghttp2_stream *stream; arg = (nghttp2_update_window_size_arg *)ptr; stream = (nghttp2_stream *)entry; rv = nghttp2_stream_update_remote_initial_window_size( stream, arg->new_window_size, arg->old_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(arg->session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } /* If window size gets positive, push deferred DATA frame to outbound queue. */ if (stream->remote_window_size > 0 && nghttp2_stream_check_deferred_by_flow_control(stream)) { rv = nghttp2_stream_resume_deferred_item( stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* * Updates the remote initial window size of all active streams. If * error occurs, all streams may not be updated. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. 
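 *
 * The per-stream adjustment applied by the map callback above is, in
 * effect:
 *
 *   remote_window_size += new_initial_window_size - old_initial_window_size
 *
 * which may legitimately drive remote_window_size negative; only an
 * overflow beyond NGHTTP2_MAX_WINDOW_SIZE is treated as an error
 * (RFC 7540, section 6.9.2).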
*/
static int
session_update_remote_initial_window_size(nghttp2_session *session,
                                          int32_t new_initial_window_size) {
  nghttp2_update_window_size_arg arg;

  arg.session = session;
  arg.new_window_size = new_initial_window_size;
  arg.old_window_size = (int32_t)session->remote_settings.initial_window_size;

  return nghttp2_map_each(&session->streams,
                          update_remote_initial_window_size_func, &arg);
}

static int update_local_initial_window_size_func(nghttp2_map_entry *entry,
                                                 void *ptr) {
  int rv;
  nghttp2_update_window_size_arg *arg;
  nghttp2_stream *stream;

  arg = (nghttp2_update_window_size_arg *)ptr;
  stream = (nghttp2_stream *)entry;

  rv = nghttp2_stream_update_local_initial_window_size(
      stream, arg->new_window_size, arg->old_window_size);
  if (rv != 0) {
    return nghttp2_session_add_rst_stream(arg->session, stream->stream_id,
                                          NGHTTP2_FLOW_CONTROL_ERROR);
  }

  if (!(arg->session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) &&
      stream->window_update_queued == 0 &&
      nghttp2_should_send_window_update(stream->local_window_size,
                                        stream->recv_window_size)) {

    rv = nghttp2_session_add_window_update(arg->session, NGHTTP2_FLAG_NONE,
                                           stream->stream_id,
                                           stream->recv_window_size);

    if (rv != 0) {
      return rv;
    }

    stream->recv_window_size = 0;
  }
  return 0;
}

/*
 * Updates the local initial window size of all active streams.  If an
 * error occurs, some streams may be left un-updated.
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_NOMEM
 *     Out of memory.
 */
static int
session_update_local_initial_window_size(nghttp2_session *session,
                                         int32_t new_initial_window_size,
                                         int32_t old_initial_window_size) {
  nghttp2_update_window_size_arg arg;

  arg.session = session;
  arg.new_window_size = new_initial_window_size;
  arg.old_window_size = old_initial_window_size;

  return nghttp2_map_each(&session->streams,
                          update_local_initial_window_size_func, &arg);
}

/*
 * Apply SETTINGS values |iv| having |niv| elements to the local
 * settings.  We assume that all values in |iv| are correct, since we
 * validated them in nghttp2_session_add_settings() already.
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_HEADER_COMP
 *     The header table size is out of range
 * NGHTTP2_ERR_NOMEM
 *     Out of memory
 */
int nghttp2_session_update_local_settings(nghttp2_session *session,
                                          nghttp2_settings_entry *iv,
                                          size_t niv) {
  int rv;
  size_t i;
  int32_t new_initial_window_size = -1;
  uint32_t header_table_size = 0;
  uint32_t min_header_table_size = UINT32_MAX;
  uint8_t header_table_size_seen = 0;
  /* For NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, use the value last
     seen.  For NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, use both the
     minimum value and the last seen value.
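
     For example, if one SETTINGS frame carries
     SETTINGS_HEADER_TABLE_SIZE = 4096, 0, 8192 (in that order), the
     HPACK inflater is first shrunk to the minimum (0), evicting all
     dynamic table entries, and then resized to the last value (8192),
     mirroring what the encoder on the other side must do.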
*/ for (i = 0; i < niv; ++i) { switch (iv[i].settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: header_table_size_seen = 1; header_table_size = iv[i].value; min_header_table_size = nghttp2_min(min_header_table_size, iv[i].value); break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: new_initial_window_size = (int32_t)iv[i].value; break; } } if (header_table_size_seen) { if (min_header_table_size < header_table_size) { rv = nghttp2_hd_inflate_change_table_size(&session->hd_inflater, min_header_table_size); if (rv != 0) { return rv; } } rv = nghttp2_hd_inflate_change_table_size(&session->hd_inflater, header_table_size); if (rv != 0) { return rv; } } if (new_initial_window_size != -1) { rv = session_update_local_initial_window_size( session, new_initial_window_size, (int32_t)session->local_settings.initial_window_size); if (rv != 0) { return rv; } } for (i = 0; i < niv; ++i) { switch (iv[i].settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: session->local_settings.header_table_size = iv[i].value; break; case NGHTTP2_SETTINGS_ENABLE_PUSH: session->local_settings.enable_push = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: session->local_settings.max_concurrent_streams = iv[i].value; break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: session->local_settings.initial_window_size = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: session->local_settings.max_frame_size = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: session->local_settings.max_header_list_size = iv[i].value; break; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: session->local_settings.enable_connect_protocol = iv[i].value; break; } } return 0; } int nghttp2_session_on_settings_received(nghttp2_session *session, nghttp2_frame *frame, int noack) { int rv; size_t i; nghttp2_mem *mem; nghttp2_inflight_settings *settings; mem = &session->mem; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: stream_id != 0"); } if (frame->hd.flags & NGHTTP2_FLAG_ACK) { if (frame->settings.niv != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FRAME_SIZE_ERROR, "SETTINGS: ACK and payload != 0"); } settings = session->inflight_settings_head; if (!settings) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: unexpected ACK"); } rv = nghttp2_session_update_local_settings(session, settings->iv, settings->niv); session->inflight_settings_head = settings->next; inflight_settings_del(settings, mem); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } return session_handle_invalid_connection(session, frame, rv, NULL); } return session_call_on_frame_received(session, frame); } if (!session->remote_settings_received) { session->remote_settings.max_concurrent_streams = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; session->remote_settings_received = 1; } for (i = 0; i < frame->settings.niv; ++i) { nghttp2_settings_entry *entry = &frame->settings.iv[i]; switch (entry->settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: rv = nghttp2_hd_deflate_change_table_size(&session->hd_deflater, entry->value); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } else { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_HEADER_COMP, NULL); } } session->remote_settings.header_table_size = entry->value; break; case NGHTTP2_SETTINGS_ENABLE_PUSH: if (entry->value != 0 && entry->value != 1) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: 
invalid SETTINGS_ENBLE_PUSH"); } if (!session->server && entry->value != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: server attempted to enable push"); } session->remote_settings.enable_push = entry->value; break; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: session->remote_settings.max_concurrent_streams = entry->value; break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: /* Update the initial window size of the all active streams */ /* Check that initial_window_size < (1u << 31) */ if (entry->value > NGHTTP2_MAX_WINDOW_SIZE) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FLOW_CONTROL, "SETTINGS: too large SETTINGS_INITIAL_WINDOW_SIZE"); } rv = session_update_remote_initial_window_size(session, (int32_t)entry->value); if (nghttp2_is_fatal(rv)) { return rv; } if (rv != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FLOW_CONTROL, NULL); } session->remote_settings.initial_window_size = entry->value; break; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: if (entry->value < NGHTTP2_MAX_FRAME_SIZE_MIN || entry->value > NGHTTP2_MAX_FRAME_SIZE_MAX) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: invalid SETTINGS_MAX_FRAME_SIZE"); } session->remote_settings.max_frame_size = entry->value; break; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: session->remote_settings.max_header_list_size = entry->value; break; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: if (entry->value != 0 && entry->value != 1) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: invalid SETTINGS_ENABLE_CONNECT_PROTOCOL"); } if (!session->server && session->remote_settings.enable_connect_protocol && entry->value == 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: server attempted to disable " "SETTINGS_ENABLE_CONNECT_PROTOCOL"); } session->remote_settings.enable_connect_protocol = entry->value; break; } } if (!noack && !session_is_closing(session)) { rv = nghttp2_session_add_settings(session, NGHTTP2_FLAG_ACK, NULL, 0); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_INTERNAL, NULL); } } return session_call_on_frame_received(session, frame); } static int session_process_settings_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; size_t i; nghttp2_settings_entry min_header_size_entry; if (iframe->max_niv) { min_header_size_entry = iframe->iv[iframe->max_niv - 1]; if (min_header_size_entry.value < UINT32_MAX) { /* If we have less value, then we must have SETTINGS_HEADER_TABLE_SIZE in i < iframe->niv */ for (i = 0; i < iframe->niv; ++i) { if (iframe->iv[i].settings_id == NGHTTP2_SETTINGS_HEADER_TABLE_SIZE) { break; } } assert(i < iframe->niv); if (min_header_size_entry.value != iframe->iv[i].value) { iframe->iv[iframe->niv++] = iframe->iv[i]; iframe->iv[i] = min_header_size_entry; } } } nghttp2_frame_unpack_settings_payload(&frame->settings, iframe->iv, iframe->niv); iframe->iv = NULL; iframe->niv = 0; iframe->max_niv = 0; return nghttp2_session_on_settings_received(session, frame, 0 /* ACK */); } int nghttp2_session_on_push_promise_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; nghttp2_stream *promised_stream; nghttp2_priority_spec pri_spec; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, 
frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: stream_id == 0"); } if (session->server || session->local_settings.enable_push == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: push disabled"); } if (!nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: invalid stream_id"); } if (!session_allow_incoming_new_stream(session)) { /* We just discard PUSH_PROMISE after GOAWAY was sent */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (!session_is_new_peer_stream_id(session, frame->push_promise.promised_stream_id)) { /* The spec says if an endpoint receives a PUSH_PROMISE with illegal stream ID is subject to a connection error of type PROTOCOL_ERROR. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: invalid promised_stream_id"); } if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: stream in idle"); } session->last_recv_stream_id = frame->push_promise.promised_stream_id; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING || !session->pending_enable_push || session->num_incoming_reserved_streams >= session->max_incoming_reserved_streams) { /* Currently, client does not retain closed stream, so we don't check NGHTTP2_SHUT_RD condition here. */ rv = nghttp2_session_add_rst_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_CANCEL); if (rv != 0) { return rv; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (stream->shut_flags & NGHTTP2_SHUT_RD) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "PUSH_PROMISE: stream closed"); } nghttp2_priority_spec_init(&pri_spec, stream->stream_id, NGHTTP2_DEFAULT_WEIGHT, 0); promised_stream = nghttp2_session_open_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_RESERVED, NULL); if (!promised_stream) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream(), since we don't keep closed stream in client side */ session->last_proc_stream_id = session->last_recv_stream_id; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } static int session_process_push_promise_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; rv = nghttp2_frame_unpack_push_promise_payload(&frame->push_promise, iframe->sbuf.pos); if (rv != 0) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: could not unpack"); } return nghttp2_session_on_push_promise_received(session, frame); } int nghttp2_session_on_ping_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "PING: stream_id != 0"); } if ((session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_PING_ACK) == 0 && (frame->hd.flags & NGHTTP2_FLAG_ACK) == 0 && !session_is_closing(session)) { /* Peer sent ping, so ping it back */ rv = nghttp2_session_add_ping(session, NGHTTP2_FLAG_ACK, frame->ping.opaque_data); if (rv != 0) { return rv; } } return session_call_on_frame_received(session, frame); } static int 
session_process_ping_frame(nghttp2_session *session) {
  nghttp2_inbound_frame *iframe = &session->iframe;
  nghttp2_frame *frame = &iframe->frame;

  nghttp2_frame_unpack_ping_payload(&frame->ping, iframe->sbuf.pos);

  return nghttp2_session_on_ping_received(session, frame);
}

int nghttp2_session_on_goaway_received(nghttp2_session *session,
                                       nghttp2_frame *frame) {
  int rv;

  if (frame->hd.stream_id != 0) {
    return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO,
                                             "GOAWAY: stream_id != 0");
  }

  /* The spec says endpoints MUST NOT increase the value they send in
     the last stream identifier. */
  if ((frame->goaway.last_stream_id > 0 &&
       !nghttp2_session_is_my_stream_id(session,
                                        frame->goaway.last_stream_id)) ||
      session->remote_last_stream_id < frame->goaway.last_stream_id) {
    return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO,
                                             "GOAWAY: invalid last_stream_id");
  }

  session->goaway_flags |= NGHTTP2_GOAWAY_RECV;

  session->remote_last_stream_id = frame->goaway.last_stream_id;

  rv = session_call_on_frame_received(session, frame);

  if (nghttp2_is_fatal(rv)) {
    return rv;
  }

  return session_close_stream_on_goaway(session, frame->goaway.last_stream_id,
                                        0);
}

static int session_process_goaway_frame(nghttp2_session *session) {
  nghttp2_inbound_frame *iframe = &session->iframe;
  nghttp2_frame *frame = &iframe->frame;

  nghttp2_frame_unpack_goaway_payload(&frame->goaway, iframe->sbuf.pos,
                                      iframe->lbuf.pos,
                                      nghttp2_buf_len(&iframe->lbuf));

  nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0);

  return nghttp2_session_on_goaway_received(session, frame);
}

static int
session_on_connection_window_update_received(nghttp2_session *session,
                                             nghttp2_frame *frame) {
  /* Handle connection-level flow control */
  if (frame->window_update.window_size_increment == 0) {
    return session_handle_invalid_connection(
        session, frame, NGHTTP2_ERR_PROTO,
        "WINDOW_UPDATE: window_size_increment == 0");
  }

  if (NGHTTP2_MAX_WINDOW_SIZE - frame->window_update.window_size_increment <
      session->remote_window_size) {
    return session_handle_invalid_connection(session, frame,
                                             NGHTTP2_ERR_FLOW_CONTROL, NULL);
  }

  session->remote_window_size += frame->window_update.window_size_increment;

  return session_call_on_frame_received(session, frame);
}

static int session_on_stream_window_update_received(nghttp2_session *session,
                                                    nghttp2_frame *frame) {
  int rv;
  nghttp2_stream *stream;

  if (session_detect_idle_stream(session, frame->hd.stream_id)) {
    return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO,
                                             "WINDOW_UPDATE to idle stream");
  }

  stream = nghttp2_session_get_stream(session, frame->hd.stream_id);

  if (!stream) {
    return 0;
  }

  if (state_reserved_remote(session, stream)) {
    return session_handle_invalid_connection(
        session, frame, NGHTTP2_ERR_PROTO,
        "WINDOW_UPDATE to reserved stream");
  }

  if (frame->window_update.window_size_increment == 0) {
    return session_handle_invalid_connection(
        session, frame, NGHTTP2_ERR_PROTO,
        "WINDOW_UPDATE: window_size_increment == 0");
  }

  if (NGHTTP2_MAX_WINDOW_SIZE - frame->window_update.window_size_increment <
      stream->remote_window_size) {
    return session_handle_invalid_stream(session, frame,
                                         NGHTTP2_ERR_FLOW_CONTROL);
  }

  stream->remote_window_size += frame->window_update.window_size_increment;

  if (stream->remote_window_size > 0 &&
      nghttp2_stream_check_deferred_by_flow_control(stream)) {

    rv = nghttp2_stream_resume_deferred_item(
        stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL);

    if (nghttp2_is_fatal(rv)) {
      return rv;
    }
  }
  return session_call_on_frame_received(session, frame);
}

int
nghttp2_session_on_window_update_received(nghttp2_session *session, nghttp2_frame *frame) { if (frame->hd.stream_id == 0) { return session_on_connection_window_update_received(session, frame); } else { return session_on_stream_window_update_received(session, frame); } } static int session_process_window_update_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_window_update_payload(&frame->window_update, iframe->sbuf.pos); return nghttp2_session_on_window_update_received(session, frame); } int nghttp2_session_on_altsvc_received(nghttp2_session *session, nghttp2_frame *frame) { nghttp2_ext_altsvc *altsvc; nghttp2_stream *stream; altsvc = frame->ext.payload; /* session->server case has been excluded */ if (frame->hd.stream_id == 0) { if (altsvc->origin_len == 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } } else { if (altsvc->origin_len > 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } } if (altsvc->field_value_len == 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } return session_call_on_frame_received(session, frame); } int nghttp2_session_on_origin_received(nghttp2_session *session, nghttp2_frame *frame) { return session_call_on_frame_received(session, frame); } static int session_process_altsvc_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_altsvc_payload( &frame->ext, nghttp2_get_uint16(iframe->sbuf.pos), iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf)); /* nghttp2_frame_unpack_altsvc_payload steals buffer from iframe->lbuf */ nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); return nghttp2_session_on_altsvc_received(session, frame); } static int session_process_origin_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_mem *mem = &session->mem; int rv; rv = nghttp2_frame_unpack_origin_payload(&frame->ext, iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf), mem); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } /* Ignore ORIGIN frame which cannot be parsed. */ return 0; } return nghttp2_session_on_origin_received(session, frame); } static int session_process_extension_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; rv = session_call_unpack_extension_callback(session); if (nghttp2_is_fatal(rv)) { return rv; } /* This handles the case where rv == NGHTTP2_ERR_CANCEL as well */ if (rv != 0) { return 0; } return session_call_on_frame_received(session, frame); } int nghttp2_session_on_data_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; nghttp2_stream *stream; /* We don't call on_frame_recv_callback if stream has been closed already or being closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING) { /* This should be treated as stream error, but it results in lots of RST_STREAM. So just ignore frame against nonexistent stream for now. 
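
   Ignoring here is safe for flow control because the DATA length was
   already charged against the connection-level window while the payload
   was being read in nghttp2_session_mem_recv(), so both peers keep a
   consistent view of that window even when the stream is gone.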
*/ return 0; } if (session_enforce_http_messaging(session) && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { if (nghttp2_http_on_remote_end_stream(stream) != 0) { rv = nghttp2_session_add_rst_stream(session, stream->stream_id, NGHTTP2_PROTOCOL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); /* Don't call nghttp2_session_close_stream_if_shut_rdwr because RST_STREAM has been submitted. */ return 0; } } rv = session_call_on_frame_received(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* For errors, this function only returns FATAL error. */ static int session_process_data_frame(nghttp2_session *session) { int rv; nghttp2_frame *public_data_frame = &session->iframe.frame; rv = nghttp2_session_on_data_received(session, public_data_frame); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } /* * Now we have SETTINGS synchronization, flow control error can be * detected strictly. If DATA frame is received with length > 0 and * current received window size + delta length is strictly larger than * local window size, it is subject to FLOW_CONTROL_ERROR, so return * -1. Note that local_window_size is calculated after SETTINGS ACK is * received from peer, so peer must honor this limit. If the resulting * recv_window_size is strictly larger than NGHTTP2_MAX_WINDOW_SIZE, * return -1 too. */ static int adjust_recv_window_size(int32_t *recv_window_size_ptr, size_t delta, int32_t local_window_size) { if (*recv_window_size_ptr > local_window_size - (int32_t)delta || *recv_window_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - (int32_t)delta) { return -1; } *recv_window_size_ptr += (int32_t)delta; return 0; } int nghttp2_session_update_recv_stream_window_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size, int send_window_update) { int rv; rv = adjust_recv_window_size(&stream->recv_window_size, delta_size, stream->local_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } /* We don't have to send WINDOW_UPDATE if the data received is the last chunk in the incoming stream. */ /* We have to use local_settings here because it is the constraint the remote endpoint should honor. 
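
   (nghttp2_should_send_window_update() uses a half-window heuristic,
   roughly:

     recv_window_size >= local_window_size / 2

   so WINDOW_UPDATE frames are batched instead of being emitted for
   every incoming DATA frame.)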
*/ if (send_window_update && !(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && stream->window_update_queued == 0 && nghttp2_should_send_window_update(stream->local_window_size, stream->recv_window_size)) { rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, stream->stream_id, stream->recv_window_size); if (rv != 0) { return rv; } stream->recv_window_size = 0; } return 0; } int nghttp2_session_update_recv_connection_window_size(nghttp2_session *session, size_t delta_size) { int rv; rv = adjust_recv_window_size(&session->recv_window_size, delta_size, session->local_window_size); if (rv != 0) { return nghttp2_session_terminate_session(session, NGHTTP2_FLOW_CONTROL_ERROR); } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && session->window_update_queued == 0 && nghttp2_should_send_window_update(session->local_window_size, session->recv_window_size)) { /* Use stream ID 0 to update connection-level flow control window */ rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, 0, session->recv_window_size); if (rv != 0) { return rv; } session->recv_window_size = 0; } return 0; } static int session_update_consumed_size(nghttp2_session *session, int32_t *consumed_size_ptr, int32_t *recv_window_size_ptr, uint8_t window_update_queued, int32_t stream_id, size_t delta_size, int32_t local_window_size) { int32_t recv_size; int rv; if ((size_t)*consumed_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - delta_size) { return nghttp2_session_terminate_session(session, NGHTTP2_FLOW_CONTROL_ERROR); } *consumed_size_ptr += (int32_t)delta_size; if (window_update_queued == 0) { /* recv_window_size may be smaller than consumed_size, because it may be decreased by negative value with nghttp2_submit_window_update(). */ recv_size = nghttp2_min(*consumed_size_ptr, *recv_window_size_ptr); if (nghttp2_should_send_window_update(local_window_size, recv_size)) { rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, stream_id, recv_size); if (rv != 0) { return rv; } *recv_window_size_ptr -= recv_size; *consumed_size_ptr -= recv_size; } } return 0; } static int session_update_stream_consumed_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size) { return session_update_consumed_size( session, &stream->consumed_size, &stream->recv_window_size, stream->window_update_queued, stream->stream_id, delta_size, stream->local_window_size); } static int session_update_connection_consumed_size(nghttp2_session *session, size_t delta_size) { return session_update_consumed_size( session, &session->consumed_size, &session->recv_window_size, session->window_update_queued, 0, delta_size, session->local_window_size); } /* * Checks that we can receive the DATA frame for stream, which is * indicated by |session->iframe.frame.hd.stream_id|. If it is a * connection error situation, GOAWAY frame will be issued by this * function. * * If the DATA frame is allowed, returns 0. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_IGN_PAYLOAD * The reception of DATA frame is connection error; or should be * ignored. * NGHTTP2_ERR_NOMEM * Out of memory. 
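 *
 * As a quick reference, the checks below resolve as follows (in order):
 *
 *   stream_id == 0                    -> connection error (PROTOCOL_ERROR)
 *   stream is idle                    -> connection error (PROTOCOL_ERROR)
 *   stream unknown, but known closed  -> connection error (STREAM_CLOSED)
 *   stream unknown otherwise          -> ignore payload
 *   stream half-closed (remote)       -> connection error (STREAM_CLOSED)
 *   stream reserved / not yet opened  -> connection error (PROTOCOL_ERROR)
 *   stream closing                    -> ignore payload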
*/ static int session_on_data_received_fail_fast(nghttp2_session *session) { int rv; nghttp2_stream *stream; nghttp2_inbound_frame *iframe; int32_t stream_id; const char *failure_reason; uint32_t error_code = NGHTTP2_PROTOCOL_ERROR; iframe = &session->iframe; stream_id = iframe->frame.hd.stream_id; if (stream_id == 0) { /* The spec says that if a DATA frame is received whose stream ID is 0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. */ failure_reason = "DATA: stream_id == 0"; goto fail; } if (session_detect_idle_stream(session, stream_id)) { failure_reason = "DATA: stream in idle"; error_code = NGHTTP2_PROTOCOL_ERROR; goto fail; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream && (stream->shut_flags & NGHTTP2_SHUT_RD)) { failure_reason = "DATA: stream closed"; error_code = NGHTTP2_STREAM_CLOSED; goto fail; } return NGHTTP2_ERR_IGN_PAYLOAD; } if (stream->shut_flags & NGHTTP2_SHUT_RD) { failure_reason = "DATA: stream in half-closed(remote)"; error_code = NGHTTP2_STREAM_CLOSED; goto fail; } if (nghttp2_session_is_my_stream_id(session, stream_id)) { if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_IGN_PAYLOAD; } if (stream->state != NGHTTP2_STREAM_OPENED) { failure_reason = "DATA: stream not opened"; goto fail; } return 0; } if (stream->state == NGHTTP2_STREAM_RESERVED) { failure_reason = "DATA: stream in reserved"; goto fail; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_IGN_PAYLOAD; } return 0; fail: rv = nghttp2_session_terminate_session_with_reason(session, error_code, failure_reason); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_IGN_PAYLOAD; } static size_t inbound_frame_payload_readlen(nghttp2_inbound_frame *iframe, const uint8_t *in, const uint8_t *last) { return nghttp2_min((size_t)(last - in), iframe->payloadleft); } /* * Resets iframe->sbuf and advance its mark pointer by |left| bytes. */ static void inbound_frame_set_mark(nghttp2_inbound_frame *iframe, size_t left) { nghttp2_buf_reset(&iframe->sbuf); iframe->sbuf.mark += left; } static size_t inbound_frame_buf_read(nghttp2_inbound_frame *iframe, const uint8_t *in, const uint8_t *last) { size_t readlen; readlen = nghttp2_min((size_t)(last - in), nghttp2_buf_mark_avail(&iframe->sbuf)); iframe->sbuf.last = nghttp2_cpymem(iframe->sbuf.last, in, readlen); return readlen; } /* * Unpacks SETTINGS entry in iframe->sbuf. 
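 *
 * On the wire each entry occupies NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH
 * (6) bytes:
 *
 *   +-------------------------------+
 *   |       Identifier (16)         |
 *   +-------------------------------+-------------------------------+
 *   |                        Value (32)                             |
 *   +---------------------------------------------------------------+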
*/
static void inbound_frame_set_settings_entry(nghttp2_inbound_frame *iframe) {
  nghttp2_settings_entry iv;
  nghttp2_settings_entry *min_header_table_size_entry;
  size_t i;

  nghttp2_frame_unpack_settings_entry(&iv, iframe->sbuf.pos);

  switch (iv.settings_id) {
  case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE:
  case NGHTTP2_SETTINGS_ENABLE_PUSH:
  case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS:
  case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE:
  case NGHTTP2_SETTINGS_MAX_FRAME_SIZE:
  case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE:
  case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
    break;
  default:
    DEBUGF("recv: unknown settings id=0x%02x\n", iv.settings_id);

    iframe->iv[iframe->niv++] = iv;

    return;
  }

  for (i = 0; i < iframe->niv; ++i) {
    if (iframe->iv[i].settings_id == iv.settings_id) {
      iframe->iv[i] = iv;
      break;
    }
  }

  if (i == iframe->niv) {
    iframe->iv[iframe->niv++] = iv;
  }

  if (iv.settings_id == NGHTTP2_SETTINGS_HEADER_TABLE_SIZE) {
    /* Keep track of the minimum value of SETTINGS_HEADER_TABLE_SIZE */
    min_header_table_size_entry = &iframe->iv[iframe->max_niv - 1];

    if (iv.value < min_header_table_size_entry->value) {
      min_header_table_size_entry->value = iv.value;
    }
  }
}

/*
 * Checks the PADDED flag and sets up iframe->sbuf to read the Pad
 * Length field accordingly.  If padding is set, this function returns
 * 1.  If no padding is set, this function returns 0.  On error,
 * returns -1.
 */
static int inbound_frame_handle_pad(nghttp2_inbound_frame *iframe,
                                    nghttp2_frame_hd *hd) {
  if (hd->flags & NGHTTP2_FLAG_PADDED) {
    if (hd->length < 1) {
      return -1;
    }
    inbound_frame_set_mark(iframe, 1);
    return 1;
  }
  DEBUGF("recv: no padding in payload\n");
  return 0;
}

/*
 * Computes the number of padding bytes based on flags.  This function
 * returns the calculated length if it succeeds, or -1.
 */
static ssize_t inbound_frame_compute_pad(nghttp2_inbound_frame *iframe) {
  size_t padlen;

  /* 1 for Pad Length field */
  padlen = (size_t)(iframe->sbuf.pos[0] + 1);

  DEBUGF("recv: padlen=%zu\n", padlen);

  /* We cannot use iframe->frame.hd.length because of CONTINUATION */
  if (padlen - 1 > iframe->payloadleft) {
    return -1;
  }

  iframe->padlen = padlen;

  return (ssize_t)padlen;
}

/*
 * This function returns the effective payload length in the data of
 * length |readlen| when the remaining payload is |payloadleft|.  The
 * |payloadleft| does not include |readlen|.  If padding was started
 * strictly before this data chunk, this function returns -1.
 */
static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe,
                                               size_t payloadleft,
                                               size_t readlen) {
  size_t trail_padlen =
      nghttp2_frame_trail_padlen(&iframe->frame, iframe->padlen);

  if (trail_padlen > payloadleft) {
    size_t padlen;
    padlen = trail_padlen - payloadleft;
    if (readlen < padlen) {
      return -1;
    }
    return (ssize_t)(readlen - padlen);
  }
  return (ssize_t)(readlen);
}

ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
                                 size_t inlen) {
  const uint8_t *first = in, *last = in + inlen;
  nghttp2_inbound_frame *iframe = &session->iframe;
  size_t readlen;
  ssize_t padlen;
  int rv;
  int busy = 0;
  nghttp2_frame_hd cont_hd;
  nghttp2_stream *stream;
  size_t pri_fieldlen;
  nghttp2_mem *mem;

  DEBUGF("recv: connection recv_window_size=%d, local_window=%d\n",
         session->recv_window_size, session->local_window_size);

  mem = &session->mem;

  /* We may have more idle streams than we expect (e.g., created by
     nghttp2_session_change_stream_priority() or
     nghttp2_session_create_idle_stream()).  Adjust them here.
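
     A typical caller drives this function from its read loop roughly
     like this (a sketch; read_from_network() is a placeholder and
     error handling is abbreviated):

       uint8_t buf[4096];
       ssize_t n = read_from_network(buf, sizeof(buf));
       ssize_t rv = nghttp2_session_mem_recv(session, buf, (size_t)n);
       if (rv < 0) {
         handle_fatal_error(rv);  hypothetical cleanup path
       }

     The non-negative return value is the number of bytes actually
     processed; it is smaller than n only when a callback returned
     NGHTTP2_ERR_PAUSE.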
*/ rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } if (!nghttp2_session_want_read(session)) { return (ssize_t)inlen; } for (;;) { switch (iframe->state) { case NGHTTP2_IB_READ_CLIENT_MAGIC: readlen = nghttp2_min(inlen, iframe->payloadleft); if (memcmp(&NGHTTP2_CLIENT_MAGIC[NGHTTP2_CLIENT_MAGIC_LEN - iframe->payloadleft], in, readlen) != 0) { return NGHTTP2_ERR_BAD_CLIENT_MAGIC; } iframe->payloadleft -= readlen; in += readlen; if (iframe->payloadleft == 0) { session_inbound_frame_reset(session); iframe->state = NGHTTP2_IB_READ_FIRST_SETTINGS; } break; case NGHTTP2_IB_READ_FIRST_SETTINGS: DEBUGF("recv: [IB_READ_FIRST_SETTINGS]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } if (iframe->sbuf.pos[3] != NGHTTP2_SETTINGS || (iframe->sbuf.pos[4] & NGHTTP2_FLAG_ACK)) { rv = session_call_error_callback( session, NGHTTP2_ERR_SETTINGS_EXPECTED, "Remote peer returned unexpected data while we expected " "SETTINGS frame. Perhaps, peer does not support HTTP/2 " "properly."); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "SETTINGS expected"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->state = NGHTTP2_IB_READ_HEAD; /* Fall through */ case NGHTTP2_IB_READ_HEAD: { int on_begin_frame_called = 0; DEBUGF("recv: [IB_READ_HEAD]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } nghttp2_frame_unpack_frame_hd(&iframe->frame.hd, iframe->sbuf.pos); iframe->payloadleft = iframe->frame.hd.length; DEBUGF("recv: payloadlen=%zu, type=%u, flags=0x%02x, stream_id=%d\n", iframe->frame.hd.length, iframe->frame.hd.type, iframe->frame.hd.flags, iframe->frame.hd.stream_id); if (iframe->frame.hd.length > session->local_settings.max_frame_size) { DEBUGF("recv: length is too large %zu > %u\n", iframe->frame.hd.length, session->local_settings.max_frame_size); rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_FRAME_SIZE_ERROR, "too large frame size"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } switch (iframe->frame.hd.type) { case NGHTTP2_DATA: { DEBUGF("recv: DATA\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_STREAM | NGHTTP2_FLAG_PADDED); /* Check stream is open. If it is not open or closing, ignore payload. 
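
   The ordering below matters: the fail-fast check runs before the
   padding is parsed, so DATA on a closed or unknown stream is ignored
   without ever interpreting its Pad Length field.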
*/ busy = 1; rv = session_on_data_received_fail_fast(session); if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_IGN_PAYLOAD) { DEBUGF("recv: DATA not allowed stream_id=%d\n", iframe->frame.hd.stream_id); iframe->state = NGHTTP2_IB_IGN_DATA; break; } if (nghttp2_is_fatal(rv)) { return rv; } rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "DATA: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_PAD_DATA; break; } iframe->state = NGHTTP2_IB_READ_DATA; break; } case NGHTTP2_HEADERS: DEBUGF("recv: HEADERS\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_STREAM | NGHTTP2_FLAG_END_HEADERS | NGHTTP2_FLAG_PADDED | NGHTTP2_FLAG_PRIORITY); rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_NBYTE; break; } pri_fieldlen = nghttp2_frame_priority_len(iframe->frame.hd.flags); if (pri_fieldlen > 0) { if (iframe->payloadleft < pri_fieldlen) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, pri_fieldlen); break; } /* Call on_begin_frame_callback here because session_process_headers_frame() may call on_begin_headers_callback */ rv = session_call_on_begin_frame(session, &iframe->frame.hd); if (nghttp2_is_fatal(rv)) { return rv; } on_begin_frame_called = 1; rv = session_process_headers_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PRIORITY: DEBUGF("recv: PRIORITY\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft != NGHTTP2_PRIORITY_SPECLEN) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, NGHTTP2_PRIORITY_SPECLEN); break; case NGHTTP2_RST_STREAM: case NGHTTP2_WINDOW_UPDATE: #ifdef DEBUGBUILD switch (iframe->frame.hd.type) { case NGHTTP2_RST_STREAM: DEBUGF("recv: RST_STREAM\n"); break; case NGHTTP2_WINDOW_UPDATE: DEBUGF("recv: WINDOW_UPDATE\n"); break; } #endif /* DEBUGBUILD */ iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft != 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; case NGHTTP2_SETTINGS: DEBUGF("recv: SETTINGS\n"); iframe->frame.hd.flags &= NGHTTP2_FLAG_ACK; if ((iframe->frame.hd.length % NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH) || ((iframe->frame.hd.flags & NGHTTP2_FLAG_ACK) && iframe->payloadleft > 0)) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } /* Check the settings flood counter early to be safe */ if (session->obq_flood_counter_ >= session->max_outbound_ack && !(iframe->frame.hd.flags & 
NGHTTP2_FLAG_ACK)) { return NGHTTP2_ERR_FLOODED; } iframe->state = NGHTTP2_IB_READ_SETTINGS; if (iframe->payloadleft) { nghttp2_settings_entry *min_header_table_size_entry; /* We allocate iv with additional one entry, to store the minimum header table size. */ iframe->max_niv = iframe->frame.hd.length / NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH + 1; if (iframe->max_niv - 1 > session->max_settings) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_ENHANCE_YOUR_CALM, "SETTINGS: too many setting entries"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->iv = nghttp2_mem_malloc(mem, sizeof(nghttp2_settings_entry) * iframe->max_niv); if (!iframe->iv) { return NGHTTP2_ERR_NOMEM; } min_header_table_size_entry = &iframe->iv[iframe->max_niv - 1]; min_header_table_size_entry->settings_id = NGHTTP2_SETTINGS_HEADER_TABLE_SIZE; min_header_table_size_entry->value = UINT32_MAX; inbound_frame_set_mark(iframe, NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH); break; } busy = 1; inbound_frame_set_mark(iframe, 0); break; case NGHTTP2_PUSH_PROMISE: DEBUGF("recv: PUSH_PROMISE\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_HEADERS | NGHTTP2_FLAG_PADDED); rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_NBYTE; break; } if (iframe->payloadleft < 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; case NGHTTP2_PING: DEBUGF("recv: PING\n"); iframe->frame.hd.flags &= NGHTTP2_FLAG_ACK; if (iframe->payloadleft != 8) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 8); break; case NGHTTP2_GOAWAY: DEBUGF("recv: GOAWAY\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft < 8) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 8); break; case NGHTTP2_CONTINUATION: DEBUGF("recv: unexpected CONTINUATION\n"); /* Receiving CONTINUATION in this state are subject to connection error of type PROTOCOL_ERROR */ rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "CONTINUATION: unexpected"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; default: DEBUGF("recv: extension frame\n"); if (check_ext_type_set(session->user_recv_ext_types, iframe->frame.hd.type)) { if (!session->callbacks.unpack_extension_callback) { /* Silently ignore unknown frame type. 
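
   (Reaching this branch means the application registered this frame
   type with nghttp2_option_set_user_recv_extension_type() but did not
   install a callback via
   nghttp2_session_callbacks_set_unpack_extension_callback(), so the
   payload is skipped.)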
*/ busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } busy = 1; iframe->state = NGHTTP2_IB_READ_EXTENSION_PAYLOAD; break; } else { switch (iframe->frame.hd.type) { case NGHTTP2_ALTSVC: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ALTSVC) == 0) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } DEBUGF("recv: ALTSVC\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; iframe->frame.ext.payload = &iframe->ext_frame_payload.altsvc; if (session->server) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } if (iframe->payloadleft < 2) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } busy = 1; iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 2); break; case NGHTTP2_ORIGIN: if (!(session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ORIGIN)) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } DEBUGF("recv: ORIGIN\n"); iframe->frame.ext.payload = &iframe->ext_frame_payload.origin; if (session->server || iframe->frame.hd.stream_id || (iframe->frame.hd.flags & 0xf0)) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, iframe->payloadleft); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, iframe->payloadleft); } else { busy = 1; } iframe->state = NGHTTP2_IB_READ_ORIGIN_PAYLOAD; break; default: busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } } } if (!on_begin_frame_called) { switch (iframe->state) { case NGHTTP2_IB_IGN_HEADER_BLOCK: case NGHTTP2_IB_IGN_PAYLOAD: case NGHTTP2_IB_FRAME_SIZE_ERROR: case NGHTTP2_IB_IGN_DATA: case NGHTTP2_IB_IGN_ALL: break; default: rv = session_call_on_begin_frame(session, &iframe->frame.hd); if (nghttp2_is_fatal(rv)) { return rv; } } } break; } case NGHTTP2_IB_READ_NBYTE: DEBUGF("recv: [IB_READ_NBYTE]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; iframe->payloadleft -= readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zd\n", readlen, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } switch (iframe->frame.hd.type) { case NGHTTP2_HEADERS: if (iframe->padlen == 0 && (iframe->frame.hd.flags & NGHTTP2_FLAG_PADDED)) { pri_fieldlen = nghttp2_frame_priority_len(iframe->frame.hd.flags); padlen = inbound_frame_compute_pad(iframe); if (padlen < 0 || (size_t)padlen + pri_fieldlen > 1 + iframe->payloadleft) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.headers.padlen = (size_t)padlen; if (pri_fieldlen > 0) { if (iframe->payloadleft < pri_fieldlen) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, pri_fieldlen); break; } else { /* Truncate buffers used for padding spec */ inbound_frame_set_mark(iframe, 0); } } rv = session_process_headers_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = 
NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PRIORITY: rv = session_process_priority_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_RST_STREAM: rv = session_process_rst_stream_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_PUSH_PROMISE: if (iframe->padlen == 0 && (iframe->frame.hd.flags & NGHTTP2_FLAG_PADDED)) { padlen = inbound_frame_compute_pad(iframe); if (padlen < 0 || (size_t)padlen + 4 /* promised stream id */ > 1 + iframe->payloadleft) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.push_promise.padlen = (size_t)padlen; if (iframe->payloadleft < 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; } rv = session_process_push_promise_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.push_promise.promised_stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PING: rv = session_process_ping_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_GOAWAY: { size_t debuglen; /* 8 is Last-stream-ID + Error Code */ debuglen = iframe->frame.hd.length - 8; if (debuglen > 0) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, debuglen); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, debuglen); } busy = 1; iframe->state = NGHTTP2_IB_READ_GOAWAY_DEBUG; break; } case NGHTTP2_WINDOW_UPDATE: rv = session_process_window_update_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_ALTSVC: { size_t origin_len; origin_len = nghttp2_get_uint16(iframe->sbuf.pos); DEBUGF("recv: origin_len=%zu\n", origin_len); if (origin_len > iframe->payloadleft) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } if (iframe->frame.hd.length > 2) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, iframe->frame.hd.length - 2); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, iframe->frame.hd.length); } busy = 1; iframe->state = NGHTTP2_IB_READ_ALTSVC_PAYLOAD; break; } default: /* This is unknown frame */ session_inbound_frame_reset(session); break; } break; case NGHTTP2_IB_READ_HEADER_BLOCK: case NGHTTP2_IB_IGN_HEADER_BLOCK: { ssize_t data_readlen; size_t trail_padlen; int final; #ifdef DEBUGBUILD if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { DEBUGF("recv: [IB_READ_HEADER_BLOCK]\n"); } else { DEBUGF("recv: [IB_IGN_HEADER_BLOCK]\n"); } 
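      /* In these two states the header block, possibly spanning
         CONTINUATION frames, is fed to the HPACK inflater in
         increments; trailing padding is excluded via
         inbound_frame_effective_readlen() below. */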
#endif /* DEBUGBUILD */ readlen = inbound_frame_payload_readlen(iframe, in, last); DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft - readlen); data_readlen = inbound_frame_effective_readlen( iframe, iframe->payloadleft - readlen, readlen); if (data_readlen == -1) { /* everything is padding */ data_readlen = 0; } trail_padlen = nghttp2_frame_trail_padlen(&iframe->frame, iframe->padlen); final = (iframe->frame.hd.flags & NGHTTP2_FLAG_END_HEADERS) && iframe->payloadleft - (size_t)data_readlen == trail_padlen; if (data_readlen > 0 || (data_readlen == 0 && final)) { size_t hd_proclen = 0; DEBUGF("recv: block final=%d\n", final); rv = inflate_header_block(session, &iframe->frame, &hd_proclen, (uint8_t *)in, (size_t)data_readlen, final, iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_PAUSE) { in += hd_proclen; iframe->payloadleft -= hd_proclen; return in - first; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { /* The application says no more headers. We decompress the rest of the header block but not invoke on_header_callback and on_frame_recv_callback. */ in += hd_proclen; iframe->payloadleft -= hd_proclen; /* Use promised stream ID for PUSH_PROMISE */ rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.type == NGHTTP2_PUSH_PROMISE ? iframe->frame.push_promise.promised_stream_id : iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } in += readlen; iframe->payloadleft -= readlen; if (rv == NGHTTP2_ERR_HEADER_COMP) { /* GOAWAY is already issued */ if (iframe->payloadleft == 0) { session_inbound_frame_reset(session); } else { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; } break; } } else { in += readlen; iframe->payloadleft -= readlen; } if (iframe->payloadleft) { break; } if ((iframe->frame.hd.flags & NGHTTP2_FLAG_END_HEADERS) == 0) { inbound_frame_set_mark(iframe, NGHTTP2_FRAME_HDLEN); iframe->padlen = 0; if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_EXPECT_CONTINUATION; } else { iframe->state = NGHTTP2_IB_IGN_CONTINUATION; } } else { if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { rv = session_after_header_block_received(session); if (nghttp2_is_fatal(rv)) { return rv; } } session_inbound_frame_reset(session); } break; } case NGHTTP2_IB_IGN_PAYLOAD: DEBUGF("recv: [IB_IGN_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { break; } switch (iframe->frame.hd.type) { case NGHTTP2_HEADERS: case NGHTTP2_PUSH_PROMISE: case NGHTTP2_CONTINUATION: /* Mark inflater bad so that we won't perform further decoding */ session->hd_inflater.ctx.bad = 1; break; default: break; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_FRAME_SIZE_ERROR: DEBUGF("recv: [IB_FRAME_SIZE_ERROR]\n"); rv = session_handle_frame_size_error(session); if (nghttp2_is_fatal(rv)) { return rv; } assert(iframe->state == NGHTTP2_IB_IGN_ALL); return (ssize_t)inlen; case NGHTTP2_IB_READ_SETTINGS: DEBUGF("recv: [IB_READ_SETTINGS]\n"); readlen = inbound_frame_buf_read(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if 
(nghttp2_buf_mark_avail(&iframe->sbuf)) { break; } if (readlen > 0) { inbound_frame_set_settings_entry(iframe); } if (iframe->payloadleft) { inbound_frame_set_mark(iframe, NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH); break; } rv = session_process_settings_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_GOAWAY_DEBUG: DEBUGF("recv: [IB_READ_GOAWAY_DEBUG]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_goaway_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_EXPECT_CONTINUATION: case NGHTTP2_IB_IGN_CONTINUATION: #ifdef DEBUGBUILD if (iframe->state == NGHTTP2_IB_EXPECT_CONTINUATION) { fprintf(stderr, "recv: [IB_EXPECT_CONTINUATION]\n"); } else { fprintf(stderr, "recv: [IB_IGN_CONTINUATION]\n"); } #endif /* DEBUGBUILD */ readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } nghttp2_frame_unpack_frame_hd(&cont_hd, iframe->sbuf.pos); iframe->payloadleft = cont_hd.length; DEBUGF("recv: payloadlen=%zu, type=%u, flags=0x%02x, stream_id=%d\n", cont_hd.length, cont_hd.type, cont_hd.flags, cont_hd.stream_id); if (cont_hd.type != NGHTTP2_CONTINUATION || cont_hd.stream_id != iframe->frame.hd.stream_id) { DEBUGF("recv: expected stream_id=%d, type=%d, but got stream_id=%d, " "type=%u\n", iframe->frame.hd.stream_id, NGHTTP2_CONTINUATION, cont_hd.stream_id, cont_hd.type); rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "unexpected non-CONTINUATION frame or stream_id is invalid"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } /* CONTINUATION won't bear NGHTTP2_PADDED flag */ iframe->frame.hd.flags = (uint8_t)( iframe->frame.hd.flags | (cont_hd.flags & NGHTTP2_FLAG_END_HEADERS)); iframe->frame.hd.length += cont_hd.length; busy = 1; if (iframe->state == NGHTTP2_IB_EXPECT_CONTINUATION) { iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; rv = session_call_on_begin_frame(session, &cont_hd); if (nghttp2_is_fatal(rv)) { return rv; } } else { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; } break; case NGHTTP2_IB_READ_PAD_DATA: DEBUGF("recv: [IB_READ_PAD_DATA]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; iframe->payloadleft -= readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zu\n", readlen, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } /* Pad Length field is subject to flow control */ rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } /* Pad Length field is consumed immediately */ rv = nghttp2_session_consume(session, iframe->frame.hd.stream_id, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } stream = nghttp2_session_get_stream(session, iframe->frame.hd.stream_id); if (stream) { 
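/* The DATA frame's stream may already have been closed; the check above guards against that. The 1-byte Pad Length field just read is charged against the stream-level flow-control window here, in addition to the connection-level accounting done above. */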
rv = nghttp2_session_update_recv_stream_window_size( session, stream, readlen, iframe->payloadleft || (iframe->frame.hd.flags & NGHTTP2_FLAG_END_STREAM) == 0); if (nghttp2_is_fatal(rv)) { return rv; } } busy = 1; padlen = inbound_frame_compute_pad(iframe); if (padlen < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "DATA: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.data.padlen = (size_t)padlen; iframe->state = NGHTTP2_IB_READ_DATA; break; case NGHTTP2_IB_READ_DATA: stream = nghttp2_session_get_stream(session, iframe->frame.hd.stream_id); if (!stream) { busy = 1; iframe->state = NGHTTP2_IB_IGN_DATA; break; } DEBUGF("recv: [IB_READ_DATA]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { ssize_t data_readlen; rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } rv = nghttp2_session_update_recv_stream_window_size( session, stream, readlen, iframe->payloadleft || (iframe->frame.hd.flags & NGHTTP2_FLAG_END_STREAM) == 0); if (nghttp2_is_fatal(rv)) { return rv; } data_readlen = inbound_frame_effective_readlen( iframe, iframe->payloadleft, readlen); if (data_readlen == -1) { /* everything is padding */ data_readlen = 0; } padlen = (ssize_t)readlen - data_readlen; if (padlen > 0) { /* Padding is considered as "consumed" immediately */ rv = nghttp2_session_consume(session, iframe->frame.hd.stream_id, (size_t)padlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } } DEBUGF("recv: data_readlen=%zd\n", data_readlen); if (data_readlen > 0) { if (session_enforce_http_messaging(session)) { if (nghttp2_http_on_data_chunk(stream, (size_t)data_readlen) != 0) { if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { /* Consume all data for connection immediately here */ rv = session_update_connection_consumed_size( session, (size_t)data_readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_DATA) { return (ssize_t)inlen; } } rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_PROTOCOL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; iframe->state = NGHTTP2_IB_IGN_DATA; break; } } if (session->callbacks.on_data_chunk_recv_callback) { rv = session->callbacks.on_data_chunk_recv_callback( session, iframe->frame.hd.flags, iframe->frame.hd.stream_id, in - readlen, (size_t)data_readlen, session->user_data); if (rv == NGHTTP2_ERR_PAUSE) { return in - first; } if (nghttp2_is_fatal(rv)) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } } } if (iframe->payloadleft) { break; } rv = session_process_data_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_IGN_DATA: DEBUGF("recv: [IB_IGN_DATA]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { /* Update connection-level flow control window for ignored DATA frame too */ rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return 
(ssize_t)inlen; } if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { /* Ignored DATA is considered as "consumed" immediately. */ rv = session_update_connection_consumed_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } } } if (iframe->payloadleft) { break; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_IGN_ALL: return (ssize_t)inlen; case NGHTTP2_IB_READ_EXTENSION_PAYLOAD: DEBUGF("recv: [IB_READ_EXTENSION_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { rv = session_call_on_extension_chunk_recv_callback( session, in - readlen, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (rv != 0) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } } if (iframe->payloadleft > 0) { break; } rv = session_process_extension_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_ALTSVC_PAYLOAD: DEBUGF("recv: [IB_READ_ALTSVC_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_altsvc_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_ORIGIN_PAYLOAD: DEBUGF("recv: [IB_READ_ORIGIN_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_origin_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; } if (!busy && in == last) { break; } busy = 0; } assert(in == last); return in - first; } int nghttp2_session_recv(nghttp2_session *session) { uint8_t buf[NGHTTP2_INBOUND_BUFFER_LENGTH]; while (1) { ssize_t readlen; readlen = session_recv(session, buf, sizeof(buf)); if (readlen > 0) { ssize_t proclen = nghttp2_session_mem_recv(session, buf, (size_t)readlen); if (proclen < 0) { return (int)proclen; } assert(proclen == readlen); } else if (readlen == 0 || readlen == NGHTTP2_ERR_WOULDBLOCK) { return 0; } else if (readlen == NGHTTP2_ERR_EOF) { return NGHTTP2_ERR_EOF; } else if (readlen < 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } } /* * Returns the number of active streams, which includes streams in * reserved state. */ static size_t session_get_num_active_streams(nghttp2_session *session) { return nghttp2_map_size(&session->streams) - session->num_closed_streams - session->num_idle_streams; } int nghttp2_session_want_read(nghttp2_session *session) { size_t num_active_streams; /* If this flag is set, we don't want to read. The application should drop the connection. 
*/ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_SENT) { return 0; } num_active_streams = session_get_num_active_streams(session); /* Unless termination GOAWAY is sent or received, we always want to read incoming frames. */ if (num_active_streams > 0) { return 1; } /* If there are no active streams and GOAWAY has been sent or received, we are done with this session. */ return (session->goaway_flags & (NGHTTP2_GOAWAY_SENT | NGHTTP2_GOAWAY_RECV)) == 0; } int nghttp2_session_want_write(nghttp2_session *session) { /* If this flag is set, we don't want to write any data. The application should drop the connection. */ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_SENT) { return 0; } /* * Unless termination GOAWAY is sent or received, we want to write * frames if there are pending ones. If a pending frame is a request/push * response HEADERS and the concurrent stream limit is reached, we don't * want to write them. */ return session->aob.item || nghttp2_outbound_queue_top(&session->ob_urgent) || nghttp2_outbound_queue_top(&session->ob_reg) || (!nghttp2_pq_empty(&session->root.obq) && session->remote_window_size > 0) || (nghttp2_outbound_queue_top(&session->ob_syn) && !session_is_outgoing_concurrent_streams_max(session)); } int nghttp2_session_add_ping(nghttp2_session *session, uint8_t flags, const uint8_t *opaque_data) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; if ((flags & NGHTTP2_FLAG_ACK) && session->obq_flood_counter_ >= session->max_outbound_ack) { return NGHTTP2_ERR_FLOODED; } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_ping_init(&frame->ping, flags, opaque_data); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_ping_free(&frame->ping); nghttp2_mem_free(mem, item); return rv; } if (flags & NGHTTP2_FLAG_ACK) { ++session->obq_flood_counter_; } return 0; } int nghttp2_session_add_goaway(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code, const uint8_t *opaque_data, size_t opaque_data_len, uint8_t aux_flags) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; uint8_t *opaque_data_copy = NULL; nghttp2_goaway_aux_data *aux_data; nghttp2_mem *mem; mem = &session->mem; if (nghttp2_session_is_my_stream_id(session, last_stream_id)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (opaque_data_len) { if (opaque_data_len + 8 > NGHTTP2_MAX_PAYLOADLEN) { return NGHTTP2_ERR_INVALID_ARGUMENT; } opaque_data_copy = nghttp2_mem_malloc(mem, opaque_data_len); if (opaque_data_copy == NULL) { return NGHTTP2_ERR_NOMEM; } memcpy(opaque_data_copy, opaque_data, opaque_data_len); } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { nghttp2_mem_free(mem, opaque_data_copy); return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; /* last_stream_id must not be increased from the value previously sent */ last_stream_id = nghttp2_min(last_stream_id, session->local_last_stream_id); nghttp2_frame_goaway_init(&frame->goaway, last_stream_id, error_code, opaque_data_copy, opaque_data_len); aux_data = &item->aux_data.goaway; aux_data->flags = aux_flags; rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_goaway_free(&frame->goaway, mem); nghttp2_mem_free(mem, item); return rv; } return 0; } int nghttp2_session_add_window_update(nghttp2_session *session, uint8_t flags, int32_t stream_id, int32_t
window_size_increment) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_window_update_init(&frame->window_update, flags, stream_id, window_size_increment); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_window_update_free(&frame->window_update); nghttp2_mem_free(mem, item); return rv; } return 0; } static void session_append_inflight_settings(nghttp2_session *session, nghttp2_inflight_settings *settings) { nghttp2_inflight_settings **i; for (i = &session->inflight_settings_head; *i; i = &(*i)->next) ; *i = settings; } int nghttp2_session_add_settings(nghttp2_session *session, uint8_t flags, const nghttp2_settings_entry *iv, size_t niv) { nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_settings_entry *iv_copy; size_t i; int rv; nghttp2_mem *mem; nghttp2_inflight_settings *inflight_settings = NULL; mem = &session->mem; if (flags & NGHTTP2_FLAG_ACK) { if (niv != 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (session->obq_flood_counter_ >= session->max_outbound_ack) { return NGHTTP2_ERR_FLOODED; } } if (!nghttp2_iv_check(iv, niv)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } if (niv > 0) { iv_copy = nghttp2_frame_iv_copy(iv, niv, mem); if (iv_copy == NULL) { nghttp2_mem_free(mem, item); return NGHTTP2_ERR_NOMEM; } } else { iv_copy = NULL; } if ((flags & NGHTTP2_FLAG_ACK) == 0) { rv = inflight_settings_new(&inflight_settings, iv, niv, mem); if (rv != 0) { assert(nghttp2_is_fatal(rv)); nghttp2_mem_free(mem, iv_copy); nghttp2_mem_free(mem, item); return rv; } } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_settings_init(&frame->settings, flags, iv_copy, niv); rv = nghttp2_session_add_item(session, item); if (rv != 0) { /* The only expected error is fatal one */ assert(nghttp2_is_fatal(rv)); inflight_settings_del(inflight_settings, mem); nghttp2_frame_settings_free(&frame->settings, mem); nghttp2_mem_free(mem, item); return rv; } if (flags & NGHTTP2_FLAG_ACK) { ++session->obq_flood_counter_; } else { session_append_inflight_settings(session, inflight_settings); } /* Extract NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS and ENABLE_PUSH here. We use it to refuse the incoming stream and PUSH_PROMISE with RST_STREAM. 
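Each loop below scans |iv| from the end and stops at the first match, so when a setting appears more than once, the last value submitted wins.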
*/ for (i = niv; i > 0; --i) { if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS) { session->pending_local_max_concurrent_stream = iv[i - 1].value; break; } } for (i = niv; i > 0; --i) { if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_ENABLE_PUSH) { session->pending_enable_push = (uint8_t)iv[i - 1].value; break; } } for (i = niv; i > 0; --i) { if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL) { session->pending_enable_connect_protocol = (uint8_t)iv[i - 1].value; break; } } return 0; } int nghttp2_session_pack_data(nghttp2_session *session, nghttp2_bufs *bufs, size_t datamax, nghttp2_frame *frame, nghttp2_data_aux_data *aux_data, nghttp2_stream *stream) { int rv; uint32_t data_flags; ssize_t payloadlen; ssize_t padded_payloadlen; nghttp2_buf *buf; size_t max_payloadlen; assert(bufs->head == bufs->cur); buf = &bufs->cur->buf; if (session->callbacks.read_length_callback) { payloadlen = session->callbacks.read_length_callback( session, frame->hd.type, stream->stream_id, session->remote_window_size, stream->remote_window_size, session->remote_settings.max_frame_size, session->user_data); DEBUGF("send: read_length_callback=%zd\n", payloadlen); payloadlen = nghttp2_session_enforce_flow_control_limits(session, stream, payloadlen); DEBUGF("send: read_length_callback after flow control=%zd\n", payloadlen); if (payloadlen <= 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } if ((size_t)payloadlen > nghttp2_buf_avail(buf)) { /* Resize the current buffer(s). The reason why we do +1 for buffer size is for the possible padding field. */ rv = nghttp2_bufs_realloc(&session->aob.framebufs, (size_t)(NGHTTP2_FRAME_HDLEN + 1 + payloadlen)); if (rv != 0) { DEBUGF("send: realloc buffer failed rv=%d", rv); /* If reallocation failed, old buffers are still intact. So use safe limit. */ payloadlen = (ssize_t)datamax; DEBUGF("send: use safe limit payloadlen=%zd", payloadlen); } else { assert(&session->aob.framebufs == bufs); buf = &bufs->cur->buf; } } datamax = (size_t)payloadlen; } /* Current max DATA length is less than buffer chunk size */ assert(nghttp2_buf_avail(buf) >= datamax); data_flags = NGHTTP2_DATA_FLAG_NONE; payloadlen = aux_data->data_prd.read_callback( session, frame->hd.stream_id, buf->pos, datamax, &data_flags, &aux_data->data_prd.source, session->user_data); if (payloadlen == NGHTTP2_ERR_DEFERRED || payloadlen == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE || payloadlen == NGHTTP2_ERR_PAUSE) { DEBUGF("send: DATA postponed due to %s\n", nghttp2_strerror((int)payloadlen)); return (int)payloadlen; } if (payloadlen < 0 || datamax < (size_t)payloadlen) { /* This is the error code when the callback failed.
*/ return NGHTTP2_ERR_CALLBACK_FAILURE; } buf->last = buf->pos + payloadlen; buf->pos -= NGHTTP2_FRAME_HDLEN; /* Clear flags, because this may contain previous flags of previous DATA */ frame->hd.flags = NGHTTP2_FLAG_NONE; if (data_flags & NGHTTP2_DATA_FLAG_EOF) { aux_data->eof = 1; /* If NGHTTP2_DATA_FLAG_NO_END_STREAM is set, don't set NGHTTP2_FLAG_END_STREAM */ if ((aux_data->flags & NGHTTP2_FLAG_END_STREAM) && (data_flags & NGHTTP2_DATA_FLAG_NO_END_STREAM) == 0) { frame->hd.flags |= NGHTTP2_FLAG_END_STREAM; } } if (data_flags & NGHTTP2_DATA_FLAG_NO_COPY) { if (session->callbacks.send_data_callback == NULL) { DEBUGF("NGHTTP2_DATA_FLAG_NO_COPY requires send_data_callback set\n"); return NGHTTP2_ERR_CALLBACK_FAILURE; } aux_data->no_copy = 1; } frame->hd.length = (size_t)payloadlen; frame->data.padlen = 0; max_payloadlen = nghttp2_min(datamax, frame->hd.length + NGHTTP2_MAX_PADLEN); padded_payloadlen = session_call_select_padding(session, frame, max_payloadlen); if (nghttp2_is_fatal((int)padded_payloadlen)) { return (int)padded_payloadlen; } frame->data.padlen = (size_t)(padded_payloadlen - payloadlen); nghttp2_frame_pack_frame_hd(buf->pos, &frame->hd); rv = nghttp2_frame_add_pad(bufs, &frame->hd, frame->data.padlen, aux_data->no_copy); if (rv != 0) { return rv; } reschedule_stream(stream); if (frame->hd.length == 0 && (data_flags & NGHTTP2_DATA_FLAG_EOF) && (data_flags & NGHTTP2_DATA_FLAG_NO_END_STREAM)) { /* DATA payload length is 0, and DATA frame does not bear END_STREAM. In this case, there is no point to send 0 length DATA frame. */ return NGHTTP2_ERR_CANCEL; } return 0; } void *nghttp2_session_get_stream_user_data(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream) { return stream->stream_user_data; } else { return NULL; } } int nghttp2_session_set_stream_user_data(nghttp2_session *session, int32_t stream_id, void *stream_user_data) { nghttp2_stream *stream; nghttp2_frame *frame; nghttp2_outbound_item *item; stream = nghttp2_session_get_stream(session, stream_id); if (stream) { stream->stream_user_data = stream_user_data; return 0; } if (session->server || !nghttp2_session_is_my_stream_id(session, stream_id) || !nghttp2_outbound_queue_top(&session->ob_syn)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } frame = &nghttp2_outbound_queue_top(&session->ob_syn)->frame; assert(frame->hd.type == NGHTTP2_HEADERS); if (frame->hd.stream_id > stream_id || (uint32_t)stream_id >= session->next_stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } for (item = session->ob_syn.head; item; item = item->qnext) { if (item->frame.hd.stream_id < stream_id) { continue; } if (item->frame.hd.stream_id > stream_id) { break; } item->aux_data.headers.stream_user_data = stream_user_data; return 0; } return NGHTTP2_ERR_INVALID_ARGUMENT; } int nghttp2_session_resume_data(nghttp2_session *session, int32_t stream_id) { int rv; nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL || !nghttp2_stream_check_deferred_item(stream)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } rv = nghttp2_stream_resume_deferred_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_USER); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } size_t nghttp2_session_get_outbound_queue_size(nghttp2_session *session) { return nghttp2_outbound_queue_size(&session->ob_urgent) + nghttp2_outbound_queue_size(&session->ob_reg) + nghttp2_outbound_queue_size(&session->ob_syn); /* TODO account for item attached to stream */ } int32_t 
nghttp2_session_get_stream_effective_recv_data_length(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } return stream->recv_window_size < 0 ? 0 : stream->recv_window_size; } int32_t nghttp2_session_get_stream_effective_local_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } return stream->local_window_size; } int32_t nghttp2_session_get_stream_local_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; int32_t size; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } size = stream->local_window_size - stream->recv_window_size; /* size could be negative if local endpoint reduced SETTINGS_INITIAL_WINDOW_SIZE */ if (size < 0) { return 0; } return size; } int32_t nghttp2_session_get_effective_recv_data_length(nghttp2_session *session) { return session->recv_window_size < 0 ? 0 : session->recv_window_size; } int32_t nghttp2_session_get_effective_local_window_size(nghttp2_session *session) { return session->local_window_size; } int32_t nghttp2_session_get_local_window_size(nghttp2_session *session) { return session->local_window_size - session->recv_window_size; } int32_t nghttp2_session_get_stream_remote_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } /* stream->remote_window_size can be negative when SETTINGS_INITIAL_WINDOW_SIZE is changed. */ return nghttp2_max(0, stream->remote_window_size); } int32_t nghttp2_session_get_remote_window_size(nghttp2_session *session) { return session->remote_window_size; } uint32_t nghttp2_session_get_remote_settings(nghttp2_session *session, nghttp2_settings_id id) { switch (id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: return session->remote_settings.header_table_size; case NGHTTP2_SETTINGS_ENABLE_PUSH: return session->remote_settings.enable_push; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: return session->remote_settings.max_concurrent_streams; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: return session->remote_settings.initial_window_size; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: return session->remote_settings.max_frame_size; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: return session->remote_settings.max_header_list_size; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: return session->remote_settings.enable_connect_protocol; } assert(0); abort(); /* if NDEBUG is set */ } uint32_t nghttp2_session_get_local_settings(nghttp2_session *session, nghttp2_settings_id id) { switch (id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: return session->local_settings.header_table_size; case NGHTTP2_SETTINGS_ENABLE_PUSH: return session->local_settings.enable_push; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: return session->local_settings.max_concurrent_streams; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: return session->local_settings.initial_window_size; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: return session->local_settings.max_frame_size; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: return session->local_settings.max_header_list_size; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: return session->local_settings.enable_connect_protocol; } assert(0); abort(); /* if NDEBUG is set */ } static int nghttp2_session_upgrade_internal(nghttp2_session *session, 
const uint8_t *settings_payload, size_t settings_payloadlen, void *stream_user_data) { nghttp2_stream *stream; nghttp2_frame frame; nghttp2_settings_entry *iv; size_t niv; int rv; nghttp2_priority_spec pri_spec; nghttp2_mem *mem; mem = &session->mem; if ((!session->server && session->next_stream_id != 1) || (session->server && session->last_recv_stream_id >= 1)) { return NGHTTP2_ERR_PROTO; } if (settings_payloadlen % NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH) { return NGHTTP2_ERR_INVALID_ARGUMENT; } /* SETTINGS frame contains too many settings */ if (settings_payloadlen / NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH > session->max_settings) { return NGHTTP2_ERR_TOO_MANY_SETTINGS; } rv = nghttp2_frame_unpack_settings_payload2(&iv, &niv, settings_payload, settings_payloadlen, mem); if (rv != 0) { return rv; } if (session->server) { nghttp2_frame_hd_init(&frame.hd, settings_payloadlen, NGHTTP2_SETTINGS, NGHTTP2_FLAG_NONE, 0); frame.settings.iv = iv; frame.settings.niv = niv; rv = nghttp2_session_on_settings_received(session, &frame, 1 /* No ACK */); } else { rv = nghttp2_submit_settings(session, NGHTTP2_FLAG_NONE, iv, niv); } nghttp2_mem_free(mem, iv); if (rv != 0) { return rv; } nghttp2_priority_spec_default_init(&pri_spec); stream = nghttp2_session_open_stream( session, 1, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_OPENING, session->server ? NULL : stream_user_data); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream(), since this should be the first stream to open. */ if (session->server) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); session->last_recv_stream_id = 1; session->last_proc_stream_id = 1; } else { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); session->last_sent_stream_id = 1; session->next_stream_id += 2; } return 0; } int nghttp2_session_upgrade(nghttp2_session *session, const uint8_t *settings_payload, size_t settings_payloadlen, void *stream_user_data) { int rv; nghttp2_stream *stream; rv = nghttp2_session_upgrade_internal(session, settings_payload, settings_payloadlen, stream_user_data); if (rv != 0) { return rv; } stream = nghttp2_session_get_stream(session, 1); assert(stream); /* We have no information about request header fields when the Upgrade happened. So we don't know the request method here. If the request method is HEAD, we are in trouble, because we may have a nonzero content-length header field in the response headers, and we are going to check it against the actual DATA frames, but we may get a mismatch because a HEAD response body must be empty. For this reason, nghttp2_session_upgrade() was deprecated in favor of nghttp2_session_upgrade2(), which has a |head_request| parameter to indicate whether the request method is HEAD or not.
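For new code, a typical call is nghttp2_session_upgrade2(session, settings_payload, settings_payloadlen, head_request, NULL), as defined below.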
*/ stream->http_flags |= NGHTTP2_HTTP_FLAG_METH_UPGRADE_WORKAROUND; return 0; } int nghttp2_session_upgrade2(nghttp2_session *session, const uint8_t *settings_payload, size_t settings_payloadlen, int head_request, void *stream_user_data) { int rv; nghttp2_stream *stream; rv = nghttp2_session_upgrade_internal(session, settings_payload, settings_payloadlen, stream_user_data); if (rv != 0) { return rv; } stream = nghttp2_session_get_stream(session, 1); assert(stream); if (head_request) { stream->http_flags |= NGHTTP2_HTTP_FLAG_METH_HEAD; } return 0; } int nghttp2_session_get_stream_local_close(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return -1; } return (stream->shut_flags & NGHTTP2_SHUT_WR) != 0; } int nghttp2_session_get_stream_remote_close(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return -1; } return (stream->shut_flags & NGHTTP2_SHUT_RD) != 0; } int nghttp2_session_consume(nghttp2_session *session, int32_t stream_id, size_t size) { int rv; nghttp2_stream *stream; if (stream_id == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } rv = session_update_connection_consumed_size(session, size); if (nghttp2_is_fatal(rv)) { return rv; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return 0; } rv = session_update_stream_consumed_size(session, stream, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_consume_connection(nghttp2_session *session, size_t size) { int rv; if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } rv = session_update_connection_consumed_size(session, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_consume_stream(nghttp2_session *session, int32_t stream_id, size_t size) { int rv; nghttp2_stream *stream; if (stream_id == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return 0; } rv = session_update_stream_consumed_size(session, stream, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_set_next_stream_id(nghttp2_session *session, int32_t next_stream_id) { if (next_stream_id <= 0 || session->next_stream_id > (uint32_t)next_stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (session->server) { if (next_stream_id % 2) { return NGHTTP2_ERR_INVALID_ARGUMENT; } } else if (next_stream_id % 2 == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } session->next_stream_id = (uint32_t)next_stream_id; return 0; } uint32_t nghttp2_session_get_next_stream_id(nghttp2_session *session) { return session->next_stream_id; } int32_t nghttp2_session_get_last_proc_stream_id(nghttp2_session *session) { return session->last_proc_stream_id; } nghttp2_stream *nghttp2_session_find_stream(nghttp2_session *session, int32_t stream_id) { if (stream_id == 0) { return &session->root; } return nghttp2_session_get_stream_raw(session, stream_id); } nghttp2_stream *nghttp2_session_get_root_stream(nghttp2_session *session) { return &session->root; } int nghttp2_session_check_server_session(nghttp2_session *session) { return session->server; } int nghttp2_session_change_stream_priority( 
nghttp2_session *session, int32_t stream_id, const nghttp2_priority_spec *pri_spec) { int rv; nghttp2_stream *stream; nghttp2_priority_spec pri_spec_copy; if (stream_id == 0 || stream_id == pri_spec->stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } stream = nghttp2_session_get_stream_raw(session, stream_id); if (!stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } pri_spec_copy = *pri_spec; nghttp2_priority_spec_normalize_weight(&pri_spec_copy); rv = nghttp2_session_reprioritize_stream(session, stream, &pri_spec_copy); if (nghttp2_is_fatal(rv)) { return rv; } /* We intentionally don't call nghttp2_session_adjust_idle_stream(), so that idle streams created by this function, and existing ones, are kept for the application. We will adjust the number of idle streams when nghttp2_session_mem_send or nghttp2_session_mem_recv is called. */ return 0; } int nghttp2_session_create_idle_stream(nghttp2_session *session, int32_t stream_id, const nghttp2_priority_spec *pri_spec) { nghttp2_stream *stream; nghttp2_priority_spec pri_spec_copy; if (stream_id == 0 || stream_id == pri_spec->stream_id || !session_detect_idle_stream(session, stream_id)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } pri_spec_copy = *pri_spec; nghttp2_priority_spec_normalize_weight(&pri_spec_copy); stream = nghttp2_session_open_stream(session, stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec_copy, NGHTTP2_STREAM_IDLE, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } /* We intentionally don't call nghttp2_session_adjust_idle_stream(), so that idle streams created by this function, and existing ones, are kept for the application. We will adjust the number of idle streams when nghttp2_session_mem_send or nghttp2_session_mem_recv is called. */ return 0; } size_t nghttp2_session_get_hd_inflate_dynamic_table_size(nghttp2_session *session) { return nghttp2_hd_inflate_get_dynamic_table_size(&session->hd_inflater); } size_t nghttp2_session_get_hd_deflate_dynamic_table_size(nghttp2_session *session) { return nghttp2_hd_deflate_get_dynamic_table_size(&session->hd_deflater); } void nghttp2_session_set_user_data(nghttp2_session *session, void *user_data) { session->user_data = user_data; }
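/* Illustrative sketch (not part of the library): how an application that
   disables automatic WINDOW_UPDATE emission would drive the consume API
   defined above. The callback body and variable names here are
   assumptions made for this example only.

     nghttp2_option *option;
     nghttp2_session *session;

     nghttp2_option_new(&option);
     nghttp2_option_set_no_auto_window_update(option, 1);
     nghttp2_session_client_new2(&session, callbacks, user_data, option);

     // Later, inside on_data_chunk_recv_callback, after the application
     // has actually processed |len| bytes of the chunk:
     nghttp2_session_consume(session, stream_id, len);

   As the code above shows, nghttp2_session_consume() returns
   NGHTTP2_ERR_INVALID_STATE unless the no-auto-window-update option was
   set on the session. */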
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2012 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "nghttp2_session.h" #include <string.h> #include <stddef.h> #include <stdio.h> #include <assert.h> #include <stdarg.h> #include "nghttp2_helper.h" #include "nghttp2_net.h" #include "nghttp2_priority_spec.h" #include "nghttp2_option.h" #include "nghttp2_http.h" #include "nghttp2_pq.h" #include "nghttp2_debug.h" /* * Returns non-zero if the number of outgoing opened streams is larger * than or equal to * remote_settings.max_concurrent_streams. */ static int session_is_outgoing_concurrent_streams_max(nghttp2_session *session) { return session->remote_settings.max_concurrent_streams <= session->num_outgoing_streams; } /* * Returns non-zero if the number of incoming opened streams is larger * than or equal to * local_settings.max_concurrent_streams. */ static int session_is_incoming_concurrent_streams_max(nghttp2_session *session) { return session->local_settings.max_concurrent_streams <= session->num_incoming_streams; } /* * Returns non-zero if the number of incoming opened streams is larger * than or equal to * session->pending_local_max_concurrent_stream. */ static int session_is_incoming_concurrent_streams_pending_max(nghttp2_session *session) { return session->pending_local_max_concurrent_stream <= session->num_incoming_streams; } /* * Returns non-zero if |lib_error| is non-fatal error. */ static int is_non_fatal(int lib_error_code) { return lib_error_code < 0 && lib_error_code > NGHTTP2_ERR_FATAL; } int nghttp2_is_fatal(int lib_error_code) { return lib_error_code < NGHTTP2_ERR_FATAL; } static int session_enforce_http_messaging(nghttp2_session *session) { return (session->opt_flags & NGHTTP2_OPTMASK_NO_HTTP_MESSAGING) == 0; } /* * Returns nonzero if |frame| is trailer headers. 
*/ static int session_trailer_headers(nghttp2_session *session, nghttp2_stream *stream, nghttp2_frame *frame) { if (!stream || frame->hd.type != NGHTTP2_HEADERS) { return 0; } if (session->server) { return frame->headers.cat == NGHTTP2_HCAT_HEADERS; } return frame->headers.cat == NGHTTP2_HCAT_HEADERS && (stream->http_flags & NGHTTP2_HTTP_FLAG_EXPECT_FINAL_RESPONSE) == 0; } /* Returns nonzero if the |stream| is in reserved(remote) state */ static int state_reserved_remote(nghttp2_session *session, nghttp2_stream *stream) { return stream->state == NGHTTP2_STREAM_RESERVED && !nghttp2_session_is_my_stream_id(session, stream->stream_id); } /* Returns nonzero if the |stream| is in reserved(local) state */ static int state_reserved_local(nghttp2_session *session, nghttp2_stream *stream) { return stream->state == NGHTTP2_STREAM_RESERVED && nghttp2_session_is_my_stream_id(session, stream->stream_id); } /* * Checks whether received stream_id is valid. This function returns * 1 if it succeeds, or 0. */ static int session_is_new_peer_stream_id(nghttp2_session *session, int32_t stream_id) { return stream_id != 0 && !nghttp2_session_is_my_stream_id(session, stream_id) && session->last_recv_stream_id < stream_id; } static int session_detect_idle_stream(nghttp2_session *session, int32_t stream_id) { /* Assume that stream object with stream_id does not exist */ if (nghttp2_session_is_my_stream_id(session, stream_id)) { if (session->last_sent_stream_id < stream_id) { return 1; } return 0; } if (session_is_new_peer_stream_id(session, stream_id)) { return 1; } return 0; } static int check_ext_type_set(const uint8_t *ext_types, uint8_t type) { return (ext_types[type / 8] & (1 << (type & 0x7))) > 0; } static int session_call_error_callback(nghttp2_session *session, int lib_error_code, const char *fmt, ...) { size_t bufsize; va_list ap; char *buf; int rv; nghttp2_mem *mem; if (!session->callbacks.error_callback && !session->callbacks.error_callback2) { return 0; } mem = &session->mem; va_start(ap, fmt); rv = vsnprintf(NULL, 0, fmt, ap); va_end(ap); if (rv < 0) { return NGHTTP2_ERR_NOMEM; } bufsize = (size_t)(rv + 1); buf = nghttp2_mem_malloc(mem, bufsize); if (buf == NULL) { return NGHTTP2_ERR_NOMEM; } va_start(ap, fmt); rv = vsnprintf(buf, bufsize, fmt, ap); va_end(ap); if (rv < 0) { nghttp2_mem_free(mem, buf); /* vsnprintf may return error because of various things we can imagine, but typically we don't want to drop session just for debug callback. */ DEBUGF("error_callback: vsnprintf failed. The template was %s\n", fmt); return 0; } if (session->callbacks.error_callback2) { rv = session->callbacks.error_callback2(session, lib_error_code, buf, (size_t)rv, session->user_data); } else { rv = session->callbacks.error_callback(session, buf, (size_t)rv, session->user_data); } nghttp2_mem_free(mem, buf); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_terminate_session(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code, const char *reason) { int rv; const uint8_t *debug_data; size_t debug_datalen; if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) { return 0; } /* Ignore all incoming frames because we are going to tear down the session. 
*/ session->iframe.state = NGHTTP2_IB_IGN_ALL; if (reason == NULL) { debug_data = NULL; debug_datalen = 0; } else { debug_data = (const uint8_t *)reason; debug_datalen = strlen(reason); } rv = nghttp2_session_add_goaway(session, last_stream_id, error_code, debug_data, debug_datalen, NGHTTP2_GOAWAY_AUX_TERM_ON_SEND); if (rv != 0) { return rv; } session->goaway_flags |= NGHTTP2_GOAWAY_TERM_ON_SEND; return 0; } int nghttp2_session_terminate_session(nghttp2_session *session, uint32_t error_code) { return session_terminate_session(session, session->last_proc_stream_id, error_code, NULL); } int nghttp2_session_terminate_session2(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code) { return session_terminate_session(session, last_stream_id, error_code, NULL); } int nghttp2_session_terminate_session_with_reason(nghttp2_session *session, uint32_t error_code, const char *reason) { return session_terminate_session(session, session->last_proc_stream_id, error_code, reason); } int nghttp2_session_is_my_stream_id(nghttp2_session *session, int32_t stream_id) { int rem; if (stream_id == 0) { return 0; } rem = stream_id & 0x1; if (session->server) { return rem == 0; } return rem == 1; } nghttp2_stream *nghttp2_session_get_stream(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = (nghttp2_stream *)nghttp2_map_find(&session->streams, stream_id); if (stream == NULL || (stream->flags & NGHTTP2_STREAM_FLAG_CLOSED) || stream->state == NGHTTP2_STREAM_IDLE) { return NULL; } return stream; } nghttp2_stream *nghttp2_session_get_stream_raw(nghttp2_session *session, int32_t stream_id) { return (nghttp2_stream *)nghttp2_map_find(&session->streams, stream_id); } static void session_inbound_frame_reset(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_mem *mem = &session->mem; /* A bit risky code, since if this function is called from nghttp2_session_new(), we rely on the fact that iframe->frame.hd.type is 0, so that no free is performed. 
*/ switch (iframe->frame.hd.type) { case NGHTTP2_DATA: break; case NGHTTP2_HEADERS: nghttp2_frame_headers_free(&iframe->frame.headers, mem); break; case NGHTTP2_PRIORITY: nghttp2_frame_priority_free(&iframe->frame.priority); break; case NGHTTP2_RST_STREAM: nghttp2_frame_rst_stream_free(&iframe->frame.rst_stream); break; case NGHTTP2_SETTINGS: nghttp2_frame_settings_free(&iframe->frame.settings, mem); nghttp2_mem_free(mem, iframe->iv); iframe->iv = NULL; iframe->niv = 0; iframe->max_niv = 0; break; case NGHTTP2_PUSH_PROMISE: nghttp2_frame_push_promise_free(&iframe->frame.push_promise, mem); break; case NGHTTP2_PING: nghttp2_frame_ping_free(&iframe->frame.ping); break; case NGHTTP2_GOAWAY: nghttp2_frame_goaway_free(&iframe->frame.goaway, mem); break; case NGHTTP2_WINDOW_UPDATE: nghttp2_frame_window_update_free(&iframe->frame.window_update); break; default: /* extension frame */ if (check_ext_type_set(session->user_recv_ext_types, iframe->frame.hd.type)) { nghttp2_frame_extension_free(&iframe->frame.ext); } else { switch (iframe->frame.hd.type) { case NGHTTP2_ALTSVC: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ALTSVC) == 0) { break; } nghttp2_frame_altsvc_free(&iframe->frame.ext, mem); break; case NGHTTP2_ORIGIN: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ORIGIN) == 0) { break; } nghttp2_frame_origin_free(&iframe->frame.ext, mem); break; } } break; } memset(&iframe->frame, 0, sizeof(nghttp2_frame)); memset(&iframe->ext_frame_payload, 0, sizeof(nghttp2_ext_frame_payload)); iframe->state = NGHTTP2_IB_READ_HEAD; nghttp2_buf_wrap_init(&iframe->sbuf, iframe->raw_sbuf, sizeof(iframe->raw_sbuf)); iframe->sbuf.mark += NGHTTP2_FRAME_HDLEN; nghttp2_buf_free(&iframe->lbuf, mem); nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); iframe->raw_lbuf = NULL; iframe->payloadleft = 0; iframe->padlen = 0; } static void init_settings(nghttp2_settings_storage *settings) { settings->header_table_size = NGHTTP2_HD_DEFAULT_MAX_BUFFER_SIZE; settings->enable_push = 1; settings->max_concurrent_streams = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; settings->initial_window_size = NGHTTP2_INITIAL_WINDOW_SIZE; settings->max_frame_size = NGHTTP2_MAX_FRAME_SIZE_MIN; settings->max_header_list_size = UINT32_MAX; } static void active_outbound_item_reset(nghttp2_active_outbound_item *aob, nghttp2_mem *mem) { DEBUGF("send: reset nghttp2_active_outbound_item\n"); DEBUGF("send: aob->item = %p\n", aob->item); nghttp2_outbound_item_free(aob->item, mem); nghttp2_mem_free(mem, aob->item); aob->item = NULL; nghttp2_bufs_reset(&aob->framebufs); aob->state = NGHTTP2_OB_POP_ITEM; } int nghttp2_enable_strict_preface = 1; static int session_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, int server, const nghttp2_option *option, nghttp2_mem *mem) { int rv; size_t nbuffer; size_t max_deflate_dynamic_table_size = NGHTTP2_HD_DEFAULT_MAX_DEFLATE_BUFFER_SIZE; if (mem == NULL) { mem = nghttp2_mem_default(); } *session_ptr = nghttp2_mem_calloc(mem, 1, sizeof(nghttp2_session)); if (*session_ptr == NULL) { rv = NGHTTP2_ERR_NOMEM; goto fail_session; } (*session_ptr)->mem = *mem; mem = &(*session_ptr)->mem; /* next_stream_id is initialized in either nghttp2_session_client_new2 or nghttp2_session_server_new2 */ nghttp2_stream_init(&(*session_ptr)->root, 0, NGHTTP2_STREAM_FLAG_NONE, NGHTTP2_STREAM_IDLE, NGHTTP2_DEFAULT_WEIGHT, 0, 0, NULL, mem); (*session_ptr)->remote_window_size = NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE; (*session_ptr)->recv_window_size = 0; (*session_ptr)->consumed_size = 
0; (*session_ptr)->recv_reduction = 0; (*session_ptr)->local_window_size = NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE; (*session_ptr)->goaway_flags = NGHTTP2_GOAWAY_NONE; (*session_ptr)->local_last_stream_id = (1u << 31) - 1; (*session_ptr)->remote_last_stream_id = (1u << 31) - 1; (*session_ptr)->pending_local_max_concurrent_stream = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; (*session_ptr)->pending_enable_push = 1; if (server) { (*session_ptr)->server = 1; } init_settings(&(*session_ptr)->remote_settings); init_settings(&(*session_ptr)->local_settings); (*session_ptr)->max_incoming_reserved_streams = NGHTTP2_MAX_INCOMING_RESERVED_STREAMS; /* Limit max outgoing concurrent streams to sensible value */ (*session_ptr)->remote_settings.max_concurrent_streams = 100; (*session_ptr)->max_send_header_block_length = NGHTTP2_MAX_HEADERSLEN; (*session_ptr)->max_outbound_ack = NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM; if (option) { if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_WINDOW_UPDATE) && option->no_auto_window_update) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE; } if (option->opt_set_mask & NGHTTP2_OPT_PEER_MAX_CONCURRENT_STREAMS) { (*session_ptr)->remote_settings.max_concurrent_streams = option->peer_max_concurrent_streams; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_RESERVED_REMOTE_STREAMS) { (*session_ptr)->max_incoming_reserved_streams = option->max_reserved_remote_streams; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_RECV_CLIENT_MAGIC) && option->no_recv_client_magic) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_HTTP_MESSAGING) && option->no_http_messaging) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_HTTP_MESSAGING; } if (option->opt_set_mask & NGHTTP2_OPT_USER_RECV_EXT_TYPES) { memcpy((*session_ptr)->user_recv_ext_types, option->user_recv_ext_types, sizeof((*session_ptr)->user_recv_ext_types)); } if (option->opt_set_mask & NGHTTP2_OPT_BUILTIN_RECV_EXT_TYPES) { (*session_ptr)->builtin_recv_ext_types = option->builtin_recv_ext_types; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_PING_ACK) && option->no_auto_ping_ack) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_AUTO_PING_ACK; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_SEND_HEADER_BLOCK_LENGTH) { (*session_ptr)->max_send_header_block_length = option->max_send_header_block_length; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_DEFLATE_DYNAMIC_TABLE_SIZE) { max_deflate_dynamic_table_size = option->max_deflate_dynamic_table_size; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_CLOSED_STREAMS) && option->no_closed_streams) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_CLOSED_STREAMS; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_OUTBOUND_ACK) { (*session_ptr)->max_outbound_ack = option->max_outbound_ack; } } rv = nghttp2_hd_deflate_init2(&(*session_ptr)->hd_deflater, max_deflate_dynamic_table_size, mem); if (rv != 0) { goto fail_hd_deflater; } rv = nghttp2_hd_inflate_init(&(*session_ptr)->hd_inflater, mem); if (rv != 0) { goto fail_hd_inflater; } rv = nghttp2_map_init(&(*session_ptr)->streams, mem); if (rv != 0) { goto fail_map; } nbuffer = ((*session_ptr)->max_send_header_block_length + NGHTTP2_FRAMEBUF_CHUNKLEN - 1) / NGHTTP2_FRAMEBUF_CHUNKLEN; if (nbuffer == 0) { nbuffer = 1; } /* 1 for Pad Field. 
*/ rv = nghttp2_bufs_init3(&(*session_ptr)->aob.framebufs, NGHTTP2_FRAMEBUF_CHUNKLEN, nbuffer, 1, NGHTTP2_FRAME_HDLEN + 1, mem); if (rv != 0) { goto fail_aob_framebuf; } active_outbound_item_reset(&(*session_ptr)->aob, mem); (*session_ptr)->callbacks = *callbacks; (*session_ptr)->user_data = user_data; session_inbound_frame_reset(*session_ptr); if (nghttp2_enable_strict_preface) { nghttp2_inbound_frame *iframe = &(*session_ptr)->iframe; if (server && ((*session_ptr)->opt_flags & NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC) == 0) { iframe->state = NGHTTP2_IB_READ_CLIENT_MAGIC; iframe->payloadleft = NGHTTP2_CLIENT_MAGIC_LEN; } else { iframe->state = NGHTTP2_IB_READ_FIRST_SETTINGS; } if (!server) { (*session_ptr)->aob.state = NGHTTP2_OB_SEND_CLIENT_MAGIC; nghttp2_bufs_add(&(*session_ptr)->aob.framebufs, NGHTTP2_CLIENT_MAGIC, NGHTTP2_CLIENT_MAGIC_LEN); } } return 0; fail_aob_framebuf: nghttp2_map_free(&(*session_ptr)->streams); fail_map: nghttp2_hd_inflate_free(&(*session_ptr)->hd_inflater); fail_hd_inflater: nghttp2_hd_deflate_free(&(*session_ptr)->hd_deflater); fail_hd_deflater: nghttp2_mem_free(mem, *session_ptr); fail_session: return rv; } int nghttp2_session_client_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data) { return nghttp2_session_client_new3(session_ptr, callbacks, user_data, NULL, NULL); } int nghttp2_session_client_new2(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option) { return nghttp2_session_client_new3(session_ptr, callbacks, user_data, option, NULL); } int nghttp2_session_client_new3(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option, nghttp2_mem *mem) { int rv; nghttp2_session *session; rv = session_new(&session, callbacks, user_data, 0, option, mem); if (rv != 0) { return rv; } /* IDs for use in client */ session->next_stream_id = 1; *session_ptr = session; return 0; } int nghttp2_session_server_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data) { return nghttp2_session_server_new3(session_ptr, callbacks, user_data, NULL, NULL); } int nghttp2_session_server_new2(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option) { return nghttp2_session_server_new3(session_ptr, callbacks, user_data, option, NULL); } int nghttp2_session_server_new3(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option, nghttp2_mem *mem) { int rv; nghttp2_session *session; rv = session_new(&session, callbacks, user_data, 1, option, mem); if (rv != 0) { return rv; } /* IDs for use in server */ session->next_stream_id = 2; *session_ptr = session; return 0; } static int free_streams(nghttp2_map_entry *entry, void *ptr) { nghttp2_session *session; nghttp2_stream *stream; nghttp2_outbound_item *item; nghttp2_mem *mem; session = (nghttp2_session *)ptr; mem = &session->mem; stream = (nghttp2_stream *)entry; item = stream->item; if (item && !item->queued && item != session->aob.item) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); } nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return 0; } static void ob_q_free(nghttp2_outbound_queue *q, nghttp2_mem *mem) { nghttp2_outbound_item *item, *next; for (item = q->head; item;) { next = item->qnext; nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); item = next; }
} static int inflight_settings_new(nghttp2_inflight_settings **settings_ptr, const nghttp2_settings_entry *iv, size_t niv, nghttp2_mem *mem) { *settings_ptr = nghttp2_mem_malloc(mem, sizeof(nghttp2_inflight_settings)); if (!*settings_ptr) { return NGHTTP2_ERR_NOMEM; } if (niv > 0) { (*settings_ptr)->iv = nghttp2_frame_iv_copy(iv, niv, mem); if (!(*settings_ptr)->iv) { nghttp2_mem_free(mem, *settings_ptr); return NGHTTP2_ERR_NOMEM; } } else { (*settings_ptr)->iv = NULL; } (*settings_ptr)->niv = niv; (*settings_ptr)->next = NULL; return 0; } static void inflight_settings_del(nghttp2_inflight_settings *settings, nghttp2_mem *mem) { if (!settings) { return; } nghttp2_mem_free(mem, settings->iv); nghttp2_mem_free(mem, settings); } void nghttp2_session_del(nghttp2_session *session) { nghttp2_mem *mem; nghttp2_inflight_settings *settings; if (session == NULL) { return; } mem = &session->mem; for (settings = session->inflight_settings_head; settings;) { nghttp2_inflight_settings *next = settings->next; inflight_settings_del(settings, mem); settings = next; } nghttp2_stream_free(&session->root); /* Have to free streams first, so that we can check stream->item->queued */ nghttp2_map_each_free(&session->streams, free_streams, session); nghttp2_map_free(&session->streams); ob_q_free(&session->ob_urgent, mem); ob_q_free(&session->ob_reg, mem); ob_q_free(&session->ob_syn, mem); active_outbound_item_reset(&session->aob, mem); session_inbound_frame_reset(session); nghttp2_hd_deflate_free(&session->hd_deflater); nghttp2_hd_inflate_free(&session->hd_inflater); nghttp2_bufs_free(&session->aob.framebufs); nghttp2_mem_free(mem, session); } int nghttp2_session_reprioritize_stream( nghttp2_session *session, nghttp2_stream *stream, const nghttp2_priority_spec *pri_spec_in) { int rv; nghttp2_stream *dep_stream = NULL; nghttp2_priority_spec pri_spec_default; const nghttp2_priority_spec *pri_spec = pri_spec_in; assert(pri_spec->stream_id != stream->stream_id); if (!nghttp2_stream_in_dep_tree(stream)) { return 0; } if (pri_spec->stream_id != 0) { dep_stream = nghttp2_session_get_stream_raw(session, pri_spec->stream_id); if (!dep_stream && session_detect_idle_stream(session, pri_spec->stream_id)) { nghttp2_priority_spec_default_init(&pri_spec_default); dep_stream = nghttp2_session_open_stream( session, pri_spec->stream_id, NGHTTP2_FLAG_NONE, &pri_spec_default, NGHTTP2_STREAM_IDLE, NULL); if (dep_stream == NULL) { return NGHTTP2_ERR_NOMEM; } } else if (!dep_stream || !nghttp2_stream_in_dep_tree(dep_stream)) { nghttp2_priority_spec_default_init(&pri_spec_default); pri_spec = &pri_spec_default; } } if (pri_spec->stream_id == 0) { dep_stream = &session->root; } else if (nghttp2_stream_dep_find_ancestor(dep_stream, stream)) { DEBUGF("stream: cycle detected, dep_stream(%p)=%d stream(%p)=%d\n", dep_stream, dep_stream->stream_id, stream, stream->stream_id); nghttp2_stream_dep_remove_subtree(dep_stream); rv = nghttp2_stream_dep_add_subtree(stream->dep_prev, dep_stream); if (rv != 0) { return rv; } } assert(dep_stream); if (dep_stream == stream->dep_prev && !pri_spec->exclusive) { /* This is minor optimization when just weight is changed. 
*/ nghttp2_stream_change_weight(stream, pri_spec->weight); return 0; } nghttp2_stream_dep_remove_subtree(stream); /* We have to update weight after removing stream from tree */ stream->weight = pri_spec->weight; if (pri_spec->exclusive) { rv = nghttp2_stream_dep_insert_subtree(dep_stream, stream); } else { rv = nghttp2_stream_dep_add_subtree(dep_stream, stream); } if (rv != 0) { return rv; } return 0; } int nghttp2_session_add_item(nghttp2_session *session, nghttp2_outbound_item *item) { /* TODO Return error if stream is not found for the frame requiring stream presence. */ int rv = 0; nghttp2_stream *stream; nghttp2_frame *frame; frame = &item->frame; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); switch (frame->hd.type) { case NGHTTP2_DATA: if (!stream) { return NGHTTP2_ERR_STREAM_CLOSED; } if (stream->item) { return NGHTTP2_ERR_DATA_EXIST; } rv = nghttp2_stream_attach_item(stream, item); if (rv != 0) { return rv; } return 0; case NGHTTP2_HEADERS: /* We push request HEADERS and push response HEADERS to a dedicated queue because their transmission is affected by SETTINGS_MAX_CONCURRENT_STREAMS */ /* TODO If 2 HEADERS are submitted for reserved stream, then both of them are queued into ob_syn, which is not desirable. */ if (frame->headers.cat == NGHTTP2_HCAT_REQUEST || (stream && stream->state == NGHTTP2_STREAM_RESERVED)) { nghttp2_outbound_queue_push(&session->ob_syn, item); item->queued = 1; return 0; } nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; case NGHTTP2_SETTINGS: case NGHTTP2_PING: nghttp2_outbound_queue_push(&session->ob_urgent, item); item->queued = 1; return 0; case NGHTTP2_RST_STREAM: if (stream) { stream->state = NGHTTP2_STREAM_CLOSING; } nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; case NGHTTP2_PUSH_PROMISE: { nghttp2_headers_aux_data *aux_data; nghttp2_priority_spec pri_spec; aux_data = &item->aux_data.headers; if (!stream) { return NGHTTP2_ERR_STREAM_CLOSED; } nghttp2_priority_spec_init(&pri_spec, stream->stream_id, NGHTTP2_DEFAULT_WEIGHT, 0); if (!nghttp2_session_open_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_RESERVED, aux_data->stream_user_data)) { return NGHTTP2_ERR_NOMEM; } /* We don't have to call nghttp2_session_adjust_closed_stream() here, since stream->stream_id is local stream_id, and it does not affect closed stream count. */ nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; } case NGHTTP2_WINDOW_UPDATE: if (stream) { stream->window_update_queued = 1; } else if (frame->hd.stream_id == 0) { session->window_update_queued = 1; } nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; default: nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; } } int nghttp2_session_add_rst_stream(nghttp2_session *session, int32_t stream_id, uint32_t error_code) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_stream *stream; nghttp2_mem *mem; mem = &session->mem; stream = nghttp2_session_get_stream(session, stream_id); if (stream && stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } /* Cancel pending request HEADERS in ob_syn if this RST_STREAM refers to that stream.
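Since the canceled HEADERS has not been sent yet, it is simply marked canceled and dropped later, without ever reaching the wire.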
*/ if (!session->server && nghttp2_session_is_my_stream_id(session, stream_id) && nghttp2_outbound_queue_top(&session->ob_syn)) { nghttp2_headers_aux_data *aux_data; nghttp2_frame *headers_frame; headers_frame = &nghttp2_outbound_queue_top(&session->ob_syn)->frame; assert(headers_frame->hd.type == NGHTTP2_HEADERS); if (headers_frame->hd.stream_id <= stream_id && (uint32_t)stream_id < session->next_stream_id) { for (item = session->ob_syn.head; item; item = item->qnext) { aux_data = &item->aux_data.headers; if (item->frame.hd.stream_id < stream_id) { continue; } /* stream_id in ob_syn queue must be strictly increasing. If we found larger ID, then we can break here. */ if (item->frame.hd.stream_id > stream_id || aux_data->canceled) { break; } aux_data->error_code = error_code; aux_data->canceled = 1; return 0; } } } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_rst_stream_init(&frame->rst_stream, stream_id, error_code); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_rst_stream_free(&frame->rst_stream); nghttp2_mem_free(mem, item); return rv; } return 0; } nghttp2_stream *nghttp2_session_open_stream(nghttp2_session *session, int32_t stream_id, uint8_t flags, nghttp2_priority_spec *pri_spec_in, nghttp2_stream_state initial_state, void *stream_user_data) { int rv; nghttp2_stream *stream; nghttp2_stream *dep_stream = NULL; int stream_alloc = 0; nghttp2_priority_spec pri_spec_default; nghttp2_priority_spec *pri_spec = pri_spec_in; nghttp2_mem *mem; mem = &session->mem; stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream) { assert(stream->state == NGHTTP2_STREAM_IDLE); assert(nghttp2_stream_in_dep_tree(stream)); nghttp2_session_detach_idle_stream(session, stream); rv = nghttp2_stream_dep_remove(stream); if (rv != 0) { return NULL; } } else { stream = nghttp2_mem_malloc(mem, sizeof(nghttp2_stream)); if (stream == NULL) { return NULL; } stream_alloc = 1; } if (pri_spec->stream_id != 0) { dep_stream = nghttp2_session_get_stream_raw(session, pri_spec->stream_id); if (!dep_stream && session_detect_idle_stream(session, pri_spec->stream_id)) { /* Depends on idle stream, which does not exist in memory. Assign default priority for it. */ nghttp2_priority_spec_default_init(&pri_spec_default); dep_stream = nghttp2_session_open_stream( session, pri_spec->stream_id, NGHTTP2_FLAG_NONE, &pri_spec_default, NGHTTP2_STREAM_IDLE, NULL); if (dep_stream == NULL) { if (stream_alloc) { nghttp2_mem_free(mem, stream); } return NULL; } } else if (!dep_stream || !nghttp2_stream_in_dep_tree(dep_stream)) { /* If dep_stream is not part of dependency tree, stream will get default priority. This handles the case when pri_spec->stream_id == stream_id. This happens because we don't check pri_spec->stream_id against new stream ID in nghttp2_submit_request. This also handles the case when idle stream created by PRIORITY frame was opened. Somehow we first remove the idle stream from dependency tree. This is done to simplify code base, but ideally we should retain old dependency. But I'm not sure this adds values. 
*/ nghttp2_priority_spec_default_init(&pri_spec_default); pri_spec = &pri_spec_default; } } if (initial_state == NGHTTP2_STREAM_RESERVED) { flags |= NGHTTP2_STREAM_FLAG_PUSH; } if (stream_alloc) { nghttp2_stream_init(stream, stream_id, flags, initial_state, pri_spec->weight, (int32_t)session->remote_settings.initial_window_size, (int32_t)session->local_settings.initial_window_size, stream_user_data, mem); rv = nghttp2_map_insert(&session->streams, &stream->map_entry); if (rv != 0) { nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return NULL; } } else { stream->flags = flags; stream->state = initial_state; stream->weight = pri_spec->weight; stream->stream_user_data = stream_user_data; } switch (initial_state) { case NGHTTP2_STREAM_RESERVED: if (nghttp2_session_is_my_stream_id(session, stream_id)) { /* reserved (local) */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); } else { /* reserved (remote) */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); ++session->num_incoming_reserved_streams; } /* A reserved stream does not count toward the concurrent streams limit. That is one of the DoS vectors. */ break; case NGHTTP2_STREAM_IDLE: /* An idle stream does not count toward the concurrent streams limit. It is used as an anchor node in the dependency tree. */ nghttp2_session_keep_idle_stream(session, stream); break; default: if (nghttp2_session_is_my_stream_id(session, stream_id)) { ++session->num_outgoing_streams; } else { ++session->num_incoming_streams; } } if (pri_spec->stream_id == 0) { dep_stream = &session->root; } assert(dep_stream); if (pri_spec->exclusive) { rv = nghttp2_stream_dep_insert(dep_stream, stream); if (rv != 0) { return NULL; } } else { nghttp2_stream_dep_add(dep_stream, stream); } return stream; } int nghttp2_session_close_stream(nghttp2_session *session, int32_t stream_id, uint32_t error_code) { int rv; nghttp2_stream *stream; nghttp2_mem *mem; int is_my_stream_id; mem = &session->mem; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } DEBUGF("stream: stream(%p)=%d close\n", stream, stream->stream_id); if (stream->item) { nghttp2_outbound_item *item; item = stream->item; rv = nghttp2_stream_detach_item(stream); if (rv != 0) { return rv; } /* If item is queued, it will be deleted when it is popped (session_prep_frame() will fail). If session->aob.item points to this item, let active_outbound_item_reset() free the item. */ if (!item->queued && item != session->aob.item) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); } } /* We call on_stream_close_callback even if stream->state is NGHTTP2_STREAM_INITIAL. This can happen when a local endpoint receives RST_STREAM while it is still sending the request HEADERS. It may be a PROTOCOL_ERROR, but without notifying the closure, the stream will hang in the local endpoint.
*/ if (session->callbacks.on_stream_close_callback) { if (session->callbacks.on_stream_close_callback( session, stream_id, error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } is_my_stream_id = nghttp2_session_is_my_stream_id(session, stream_id); /* pushed streams which are not opened yet are not counted toward the max concurrent limit */ if ((stream->flags & NGHTTP2_STREAM_FLAG_PUSH)) { if (!is_my_stream_id) { --session->num_incoming_reserved_streams; } } else { if (is_my_stream_id) { --session->num_outgoing_streams; } else { --session->num_incoming_streams; } } /* Closes both directions just in case they are not closed yet */ stream->flags |= NGHTTP2_STREAM_FLAG_CLOSED; if ((session->opt_flags & NGHTTP2_OPTMASK_NO_CLOSED_STREAMS) == 0 && session->server && !is_my_stream_id && nghttp2_stream_in_dep_tree(stream)) { /* On the server side, retain at most MAX_CONCURRENT_STREAMS closed streams, combined with the currently active incoming streams, to make the dependency tree work better. */ nghttp2_session_keep_closed_stream(session, stream); } else { rv = nghttp2_session_destroy_stream(session, stream); if (rv != 0) { return rv; } } return 0; } int nghttp2_session_destroy_stream(nghttp2_session *session, nghttp2_stream *stream) { nghttp2_mem *mem; int rv; DEBUGF("stream: destroy closed stream(%p)=%d\n", stream, stream->stream_id); mem = &session->mem; if (nghttp2_stream_in_dep_tree(stream)) { rv = nghttp2_stream_dep_remove(stream); if (rv != 0) { return rv; } } nghttp2_map_remove(&session->streams, stream->stream_id); nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return 0; } void nghttp2_session_keep_closed_stream(nghttp2_session *session, nghttp2_stream *stream) { DEBUGF("stream: keep closed stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); if (session->closed_stream_tail) { session->closed_stream_tail->closed_next = stream; stream->closed_prev = session->closed_stream_tail; } else { session->closed_stream_head = stream; } session->closed_stream_tail = stream; ++session->num_closed_streams; } void nghttp2_session_keep_idle_stream(nghttp2_session *session, nghttp2_stream *stream) { DEBUGF("stream: keep idle stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); if (session->idle_stream_tail) { session->idle_stream_tail->closed_next = stream; stream->closed_prev = session->idle_stream_tail; } else { session->idle_stream_head = stream; } session->idle_stream_tail = stream; ++session->num_idle_streams; } void nghttp2_session_detach_idle_stream(nghttp2_session *session, nghttp2_stream *stream) { nghttp2_stream *prev_stream, *next_stream; DEBUGF("stream: detach idle stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); prev_stream = stream->closed_prev; next_stream = stream->closed_next; if (prev_stream) { prev_stream->closed_next = next_stream; } else { session->idle_stream_head = next_stream; } if (next_stream) { next_stream->closed_prev = prev_stream; } else { session->idle_stream_tail = prev_stream; } stream->closed_prev = NULL; stream->closed_next = NULL; --session->num_idle_streams; } int nghttp2_session_adjust_closed_stream(nghttp2_session *session) { size_t num_stream_max; int rv; if (session->local_settings.max_concurrent_streams == NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS) { num_stream_max = session->pending_local_max_concurrent_stream; } else { num_stream_max = session->local_settings.max_concurrent_streams; } DEBUGF("stream: adjusting kept closed streams num_closed_streams=%zu, " "num_incoming_streams=%zu, max_concurrent_streams=%zu\n", session->num_closed_streams, session->num_incoming_streams, num_stream_max); while (session->num_closed_streams > 0 && session->num_closed_streams + session->num_incoming_streams > num_stream_max) { nghttp2_stream *head_stream; nghttp2_stream *next; head_stream = session->closed_stream_head; assert(head_stream); next = head_stream->closed_next; rv = nghttp2_session_destroy_stream(session, head_stream); if (rv != 0) { return rv; } /* head_stream is now freed */ session->closed_stream_head = next; if (session->closed_stream_head) { session->closed_stream_head->closed_prev = NULL; } else { session->closed_stream_tail = NULL; } --session->num_closed_streams; } return 0; } int nghttp2_session_adjust_idle_stream(nghttp2_session *session) { size_t max; int rv; /* Make the minimum number of idle streams 16, and the maximum 100, which are arbitrarily chosen numbers. */ max = nghttp2_min( 100, nghttp2_max( 16, nghttp2_min(session->local_settings.max_concurrent_streams, session->pending_local_max_concurrent_stream))); DEBUGF("stream: adjusting kept idle streams num_idle_streams=%zu, max=%zu\n", session->num_idle_streams, max); while (session->num_idle_streams > max) { nghttp2_stream *head; nghttp2_stream *next; head = session->idle_stream_head; assert(head); next = head->closed_next; rv = nghttp2_session_destroy_stream(session, head); if (rv != 0) { return rv; } /* head is now destroyed */ session->idle_stream_head = next; if (session->idle_stream_head) { session->idle_stream_head->closed_prev = NULL; } else { session->idle_stream_tail = NULL; } --session->num_idle_streams; } return 0; } /* * Closes the stream with stream ID |stream_id| if both transmission and * reception of the stream were disallowed. The |error_code| indicates * the reason for the closure. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_INVALID_ARGUMENT * The stream is not found. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. */ int nghttp2_session_close_stream_if_shut_rdwr(nghttp2_session *session, nghttp2_stream *stream) { if ((stream->shut_flags & NGHTTP2_SHUT_RDWR) == NGHTTP2_SHUT_RDWR) { return nghttp2_session_close_stream(session, stream->stream_id, NGHTTP2_NO_ERROR); } return 0; } /* * Returns nonzero if the local endpoint allows reception of a new stream * from the remote peer. */ static int session_allow_incoming_new_stream(nghttp2_session *session) { return (session->goaway_flags & (NGHTTP2_GOAWAY_TERM_ON_SEND | NGHTTP2_GOAWAY_SENT)) == 0; } /* * This function returns nonzero if the session is closing. */ static int session_is_closing(nghttp2_session *session) { return (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) != 0 || (nghttp2_session_want_read(session) == 0 && nghttp2_session_want_write(session) == 0); } /* * Check that we can send a frame to the |stream|. This function * returns 0 if we can send a frame to the |stream|, or one of the * following negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed. * NGHTTP2_ERR_STREAM_SHUT_WR * The stream is half-closed for transmission. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing.
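* * This check is shared by the HEADERS, PUSH_PROMISE and DATA send predicates below, each of which layers its own state checks on top of it.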
*/ static int session_predicate_for_stream_send(nghttp2_session *session, nghttp2_stream *stream) { if (stream == NULL) { return NGHTTP2_ERR_STREAM_CLOSED; } if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } if (stream->shut_flags & NGHTTP2_SHUT_WR) { return NGHTTP2_ERR_STREAM_SHUT_WR; } return 0; } int nghttp2_session_check_request_allowed(nghttp2_session *session) { return !session->server && session->next_stream_id <= INT32_MAX && (session->goaway_flags & NGHTTP2_GOAWAY_RECV) == 0 && !session_is_closing(session); } /* * This function checks request HEADERS frame, which opens stream, can * be sent at this time. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because of GOAWAY: session is * going down or received last_stream_id is strictly less than * frame->hd.stream_id. * NGHTTP2_ERR_STREAM_CLOSING * request HEADERS was canceled by RST_STREAM while it is in queue. */ static int session_predicate_request_headers_send(nghttp2_session *session, nghttp2_outbound_item *item) { if (item->aux_data.headers.canceled) { return NGHTTP2_ERR_STREAM_CLOSING; } /* If we are terminating session (NGHTTP2_GOAWAY_TERM_ON_SEND), GOAWAY was received from peer, or session is about to close, new request is not allowed. */ if ((session->goaway_flags & NGHTTP2_GOAWAY_RECV) || session_is_closing(session)) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks HEADERS, which is the first frame from the * server, with the |stream| can be sent at this time. The |stream| * can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_INVALID_STREAM_ID * The stream ID is invalid. * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. * NGHTTP2_ERR_PROTO * Client side attempted to send response. */ static int session_predicate_response_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (!session->server) { return NGHTTP2_ERR_PROTO; } if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { return NGHTTP2_ERR_INVALID_STREAM_ID; } switch (stream->state) { case NGHTTP2_STREAM_OPENING: return 0; case NGHTTP2_STREAM_CLOSING: return NGHTTP2_ERR_STREAM_CLOSING; default: return NGHTTP2_ERR_INVALID_STREAM_STATE; } } /* * This function checks HEADERS for reserved stream can be sent. The * |stream| must be reserved state and the |session| is server side. * The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed. * NGHTTP2_ERR_STREAM_SHUT_WR * The stream is half-closed for transmission. * NGHTTP2_ERR_PROTO * The stream is not reserved state * NGHTTP2_ERR_STREAM_CLOSED * RST_STREAM was queued for this stream. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because GOAWAY is already sent or * received. 
* NGHTTP2_ERR_PROTO * Client side attempted to send push response. */ static int session_predicate_push_response_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; /* TODO Should disallow HEADERS if GOAWAY has already been issued? */ rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (!session->server) { return NGHTTP2_ERR_PROTO; } if (stream->state != NGHTTP2_STREAM_RESERVED) { return NGHTTP2_ERR_PROTO; } if (session->goaway_flags & NGHTTP2_GOAWAY_RECV) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks HEADERS, which is neither stream-opening nor * first response header, with the |stream| can be sent at this time. * The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. */ static int session_predicate_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); switch (stream->state) { case NGHTTP2_STREAM_OPENED: return 0; case NGHTTP2_STREAM_CLOSING: return NGHTTP2_ERR_STREAM_CLOSING; default: if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { return 0; } return NGHTTP2_ERR_INVALID_STREAM_STATE; } } /* * This function checks PUSH_PROMISE frame |frame| with the |stream| * can be sent at this time. The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because GOAWAY is already sent or * received. * NGHTTP2_ERR_PROTO * The client side attempts to send PUSH_PROMISE, or the server * sends PUSH_PROMISE for the stream not initiated by the client. * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_PUSH_DISABLED * The remote peer disabled reception of PUSH_PROMISE. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. */ static int session_predicate_push_promise_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; if (!session->server) { return NGHTTP2_ERR_PROTO; } rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (session->remote_settings.enable_push == 0) { return NGHTTP2_ERR_PUSH_DISABLED; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } if (session->goaway_flags & NGHTTP2_GOAWAY_RECV) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks WINDOW_UPDATE with the stream ID |stream_id| * can be sent at this time. Note that END_STREAM flag of the previous * frame does not affect the transmission of the WINDOW_UPDATE frame. 
* * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. */ static int session_predicate_window_update_send(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } if (stream_id == 0) { /* Connection-level window update */ return 0; } stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return NGHTTP2_ERR_STREAM_CLOSED; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } if (state_reserved_local(session, stream)) { return NGHTTP2_ERR_INVALID_STREAM_STATE; } return 0; } static int session_predicate_altsvc_send(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } if (stream_id == 0) { return 0; } stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return NGHTTP2_ERR_STREAM_CLOSED; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } return 0; } static int session_predicate_origin_send(nghttp2_session *session) { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } return 0; } /* Take into account the SETTINGS max frame size and both stream- and connection-level flow control here */ static ssize_t nghttp2_session_enforce_flow_control_limits(nghttp2_session *session, nghttp2_stream *stream, ssize_t requested_window_size) { DEBUGF("send: remote windowsize connection=%d, remote max framesize=%u, " "stream(id %d)=%d\n", session->remote_window_size, session->remote_settings.max_frame_size, stream->stream_id, stream->remote_window_size); return nghttp2_min(nghttp2_min(nghttp2_min(requested_window_size, stream->remote_window_size), session->remote_window_size), (int32_t)session->remote_settings.max_frame_size); } /* * Returns the maximum length of the next data read. If connection-level * and/or stream-level flow control is enabled, the return value takes * those current window sizes into account. The remote setting for max * frame size is also taken into account. */ static size_t nghttp2_session_next_data_read(nghttp2_session *session, nghttp2_stream *stream) { ssize_t window_size; window_size = nghttp2_session_enforce_flow_control_limits( session, stream, NGHTTP2_DATA_PAYLOADLEN); DEBUGF("send: available window=%zd\n", window_size); return window_size > 0 ? (size_t)window_size : 0; } /* * This function checks whether DATA with the |stream| can be sent at this * time. The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with the END_STREAM flag set has already been sent) * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing.
*/ static int nghttp2_session_predicate_data_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { /* Request body data */ /* If stream->state is NGHTTP2_STREAM_CLOSING, RST_STREAM was queued but not yet sent. In this case, we won't send DATA frames. */ if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } if (stream->state == NGHTTP2_STREAM_RESERVED) { return NGHTTP2_ERR_INVALID_STREAM_STATE; } return 0; } /* Response body data */ if (stream->state == NGHTTP2_STREAM_OPENED) { return 0; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } return NGHTTP2_ERR_INVALID_STREAM_STATE; } static ssize_t session_call_select_padding(nghttp2_session *session, const nghttp2_frame *frame, size_t max_payloadlen) { ssize_t rv; if (frame->hd.length >= max_payloadlen) { return (ssize_t)frame->hd.length; } if (session->callbacks.select_padding_callback) { size_t max_paddedlen; max_paddedlen = nghttp2_min(frame->hd.length + NGHTTP2_MAX_PADLEN, max_payloadlen); rv = session->callbacks.select_padding_callback( session, frame, max_paddedlen, session->user_data); if (rv < (ssize_t)frame->hd.length || rv > (ssize_t)max_paddedlen) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return rv; } return (ssize_t)frame->hd.length; } /* Add padding to HEADERS or PUSH_PROMISE. We use frame->headers.padlen in this function to use the fact that frame->push_promise has also padlen in the same position. */ static int session_headers_add_pad(nghttp2_session *session, nghttp2_frame *frame) { int rv; ssize_t padded_payloadlen; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; size_t padlen; size_t max_payloadlen; aob = &session->aob; framebufs = &aob->framebufs; max_payloadlen = nghttp2_min(NGHTTP2_MAX_PAYLOADLEN, frame->hd.length + NGHTTP2_MAX_PADLEN); padded_payloadlen = session_call_select_padding(session, frame, max_payloadlen); if (nghttp2_is_fatal((int)padded_payloadlen)) { return (int)padded_payloadlen; } padlen = (size_t)padded_payloadlen - frame->hd.length; DEBUGF("send: padding selected: payloadlen=%zd, padlen=%zu\n", padded_payloadlen, padlen); rv = nghttp2_frame_add_pad(framebufs, &frame->hd, padlen, 0); if (rv != 0) { return rv; } frame->headers.padlen = padlen; return 0; } static size_t session_estimate_headers_payload(nghttp2_session *session, const nghttp2_nv *nva, size_t nvlen, size_t additional) { return nghttp2_hd_deflate_bound(&session->hd_deflater, nva, nvlen) + additional; } static int session_pack_extension(nghttp2_session *session, nghttp2_bufs *bufs, nghttp2_frame *frame) { ssize_t rv; nghttp2_buf *buf; size_t buflen; size_t framelen; assert(session->callbacks.pack_extension_callback); buf = &bufs->head->buf; buflen = nghttp2_min(nghttp2_buf_avail(buf), NGHTTP2_MAX_PAYLOADLEN); rv = session->callbacks.pack_extension_callback(session, buf->last, buflen, frame, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return (int)rv; } if (rv < 0 || (size_t)rv > buflen) { return NGHTTP2_ERR_CALLBACK_FAILURE; } framelen = (size_t)rv; frame->hd.length = framelen; assert(buf->pos == buf->last); buf->last += framelen; buf->pos -= NGHTTP2_FRAME_HDLEN; nghttp2_frame_pack_frame_hd(buf->pos, &frame->hd); return 0; } /* * This function serializes frame for transmission. 
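* * For DATA frames, the payload length is first clamped by nghttp2_session_next_data_read(), which takes the minimum of the stream window, the connection window and the remote SETTINGS_MAX_FRAME_SIZE (e.g., a stream window of 100, a connection window of 65535 and a max frame size of 16384 allow 100 bytes); if the result is 0, the item is deferred with NGHTTP2_ERR_DEFERRED instead of being serialized.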
* * This function returns 0 if it succeeds, or one of negative error * codes, including both fatal and non-fatal ones. */ static int session_prep_frame(nghttp2_session *session, nghttp2_outbound_item *item) { int rv; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; frame = &item->frame; switch (frame->hd.type) { case NGHTTP2_DATA: { size_t next_readmax; nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream) { assert(stream->item == item); } rv = nghttp2_session_predicate_data_send(session, stream); if (rv != 0) { // If stream was already closed, nghttp2_session_get_stream() // returns NULL, but item is still attached to the stream. // Search stream including closed again. stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (stream) { int rv2; rv2 = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv2)) { return rv2; } } return rv; } /* Assuming stream is not NULL */ assert(stream); next_readmax = nghttp2_session_next_data_read(session, stream); if (next_readmax == 0) { /* This must be true since we only pop DATA frame item from queue when session->remote_window_size > 0 */ assert(session->remote_window_size > 0); rv = nghttp2_stream_defer_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } session->aob.item = NULL; active_outbound_item_reset(&session->aob, mem); return NGHTTP2_ERR_DEFERRED; } rv = nghttp2_session_pack_data(session, &session->aob.framebufs, next_readmax, frame, &item->aux_data.data, stream); if (rv == NGHTTP2_ERR_PAUSE) { return rv; } if (rv == NGHTTP2_ERR_DEFERRED) { rv = nghttp2_stream_defer_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_USER); if (nghttp2_is_fatal(rv)) { return rv; } session->aob.item = NULL; active_outbound_item_reset(&session->aob, mem); return NGHTTP2_ERR_DEFERRED; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_add_rst_stream(session, frame->hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } if (rv != 0) { int rv2; rv2 = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv2)) { return rv2; } return rv; } return 0; } case NGHTTP2_HEADERS: { nghttp2_headers_aux_data *aux_data; size_t estimated_payloadlen; aux_data = &item->aux_data.headers; if (frame->headers.cat == NGHTTP2_HCAT_REQUEST) { /* initial HEADERS, which opens stream */ nghttp2_stream *stream; stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->headers.pri_spec, NGHTTP2_STREAM_INITIAL, aux_data->stream_user_data); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream() here, since we don't keep closed stream in client side */ rv = session_predicate_request_headers_send(session, item); if (rv != 0) { return rv; } if (session_enforce_http_messaging(session)) { nghttp2_http_record_request_method(stream, frame); } } else { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream && stream->state == NGHTTP2_STREAM_RESERVED) { rv = session_predicate_push_response_headers_send(session, stream); if (rv == 0) { frame->headers.cat = NGHTTP2_HCAT_PUSH_RESPONSE; if (aux_data->stream_user_data) { stream->stream_user_data = aux_data->stream_user_data; } } } else if (session_predicate_response_headers_send(session, stream) == 0) { 
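/* The first HEADERS a server sends on a client-initiated stream in NGHTTP2_STREAM_OPENING state is treated as the response. */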
frame->headers.cat = NGHTTP2_HCAT_RESPONSE; rv = 0; } else { frame->headers.cat = NGHTTP2_HCAT_HEADERS; rv = session_predicate_headers_send(session, stream); } if (rv != 0) { return rv; } } estimated_payloadlen = session_estimate_headers_payload( session, frame->headers.nva, frame->headers.nvlen, NGHTTP2_PRIORITY_SPECLEN); if (estimated_payloadlen > session->max_send_header_block_length) { return NGHTTP2_ERR_FRAME_SIZE_ERROR; } rv = nghttp2_frame_pack_headers(&session->aob.framebufs, &frame->headers, &session->hd_deflater); if (rv != 0) { return rv; } DEBUGF("send: before padding, HEADERS serialized in %zd bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); rv = session_headers_add_pad(session, frame); if (rv != 0) { return rv; } DEBUGF("send: HEADERS finally serialized in %zd bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); if (frame->headers.cat == NGHTTP2_HCAT_REQUEST) { assert(session->last_sent_stream_id < frame->hd.stream_id); session->last_sent_stream_id = frame->hd.stream_id; } return 0; } case NGHTTP2_PRIORITY: { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } /* PRIORITY frame can be sent at any time and to any stream ID. */ nghttp2_frame_pack_priority(&session->aob.framebufs, &frame->priority); /* Peer can send PRIORITY frame against idle stream to create "anchor" in dependency tree. Only client can do this in nghttp2. In nghttp2, only server retains non-active (closed or idle) streams in memory, so we don't open stream here. */ return 0; } case NGHTTP2_RST_STREAM: if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } nghttp2_frame_pack_rst_stream(&session->aob.framebufs, &frame->rst_stream); return 0; case NGHTTP2_SETTINGS: { if (frame->hd.flags & NGHTTP2_FLAG_ACK) { assert(session->obq_flood_counter_ > 0); --session->obq_flood_counter_; /* When session is about to close, don't send SETTINGS ACK. We are required to send SETTINGS without ACK though; for example, we have to send SETTINGS as a part of connection preface. */ if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } } rv = nghttp2_frame_pack_settings(&session->aob.framebufs, &frame->settings); if (rv != 0) { return rv; } return 0; } case NGHTTP2_PUSH_PROMISE: { nghttp2_stream *stream; size_t estimated_payloadlen; /* stream could be NULL if associated stream was already closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* predicate should fail if stream is NULL. 
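For a server session, session_predicate_push_promise_send() then reports NGHTTP2_ERR_STREAM_CLOSED through session_predicate_for_stream_send().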
*/ rv = session_predicate_push_promise_send(session, stream); if (rv != 0) { return rv; } assert(stream); estimated_payloadlen = session_estimate_headers_payload( session, frame->push_promise.nva, frame->push_promise.nvlen, 0); if (estimated_payloadlen > session->max_send_header_block_length) { return NGHTTP2_ERR_FRAME_SIZE_ERROR; } rv = nghttp2_frame_pack_push_promise( &session->aob.framebufs, &frame->push_promise, &session->hd_deflater); if (rv != 0) { return rv; } rv = session_headers_add_pad(session, frame); if (rv != 0) { return rv; } assert(session->last_sent_stream_id + 2 <= frame->push_promise.promised_stream_id); session->last_sent_stream_id = frame->push_promise.promised_stream_id; return 0; } case NGHTTP2_PING: if (frame->hd.flags & NGHTTP2_FLAG_ACK) { assert(session->obq_flood_counter_ > 0); --session->obq_flood_counter_; } /* PING frame is allowed to be sent unless termination GOAWAY is sent */ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) { return NGHTTP2_ERR_SESSION_CLOSING; } nghttp2_frame_pack_ping(&session->aob.framebufs, &frame->ping); return 0; case NGHTTP2_GOAWAY: rv = nghttp2_frame_pack_goaway(&session->aob.framebufs, &frame->goaway); if (rv != 0) { return rv; } session->local_last_stream_id = frame->goaway.last_stream_id; return 0; case NGHTTP2_WINDOW_UPDATE: rv = session_predicate_window_update_send(session, frame->hd.stream_id); if (rv != 0) { return rv; } nghttp2_frame_pack_window_update(&session->aob.framebufs, &frame->window_update); return 0; case NGHTTP2_CONTINUATION: /* We never handle CONTINUATION here. */ assert(0); return 0; default: { nghttp2_ext_aux_data *aux_data; /* extension frame */ aux_data = &item->aux_data.ext; if (aux_data->builtin == 0) { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } return session_pack_extension(session, &session->aob.framebufs, frame); } switch (frame->hd.type) { case NGHTTP2_ALTSVC: rv = session_predicate_altsvc_send(session, frame->hd.stream_id); if (rv != 0) { return rv; } nghttp2_frame_pack_altsvc(&session->aob.framebufs, &frame->ext); return 0; case NGHTTP2_ORIGIN: rv = session_predicate_origin_send(session); if (rv != 0) { return rv; } rv = nghttp2_frame_pack_origin(&session->aob.framebufs, &frame->ext); if (rv != 0) { return rv; } return 0; default: /* Unreachable here */ assert(0); return 0; } } } } nghttp2_outbound_item * nghttp2_session_get_next_ob_item(nghttp2_session *session) { if (nghttp2_outbound_queue_top(&session->ob_urgent)) { return nghttp2_outbound_queue_top(&session->ob_urgent); } if (nghttp2_outbound_queue_top(&session->ob_reg)) { return nghttp2_outbound_queue_top(&session->ob_reg); } if (!session_is_outgoing_concurrent_streams_max(session)) { if (nghttp2_outbound_queue_top(&session->ob_syn)) { return nghttp2_outbound_queue_top(&session->ob_syn); } } if (session->remote_window_size > 0) { return nghttp2_stream_next_outbound_item(&session->root); } return NULL; } nghttp2_outbound_item * nghttp2_session_pop_next_ob_item(nghttp2_session *session) { nghttp2_outbound_item *item; item = nghttp2_outbound_queue_top(&session->ob_urgent); if (item) { nghttp2_outbound_queue_pop(&session->ob_urgent); item->queued = 0; return item; } item = nghttp2_outbound_queue_top(&session->ob_reg); if (item) { nghttp2_outbound_queue_pop(&session->ob_reg); item->queued = 0; return item; } if (!session_is_outgoing_concurrent_streams_max(session)) { item = nghttp2_outbound_queue_top(&session->ob_syn); if (item) { nghttp2_outbound_queue_pop(&session->ob_syn); item->queued = 0; return item; } } if 
(session->remote_window_size > 0) { return nghttp2_stream_next_outbound_item(&session->root); } return NULL; } static int session_call_before_frame_send(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.before_frame_send_callback) { rv = session->callbacks.before_frame_send_callback(session, frame, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_frame_send(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.on_frame_send_callback) { rv = session->callbacks.on_frame_send_callback(session, frame, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int find_stream_on_goaway_func(nghttp2_map_entry *entry, void *ptr) { nghttp2_close_stream_on_goaway_arg *arg; nghttp2_stream *stream; arg = (nghttp2_close_stream_on_goaway_arg *)ptr; stream = (nghttp2_stream *)entry; if (nghttp2_session_is_my_stream_id(arg->session, stream->stream_id)) { if (arg->incoming) { return 0; } } else if (!arg->incoming) { return 0; } if (stream->state != NGHTTP2_STREAM_IDLE && (stream->flags & NGHTTP2_STREAM_FLAG_CLOSED) == 0 && stream->stream_id > arg->last_stream_id) { /* We are collecting streams to close because we cannot call nghttp2_session_close_stream() inside nghttp2_map_each(). Reuse closed_next member.. bad choice? */ assert(stream->closed_next == NULL); assert(stream->closed_prev == NULL); if (arg->head) { stream->closed_next = arg->head; arg->head = stream; } else { arg->head = stream; } } return 0; } /* Closes non-idle and non-closed streams whose stream ID > last_stream_id. If incoming is nonzero, we are going to close incoming streams. Otherwise, close outgoing streams. */ static int session_close_stream_on_goaway(nghttp2_session *session, int32_t last_stream_id, int incoming) { int rv; nghttp2_stream *stream, *next_stream; nghttp2_close_stream_on_goaway_arg arg = {session, NULL, last_stream_id, incoming}; rv = nghttp2_map_each(&session->streams, find_stream_on_goaway_func, &arg); assert(rv == 0); stream = arg.head; while (stream) { next_stream = stream->closed_next; stream->closed_next = NULL; rv = nghttp2_session_close_stream(session, stream->stream_id, NGHTTP2_REFUSED_STREAM); /* stream may be deleted here */ stream = next_stream; if (nghttp2_is_fatal(rv)) { /* Clean up closed_next member just in case */ while (stream) { next_stream = stream->closed_next; stream->closed_next = NULL; stream = next_stream; } return rv; } } return 0; } static void reschedule_stream(nghttp2_stream *stream) { stream->last_writelen = stream->item->frame.hd.length; nghttp2_stream_reschedule(stream); } static int session_update_stream_consumed_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size); static int session_update_connection_consumed_size(nghttp2_session *session, size_t delta_size); /* * Called after a frame is sent. This function runs * on_frame_send_callback and handles stream closure upon END_STREAM * or RST_STREAM. This function does not reset session->aob. It is a * responsibility of session_after_frame_sent2. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. 
*/ static int session_after_frame_sent1(nghttp2_session *session) { int rv; nghttp2_active_outbound_item *aob = &session->aob; nghttp2_outbound_item *item = aob->item; nghttp2_bufs *framebufs = &aob->framebufs; nghttp2_frame *frame; nghttp2_stream *stream; frame = &item->frame; if (frame->hd.type == NGHTTP2_DATA) { nghttp2_data_aux_data *aux_data; aux_data = &item->aux_data.data; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* We update flow control window after a frame was completely sent. This is possible because we choose payload length not to exceed the window */ session->remote_window_size -= (int32_t)frame->hd.length; if (stream) { stream->remote_window_size -= (int32_t)frame->hd.length; } if (stream && aux_data->eof) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } /* Call on_frame_send_callback after nghttp2_stream_detach_item(), so that application can issue nghttp2_submit_data() in the callback. */ if (session->callbacks.on_frame_send_callback) { rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } } if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { int stream_closed; stream_closed = (stream->shut_flags & NGHTTP2_SHUT_RDWR) == NGHTTP2_SHUT_RDWR; nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* stream may be NULL if it was closed */ if (stream_closed) { stream = NULL; } } return 0; } if (session->callbacks.on_frame_send_callback) { rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* non-DATA frame */ if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_PUSH_PROMISE) { if (nghttp2_bufs_next_present(framebufs)) { DEBUGF("send: CONTINUATION exists, just return\n"); return 0; } } rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } switch (frame->hd.type) { case NGHTTP2_HEADERS: { nghttp2_headers_aux_data *aux_data; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } switch (frame->headers.cat) { case NGHTTP2_HCAT_REQUEST: { stream->state = NGHTTP2_STREAM_OPENING; if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); } rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; if (aux_data->data_prd.read_callback) { /* nghttp2_submit_data() makes a copy of aux_data->data_prd */ rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, frame->hd.stream_id, &aux_data->data_prd); if (nghttp2_is_fatal(rv)) { return rv; } /* TODO nghttp2_submit_data() may fail if stream has already DATA frame item. We might have to handle it here. 
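(nghttp2_submit_data() would get NGHTTP2_ERR_DATA_EXIST from nghttp2_session_add_item() in that case.)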
*/ } return 0; } case NGHTTP2_HCAT_PUSH_RESPONSE: stream->flags = (uint8_t)(stream->flags & ~NGHTTP2_STREAM_FLAG_PUSH); ++session->num_outgoing_streams; /* Fall through */ case NGHTTP2_HCAT_RESPONSE: stream->state = NGHTTP2_STREAM_OPENED; /* Fall through */ case NGHTTP2_HCAT_HEADERS: if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); } rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; if (aux_data->data_prd.read_callback) { rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, frame->hd.stream_id, &aux_data->data_prd); if (nghttp2_is_fatal(rv)) { return rv; } /* TODO nghttp2_submit_data() may fail if stream has already DATA frame item. We might have to handle it here. */ } return 0; default: /* Unreachable */ assert(0); return 0; } } case NGHTTP2_PRIORITY: if (session->server) { return 0; } stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (!stream) { if (!session_detect_idle_stream(session, frame->hd.stream_id)) { return 0; } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_FLAG_NONE, &frame->priority.pri_spec, NGHTTP2_STREAM_IDLE, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } } else { rv = nghttp2_session_reprioritize_stream(session, stream, &frame->priority.pri_spec); if (nghttp2_is_fatal(rv)) { return rv; } } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } return 0; case NGHTTP2_RST_STREAM: rv = nghttp2_session_close_stream(session, frame->hd.stream_id, frame->rst_stream.error_code); if (nghttp2_is_fatal(rv)) { return rv; } return 0; case NGHTTP2_GOAWAY: { nghttp2_goaway_aux_data *aux_data; aux_data = &item->aux_data.goaway; if ((aux_data->flags & NGHTTP2_GOAWAY_AUX_SHUTDOWN_NOTICE) == 0) { if (aux_data->flags & NGHTTP2_GOAWAY_AUX_TERM_ON_SEND) { session->goaway_flags |= NGHTTP2_GOAWAY_TERM_SENT; } session->goaway_flags |= NGHTTP2_GOAWAY_SENT; rv = session_close_stream_on_goaway(session, frame->goaway.last_stream_id, 1); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } case NGHTTP2_WINDOW_UPDATE: if (frame->hd.stream_id == 0) { session->window_update_queued = 0; if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { rv = session_update_connection_consumed_size(session, 0); } else { rv = nghttp2_session_update_recv_connection_window_size(session, 0); } if (nghttp2_is_fatal(rv)) { return rv; } return 0; } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } stream->window_update_queued = 0; /* We don't have to send WINDOW_UPDATE if END_STREAM from peer is seen. */ if (stream->shut_flags & NGHTTP2_SHUT_RD) { return 0; } if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { rv = session_update_stream_consumed_size(session, stream, 0); } else { rv = nghttp2_session_update_recv_stream_window_size(session, stream, 0, 1); } if (nghttp2_is_fatal(rv)) { return rv; } return 0; default: return 0; } } /* * Called after a frame is sent and after session_after_frame_sent1. This * function is responsible for resetting session->aob. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed.
*/ static int session_after_frame_sent2(nghttp2_session *session) { int rv; nghttp2_active_outbound_item *aob = &session->aob; nghttp2_outbound_item *item = aob->item; nghttp2_bufs *framebufs = &aob->framebufs; nghttp2_frame *frame; nghttp2_mem *mem; nghttp2_stream *stream; nghttp2_data_aux_data *aux_data; mem = &session->mem; frame = &item->frame; if (frame->hd.type != NGHTTP2_DATA) { if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_PUSH_PROMISE) { if (nghttp2_bufs_next_present(framebufs)) { framebufs->cur = framebufs->cur->next; DEBUGF("send: next CONTINUATION frame, %zu bytes\n", nghttp2_buf_len(&framebufs->cur->buf)); return 0; } } active_outbound_item_reset(&session->aob, mem); return 0; } /* DATA frame */ aux_data = &item->aux_data.data; /* On EOF, we have already detached data. Please note that application may issue nghttp2_submit_data() in on_frame_send_callback (call from session_after_frame_sent1), which attach data to stream. We don't want to detach it. */ if (aux_data->eof) { active_outbound_item_reset(aob, mem); return 0; } /* Reset no_copy here because next write may not use this. */ aux_data->no_copy = 0; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* If session is closed or RST_STREAM was queued, we won't send further data. */ if (nghttp2_session_predicate_data_send(session, stream) != 0) { if (stream) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } } active_outbound_item_reset(aob, mem); return 0; } aob->item = NULL; active_outbound_item_reset(&session->aob, mem); return 0; } static int session_call_send_data(nghttp2_session *session, nghttp2_outbound_item *item, nghttp2_bufs *framebufs) { int rv; nghttp2_buf *buf; size_t length; nghttp2_frame *frame; nghttp2_data_aux_data *aux_data; buf = &framebufs->cur->buf; frame = &item->frame; length = frame->hd.length - frame->data.padlen; aux_data = &item->aux_data.data; rv = session->callbacks.send_data_callback(session, frame, buf->pos, length, &aux_data->data_prd.source, session->user_data); switch (rv) { case 0: case NGHTTP2_ERR_WOULDBLOCK: case NGHTTP2_ERR_PAUSE: case NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE: return rv; default: return NGHTTP2_ERR_CALLBACK_FAILURE; } } static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, const uint8_t **data_ptr, int fast_cb) { int rv; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; nghttp2_mem *mem; mem = &session->mem; aob = &session->aob; framebufs = &aob->framebufs; /* We may have idle streams more than we expect (e.g., nghttp2_session_change_stream_priority() or nghttp2_session_create_idle_stream()). Adjust them here. */ rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } for (;;) { switch (aob->state) { case NGHTTP2_OB_POP_ITEM: { nghttp2_outbound_item *item; item = nghttp2_session_pop_next_ob_item(session); if (item == NULL) { return 0; } rv = session_prep_frame(session, item); if (rv == NGHTTP2_ERR_PAUSE) { return 0; } if (rv == NGHTTP2_ERR_DEFERRED) { DEBUGF("send: frame transmission deferred\n"); break; } if (rv < 0) { int32_t opened_stream_id = 0; uint32_t error_code = NGHTTP2_INTERNAL_ERROR; DEBUGF("send: frame preparation failed with %s\n", nghttp2_strerror(rv)); /* TODO If the error comes from compressor, the connection must be closed. 
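(NGHTTP2_ERR_HEADER_COMP is handled below by terminating the session with NGHTTP2_INTERNAL_ERROR.)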
*/ if (item->frame.hd.type != NGHTTP2_DATA && session->callbacks.on_frame_not_send_callback && is_non_fatal(rv)) { nghttp2_frame *frame = &item->frame; /* The library is responsible for the transmission of WINDOW_UPDATE frame, so we don't call error callback for it. */ if (frame->hd.type != NGHTTP2_WINDOW_UPDATE && session->callbacks.on_frame_not_send_callback( session, frame, rv, session->user_data) != 0) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); return NGHTTP2_ERR_CALLBACK_FAILURE; } } /* We have to close the stream opened by the failed request HEADERS or PUSH_PROMISE. */ switch (item->frame.hd.type) { case NGHTTP2_HEADERS: if (item->frame.headers.cat == NGHTTP2_HCAT_REQUEST) { opened_stream_id = item->frame.hd.stream_id; if (item->aux_data.headers.canceled) { error_code = item->aux_data.headers.error_code; } else { /* Set error_code to REFUSED_STREAM so that the application can send the request again. */ error_code = NGHTTP2_REFUSED_STREAM; } } break; case NGHTTP2_PUSH_PROMISE: opened_stream_id = item->frame.push_promise.promised_stream_id; break; } if (opened_stream_id) { /* careful not to override rv */ int rv2; rv2 = nghttp2_session_close_stream(session, opened_stream_id, error_code); if (nghttp2_is_fatal(rv2)) { return rv2; } } nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); active_outbound_item_reset(aob, mem); if (rv == NGHTTP2_ERR_HEADER_COMP) { /* If a header compression error occurred, we should terminate the connection. */ rv = nghttp2_session_terminate_session(session, NGHTTP2_INTERNAL_ERROR); } if (nghttp2_is_fatal(rv)) { return rv; } break; } aob->item = item; nghttp2_bufs_rewind(framebufs); if (item->frame.hd.type != NGHTTP2_DATA) { nghttp2_frame *frame; frame = &item->frame; DEBUGF("send: next frame: payloadlen=%zu, type=%u, flags=0x%02x, " "stream_id=%d\n", frame->hd.length, frame->hd.type, frame->hd.flags, frame->hd.stream_id); rv = session_call_before_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (rv == NGHTTP2_ERR_CANCEL) { int32_t opened_stream_id = 0; uint32_t error_code = NGHTTP2_INTERNAL_ERROR; if (session->callbacks.on_frame_not_send_callback) { if (session->callbacks.on_frame_not_send_callback( session, frame, rv, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } /* We have to close the stream opened by the canceled request HEADERS or PUSH_PROMISE. */ switch (item->frame.hd.type) { case NGHTTP2_HEADERS: if (item->frame.headers.cat == NGHTTP2_HCAT_REQUEST) { opened_stream_id = item->frame.hd.stream_id; /* We don't have to check item->aux_data.headers.canceled since it has already been checked. */ /* Set error_code to REFUSED_STREAM so that the application can send the request again.
*/ error_code = NGHTTP2_REFUSED_STREAM; } break; case NGHTTP2_PUSH_PROMISE: opened_stream_id = item->frame.push_promise.promised_stream_id; break; } if (opened_stream_id) { /* careful not to override rv */ int rv2; rv2 = nghttp2_session_close_stream(session, opened_stream_id, error_code); if (nghttp2_is_fatal(rv2)) { return rv2; } } active_outbound_item_reset(aob, mem); break; } } else { DEBUGF("send: next frame: DATA\n"); if (item->aux_data.data.no_copy) { aob->state = NGHTTP2_OB_SEND_NO_COPY; break; } } DEBUGF("send: start transmitting frame type=%u, length=%zd\n", framebufs->cur->buf.pos[3], framebufs->cur->buf.last - framebufs->cur->buf.pos); aob->state = NGHTTP2_OB_SEND_DATA; break; } case NGHTTP2_OB_SEND_DATA: { size_t datalen; nghttp2_buf *buf; buf = &framebufs->cur->buf; if (buf->pos == buf->last) { DEBUGF("send: end transmission of a frame\n"); /* Frame has completely sent */ if (fast_cb) { rv = session_after_frame_sent2(session); } else { rv = session_after_frame_sent1(session); if (rv < 0) { /* FATAL */ assert(nghttp2_is_fatal(rv)); return rv; } rv = session_after_frame_sent2(session); } if (rv < 0) { /* FATAL */ assert(nghttp2_is_fatal(rv)); return rv; } /* We have already adjusted the next state */ break; } *data_ptr = buf->pos; datalen = nghttp2_buf_len(buf); /* We increment the offset here. If send_callback does not send everything, we will adjust it. */ buf->pos += datalen; return (ssize_t)datalen; } case NGHTTP2_OB_SEND_NO_COPY: { nghttp2_stream *stream; nghttp2_frame *frame; int pause; DEBUGF("send: no copy DATA\n"); frame = &aob->item->frame; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream == NULL) { DEBUGF("send: no copy DATA cancelled because stream was closed\n"); active_outbound_item_reset(aob, mem); break; } rv = session_call_send_data(session, aob->item, framebufs); if (nghttp2_is_fatal(rv)) { return rv; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_add_rst_stream(session, frame->hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } active_outbound_item_reset(aob, mem); break; } if (rv == NGHTTP2_ERR_WOULDBLOCK) { return 0; } pause = (rv == NGHTTP2_ERR_PAUSE); rv = session_after_frame_sent1(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return rv; } rv = session_after_frame_sent2(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return rv; } /* We have already adjusted the next state */ if (pause) { return 0; } break; } case NGHTTP2_OB_SEND_CLIENT_MAGIC: { size_t datalen; nghttp2_buf *buf; buf = &framebufs->cur->buf; if (buf->pos == buf->last) { DEBUGF("send: end transmission of client magic\n"); active_outbound_item_reset(aob, mem); break; } *data_ptr = buf->pos; datalen = nghttp2_buf_len(buf); buf->pos += datalen; return (ssize_t)datalen; } } } } ssize_t nghttp2_session_mem_send(nghttp2_session *session, const uint8_t **data_ptr) { int rv; ssize_t len; *data_ptr = NULL; len = nghttp2_session_mem_send_internal(session, data_ptr, 1); if (len <= 0) { return len; } if (session->aob.item) { /* We have to call session_after_frame_sent1 here to handle stream closure upon transmission of frames. Otherwise, END_STREAM may be reached to client before we call nghttp2_session_mem_send again and we may get exceeding number of incoming streams. 
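A typical caller loop over this API (an illustrative sketch only; the transport write and error handling are the application's responsibility):

  const uint8_t *data;
  ssize_t n;
  while ((n = nghttp2_session_mem_send(session, &data)) > 0) {
    ... write the n bytes starting at |data| to the transport; the
    buffer remains valid until the next call into the session ...
  }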
*/ rv = session_after_frame_sent1(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return (ssize_t)rv; } } return len; } int nghttp2_session_send(nghttp2_session *session) { const uint8_t *data = NULL; ssize_t datalen; ssize_t sentlen; nghttp2_bufs *framebufs; framebufs = &session->aob.framebufs; for (;;) { datalen = nghttp2_session_mem_send_internal(session, &data, 0); if (datalen <= 0) { return (int)datalen; } sentlen = session->callbacks.send_callback(session, data, (size_t)datalen, 0, session->user_data); if (sentlen < 0) { if (sentlen == NGHTTP2_ERR_WOULDBLOCK) { /* Transmission canceled. Rewind the offset */ framebufs->cur->buf.pos -= datalen; return 0; } return NGHTTP2_ERR_CALLBACK_FAILURE; } /* Rewind the offset to the amount of unsent bytes */ framebufs->cur->buf.pos -= datalen - sentlen; } } static ssize_t session_recv(nghttp2_session *session, uint8_t *buf, size_t len) { ssize_t rv; rv = session->callbacks.recv_callback(session, buf, len, 0, session->user_data); if (rv > 0) { if ((size_t)rv > len) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } else if (rv < 0 && rv != NGHTTP2_ERR_WOULDBLOCK && rv != NGHTTP2_ERR_EOF) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return rv; } static int session_call_on_begin_frame(nghttp2_session *session, const nghttp2_frame_hd *hd) { int rv; if (session->callbacks.on_begin_frame_callback) { rv = session->callbacks.on_begin_frame_callback(session, hd, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_frame_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.on_frame_recv_callback) { rv = session->callbacks.on_frame_recv_callback(session, frame, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_begin_headers(nghttp2_session *session, nghttp2_frame *frame) { int rv; DEBUGF("recv: call on_begin_headers callback stream_id=%d\n", frame->hd.stream_id); if (session->callbacks.on_begin_headers_callback) { rv = session->callbacks.on_begin_headers_callback(session, frame, session->user_data); if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_header(nghttp2_session *session, const nghttp2_frame *frame, const nghttp2_hd_nv *nv) { int rv = 0; if (session->callbacks.on_header_callback2) { rv = session->callbacks.on_header_callback2( session, frame, nv->name, nv->value, nv->flags, session->user_data); } else if (session->callbacks.on_header_callback) { rv = session->callbacks.on_header_callback( session, frame, nv->name->base, nv->name->len, nv->value->base, nv->value->len, nv->flags, session->user_data); } if (rv == NGHTTP2_ERR_PAUSE || rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_call_on_invalid_header(nghttp2_session *session, const nghttp2_frame *frame, const nghttp2_hd_nv *nv) { int rv; if (session->callbacks.on_invalid_header_callback2) { rv = session->callbacks.on_invalid_header_callback2( session, frame, nv->name, nv->value, nv->flags, session->user_data); } else if (session->callbacks.on_invalid_header_callback) { rv = session->callbacks.on_invalid_header_callback( session, frame, nv->name->base, nv->name->len, nv->value->base, nv->value->len, nv->flags, session->user_data); } else { return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } if (rv == NGHTTP2_ERR_PAUSE || rv == 
NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_call_on_extension_chunk_recv_callback(nghttp2_session *session, const uint8_t *data, size_t len) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; if (session->callbacks.on_extension_chunk_recv_callback) { rv = session->callbacks.on_extension_chunk_recv_callback( session, &frame->hd, data, len, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_unpack_extension_callback(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; void *payload = NULL; rv = session->callbacks.unpack_extension_callback( session, &payload, &frame->hd, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } frame->ext.payload = payload; return 0; } /* * Handles frame size error. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. */ static int session_handle_frame_size_error(nghttp2_session *session) { /* TODO Currently no callback is called for this error, because we call this callback before reading any payload */ return nghttp2_session_terminate_session(session, NGHTTP2_FRAME_SIZE_ERROR); } static uint32_t get_error_code_from_lib_error_code(int lib_error_code) { switch (lib_error_code) { case NGHTTP2_ERR_STREAM_CLOSED: return NGHTTP2_STREAM_CLOSED; case NGHTTP2_ERR_HEADER_COMP: return NGHTTP2_COMPRESSION_ERROR; case NGHTTP2_ERR_FRAME_SIZE_ERROR: return NGHTTP2_FRAME_SIZE_ERROR; case NGHTTP2_ERR_FLOW_CONTROL: return NGHTTP2_FLOW_CONTROL_ERROR; case NGHTTP2_ERR_REFUSED_STREAM: return NGHTTP2_REFUSED_STREAM; case NGHTTP2_ERR_PROTO: case NGHTTP2_ERR_HTTP_HEADER: case NGHTTP2_ERR_HTTP_MESSAGING: return NGHTTP2_PROTOCOL_ERROR; default: return NGHTTP2_INTERNAL_ERROR; } } /* * Calls on_invalid_frame_recv_callback if it is set to |session|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_CALLBACK_FAILURE * User defined callback function fails. 
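 *
 * Note that |lib_error_code| is a library-level error code; when the
 * callers below pair this notification with an outgoing RST_STREAM or
 * GOAWAY, the wire-level error code is derived by
 * get_error_code_from_lib_error_code() above, e.g.
 * NGHTTP2_ERR_HEADER_COMP maps to NGHTTP2_COMPRESSION_ERROR and
 * NGHTTP2_ERR_REFUSED_STREAM to NGHTTP2_REFUSED_STREAM.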
 */
static int
session_call_on_invalid_frame_recv_callback(nghttp2_session *session,
                                            nghttp2_frame *frame,
                                            int lib_error_code) {
  if (session->callbacks.on_invalid_frame_recv_callback) {
    if (session->callbacks.on_invalid_frame_recv_callback(
            session, frame, lib_error_code, session->user_data) != 0) {
      return NGHTTP2_ERR_CALLBACK_FAILURE;
    }
  }
  return 0;
}

static int session_handle_invalid_stream2(nghttp2_session *session,
                                          int32_t stream_id,
                                          nghttp2_frame *frame,
                                          int lib_error_code) {
  int rv;
  rv = nghttp2_session_add_rst_stream(
      session, stream_id, get_error_code_from_lib_error_code(lib_error_code));
  if (rv != 0) {
    return rv;
  }
  if (session->callbacks.on_invalid_frame_recv_callback) {
    if (session->callbacks.on_invalid_frame_recv_callback(
            session, frame, lib_error_code, session->user_data) != 0) {
      return NGHTTP2_ERR_CALLBACK_FAILURE;
    }
  }
  return 0;
}

static int session_handle_invalid_stream(nghttp2_session *session,
                                         nghttp2_frame *frame,
                                         int lib_error_code) {
  return session_handle_invalid_stream2(session, frame->hd.stream_id, frame,
                                        lib_error_code);
}

static int session_inflate_handle_invalid_stream(nghttp2_session *session,
                                                 nghttp2_frame *frame,
                                                 int lib_error_code) {
  int rv;
  rv = session_handle_invalid_stream(session, frame, lib_error_code);
  if (nghttp2_is_fatal(rv)) {
    return rv;
  }
  return NGHTTP2_ERR_IGN_HEADER_BLOCK;
}

/*
 * Handles invalid frame which causes connection error.
 */
static int session_handle_invalid_connection(nghttp2_session *session,
                                             nghttp2_frame *frame,
                                             int lib_error_code,
                                             const char *reason) {
  if (session->callbacks.on_invalid_frame_recv_callback) {
    if (session->callbacks.on_invalid_frame_recv_callback(
            session, frame, lib_error_code, session->user_data) != 0) {
      return NGHTTP2_ERR_CALLBACK_FAILURE;
    }
  }
  return nghttp2_session_terminate_session_with_reason(
      session, get_error_code_from_lib_error_code(lib_error_code), reason);
}

static int session_inflate_handle_invalid_connection(nghttp2_session *session,
                                                     nghttp2_frame *frame,
                                                     int lib_error_code,
                                                     const char *reason) {
  int rv;
  rv = session_handle_invalid_connection(session, frame, lib_error_code,
                                         reason);
  if (nghttp2_is_fatal(rv)) {
    return rv;
  }
  return NGHTTP2_ERR_IGN_HEADER_BLOCK;
}

/*
 * Inflates the header block in the memory pointed to by |in| with
 * |inlen| bytes.  If this function returns NGHTTP2_ERR_PAUSE, the
 * caller must call this function again, until it returns 0 or one of
 * the negative error codes.  If |call_header_cb| is zero, the
 * on_header_callback is not invoked and the function never returns
 * NGHTTP2_ERR_PAUSE.  If the given |in| is the last chunk of the
 * header block, |final| must be nonzero.  If the header block is
 * successfully processed (which is indicated by the return value 0,
 * NGHTTP2_ERR_PAUSE or NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE), the
 * number of processed input bytes is assigned to |*readlen_ptr|.
 *
 * This function returns 0 if it succeeds, or one of the negative
 * error codes:
 *
 * NGHTTP2_ERR_CALLBACK_FAILURE
 *     The callback function failed.
 * NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE
 *     The callback returns this error code, indicating that this
 *     stream should be RST_STREAMed.
 * NGHTTP2_ERR_NOMEM
 *     Out of memory.
* NGHTTP2_ERR_PAUSE * The callback function returned NGHTTP2_ERR_PAUSE * NGHTTP2_ERR_HEADER_COMP * Header decompression failed */ static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame, size_t *readlen_ptr, uint8_t *in, size_t inlen, int final, int call_header_cb) { ssize_t proclen; int rv; int inflate_flags; nghttp2_hd_nv nv; nghttp2_stream *stream; nghttp2_stream *subject_stream; int trailer = 0; *readlen_ptr = 0; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { subject_stream = nghttp2_session_get_stream( session, frame->push_promise.promised_stream_id); } else { subject_stream = stream; trailer = session_trailer_headers(session, stream, frame); } DEBUGF("recv: decoding header block %zu bytes\n", inlen); for (;;) { inflate_flags = 0; proclen = nghttp2_hd_inflate_hd_nv(&session->hd_inflater, &nv, &inflate_flags, in, inlen, final); if (nghttp2_is_fatal((int)proclen)) { return (int)proclen; } if (proclen < 0) { if (session->iframe.state == NGHTTP2_IB_READ_HEADER_BLOCK) { if (subject_stream && subject_stream->state != NGHTTP2_STREAM_CLOSING) { /* Adding RST_STREAM here is very important. It prevents from invoking subsequent callbacks for the same stream ID. */ rv = nghttp2_session_add_rst_stream( session, subject_stream->stream_id, NGHTTP2_COMPRESSION_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } } } rv = nghttp2_session_terminate_session(session, NGHTTP2_COMPRESSION_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_HEADER_COMP; } in += proclen; inlen -= (size_t)proclen; *readlen_ptr += (size_t)proclen; DEBUGF("recv: proclen=%zd\n", proclen); if (call_header_cb && (inflate_flags & NGHTTP2_HD_INFLATE_EMIT)) { rv = 0; if (subject_stream) { if (session_enforce_http_messaging(session)) { rv = nghttp2_http_on_header(session, subject_stream, frame, &nv, trailer); if (rv == NGHTTP2_ERR_IGN_HTTP_HEADER) { /* Don't overwrite rv here */ int rv2; rv2 = session_call_on_invalid_header(session, frame, &nv); if (rv2 == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = NGHTTP2_ERR_HTTP_HEADER; } else { if (rv2 != 0) { return rv2; } /* header is ignored */ DEBUGF("recv: HTTP ignored: type=%u, id=%d, header %.*s: %.*s\n", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); rv2 = session_call_error_callback( session, NGHTTP2_ERR_HTTP_HEADER, "Ignoring received invalid HTTP header field: frame type: " "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); if (nghttp2_is_fatal(rv2)) { return rv2; } } } if (rv == NGHTTP2_ERR_HTTP_HEADER) { DEBUGF("recv: HTTP error: type=%u, id=%d, header %.*s: %.*s\n", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); rv = session_call_error_callback( session, NGHTTP2_ERR_HTTP_HEADER, "Invalid HTTP header field was received: frame type: " "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); if (nghttp2_is_fatal(rv)) { return rv; } rv = session_handle_invalid_stream2(session, subject_stream->stream_id, frame, NGHTTP2_ERR_HTTP_HEADER); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } } if (rv == 0) { rv = session_call_on_header(session, frame, &nv); /* This handles NGHTTP2_ERR_PAUSE and NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE as 
             well */
          if (rv != 0) {
            return rv;
          }
        }
      }
    }

    if (inflate_flags & NGHTTP2_HD_INFLATE_FINAL) {
      nghttp2_hd_inflate_end_headers(&session->hd_inflater);
      break;
    }
    if ((inflate_flags & NGHTTP2_HD_INFLATE_EMIT) == 0 && inlen == 0) {
      break;
    }
  }

  return 0;
}

/*
 * Call this function when a HEADERS frame has been completely
 * received.
 *
 * This function returns 0 if it succeeds, or one of the negative
 * error codes:
 *
 * NGHTTP2_ERR_CALLBACK_FAILURE
 *     The callback function failed.
 * NGHTTP2_ERR_NOMEM
 *     Out of memory.
 */
static int session_end_stream_headers_received(nghttp2_session *session,
                                               nghttp2_frame *frame,
                                               nghttp2_stream *stream) {
  int rv;
  if ((frame->hd.flags & NGHTTP2_FLAG_END_STREAM) == 0) {
    return 0;
  }

  nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD);
  rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream);
  if (nghttp2_is_fatal(rv)) {
    return rv;
  }

  return 0;
}

static int session_after_header_block_received(nghttp2_session *session) {
  int rv = 0;
  nghttp2_frame *frame = &session->iframe.frame;
  nghttp2_stream *stream;

  /* We don't call on_frame_recv_callback if the stream has already
     been closed or is being closed. */
  stream = nghttp2_session_get_stream(session, frame->hd.stream_id);
  if (!stream || stream->state == NGHTTP2_STREAM_CLOSING) {
    return 0;
  }

  if (session_enforce_http_messaging(session)) {
    if (frame->hd.type == NGHTTP2_PUSH_PROMISE) {
      nghttp2_stream *subject_stream;

      subject_stream = nghttp2_session_get_stream(
          session, frame->push_promise.promised_stream_id);
      if (subject_stream) {
        rv = nghttp2_http_on_request_headers(subject_stream, frame);
      }
    } else {
      assert(frame->hd.type == NGHTTP2_HEADERS);
      switch (frame->headers.cat) {
      case NGHTTP2_HCAT_REQUEST:
        rv = nghttp2_http_on_request_headers(stream, frame);
        break;
      case NGHTTP2_HCAT_RESPONSE:
      case NGHTTP2_HCAT_PUSH_RESPONSE:
        rv = nghttp2_http_on_response_headers(stream);
        break;
      case NGHTTP2_HCAT_HEADERS:
        if (stream->http_flags & NGHTTP2_HTTP_FLAG_EXPECT_FINAL_RESPONSE) {
          assert(!session->server);
          rv = nghttp2_http_on_response_headers(stream);
        } else {
          rv = nghttp2_http_on_trailer_headers(stream, frame);
        }
        break;
      default:
        assert(0);
      }
      if (rv == 0 && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) {
        rv = nghttp2_http_on_remote_end_stream(stream);
      }
    }
    if (rv != 0) {
      int32_t stream_id;

      if (frame->hd.type == NGHTTP2_PUSH_PROMISE) {
        stream_id = frame->push_promise.promised_stream_id;
      } else {
        stream_id = frame->hd.stream_id;
      }

      rv = session_handle_invalid_stream2(session, stream_id, frame,
                                          NGHTTP2_ERR_HTTP_MESSAGING);
      if (nghttp2_is_fatal(rv)) {
        return rv;
      }

      if (frame->hd.type == NGHTTP2_HEADERS &&
          (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) {
        nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD);
        /* Don't call nghttp2_session_close_stream_if_shut_rdwr
           because RST_STREAM has been submitted. */
      }
      return 0;
    }
  }

  rv = session_call_on_frame_received(session, frame);
  if (nghttp2_is_fatal(rv)) {
    return rv;
  }

  if (frame->hd.type != NGHTTP2_HEADERS) {
    return 0;
  }

  return session_end_stream_headers_received(session, frame, stream);
}

int nghttp2_session_on_request_headers_received(nghttp2_session *session,
                                                nghttp2_frame *frame) {
  int rv = 0;
  nghttp2_stream *stream;
  if (frame->hd.stream_id == 0) {
    return session_inflate_handle_invalid_connection(
        session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: stream_id == 0");
  }

  /* If the client receives an idle stream from the server, it is
     invalid regardless of whether the stream ID is even or odd.  This
     is because the client is not expected to receive a request from
     the server.
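     For example, if a client that has opened streams 1 and 3 sees
     request HEADERS on the still-idle stream 2 (or 5), the check
     below escalates it to a connection error of type PROTOCOL_ERROR,
     whereas request HEADERS on a stream that was once open and has
     since been closed is merely ignored as a stale header block.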
*/ if (!session->server) { if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: client received request"); } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } assert(session->server); if (!session_is_new_peer_stream_id(session, frame->hd.stream_id)) { if (frame->hd.stream_id == 0 || nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: invalid stream_id"); } /* RFC 7540 says if an endpoint receives a HEADERS with invalid * stream ID (e.g, numerically smaller than previous), it MUST * issue connection error with error code PROTOCOL_ERROR. It is a * bit hard to detect this, since we cannot remember all streams * we observed so far. * * You might imagine this is really easy. But no. HTTP/2 is * asynchronous protocol, and usually client and server do not * share the complete picture of open/closed stream status. For * example, after server sends RST_STREAM for a stream, client may * send trailer HEADERS for that stream. If naive server detects * that, and issued connection error, then it is a bug of server * implementation since client is not wrong if it did not get * RST_STREAM when it issued trailer HEADERS. * * At the moment, we are very conservative here. We only use * connection error if stream ID refers idle stream, or we are * sure that stream is half-closed(remote) or closed. Otherwise * we just ignore HEADERS for now. */ stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (stream && (stream->shut_flags & NGHTTP2_SHUT_RD)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } session->last_recv_stream_id = frame->hd.stream_id; if (session_is_incoming_concurrent_streams_max(session)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: max concurrent streams exceeded"); } if (!session_allow_incoming_new_stream(session)) { /* We just ignore stream after GOAWAY was sent */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (frame->headers.pri_spec.stream_id == frame->hd.stream_id) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: depend on itself"); } if (session_is_incoming_concurrent_streams_pending_max(session)) { return session_inflate_handle_invalid_stream(session, frame, NGHTTP2_ERR_REFUSED_STREAM); } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->headers.pri_spec, NGHTTP2_STREAM_OPENING, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } rv = nghttp2_session_adjust_closed_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } session->last_proc_stream_id = session->last_recv_stream_id; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_response_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv; /* This function is only called if stream->state == NGHTTP2_STREAM_OPENING and stream_id is local side initiated. 
*/ assert(stream->state == NGHTTP2_STREAM_OPENING && nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)); if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "response HEADERS: stream_id == 0"); } if (stream->shut_flags & NGHTTP2_SHUT_RD) { /* half closed (remote): from the spec: If an endpoint receives additional frames for a stream that is in this state it MUST respond with a stream error (Section 5.4.2) of type STREAM_CLOSED. We go further, and make it connection error. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } stream->state = NGHTTP2_STREAM_OPENED; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_push_response_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv = 0; assert(stream->state == NGHTTP2_STREAM_RESERVED); if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "push response HEADERS: stream_id == 0"); } if (session->server) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "HEADERS: no HEADERS allowed from client in reserved state"); } if (session_is_incoming_concurrent_streams_max(session)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "push response HEADERS: max concurrent streams exceeded"); } if (!session_allow_incoming_new_stream(session)) { /* We don't accept new stream after GOAWAY was sent. */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (session_is_incoming_concurrent_streams_pending_max(session)) { return session_inflate_handle_invalid_stream(session, frame, NGHTTP2_ERR_REFUSED_STREAM); } nghttp2_stream_promise_fulfilled(stream); if (!nghttp2_session_is_my_stream_id(session, stream->stream_id)) { --session->num_incoming_reserved_streams; } ++session->num_incoming_streams; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv = 0; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "HEADERS: stream_id == 0"); } if ((stream->shut_flags & NGHTTP2_SHUT_RD)) { /* half closed (remote): from the spec: If an endpoint receives additional frames for a stream that is in this state it MUST respond with a stream error (Section 5.4.2) of type STREAM_CLOSED. we go further, and make it connection error. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } if (nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { if (stream->state == NGHTTP2_STREAM_OPENED) { rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } /* If this is remote peer initiated stream, it is OK unless it has sent END_STREAM frame already. But if stream is in NGHTTP2_STREAM_CLOSING, we discard the frame. This is a race condition. 
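     A concrete instance: we submit RST_STREAM for a stream while the
     peer's trailer HEADERS for the same stream is already in flight.
     The stream then sits in NGHTTP2_STREAM_CLOSING, so the incoming
     header block is silently dropped below instead of being treated
     as a protocol violation, because the peer did nothing wrong.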
*/ if (stream->state != NGHTTP2_STREAM_CLOSING) { rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } static int session_process_headers_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_stream *stream; rv = nghttp2_frame_unpack_headers_payload(&frame->headers, iframe->sbuf.pos); if (rv != 0) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: could not unpack"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { frame->headers.cat = NGHTTP2_HCAT_REQUEST; return nghttp2_session_on_request_headers_received(session, frame); } if (stream->state == NGHTTP2_STREAM_RESERVED) { frame->headers.cat = NGHTTP2_HCAT_PUSH_RESPONSE; return nghttp2_session_on_push_response_headers_received(session, frame, stream); } if (stream->state == NGHTTP2_STREAM_OPENING && nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { frame->headers.cat = NGHTTP2_HCAT_RESPONSE; return nghttp2_session_on_response_headers_received(session, frame, stream); } frame->headers.cat = NGHTTP2_HCAT_HEADERS; return nghttp2_session_on_headers_received(session, frame, stream); } int nghttp2_session_on_priority_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "PRIORITY: stream_id == 0"); } if (frame->priority.pri_spec.stream_id == frame->hd.stream_id) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "depend on itself"); } if (!session->server) { /* Re-prioritization works only in server */ return session_call_on_frame_received(session, frame); } stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (!stream) { /* PRIORITY against idle stream can create anchor node in dependency tree. 
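     For example, a client may send PRIORITY for a stream it never
     opens and make later streams depend on it, using the idle stream
     purely as a grouping node (RFC 7540, section 5.3.4).  The stream
     opened below in NGHTTP2_STREAM_IDLE state exists only for the
     dependency tree and is subject to pruning by
     nghttp2_session_adjust_idle_stream().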
*/ if (!session_detect_idle_stream(session, frame->hd.stream_id)) { return 0; } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->priority.pri_spec, NGHTTP2_STREAM_IDLE, NULL); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } } else { rv = nghttp2_session_reprioritize_stream(session, stream, &frame->priority.pri_spec); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } } return session_call_on_frame_received(session, frame); } static int session_process_priority_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_priority_payload(&frame->priority, iframe->sbuf.pos); return nghttp2_session_on_priority_received(session, frame); } int nghttp2_session_on_rst_stream_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "RST_STREAM: stream_id == 0"); } if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "RST_STREAM: stream in idle"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream) { /* We may use stream->shut_flags for strict error checking. */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); } rv = session_call_on_frame_received(session, frame); if (rv != 0) { return rv; } rv = nghttp2_session_close_stream(session, frame->hd.stream_id, frame->rst_stream.error_code); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } static int session_process_rst_stream_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_rst_stream_payload(&frame->rst_stream, iframe->sbuf.pos); return nghttp2_session_on_rst_stream_received(session, frame); } static int update_remote_initial_window_size_func(nghttp2_map_entry *entry, void *ptr) { int rv; nghttp2_update_window_size_arg *arg; nghttp2_stream *stream; arg = (nghttp2_update_window_size_arg *)ptr; stream = (nghttp2_stream *)entry; rv = nghttp2_stream_update_remote_initial_window_size( stream, arg->new_window_size, arg->old_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(arg->session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } /* If window size gets positive, push deferred DATA frame to outbound queue. */ if (stream->remote_window_size > 0 && nghttp2_stream_check_deferred_by_flow_control(stream)) { rv = nghttp2_stream_resume_deferred_item( stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* * Updates the remote initial window size of all active streams. If * error occurs, all streams may not be updated. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. 
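 *
 * Worked example: if the peer lowers SETTINGS_INITIAL_WINDOW_SIZE
 * from the default 65535 to 1024, a stream whose remote_window_size
 * is currently 100 ends up with 100 + (1024 - 65535) = -64411.  A
 * negative window is legal here; it simply means no DATA can be sent
 * on that stream until enough WINDOW_UPDATEs arrive (RFC 7540,
 * section 6.9.2).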
 */
static int
session_update_remote_initial_window_size(nghttp2_session *session,
                                          int32_t new_initial_window_size) {
  nghttp2_update_window_size_arg arg;

  arg.session = session;
  arg.new_window_size = new_initial_window_size;
  arg.old_window_size = (int32_t)session->remote_settings.initial_window_size;

  return nghttp2_map_each(&session->streams,
                          update_remote_initial_window_size_func, &arg);
}

static int update_local_initial_window_size_func(nghttp2_map_entry *entry,
                                                 void *ptr) {
  int rv;
  nghttp2_update_window_size_arg *arg;
  nghttp2_stream *stream;
  arg = (nghttp2_update_window_size_arg *)ptr;
  stream = (nghttp2_stream *)entry;
  rv = nghttp2_stream_update_local_initial_window_size(
      stream, arg->new_window_size, arg->old_window_size);
  if (rv != 0) {
    return nghttp2_session_add_rst_stream(arg->session, stream->stream_id,
                                          NGHTTP2_FLOW_CONTROL_ERROR);
  }
  if (!(arg->session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) &&
      stream->window_update_queued == 0 &&
      nghttp2_should_send_window_update(stream->local_window_size,
                                        stream->recv_window_size)) {
    rv = nghttp2_session_add_window_update(arg->session, NGHTTP2_FLAG_NONE,
                                           stream->stream_id,
                                           stream->recv_window_size);
    if (rv != 0) {
      return rv;
    }
    stream->recv_window_size = 0;
  }
  return 0;
}

/*
 * Updates the local initial window size of all active streams.  If
 * an error occurs, some streams may not be updated.
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_NOMEM
 *     Out of memory.
 */
static int
session_update_local_initial_window_size(nghttp2_session *session,
                                         int32_t new_initial_window_size,
                                         int32_t old_initial_window_size) {
  nghttp2_update_window_size_arg arg;
  arg.session = session;
  arg.new_window_size = new_initial_window_size;
  arg.old_window_size = old_initial_window_size;
  return nghttp2_map_each(&session->streams,
                          update_local_initial_window_size_func, &arg);
}

/*
 * Applies SETTINGS values |iv| having |niv| elements to the local
 * settings.  We assume that all values in |iv| are correct, since we
 * validated them in nghttp2_session_add_settings() already.
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_HEADER_COMP
 *     The header table size is out of range
 * NGHTTP2_ERR_NOMEM
 *     Out of memory
 */
int nghttp2_session_update_local_settings(nghttp2_session *session,
                                          nghttp2_settings_entry *iv,
                                          size_t niv) {
  int rv;
  size_t i;
  int32_t new_initial_window_size = -1;
  uint32_t header_table_size = 0;
  uint32_t min_header_table_size = UINT32_MAX;
  uint8_t header_table_size_seen = 0;
  /* For NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, use the value last
     seen.  For NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, use both the
     minimum value and the last seen value.
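     Example: if the application submitted SETTINGS carrying
     HEADER_TABLE_SIZE = 0 followed by HEADER_TABLE_SIZE = 4096, the
     inflater below is first shrunk to the minimum 0 and then resized
     to the final 4096, mirroring the sequence of dynamic table size
     updates the peer's encoder is allowed to emit.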
*/ for (i = 0; i < niv; ++i) { switch (iv[i].settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: header_table_size_seen = 1; header_table_size = iv[i].value; min_header_table_size = nghttp2_min(min_header_table_size, iv[i].value); break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: new_initial_window_size = (int32_t)iv[i].value; break; } } if (header_table_size_seen) { if (min_header_table_size < header_table_size) { rv = nghttp2_hd_inflate_change_table_size(&session->hd_inflater, min_header_table_size); if (rv != 0) { return rv; } } rv = nghttp2_hd_inflate_change_table_size(&session->hd_inflater, header_table_size); if (rv != 0) { return rv; } } if (new_initial_window_size != -1) { rv = session_update_local_initial_window_size( session, new_initial_window_size, (int32_t)session->local_settings.initial_window_size); if (rv != 0) { return rv; } } for (i = 0; i < niv; ++i) { switch (iv[i].settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: session->local_settings.header_table_size = iv[i].value; break; case NGHTTP2_SETTINGS_ENABLE_PUSH: session->local_settings.enable_push = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: session->local_settings.max_concurrent_streams = iv[i].value; break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: session->local_settings.initial_window_size = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: session->local_settings.max_frame_size = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: session->local_settings.max_header_list_size = iv[i].value; break; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: session->local_settings.enable_connect_protocol = iv[i].value; break; } } return 0; } int nghttp2_session_on_settings_received(nghttp2_session *session, nghttp2_frame *frame, int noack) { int rv; size_t i; nghttp2_mem *mem; nghttp2_inflight_settings *settings; mem = &session->mem; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: stream_id != 0"); } if (frame->hd.flags & NGHTTP2_FLAG_ACK) { if (frame->settings.niv != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FRAME_SIZE_ERROR, "SETTINGS: ACK and payload != 0"); } settings = session->inflight_settings_head; if (!settings) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: unexpected ACK"); } rv = nghttp2_session_update_local_settings(session, settings->iv, settings->niv); session->inflight_settings_head = settings->next; inflight_settings_del(settings, mem); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } return session_handle_invalid_connection(session, frame, rv, NULL); } return session_call_on_frame_received(session, frame); } if (!session->remote_settings_received) { session->remote_settings.max_concurrent_streams = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; session->remote_settings_received = 1; } for (i = 0; i < frame->settings.niv; ++i) { nghttp2_settings_entry *entry = &frame->settings.iv[i]; switch (entry->settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: rv = nghttp2_hd_deflate_change_table_size(&session->hd_deflater, entry->value); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } else { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_HEADER_COMP, NULL); } } session->remote_settings.header_table_size = entry->value; break; case NGHTTP2_SETTINGS_ENABLE_PUSH: if (entry->value != 0 && entry->value != 1) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: 
invalid SETTINGS_ENABLE_PUSH");
      }

      if (!session->server && entry->value != 0) {
        return session_handle_invalid_connection(
            session, frame, NGHTTP2_ERR_PROTO,
            "SETTINGS: server attempted to enable push");
      }

      session->remote_settings.enable_push = entry->value;
      break;
    case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS:
      session->remote_settings.max_concurrent_streams = entry->value;
      break;
    case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE:
      /* Update the initial window size of all the active streams */
      /* Check that initial_window_size < (1u << 31) */
      if (entry->value > NGHTTP2_MAX_WINDOW_SIZE) {
        return session_handle_invalid_connection(
            session, frame, NGHTTP2_ERR_FLOW_CONTROL,
            "SETTINGS: too large SETTINGS_INITIAL_WINDOW_SIZE");
      }

      rv = session_update_remote_initial_window_size(session,
                                                     (int32_t)entry->value);

      if (nghttp2_is_fatal(rv)) {
        return rv;
      }

      if (rv != 0) {
        return session_handle_invalid_connection(
            session, frame, NGHTTP2_ERR_FLOW_CONTROL, NULL);
      }

      session->remote_settings.initial_window_size = entry->value;
      break;
    case NGHTTP2_SETTINGS_MAX_FRAME_SIZE:
      if (entry->value < NGHTTP2_MAX_FRAME_SIZE_MIN ||
          entry->value > NGHTTP2_MAX_FRAME_SIZE_MAX) {
        return session_handle_invalid_connection(
            session, frame, NGHTTP2_ERR_PROTO,
            "SETTINGS: invalid SETTINGS_MAX_FRAME_SIZE");
      }

      session->remote_settings.max_frame_size = entry->value;
      break;
    case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE:
      session->remote_settings.max_header_list_size = entry->value;
      break;
    case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
      if (entry->value != 0 && entry->value != 1) {
        return session_handle_invalid_connection(
            session, frame, NGHTTP2_ERR_PROTO,
            "SETTINGS: invalid SETTINGS_ENABLE_CONNECT_PROTOCOL");
      }

      if (!session->server &&
          session->remote_settings.enable_connect_protocol &&
          entry->value == 0) {
        return session_handle_invalid_connection(
            session, frame, NGHTTP2_ERR_PROTO,
            "SETTINGS: server attempted to disable "
            "SETTINGS_ENABLE_CONNECT_PROTOCOL");
      }

      session->remote_settings.enable_connect_protocol = entry->value;
      break;
    }
  }

  if (!noack && !session_is_closing(session)) {
    rv = nghttp2_session_add_settings(session, NGHTTP2_FLAG_ACK, NULL, 0);

    if (rv != 0) {
      if (nghttp2_is_fatal(rv)) {
        return rv;
      }

      return session_handle_invalid_connection(session, frame,
                                               NGHTTP2_ERR_INTERNAL, NULL);
    }
  }

  return session_call_on_frame_received(session, frame);
}

static int session_process_settings_frame(nghttp2_session *session) {
  nghttp2_inbound_frame *iframe = &session->iframe;
  nghttp2_frame *frame = &iframe->frame;
  size_t i;
  nghttp2_settings_entry min_header_size_entry;

  if (iframe->max_niv) {
    min_header_size_entry = iframe->iv[iframe->max_niv - 1];

    if (min_header_size_entry.value < UINT32_MAX) {
      /* If we have a smaller value, then we must have seen
         SETTINGS_HEADER_TABLE_SIZE at some i < iframe->niv */
      for (i = 0; i < iframe->niv; ++i) {
        if (iframe->iv[i].settings_id == NGHTTP2_SETTINGS_HEADER_TABLE_SIZE) {
          break;
        }
      }

      assert(i < iframe->niv);

      if (min_header_size_entry.value != iframe->iv[i].value) {
        iframe->iv[iframe->niv++] = iframe->iv[i];
        iframe->iv[i] = min_header_size_entry;
      }
    }
  }

  nghttp2_frame_unpack_settings_payload(&frame->settings, iframe->iv,
                                        iframe->niv);

  iframe->iv = NULL;
  iframe->niv = 0;
  iframe->max_niv = 0;

  return nghttp2_session_on_settings_received(session, frame, 0 /* ACK */);
}

int nghttp2_session_on_push_promise_received(nghttp2_session *session,
                                             nghttp2_frame *frame) {
  int rv;
  nghttp2_stream *stream;
  nghttp2_stream *promised_stream;
  nghttp2_priority_spec pri_spec;

  if (frame->hd.stream_id == 0) {
    return session_inflate_handle_invalid_connection(
        session,
frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: stream_id == 0"); } if (session->server || session->local_settings.enable_push == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: push disabled"); } if (!nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: invalid stream_id"); } if (!session_allow_incoming_new_stream(session)) { /* We just discard PUSH_PROMISE after GOAWAY was sent */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (!session_is_new_peer_stream_id(session, frame->push_promise.promised_stream_id)) { /* The spec says if an endpoint receives a PUSH_PROMISE with illegal stream ID is subject to a connection error of type PROTOCOL_ERROR. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: invalid promised_stream_id"); } if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: stream in idle"); } session->last_recv_stream_id = frame->push_promise.promised_stream_id; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING || !session->pending_enable_push || session->num_incoming_reserved_streams >= session->max_incoming_reserved_streams) { /* Currently, client does not retain closed stream, so we don't check NGHTTP2_SHUT_RD condition here. */ rv = nghttp2_session_add_rst_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_CANCEL); if (rv != 0) { return rv; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (stream->shut_flags & NGHTTP2_SHUT_RD) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "PUSH_PROMISE: stream closed"); } nghttp2_priority_spec_init(&pri_spec, stream->stream_id, NGHTTP2_DEFAULT_WEIGHT, 0); promised_stream = nghttp2_session_open_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_RESERVED, NULL); if (!promised_stream) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream(), since we don't keep closed stream in client side */ session->last_proc_stream_id = session->last_recv_stream_id; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } static int session_process_push_promise_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; rv = nghttp2_frame_unpack_push_promise_payload(&frame->push_promise, iframe->sbuf.pos); if (rv != 0) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: could not unpack"); } return nghttp2_session_on_push_promise_received(session, frame); } int nghttp2_session_on_ping_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "PING: stream_id != 0"); } if ((session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_PING_ACK) == 0 && (frame->hd.flags & NGHTTP2_FLAG_ACK) == 0 && !session_is_closing(session)) { /* Peer sent ping, so ping it back */ rv = nghttp2_session_add_ping(session, NGHTTP2_FLAG_ACK, frame->ping.opaque_data); if (rv != 0) { return rv; } } return session_call_on_frame_received(session, frame); } static int 
session_process_ping_frame(nghttp2_session *session) {
  nghttp2_inbound_frame *iframe = &session->iframe;
  nghttp2_frame *frame = &iframe->frame;
  nghttp2_frame_unpack_ping_payload(&frame->ping, iframe->sbuf.pos);
  return nghttp2_session_on_ping_received(session, frame);
}

int nghttp2_session_on_goaway_received(nghttp2_session *session,
                                       nghttp2_frame *frame) {
  int rv;

  if (frame->hd.stream_id != 0) {
    return session_handle_invalid_connection(session, frame,
                                             NGHTTP2_ERR_PROTO,
                                             "GOAWAY: stream_id != 0");
  }

  /* The spec says endpoints MUST NOT increase the value they send in
     the last stream identifier. */
  if ((frame->goaway.last_stream_id > 0 &&
       !nghttp2_session_is_my_stream_id(session,
                                        frame->goaway.last_stream_id)) ||
      session->remote_last_stream_id < frame->goaway.last_stream_id) {
    return session_handle_invalid_connection(session, frame,
                                             NGHTTP2_ERR_PROTO,
                                             "GOAWAY: invalid last_stream_id");
  }

  session->goaway_flags |= NGHTTP2_GOAWAY_RECV;
  session->remote_last_stream_id = frame->goaway.last_stream_id;

  rv = session_call_on_frame_received(session, frame);

  if (nghttp2_is_fatal(rv)) {
    return rv;
  }

  return session_close_stream_on_goaway(session, frame->goaway.last_stream_id,
                                        0);
}

static int session_process_goaway_frame(nghttp2_session *session) {
  nghttp2_inbound_frame *iframe = &session->iframe;
  nghttp2_frame *frame = &iframe->frame;

  nghttp2_frame_unpack_goaway_payload(&frame->goaway, iframe->sbuf.pos,
                                      iframe->lbuf.pos,
                                      nghttp2_buf_len(&iframe->lbuf));

  nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0);

  return nghttp2_session_on_goaway_received(session, frame);
}

static int
session_on_connection_window_update_received(nghttp2_session *session,
                                             nghttp2_frame *frame) {
  /* Handle connection-level flow control */
  if (frame->window_update.window_size_increment == 0) {
    return session_handle_invalid_connection(
        session, frame, NGHTTP2_ERR_PROTO,
        "WINDOW_UPDATE: window_size_increment == 0");
  }

  if (NGHTTP2_MAX_WINDOW_SIZE - frame->window_update.window_size_increment <
      session->remote_window_size) {
    return session_handle_invalid_connection(session, frame,
                                             NGHTTP2_ERR_FLOW_CONTROL, NULL);
  }

  session->remote_window_size += frame->window_update.window_size_increment;

  return session_call_on_frame_received(session, frame);
}

static int session_on_stream_window_update_received(nghttp2_session *session,
                                                    nghttp2_frame *frame) {
  int rv;
  nghttp2_stream *stream;

  if (session_detect_idle_stream(session, frame->hd.stream_id)) {
    return session_handle_invalid_connection(session, frame,
                                             NGHTTP2_ERR_PROTO,
                                             "WINDOW_UPDATE to idle stream");
  }

  stream = nghttp2_session_get_stream(session, frame->hd.stream_id);
  if (!stream) {
    return 0;
  }

  if (state_reserved_remote(session, stream)) {
    return session_handle_invalid_connection(
        session, frame, NGHTTP2_ERR_PROTO,
        "WINDOW_UPDATE to reserved stream");
  }

  if (frame->window_update.window_size_increment == 0) {
    return session_handle_invalid_connection(
        session, frame, NGHTTP2_ERR_PROTO,
        "WINDOW_UPDATE: window_size_increment == 0");
  }

  if (NGHTTP2_MAX_WINDOW_SIZE - frame->window_update.window_size_increment <
      stream->remote_window_size) {
    return session_handle_invalid_stream(session, frame,
                                         NGHTTP2_ERR_FLOW_CONTROL);
  }

  stream->remote_window_size += frame->window_update.window_size_increment;

  if (stream->remote_window_size > 0 &&
      nghttp2_stream_check_deferred_by_flow_control(stream)) {
    rv = nghttp2_stream_resume_deferred_item(
        stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL);

    if (nghttp2_is_fatal(rv)) {
      return rv;
    }
  }

  return session_call_on_frame_received(session, frame);
}

int
nghttp2_session_on_window_update_received(nghttp2_session *session, nghttp2_frame *frame) { if (frame->hd.stream_id == 0) { return session_on_connection_window_update_received(session, frame); } else { return session_on_stream_window_update_received(session, frame); } } static int session_process_window_update_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_window_update_payload(&frame->window_update, iframe->sbuf.pos); return nghttp2_session_on_window_update_received(session, frame); } int nghttp2_session_on_altsvc_received(nghttp2_session *session, nghttp2_frame *frame) { nghttp2_ext_altsvc *altsvc; nghttp2_stream *stream; altsvc = frame->ext.payload; /* session->server case has been excluded */ if (frame->hd.stream_id == 0) { if (altsvc->origin_len == 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } } else { if (altsvc->origin_len > 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } } if (altsvc->field_value_len == 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } return session_call_on_frame_received(session, frame); } int nghttp2_session_on_origin_received(nghttp2_session *session, nghttp2_frame *frame) { return session_call_on_frame_received(session, frame); } static int session_process_altsvc_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_altsvc_payload( &frame->ext, nghttp2_get_uint16(iframe->sbuf.pos), iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf)); /* nghttp2_frame_unpack_altsvc_payload steals buffer from iframe->lbuf */ nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); return nghttp2_session_on_altsvc_received(session, frame); } static int session_process_origin_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_mem *mem = &session->mem; int rv; rv = nghttp2_frame_unpack_origin_payload(&frame->ext, iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf), mem); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } /* Ignore ORIGIN frame which cannot be parsed. */ return 0; } return nghttp2_session_on_origin_received(session, frame); } static int session_process_extension_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; rv = session_call_unpack_extension_callback(session); if (nghttp2_is_fatal(rv)) { return rv; } /* This handles the case where rv == NGHTTP2_ERR_CANCEL as well */ if (rv != 0) { return 0; } return session_call_on_frame_received(session, frame); } int nghttp2_session_on_data_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; nghttp2_stream *stream; /* We don't call on_frame_recv_callback if stream has been closed already or being closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING) { /* This should be treated as stream error, but it results in lots of RST_STREAM. So just ignore frame against nonexistent stream for now. 
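     The typical case: the peer still had several DATA frames in
     flight when we sent RST_STREAM, and answering every one of them
     with another RST_STREAM would only add noise to the connection.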
*/ return 0; } if (session_enforce_http_messaging(session) && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { if (nghttp2_http_on_remote_end_stream(stream) != 0) { rv = nghttp2_session_add_rst_stream(session, stream->stream_id, NGHTTP2_PROTOCOL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); /* Don't call nghttp2_session_close_stream_if_shut_rdwr because RST_STREAM has been submitted. */ return 0; } } rv = session_call_on_frame_received(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* For errors, this function only returns FATAL error. */ static int session_process_data_frame(nghttp2_session *session) { int rv; nghttp2_frame *public_data_frame = &session->iframe.frame; rv = nghttp2_session_on_data_received(session, public_data_frame); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } /* * Now we have SETTINGS synchronization, flow control error can be * detected strictly. If DATA frame is received with length > 0 and * current received window size + delta length is strictly larger than * local window size, it is subject to FLOW_CONTROL_ERROR, so return * -1. Note that local_window_size is calculated after SETTINGS ACK is * received from peer, so peer must honor this limit. If the resulting * recv_window_size is strictly larger than NGHTTP2_MAX_WINDOW_SIZE, * return -1 too. */ static int adjust_recv_window_size(int32_t *recv_window_size_ptr, size_t delta, int32_t local_window_size) { if (*recv_window_size_ptr > local_window_size - (int32_t)delta || *recv_window_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - (int32_t)delta) { return -1; } *recv_window_size_ptr += (int32_t)delta; return 0; } int nghttp2_session_update_recv_stream_window_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size, int send_window_update) { int rv; rv = adjust_recv_window_size(&stream->recv_window_size, delta_size, stream->local_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } /* We don't have to send WINDOW_UPDATE if the data received is the last chunk in the incoming stream. */ /* We have to use local_settings here because it is the constraint the remote endpoint should honor. 
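     As a rough worked example: with stream->local_window_size at the
     default 65535, nghttp2_should_send_window_update() stays quiet
     while unacknowledged data accumulates, and once
     stream->recv_window_size crosses roughly half the window (about
     32768 bytes) a WINDOW_UPDATE advertising the accumulated amount
     is queued below and recv_window_size is reset to 0.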
*/ if (send_window_update && !(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && stream->window_update_queued == 0 && nghttp2_should_send_window_update(stream->local_window_size, stream->recv_window_size)) { rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, stream->stream_id, stream->recv_window_size); if (rv != 0) { return rv; } stream->recv_window_size = 0; } return 0; } int nghttp2_session_update_recv_connection_window_size(nghttp2_session *session, size_t delta_size) { int rv; rv = adjust_recv_window_size(&session->recv_window_size, delta_size, session->local_window_size); if (rv != 0) { return nghttp2_session_terminate_session(session, NGHTTP2_FLOW_CONTROL_ERROR); } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && session->window_update_queued == 0 && nghttp2_should_send_window_update(session->local_window_size, session->recv_window_size)) { /* Use stream ID 0 to update connection-level flow control window */ rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, 0, session->recv_window_size); if (rv != 0) { return rv; } session->recv_window_size = 0; } return 0; } static int session_update_consumed_size(nghttp2_session *session, int32_t *consumed_size_ptr, int32_t *recv_window_size_ptr, uint8_t window_update_queued, int32_t stream_id, size_t delta_size, int32_t local_window_size) { int32_t recv_size; int rv; if ((size_t)*consumed_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - delta_size) { return nghttp2_session_terminate_session(session, NGHTTP2_FLOW_CONTROL_ERROR); } *consumed_size_ptr += (int32_t)delta_size; if (window_update_queued == 0) { /* recv_window_size may be smaller than consumed_size, because it may be decreased by negative value with nghttp2_submit_window_update(). */ recv_size = nghttp2_min(*consumed_size_ptr, *recv_window_size_ptr); if (nghttp2_should_send_window_update(local_window_size, recv_size)) { rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, stream_id, recv_size); if (rv != 0) { return rv; } *recv_window_size_ptr -= recv_size; *consumed_size_ptr -= recv_size; } } return 0; } static int session_update_stream_consumed_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size) { return session_update_consumed_size( session, &stream->consumed_size, &stream->recv_window_size, stream->window_update_queued, stream->stream_id, delta_size, stream->local_window_size); } static int session_update_connection_consumed_size(nghttp2_session *session, size_t delta_size) { return session_update_consumed_size( session, &session->consumed_size, &session->recv_window_size, session->window_update_queued, 0, delta_size, session->local_window_size); } /* * Checks that we can receive the DATA frame for stream, which is * indicated by |session->iframe.frame.hd.stream_id|. If it is a * connection error situation, GOAWAY frame will be issued by this * function. * * If the DATA frame is allowed, returns 0. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_IGN_PAYLOAD * The reception of DATA frame is connection error; or should be * ignored. * NGHTTP2_ERR_NOMEM * Out of memory. 
*/ static int session_on_data_received_fail_fast(nghttp2_session *session) { int rv; nghttp2_stream *stream; nghttp2_inbound_frame *iframe; int32_t stream_id; const char *failure_reason; uint32_t error_code = NGHTTP2_PROTOCOL_ERROR; iframe = &session->iframe; stream_id = iframe->frame.hd.stream_id; if (stream_id == 0) { /* The spec says that if a DATA frame is received whose stream ID is 0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. */ failure_reason = "DATA: stream_id == 0"; goto fail; } if (session_detect_idle_stream(session, stream_id)) { failure_reason = "DATA: stream in idle"; error_code = NGHTTP2_PROTOCOL_ERROR; goto fail; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream && (stream->shut_flags & NGHTTP2_SHUT_RD)) { failure_reason = "DATA: stream closed"; error_code = NGHTTP2_STREAM_CLOSED; goto fail; } return NGHTTP2_ERR_IGN_PAYLOAD; } if (stream->shut_flags & NGHTTP2_SHUT_RD) { failure_reason = "DATA: stream in half-closed(remote)"; error_code = NGHTTP2_STREAM_CLOSED; goto fail; } if (nghttp2_session_is_my_stream_id(session, stream_id)) { if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_IGN_PAYLOAD; } if (stream->state != NGHTTP2_STREAM_OPENED) { failure_reason = "DATA: stream not opened"; goto fail; } return 0; } if (stream->state == NGHTTP2_STREAM_RESERVED) { failure_reason = "DATA: stream in reserved"; goto fail; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_IGN_PAYLOAD; } return 0; fail: rv = nghttp2_session_terminate_session_with_reason(session, error_code, failure_reason); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_IGN_PAYLOAD; } static size_t inbound_frame_payload_readlen(nghttp2_inbound_frame *iframe, const uint8_t *in, const uint8_t *last) { return nghttp2_min((size_t)(last - in), iframe->payloadleft); } /* * Resets iframe->sbuf and advance its mark pointer by |left| bytes. */ static void inbound_frame_set_mark(nghttp2_inbound_frame *iframe, size_t left) { nghttp2_buf_reset(&iframe->sbuf); iframe->sbuf.mark += left; } static size_t inbound_frame_buf_read(nghttp2_inbound_frame *iframe, const uint8_t *in, const uint8_t *last) { size_t readlen; readlen = nghttp2_min((size_t)(last - in), nghttp2_buf_mark_avail(&iframe->sbuf)); iframe->sbuf.last = nghttp2_cpymem(iframe->sbuf.last, in, readlen); return readlen; } /* * Unpacks SETTINGS entry in iframe->sbuf. 
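 *
 * Each entry is 6 bytes on the wire: a 16-bit settings identifier
 * followed by a 32-bit value, both in network byte order (RFC 7540,
 * section 6.5.1).  For example, the bytes 00 03 00 00 00 64 decode
 * to SETTINGS_MAX_CONCURRENT_STREAMS = 100.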
 */
static void inbound_frame_set_settings_entry(nghttp2_inbound_frame *iframe) {
  nghttp2_settings_entry iv;
  nghttp2_settings_entry *min_header_table_size_entry;
  size_t i;

  nghttp2_frame_unpack_settings_entry(&iv, iframe->sbuf.pos);

  switch (iv.settings_id) {
  case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE:
  case NGHTTP2_SETTINGS_ENABLE_PUSH:
  case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS:
  case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE:
  case NGHTTP2_SETTINGS_MAX_FRAME_SIZE:
  case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE:
  case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
    break;
  default:
    DEBUGF("recv: unknown settings id=0x%02x\n", iv.settings_id);

    iframe->iv[iframe->niv++] = iv;

    return;
  }

  for (i = 0; i < iframe->niv; ++i) {
    if (iframe->iv[i].settings_id == iv.settings_id) {
      iframe->iv[i] = iv;
      break;
    }
  }

  if (i == iframe->niv) {
    iframe->iv[iframe->niv++] = iv;
  }

  if (iv.settings_id == NGHTTP2_SETTINGS_HEADER_TABLE_SIZE) {
    /* Keep track of the minimum value of SETTINGS_HEADER_TABLE_SIZE */
    min_header_table_size_entry = &iframe->iv[iframe->max_niv - 1];

    if (iv.value < min_header_table_size_entry->value) {
      min_header_table_size_entry->value = iv.value;
    }
  }
}

/*
 * Checks the PADDED flag and sets iframe->sbuf to read the padding
 * accordingly.  If padding is set, this function returns 1.  If no
 * padding is set, this function returns 0.  On error, returns -1.
 */
static int inbound_frame_handle_pad(nghttp2_inbound_frame *iframe,
                                    nghttp2_frame_hd *hd) {
  if (hd->flags & NGHTTP2_FLAG_PADDED) {
    if (hd->length < 1) {
      return -1;
    }
    inbound_frame_set_mark(iframe, 1);
    return 1;
  }
  DEBUGF("recv: no padding in payload\n");
  return 0;
}

/*
 * Computes the number of padding bytes based on the flags.  This
 * function returns the calculated length if it succeeds, or -1.
 */
static ssize_t inbound_frame_compute_pad(nghttp2_inbound_frame *iframe) {
  size_t padlen;

  /* 1 for Pad Length field */
  padlen = (size_t)(iframe->sbuf.pos[0] + 1);

  DEBUGF("recv: padlen=%zu\n", padlen);

  /* We cannot use iframe->frame.hd.length because of CONTINUATION */
  if (padlen - 1 > iframe->payloadleft) {
    return -1;
  }

  iframe->padlen = padlen;

  return (ssize_t)padlen;
}

/*
 * This function returns the effective payload length in the data of
 * length |readlen| when the remaining payload is |payloadleft|.  The
 * |payloadleft| does not include |readlen|.  If padding was started
 * strictly before this data chunk, this function returns -1.
 */
static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe,
                                               size_t payloadleft,
                                               size_t readlen) {
  size_t trail_padlen =
      nghttp2_frame_trail_padlen(&iframe->frame, iframe->padlen);

  if (trail_padlen > payloadleft) {
    size_t padlen;
    padlen = trail_padlen - payloadleft;
    if (readlen < padlen) {
      return -1;
    }
    return (ssize_t)(readlen - padlen);
  }
  return (ssize_t)(readlen);
}

ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
                                 size_t inlen) {
  const uint8_t *first = in, *last = in + inlen;
  nghttp2_inbound_frame *iframe = &session->iframe;
  size_t readlen;
  ssize_t padlen;
  int rv;
  int busy = 0;
  nghttp2_frame_hd cont_hd;
  nghttp2_stream *stream;
  size_t pri_fieldlen;
  nghttp2_mem *mem;

  DEBUGF("recv: connection recv_window_size=%d, local_window=%d\n",
         session->recv_window_size, session->local_window_size);

  mem = &session->mem;

  /* We may have more idle streams than we expect (e.g.,
     nghttp2_session_change_stream_priority() or
     nghttp2_session_create_idle_stream()).  Adjust them here.
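     As an aside, a minimal sketch of the read loop that typically
     feeds this function (read_from_network() is a hypothetical I/O
     helper, not part of nghttp2):

       uint8_t buf[8192];
       ssize_t n = read_from_network(fd, buf, sizeof(buf));
       if (n > 0) {
         ssize_t rv = nghttp2_session_mem_recv(session, buf, (size_t)n);
         if (rv < 0) {
           // fatal library error; terminate the session
         }
       }

     On success the return value is the number of bytes processed,
     which is normally the whole input.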
*/ rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } if (!nghttp2_session_want_read(session)) { return (ssize_t)inlen; } for (;;) { switch (iframe->state) { case NGHTTP2_IB_READ_CLIENT_MAGIC: readlen = nghttp2_min(inlen, iframe->payloadleft); if (memcmp(&NGHTTP2_CLIENT_MAGIC[NGHTTP2_CLIENT_MAGIC_LEN - iframe->payloadleft], in, readlen) != 0) { return NGHTTP2_ERR_BAD_CLIENT_MAGIC; } iframe->payloadleft -= readlen; in += readlen; if (iframe->payloadleft == 0) { session_inbound_frame_reset(session); iframe->state = NGHTTP2_IB_READ_FIRST_SETTINGS; } break; case NGHTTP2_IB_READ_FIRST_SETTINGS: DEBUGF("recv: [IB_READ_FIRST_SETTINGS]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } if (iframe->sbuf.pos[3] != NGHTTP2_SETTINGS || (iframe->sbuf.pos[4] & NGHTTP2_FLAG_ACK)) { rv = session_call_error_callback( session, NGHTTP2_ERR_SETTINGS_EXPECTED, "Remote peer returned unexpected data while we expected " "SETTINGS frame. Perhaps, peer does not support HTTP/2 " "properly."); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "SETTINGS expected"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->state = NGHTTP2_IB_READ_HEAD; /* Fall through */ case NGHTTP2_IB_READ_HEAD: { int on_begin_frame_called = 0; DEBUGF("recv: [IB_READ_HEAD]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } nghttp2_frame_unpack_frame_hd(&iframe->frame.hd, iframe->sbuf.pos); iframe->payloadleft = iframe->frame.hd.length; DEBUGF("recv: payloadlen=%zu, type=%u, flags=0x%02x, stream_id=%d\n", iframe->frame.hd.length, iframe->frame.hd.type, iframe->frame.hd.flags, iframe->frame.hd.stream_id); if (iframe->frame.hd.length > session->local_settings.max_frame_size) { DEBUGF("recv: length is too large %zu > %u\n", iframe->frame.hd.length, session->local_settings.max_frame_size); rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_FRAME_SIZE_ERROR, "too large frame size"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } switch (iframe->frame.hd.type) { case NGHTTP2_DATA: { DEBUGF("recv: DATA\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_STREAM | NGHTTP2_FLAG_PADDED); /* Check stream is open. If it is not open or closing, ignore payload. 
*/ busy = 1; rv = session_on_data_received_fail_fast(session); if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_IGN_PAYLOAD) { DEBUGF("recv: DATA not allowed stream_id=%d\n", iframe->frame.hd.stream_id); iframe->state = NGHTTP2_IB_IGN_DATA; break; } if (nghttp2_is_fatal(rv)) { return rv; } rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "DATA: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_PAD_DATA; break; } iframe->state = NGHTTP2_IB_READ_DATA; break; } case NGHTTP2_HEADERS: DEBUGF("recv: HEADERS\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_STREAM | NGHTTP2_FLAG_END_HEADERS | NGHTTP2_FLAG_PADDED | NGHTTP2_FLAG_PRIORITY); rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_NBYTE; break; } pri_fieldlen = nghttp2_frame_priority_len(iframe->frame.hd.flags); if (pri_fieldlen > 0) { if (iframe->payloadleft < pri_fieldlen) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, pri_fieldlen); break; } /* Call on_begin_frame_callback here because session_process_headers_frame() may call on_begin_headers_callback */ rv = session_call_on_begin_frame(session, &iframe->frame.hd); if (nghttp2_is_fatal(rv)) { return rv; } on_begin_frame_called = 1; rv = session_process_headers_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PRIORITY: DEBUGF("recv: PRIORITY\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft != NGHTTP2_PRIORITY_SPECLEN) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, NGHTTP2_PRIORITY_SPECLEN); break; case NGHTTP2_RST_STREAM: case NGHTTP2_WINDOW_UPDATE: #ifdef DEBUGBUILD switch (iframe->frame.hd.type) { case NGHTTP2_RST_STREAM: DEBUGF("recv: RST_STREAM\n"); break; case NGHTTP2_WINDOW_UPDATE: DEBUGF("recv: WINDOW_UPDATE\n"); break; } #endif /* DEBUGBUILD */ iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft != 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; case NGHTTP2_SETTINGS: DEBUGF("recv: SETTINGS\n"); iframe->frame.hd.flags &= NGHTTP2_FLAG_ACK; if ((iframe->frame.hd.length % NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH) || ((iframe->frame.hd.flags & NGHTTP2_FLAG_ACK) && iframe->payloadleft > 0)) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_SETTINGS; if (iframe->payloadleft) { nghttp2_settings_entry *min_header_table_size_entry; /* We allocate iv with 
one additional entry, to store the minimum header table size. */ iframe->max_niv = iframe->frame.hd.length / NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH + 1; iframe->iv = nghttp2_mem_malloc(mem, sizeof(nghttp2_settings_entry) * iframe->max_niv); if (!iframe->iv) { return NGHTTP2_ERR_NOMEM; } min_header_table_size_entry = &iframe->iv[iframe->max_niv - 1]; min_header_table_size_entry->settings_id = NGHTTP2_SETTINGS_HEADER_TABLE_SIZE; min_header_table_size_entry->value = UINT32_MAX; inbound_frame_set_mark(iframe, NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH); break; } busy = 1; inbound_frame_set_mark(iframe, 0); break; case NGHTTP2_PUSH_PROMISE: DEBUGF("recv: PUSH_PROMISE\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_HEADERS | NGHTTP2_FLAG_PADDED); rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_NBYTE; break; } if (iframe->payloadleft < 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; case NGHTTP2_PING: DEBUGF("recv: PING\n"); iframe->frame.hd.flags &= NGHTTP2_FLAG_ACK; if (iframe->payloadleft != 8) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 8); break; case NGHTTP2_GOAWAY: DEBUGF("recv: GOAWAY\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft < 8) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 8); break; case NGHTTP2_CONTINUATION: DEBUGF("recv: unexpected CONTINUATION\n"); /* Receiving CONTINUATION in this state is subject to a connection error of type PROTOCOL_ERROR */ rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "CONTINUATION: unexpected"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; default: DEBUGF("recv: extension frame\n"); if (check_ext_type_set(session->user_recv_ext_types, iframe->frame.hd.type)) { if (!session->callbacks.unpack_extension_callback) { /* Silently ignore unknown frame type.
*/ busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } busy = 1; iframe->state = NGHTTP2_IB_READ_EXTENSION_PAYLOAD; break; } else { switch (iframe->frame.hd.type) { case NGHTTP2_ALTSVC: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ALTSVC) == 0) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } DEBUGF("recv: ALTSVC\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; iframe->frame.ext.payload = &iframe->ext_frame_payload.altsvc; if (session->server) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } if (iframe->payloadleft < 2) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } busy = 1; iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 2); break; case NGHTTP2_ORIGIN: if (!(session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ORIGIN)) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } DEBUGF("recv: ORIGIN\n"); iframe->frame.ext.payload = &iframe->ext_frame_payload.origin; if (session->server || iframe->frame.hd.stream_id || (iframe->frame.hd.flags & 0xf0)) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, iframe->payloadleft); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, iframe->payloadleft); } else { busy = 1; } iframe->state = NGHTTP2_IB_READ_ORIGIN_PAYLOAD; break; default: busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } } } if (!on_begin_frame_called) { switch (iframe->state) { case NGHTTP2_IB_IGN_HEADER_BLOCK: case NGHTTP2_IB_IGN_PAYLOAD: case NGHTTP2_IB_FRAME_SIZE_ERROR: case NGHTTP2_IB_IGN_DATA: case NGHTTP2_IB_IGN_ALL: break; default: rv = session_call_on_begin_frame(session, &iframe->frame.hd); if (nghttp2_is_fatal(rv)) { return rv; } } } break; } case NGHTTP2_IB_READ_NBYTE: DEBUGF("recv: [IB_READ_NBYTE]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; iframe->payloadleft -= readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zd\n", readlen, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } switch (iframe->frame.hd.type) { case NGHTTP2_HEADERS: if (iframe->padlen == 0 && (iframe->frame.hd.flags & NGHTTP2_FLAG_PADDED)) { pri_fieldlen = nghttp2_frame_priority_len(iframe->frame.hd.flags); padlen = inbound_frame_compute_pad(iframe); if (padlen < 0 || (size_t)padlen + pri_fieldlen > 1 + iframe->payloadleft) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.headers.padlen = (size_t)padlen; if (pri_fieldlen > 0) { if (iframe->payloadleft < pri_fieldlen) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, pri_fieldlen); break; } else { /* Truncate buffers used for padding spec */ inbound_frame_set_mark(iframe, 0); } } rv = session_process_headers_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = 
NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PRIORITY: rv = session_process_priority_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_RST_STREAM: rv = session_process_rst_stream_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_PUSH_PROMISE: if (iframe->padlen == 0 && (iframe->frame.hd.flags & NGHTTP2_FLAG_PADDED)) { padlen = inbound_frame_compute_pad(iframe); if (padlen < 0 || (size_t)padlen + 4 /* promised stream id */ > 1 + iframe->payloadleft) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.push_promise.padlen = (size_t)padlen; if (iframe->payloadleft < 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; } rv = session_process_push_promise_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.push_promise.promised_stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PING: rv = session_process_ping_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_GOAWAY: { size_t debuglen; /* 8 is Last-stream-ID + Error Code */ debuglen = iframe->frame.hd.length - 8; if (debuglen > 0) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, debuglen); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, debuglen); } busy = 1; iframe->state = NGHTTP2_IB_READ_GOAWAY_DEBUG; break; } case NGHTTP2_WINDOW_UPDATE: rv = session_process_window_update_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_ALTSVC: { size_t origin_len; origin_len = nghttp2_get_uint16(iframe->sbuf.pos); DEBUGF("recv: origin_len=%zu\n", origin_len); if (origin_len > iframe->payloadleft) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } if (iframe->frame.hd.length > 2) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, iframe->frame.hd.length - 2); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, iframe->frame.hd.length); } busy = 1; iframe->state = NGHTTP2_IB_READ_ALTSVC_PAYLOAD; break; } default: /* This is unknown frame */ session_inbound_frame_reset(session); break; } break; case NGHTTP2_IB_READ_HEADER_BLOCK: case NGHTTP2_IB_IGN_HEADER_BLOCK: { ssize_t data_readlen; size_t trail_padlen; int final; #ifdef DEBUGBUILD if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { DEBUGF("recv: [IB_READ_HEADER_BLOCK]\n"); } else { DEBUGF("recv: [IB_IGN_HEADER_BLOCK]\n"); } 
#endif /* DEBUGBUILD */ readlen = inbound_frame_payload_readlen(iframe, in, last); DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft - readlen); data_readlen = inbound_frame_effective_readlen( iframe, iframe->payloadleft - readlen, readlen); if (data_readlen == -1) { /* everything is padding */ data_readlen = 0; } trail_padlen = nghttp2_frame_trail_padlen(&iframe->frame, iframe->padlen); final = (iframe->frame.hd.flags & NGHTTP2_FLAG_END_HEADERS) && iframe->payloadleft - (size_t)data_readlen == trail_padlen; if (data_readlen > 0 || (data_readlen == 0 && final)) { size_t hd_proclen = 0; DEBUGF("recv: block final=%d\n", final); rv = inflate_header_block(session, &iframe->frame, &hd_proclen, (uint8_t *)in, (size_t)data_readlen, final, iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_PAUSE) { in += hd_proclen; iframe->payloadleft -= hd_proclen; return in - first; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { /* The application says no more headers. We decompress the rest of the header block but not invoke on_header_callback and on_frame_recv_callback. */ in += hd_proclen; iframe->payloadleft -= hd_proclen; /* Use promised stream ID for PUSH_PROMISE */ rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.type == NGHTTP2_PUSH_PROMISE ? iframe->frame.push_promise.promised_stream_id : iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } in += readlen; iframe->payloadleft -= readlen; if (rv == NGHTTP2_ERR_HEADER_COMP) { /* GOAWAY is already issued */ if (iframe->payloadleft == 0) { session_inbound_frame_reset(session); } else { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; } break; } } else { in += readlen; iframe->payloadleft -= readlen; } if (iframe->payloadleft) { break; } if ((iframe->frame.hd.flags & NGHTTP2_FLAG_END_HEADERS) == 0) { inbound_frame_set_mark(iframe, NGHTTP2_FRAME_HDLEN); iframe->padlen = 0; if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_EXPECT_CONTINUATION; } else { iframe->state = NGHTTP2_IB_IGN_CONTINUATION; } } else { if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { rv = session_after_header_block_received(session); if (nghttp2_is_fatal(rv)) { return rv; } } session_inbound_frame_reset(session); } break; } case NGHTTP2_IB_IGN_PAYLOAD: DEBUGF("recv: [IB_IGN_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { break; } switch (iframe->frame.hd.type) { case NGHTTP2_HEADERS: case NGHTTP2_PUSH_PROMISE: case NGHTTP2_CONTINUATION: /* Mark inflater bad so that we won't perform further decoding */ session->hd_inflater.ctx.bad = 1; break; default: break; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_FRAME_SIZE_ERROR: DEBUGF("recv: [IB_FRAME_SIZE_ERROR]\n"); rv = session_handle_frame_size_error(session); if (nghttp2_is_fatal(rv)) { return rv; } assert(iframe->state == NGHTTP2_IB_IGN_ALL); return (ssize_t)inlen; case NGHTTP2_IB_READ_SETTINGS: DEBUGF("recv: [IB_READ_SETTINGS]\n"); readlen = inbound_frame_buf_read(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if 
(nghttp2_buf_mark_avail(&iframe->sbuf)) { break; } if (readlen > 0) { inbound_frame_set_settings_entry(iframe); } if (iframe->payloadleft) { inbound_frame_set_mark(iframe, NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH); break; } rv = session_process_settings_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_GOAWAY_DEBUG: DEBUGF("recv: [IB_READ_GOAWAY_DEBUG]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_goaway_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_EXPECT_CONTINUATION: case NGHTTP2_IB_IGN_CONTINUATION: #ifdef DEBUGBUILD if (iframe->state == NGHTTP2_IB_EXPECT_CONTINUATION) { fprintf(stderr, "recv: [IB_EXPECT_CONTINUATION]\n"); } else { fprintf(stderr, "recv: [IB_IGN_CONTINUATION]\n"); } #endif /* DEBUGBUILD */ readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } nghttp2_frame_unpack_frame_hd(&cont_hd, iframe->sbuf.pos); iframe->payloadleft = cont_hd.length; DEBUGF("recv: payloadlen=%zu, type=%u, flags=0x%02x, stream_id=%d\n", cont_hd.length, cont_hd.type, cont_hd.flags, cont_hd.stream_id); if (cont_hd.type != NGHTTP2_CONTINUATION || cont_hd.stream_id != iframe->frame.hd.stream_id) { DEBUGF("recv: expected stream_id=%d, type=%d, but got stream_id=%d, " "type=%u\n", iframe->frame.hd.stream_id, NGHTTP2_CONTINUATION, cont_hd.stream_id, cont_hd.type); rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "unexpected non-CONTINUATION frame or stream_id is invalid"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } /* CONTINUATION won't bear NGHTTP2_PADDED flag */ iframe->frame.hd.flags = (uint8_t)( iframe->frame.hd.flags | (cont_hd.flags & NGHTTP2_FLAG_END_HEADERS)); iframe->frame.hd.length += cont_hd.length; busy = 1; if (iframe->state == NGHTTP2_IB_EXPECT_CONTINUATION) { iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; rv = session_call_on_begin_frame(session, &cont_hd); if (nghttp2_is_fatal(rv)) { return rv; } } else { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; } break; case NGHTTP2_IB_READ_PAD_DATA: DEBUGF("recv: [IB_READ_PAD_DATA]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; iframe->payloadleft -= readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zu\n", readlen, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } /* Pad Length field is subject to flow control */ rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } /* Pad Length field is consumed immediately */ rv = nghttp2_session_consume(session, iframe->frame.hd.stream_id, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } stream = nghttp2_session_get_stream(session, iframe->frame.hd.stream_id); if (stream) { 
rv = nghttp2_session_update_recv_stream_window_size( session, stream, readlen, iframe->payloadleft || (iframe->frame.hd.flags & NGHTTP2_FLAG_END_STREAM) == 0); if (nghttp2_is_fatal(rv)) { return rv; } } busy = 1; padlen = inbound_frame_compute_pad(iframe); if (padlen < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "DATA: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.data.padlen = (size_t)padlen; iframe->state = NGHTTP2_IB_READ_DATA; break; case NGHTTP2_IB_READ_DATA: stream = nghttp2_session_get_stream(session, iframe->frame.hd.stream_id); if (!stream) { busy = 1; iframe->state = NGHTTP2_IB_IGN_DATA; break; } DEBUGF("recv: [IB_READ_DATA]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { ssize_t data_readlen; rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } rv = nghttp2_session_update_recv_stream_window_size( session, stream, readlen, iframe->payloadleft || (iframe->frame.hd.flags & NGHTTP2_FLAG_END_STREAM) == 0); if (nghttp2_is_fatal(rv)) { return rv; } data_readlen = inbound_frame_effective_readlen( iframe, iframe->payloadleft, readlen); if (data_readlen == -1) { /* everything is padding */ data_readlen = 0; } padlen = (ssize_t)readlen - data_readlen; if (padlen > 0) { /* Padding is considered as "consumed" immediately */ rv = nghttp2_session_consume(session, iframe->frame.hd.stream_id, (size_t)padlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } } DEBUGF("recv: data_readlen=%zd\n", data_readlen); if (data_readlen > 0) { if (session_enforce_http_messaging(session)) { if (nghttp2_http_on_data_chunk(stream, (size_t)data_readlen) != 0) { if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { /* Consume all data for connection immediately here */ rv = session_update_connection_consumed_size( session, (size_t)data_readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_DATA) { return (ssize_t)inlen; } } rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_PROTOCOL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; iframe->state = NGHTTP2_IB_IGN_DATA; break; } } if (session->callbacks.on_data_chunk_recv_callback) { rv = session->callbacks.on_data_chunk_recv_callback( session, iframe->frame.hd.flags, iframe->frame.hd.stream_id, in - readlen, (size_t)data_readlen, session->user_data); if (rv == NGHTTP2_ERR_PAUSE) { return in - first; } if (nghttp2_is_fatal(rv)) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } } } if (iframe->payloadleft) { break; } rv = session_process_data_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_IGN_DATA: DEBUGF("recv: [IB_IGN_DATA]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { /* Update connection-level flow control window for ignored DATA frame too */ rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return 
(ssize_t)inlen; } if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { /* Ignored DATA is considered as "consumed" immediately. */ rv = session_update_connection_consumed_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } } } if (iframe->payloadleft) { break; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_IGN_ALL: return (ssize_t)inlen; case NGHTTP2_IB_READ_EXTENSION_PAYLOAD: DEBUGF("recv: [IB_READ_EXTENSION_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { rv = session_call_on_extension_chunk_recv_callback( session, in - readlen, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (rv != 0) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } } if (iframe->payloadleft > 0) { break; } rv = session_process_extension_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_ALTSVC_PAYLOAD: DEBUGF("recv: [IB_READ_ALTSVC_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_altsvc_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_ORIGIN_PAYLOAD: DEBUGF("recv: [IB_READ_ORIGIN_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_origin_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; } if (!busy && in == last) { break; } busy = 0; } assert(in == last); return in - first; } int nghttp2_session_recv(nghttp2_session *session) { uint8_t buf[NGHTTP2_INBOUND_BUFFER_LENGTH]; while (1) { ssize_t readlen; readlen = session_recv(session, buf, sizeof(buf)); if (readlen > 0) { ssize_t proclen = nghttp2_session_mem_recv(session, buf, (size_t)readlen); if (proclen < 0) { return (int)proclen; } assert(proclen == readlen); } else if (readlen == 0 || readlen == NGHTTP2_ERR_WOULDBLOCK) { return 0; } else if (readlen == NGHTTP2_ERR_EOF) { return NGHTTP2_ERR_EOF; } else if (readlen < 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } } /* * Returns the number of active streams, which includes streams in * reserved state. */ static size_t session_get_num_active_streams(nghttp2_session *session) { return nghttp2_map_size(&session->streams) - session->num_closed_streams - session->num_idle_streams; } int nghttp2_session_want_read(nghttp2_session *session) { size_t num_active_streams; /* If this flag is set, we don't want to read. The application should drop the connection. 
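     As a hedged sketch of typical usage (ev_io_start and ev_io_stop are
     illustrative libev calls, not part of this library), an event loop
     would only poll the socket for reading while this returns nonzero:

       if (nghttp2_session_want_read(session)) {
         ev_io_start(loop, &read_watcher);
       } else {
         ev_io_stop(loop, &read_watcher);
       }

     When both nghttp2_session_want_read() and
     nghttp2_session_want_write() return 0, the application can safely
     drop the connection.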
*/ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_SENT) { return 0; } num_active_streams = session_get_num_active_streams(session); /* Unless termination GOAWAY is sent or received, we always want to read incoming frames. */ if (num_active_streams > 0) { return 1; } /* If there are no active streams and GOAWAY has been sent or received, we are done with this session. */ return (session->goaway_flags & (NGHTTP2_GOAWAY_SENT | NGHTTP2_GOAWAY_RECV)) == 0; } int nghttp2_session_want_write(nghttp2_session *session) { /* If this flag is set, we don't want to write any data. The application should drop the connection. */ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_SENT) { return 0; } /* * Unless termination GOAWAY is sent or received, we want to write * frames if there are pending ones. If the pending frame is a request/push * response HEADERS and the concurrent stream limit is reached, we don't * want to write them. */ return session->aob.item || nghttp2_outbound_queue_top(&session->ob_urgent) || nghttp2_outbound_queue_top(&session->ob_reg) || (!nghttp2_pq_empty(&session->root.obq) && session->remote_window_size > 0) || (nghttp2_outbound_queue_top(&session->ob_syn) && !session_is_outgoing_concurrent_streams_max(session)); } int nghttp2_session_add_ping(nghttp2_session *session, uint8_t flags, const uint8_t *opaque_data) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; if ((flags & NGHTTP2_FLAG_ACK) && session->obq_flood_counter_ >= session->max_outbound_ack) { return NGHTTP2_ERR_FLOODED; } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_ping_init(&frame->ping, flags, opaque_data); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_ping_free(&frame->ping); nghttp2_mem_free(mem, item); return rv; } if (flags & NGHTTP2_FLAG_ACK) { ++session->obq_flood_counter_; } return 0; } int nghttp2_session_add_goaway(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code, const uint8_t *opaque_data, size_t opaque_data_len, uint8_t aux_flags) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; uint8_t *opaque_data_copy = NULL; nghttp2_goaway_aux_data *aux_data; nghttp2_mem *mem; mem = &session->mem; if (nghttp2_session_is_my_stream_id(session, last_stream_id)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (opaque_data_len) { if (opaque_data_len + 8 > NGHTTP2_MAX_PAYLOADLEN) { return NGHTTP2_ERR_INVALID_ARGUMENT; } opaque_data_copy = nghttp2_mem_malloc(mem, opaque_data_len); if (opaque_data_copy == NULL) { return NGHTTP2_ERR_NOMEM; } memcpy(opaque_data_copy, opaque_data, opaque_data_len); } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { nghttp2_mem_free(mem, opaque_data_copy); return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; /* last_stream_id must not be increased from the value previously sent */ last_stream_id = nghttp2_min(last_stream_id, session->local_last_stream_id); nghttp2_frame_goaway_init(&frame->goaway, last_stream_id, error_code, opaque_data_copy, opaque_data_len); aux_data = &item->aux_data.goaway; aux_data->flags = aux_flags; rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_goaway_free(&frame->goaway, mem); nghttp2_mem_free(mem, item); return rv; } return 0; } int nghttp2_session_add_window_update(nghttp2_session *session, uint8_t flags, int32_t stream_id, int32_t
window_size_increment) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_window_update_init(&frame->window_update, flags, stream_id, window_size_increment); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_window_update_free(&frame->window_update); nghttp2_mem_free(mem, item); return rv; } return 0; } static void session_append_inflight_settings(nghttp2_session *session, nghttp2_inflight_settings *settings) { nghttp2_inflight_settings **i; for (i = &session->inflight_settings_head; *i; i = &(*i)->next) ; *i = settings; } int nghttp2_session_add_settings(nghttp2_session *session, uint8_t flags, const nghttp2_settings_entry *iv, size_t niv) { nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_settings_entry *iv_copy; size_t i; int rv; nghttp2_mem *mem; nghttp2_inflight_settings *inflight_settings = NULL; mem = &session->mem; if (flags & NGHTTP2_FLAG_ACK) { if (niv != 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (session->obq_flood_counter_ >= session->max_outbound_ack) { return NGHTTP2_ERR_FLOODED; } } if (!nghttp2_iv_check(iv, niv)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } if (niv > 0) { iv_copy = nghttp2_frame_iv_copy(iv, niv, mem); if (iv_copy == NULL) { nghttp2_mem_free(mem, item); return NGHTTP2_ERR_NOMEM; } } else { iv_copy = NULL; } if ((flags & NGHTTP2_FLAG_ACK) == 0) { rv = inflight_settings_new(&inflight_settings, iv, niv, mem); if (rv != 0) { assert(nghttp2_is_fatal(rv)); nghttp2_mem_free(mem, iv_copy); nghttp2_mem_free(mem, item); return rv; } } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_settings_init(&frame->settings, flags, iv_copy, niv); rv = nghttp2_session_add_item(session, item); if (rv != 0) { /* The only expected error is fatal one */ assert(nghttp2_is_fatal(rv)); inflight_settings_del(inflight_settings, mem); nghttp2_frame_settings_free(&frame->settings, mem); nghttp2_mem_free(mem, item); return rv; } if (flags & NGHTTP2_FLAG_ACK) { ++session->obq_flood_counter_; } else { session_append_inflight_settings(session, inflight_settings); } /* Extract NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS and ENABLE_PUSH here. We use it to refuse the incoming stream and PUSH_PROMISE with RST_STREAM. 
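     For reference, these entries normally originate from
     nghttp2_submit_settings(); a hedged example of a caller (the values
     are arbitrary):

       nghttp2_settings_entry iv[2] = {
           {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 100},
           {NGHTTP2_SETTINGS_ENABLE_PUSH, 0}};
       nghttp2_submit_settings(session, NGHTTP2_FLAG_NONE, iv, 2);

     Scanning backwards below means that, for duplicated settings IDs,
     the last occurrence wins, which matches what the peer will apply.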
*/ for (i = niv; i > 0; --i) { if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS) { session->pending_local_max_concurrent_stream = iv[i - 1].value; break; } } for (i = niv; i > 0; --i) { if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_ENABLE_PUSH) { session->pending_enable_push = (uint8_t)iv[i - 1].value; break; } } for (i = niv; i > 0; --i) { if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL) { session->pending_enable_connect_protocol = (uint8_t)iv[i - 1].value; break; } } return 0; } int nghttp2_session_pack_data(nghttp2_session *session, nghttp2_bufs *bufs, size_t datamax, nghttp2_frame *frame, nghttp2_data_aux_data *aux_data, nghttp2_stream *stream) { int rv; uint32_t data_flags; ssize_t payloadlen; ssize_t padded_payloadlen; nghttp2_buf *buf; size_t max_payloadlen; assert(bufs->head == bufs->cur); buf = &bufs->cur->buf; if (session->callbacks.read_length_callback) { payloadlen = session->callbacks.read_length_callback( session, frame->hd.type, stream->stream_id, session->remote_window_size, stream->remote_window_size, session->remote_settings.max_frame_size, session->user_data); DEBUGF("send: read_length_callback=%zd\n", payloadlen); payloadlen = nghttp2_session_enforce_flow_control_limits(session, stream, payloadlen); DEBUGF("send: read_length_callback after flow control=%zd\n", payloadlen); if (payloadlen <= 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } if ((size_t)payloadlen > nghttp2_buf_avail(buf)) { /* Resize the current buffer(s). We add 1 to the buffer size to make room for the possible Pad Length field. */ rv = nghttp2_bufs_realloc(&session->aob.framebufs, (size_t)(NGHTTP2_FRAME_HDLEN + 1 + payloadlen)); if (rv != 0) { DEBUGF("send: realloc buffer failed rv=%d", rv); /* If reallocation failed, old buffers are still intact. So use safe limit. */ payloadlen = (ssize_t)datamax; DEBUGF("send: use safe limit payloadlen=%zd", payloadlen); } else { assert(&session->aob.framebufs == bufs); buf = &bufs->cur->buf; } } datamax = (size_t)payloadlen; } /* Current max DATA length is less than buffer chunk size */ assert(nghttp2_buf_avail(buf) >= datamax); data_flags = NGHTTP2_DATA_FLAG_NONE; payloadlen = aux_data->data_prd.read_callback( session, frame->hd.stream_id, buf->pos, datamax, &data_flags, &aux_data->data_prd.source, session->user_data); if (payloadlen == NGHTTP2_ERR_DEFERRED || payloadlen == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE || payloadlen == NGHTTP2_ERR_PAUSE) { DEBUGF("send: DATA postponed due to %s\n", nghttp2_strerror((int)payloadlen)); return (int)payloadlen; } if (payloadlen < 0 || datamax < (size_t)payloadlen) { /* This is the error code returned when the callback failed.
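     For reference, a minimal well-behaved read_callback looks roughly
     like this hedged sketch, which assumes a file-descriptor-backed
     nghttp2_data_source and POSIX read(2):

       static ssize_t example_read_cb(nghttp2_session *session,
                                      int32_t stream_id, uint8_t *buf,
                                      size_t length, uint32_t *data_flags,
                                      nghttp2_data_source *source,
                                      void *user_data) {
         ssize_t n = read(source->fd, buf, length);
         if (n < 0) {
           return NGHTTP2_ERR_CALLBACK_FAILURE;
         }
         if (n == 0) {
           *data_flags |= NGHTTP2_DATA_FLAG_EOF;
         }
         return n;
       }

     Returning more than |length|, or a negative value other than the
     documented error codes, ends up on this error path.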
*/ return NGHTTP2_ERR_CALLBACK_FAILURE; } buf->last = buf->pos + payloadlen; buf->pos -= NGHTTP2_FRAME_HDLEN; /* Clear flags, because this may contain previous flags of previous DATA */ frame->hd.flags = NGHTTP2_FLAG_NONE; if (data_flags & NGHTTP2_DATA_FLAG_EOF) { aux_data->eof = 1; /* If NGHTTP2_DATA_FLAG_NO_END_STREAM is set, don't set NGHTTP2_FLAG_END_STREAM */ if ((aux_data->flags & NGHTTP2_FLAG_END_STREAM) && (data_flags & NGHTTP2_DATA_FLAG_NO_END_STREAM) == 0) { frame->hd.flags |= NGHTTP2_FLAG_END_STREAM; } } if (data_flags & NGHTTP2_DATA_FLAG_NO_COPY) { if (session->callbacks.send_data_callback == NULL) { DEBUGF("NGHTTP2_DATA_FLAG_NO_COPY requires send_data_callback set\n"); return NGHTTP2_ERR_CALLBACK_FAILURE; } aux_data->no_copy = 1; } frame->hd.length = (size_t)payloadlen; frame->data.padlen = 0; max_payloadlen = nghttp2_min(datamax, frame->hd.length + NGHTTP2_MAX_PADLEN); padded_payloadlen = session_call_select_padding(session, frame, max_payloadlen); if (nghttp2_is_fatal((int)padded_payloadlen)) { return (int)padded_payloadlen; } frame->data.padlen = (size_t)(padded_payloadlen - payloadlen); nghttp2_frame_pack_frame_hd(buf->pos, &frame->hd); rv = nghttp2_frame_add_pad(bufs, &frame->hd, frame->data.padlen, aux_data->no_copy); if (rv != 0) { return rv; } reschedule_stream(stream); if (frame->hd.length == 0 && (data_flags & NGHTTP2_DATA_FLAG_EOF) && (data_flags & NGHTTP2_DATA_FLAG_NO_END_STREAM)) { /* DATA payload length is 0, and DATA frame does not bear END_STREAM. In this case, there is no point to send 0 length DATA frame. */ return NGHTTP2_ERR_CANCEL; } return 0; } void *nghttp2_session_get_stream_user_data(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream) { return stream->stream_user_data; } else { return NULL; } } int nghttp2_session_set_stream_user_data(nghttp2_session *session, int32_t stream_id, void *stream_user_data) { nghttp2_stream *stream; nghttp2_frame *frame; nghttp2_outbound_item *item; stream = nghttp2_session_get_stream(session, stream_id); if (stream) { stream->stream_user_data = stream_user_data; return 0; } if (session->server || !nghttp2_session_is_my_stream_id(session, stream_id) || !nghttp2_outbound_queue_top(&session->ob_syn)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } frame = &nghttp2_outbound_queue_top(&session->ob_syn)->frame; assert(frame->hd.type == NGHTTP2_HEADERS); if (frame->hd.stream_id > stream_id || (uint32_t)stream_id >= session->next_stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } for (item = session->ob_syn.head; item; item = item->qnext) { if (item->frame.hd.stream_id < stream_id) { continue; } if (item->frame.hd.stream_id > stream_id) { break; } item->aux_data.headers.stream_user_data = stream_user_data; return 0; } return NGHTTP2_ERR_INVALID_ARGUMENT; } int nghttp2_session_resume_data(nghttp2_session *session, int32_t stream_id) { int rv; nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL || !nghttp2_stream_check_deferred_item(stream)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } rv = nghttp2_stream_resume_deferred_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_USER); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } size_t nghttp2_session_get_outbound_queue_size(nghttp2_session *session) { return nghttp2_outbound_queue_size(&session->ob_urgent) + nghttp2_outbound_queue_size(&session->ob_reg) + nghttp2_outbound_queue_size(&session->ob_syn); /* TODO account for item attached to stream */ } int32_t 
nghttp2_session_get_stream_effective_recv_data_length(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } return stream->recv_window_size < 0 ? 0 : stream->recv_window_size; } int32_t nghttp2_session_get_stream_effective_local_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } return stream->local_window_size; } int32_t nghttp2_session_get_stream_local_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; int32_t size; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } size = stream->local_window_size - stream->recv_window_size; /* size could be negative if local endpoint reduced SETTINGS_INITIAL_WINDOW_SIZE */ if (size < 0) { return 0; } return size; } int32_t nghttp2_session_get_effective_recv_data_length(nghttp2_session *session) { return session->recv_window_size < 0 ? 0 : session->recv_window_size; } int32_t nghttp2_session_get_effective_local_window_size(nghttp2_session *session) { return session->local_window_size; } int32_t nghttp2_session_get_local_window_size(nghttp2_session *session) { return session->local_window_size - session->recv_window_size; } int32_t nghttp2_session_get_stream_remote_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } /* stream->remote_window_size can be negative when SETTINGS_INITIAL_WINDOW_SIZE is changed. */ return nghttp2_max(0, stream->remote_window_size); } int32_t nghttp2_session_get_remote_window_size(nghttp2_session *session) { return session->remote_window_size; } uint32_t nghttp2_session_get_remote_settings(nghttp2_session *session, nghttp2_settings_id id) { switch (id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: return session->remote_settings.header_table_size; case NGHTTP2_SETTINGS_ENABLE_PUSH: return session->remote_settings.enable_push; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: return session->remote_settings.max_concurrent_streams; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: return session->remote_settings.initial_window_size; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: return session->remote_settings.max_frame_size; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: return session->remote_settings.max_header_list_size; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: return session->remote_settings.enable_connect_protocol; } assert(0); abort(); /* if NDEBUG is set */ } uint32_t nghttp2_session_get_local_settings(nghttp2_session *session, nghttp2_settings_id id) { switch (id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: return session->local_settings.header_table_size; case NGHTTP2_SETTINGS_ENABLE_PUSH: return session->local_settings.enable_push; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: return session->local_settings.max_concurrent_streams; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: return session->local_settings.initial_window_size; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: return session->local_settings.max_frame_size; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: return session->local_settings.max_header_list_size; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: return session->local_settings.enable_connect_protocol; } assert(0); abort(); /* if NDEBUG is set */ } static int nghttp2_session_upgrade_internal(nghttp2_session *session, 
const uint8_t *settings_payload, size_t settings_payloadlen, void *stream_user_data) { nghttp2_stream *stream; nghttp2_frame frame; nghttp2_settings_entry *iv; size_t niv; int rv; nghttp2_priority_spec pri_spec; nghttp2_mem *mem; mem = &session->mem; if ((!session->server && session->next_stream_id != 1) || (session->server && session->last_recv_stream_id >= 1)) { return NGHTTP2_ERR_PROTO; } if (settings_payloadlen % NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH) { return NGHTTP2_ERR_INVALID_ARGUMENT; } rv = nghttp2_frame_unpack_settings_payload2(&iv, &niv, settings_payload, settings_payloadlen, mem); if (rv != 0) { return rv; } if (session->server) { nghttp2_frame_hd_init(&frame.hd, settings_payloadlen, NGHTTP2_SETTINGS, NGHTTP2_FLAG_NONE, 0); frame.settings.iv = iv; frame.settings.niv = niv; rv = nghttp2_session_on_settings_received(session, &frame, 1 /* No ACK */); } else { rv = nghttp2_submit_settings(session, NGHTTP2_FLAG_NONE, iv, niv); } nghttp2_mem_free(mem, iv); if (rv != 0) { return rv; } nghttp2_priority_spec_default_init(&pri_spec); stream = nghttp2_session_open_stream( session, 1, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_OPENING, session->server ? NULL : stream_user_data); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream(), since this should be the first stream to be opened. */ if (session->server) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); session->last_recv_stream_id = 1; session->last_proc_stream_id = 1; } else { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); session->last_sent_stream_id = 1; session->next_stream_id += 2; } return 0; } int nghttp2_session_upgrade(nghttp2_session *session, const uint8_t *settings_payload, size_t settings_payloadlen, void *stream_user_data) { int rv; nghttp2_stream *stream; rv = nghttp2_session_upgrade_internal(session, settings_payload, settings_payloadlen, stream_user_data); if (rv != 0) { return rv; } stream = nghttp2_session_get_stream(session, 1); assert(stream); /* We have no information about the request header fields when the Upgrade happened, so we don't know the request method here. If the request method is HEAD, we are in trouble, because we may have a nonzero content-length header field in the response headers, and we are going to check it against the actual DATA frames, but we may get a mismatch because a HEAD response body must be empty. For this reason, nghttp2_session_upgrade() was deprecated in favor of nghttp2_session_upgrade2(), which has the |head_request| parameter to indicate whether the request method is HEAD or not.
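     A hedged sketch of the client-side call after the 101 (Switching
     Protocols) response has been received; settings_payload holds the raw
     bytes that were base64url-encoded into the HTTP2-Settings request
     header, and head_request reflects whether the upgraded request used
     HEAD:

       rv = nghttp2_session_upgrade2(session, settings_payload,
                                     settings_payloadlen, head_request,
                                     NULL);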
*/ stream->http_flags |= NGHTTP2_HTTP_FLAG_METH_UPGRADE_WORKAROUND; return 0; } int nghttp2_session_upgrade2(nghttp2_session *session, const uint8_t *settings_payload, size_t settings_payloadlen, int head_request, void *stream_user_data) { int rv; nghttp2_stream *stream; rv = nghttp2_session_upgrade_internal(session, settings_payload, settings_payloadlen, stream_user_data); if (rv != 0) { return rv; } stream = nghttp2_session_get_stream(session, 1); assert(stream); if (head_request) { stream->http_flags |= NGHTTP2_HTTP_FLAG_METH_HEAD; } return 0; } int nghttp2_session_get_stream_local_close(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return -1; } return (stream->shut_flags & NGHTTP2_SHUT_WR) != 0; } int nghttp2_session_get_stream_remote_close(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return -1; } return (stream->shut_flags & NGHTTP2_SHUT_RD) != 0; } int nghttp2_session_consume(nghttp2_session *session, int32_t stream_id, size_t size) { int rv; nghttp2_stream *stream; if (stream_id == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } rv = session_update_connection_consumed_size(session, size); if (nghttp2_is_fatal(rv)) { return rv; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return 0; } rv = session_update_stream_consumed_size(session, stream, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_consume_connection(nghttp2_session *session, size_t size) { int rv; if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } rv = session_update_connection_consumed_size(session, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_consume_stream(nghttp2_session *session, int32_t stream_id, size_t size) { int rv; nghttp2_stream *stream; if (stream_id == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return 0; } rv = session_update_stream_consumed_size(session, stream, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_set_next_stream_id(nghttp2_session *session, int32_t next_stream_id) { if (next_stream_id <= 0 || session->next_stream_id > (uint32_t)next_stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (session->server) { if (next_stream_id % 2) { return NGHTTP2_ERR_INVALID_ARGUMENT; } } else if (next_stream_id % 2 == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } session->next_stream_id = (uint32_t)next_stream_id; return 0; } uint32_t nghttp2_session_get_next_stream_id(nghttp2_session *session) { return session->next_stream_id; } int32_t nghttp2_session_get_last_proc_stream_id(nghttp2_session *session) { return session->last_proc_stream_id; } nghttp2_stream *nghttp2_session_find_stream(nghttp2_session *session, int32_t stream_id) { if (stream_id == 0) { return &session->root; } return nghttp2_session_get_stream_raw(session, stream_id); } nghttp2_stream *nghttp2_session_get_root_stream(nghttp2_session *session) { return &session->root; } int nghttp2_session_check_server_session(nghttp2_session *session) { return session->server; } int nghttp2_session_change_stream_priority( 
nghttp2_session *session, int32_t stream_id, const nghttp2_priority_spec *pri_spec) { int rv; nghttp2_stream *stream; nghttp2_priority_spec pri_spec_copy; if (stream_id == 0 || stream_id == pri_spec->stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } stream = nghttp2_session_get_stream_raw(session, stream_id); if (!stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } pri_spec_copy = *pri_spec; nghttp2_priority_spec_normalize_weight(&pri_spec_copy); rv = nghttp2_session_reprioritize_stream(session, stream, &pri_spec_copy); if (nghttp2_is_fatal(rv)) { return rv; } /* We intentionally don't call nghttp2_session_adjust_idle_stream(), so that idle streams created by this function, and existing ones, are kept for the application. We will adjust the number of idle streams when nghttp2_session_mem_send() or nghttp2_session_mem_recv() is called. */ return 0; } int nghttp2_session_create_idle_stream(nghttp2_session *session, int32_t stream_id, const nghttp2_priority_spec *pri_spec) { nghttp2_stream *stream; nghttp2_priority_spec pri_spec_copy; if (stream_id == 0 || stream_id == pri_spec->stream_id || !session_detect_idle_stream(session, stream_id)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } pri_spec_copy = *pri_spec; nghttp2_priority_spec_normalize_weight(&pri_spec_copy); stream = nghttp2_session_open_stream(session, stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec_copy, NGHTTP2_STREAM_IDLE, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } /* We intentionally don't call nghttp2_session_adjust_idle_stream(), so that idle streams created by this function, and existing ones, are kept for the application. We will adjust the number of idle streams when nghttp2_session_mem_send() or nghttp2_session_mem_recv() is called. */ return 0; } size_t nghttp2_session_get_hd_inflate_dynamic_table_size(nghttp2_session *session) { return nghttp2_hd_inflate_get_dynamic_table_size(&session->hd_inflater); } size_t nghttp2_session_get_hd_deflate_dynamic_table_size(nghttp2_session *session) { return nghttp2_hd_deflate_get_dynamic_table_size(&session->hd_deflater); } void nghttp2_session_set_user_data(nghttp2_session *session, void *user_data) { session->user_data = user_data; }
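/*
 * Illustrative only, not part of the library API: a hedged sketch of how
 * an application might pump a session, feeding received bytes in with
 * nghttp2_session_mem_recv() and draining output with
 * nghttp2_session_mem_send().  The send_to_peer callback is a
 * hypothetical transport write supplied by the caller.
 */
static int example_session_pump(nghttp2_session *session,
                                const uint8_t *recvbuf, size_t recvlen,
                                int (*send_to_peer)(const uint8_t *data,
                                                    size_t len)) {
  ssize_t rv;
  const uint8_t *data;

  /* A negative return is a library error code; on a fatal error the
     connection should be dropped. */
  rv = nghttp2_session_mem_recv(session, recvbuf, recvlen);
  if (rv < 0) {
    return (int)rv;
  }

  /* Drain all pending output; 0 means nothing is left to send. */
  for (;;) {
    rv = nghttp2_session_mem_send(session, &data);
    if (rv < 0) {
      return (int)rv;
    }
    if (rv == 0) {
      return 0;
    }
    if (send_to_peer(data, (size_t)rv) != 0) {
      return NGHTTP2_ERR_CALLBACK_FAILURE;
    }
  }
}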
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2012 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "nghttp2_session.h" #include <string.h> #include <stddef.h> #include <stdio.h> #include <assert.h> #include <stdarg.h> #include "nghttp2_helper.h" #include "nghttp2_net.h" #include "nghttp2_priority_spec.h" #include "nghttp2_option.h" #include "nghttp2_http.h" #include "nghttp2_pq.h" #include "nghttp2_debug.h" /* * Returns non-zero if the number of outgoing opened streams is larger * than or equal to * remote_settings.max_concurrent_streams. */ static int session_is_outgoing_concurrent_streams_max(nghttp2_session *session) { return session->remote_settings.max_concurrent_streams <= session->num_outgoing_streams; } /* * Returns non-zero if the number of incoming opened streams is larger * than or equal to * local_settings.max_concurrent_streams. */ static int session_is_incoming_concurrent_streams_max(nghttp2_session *session) { return session->local_settings.max_concurrent_streams <= session->num_incoming_streams; } /* * Returns non-zero if the number of incoming opened streams is larger * than or equal to * session->pending_local_max_concurrent_stream. */ static int session_is_incoming_concurrent_streams_pending_max(nghttp2_session *session) { return session->pending_local_max_concurrent_stream <= session->num_incoming_streams; } /* * Returns non-zero if |lib_error| is non-fatal error. */ static int is_non_fatal(int lib_error_code) { return lib_error_code < 0 && lib_error_code > NGHTTP2_ERR_FATAL; } int nghttp2_is_fatal(int lib_error_code) { return lib_error_code < NGHTTP2_ERR_FATAL; } static int session_enforce_http_messaging(nghttp2_session *session) { return (session->opt_flags & NGHTTP2_OPTMASK_NO_HTTP_MESSAGING) == 0; } /* * Returns nonzero if |frame| is trailer headers. 
*/ static int session_trailer_headers(nghttp2_session *session, nghttp2_stream *stream, nghttp2_frame *frame) { if (!stream || frame->hd.type != NGHTTP2_HEADERS) { return 0; } if (session->server) { return frame->headers.cat == NGHTTP2_HCAT_HEADERS; } return frame->headers.cat == NGHTTP2_HCAT_HEADERS && (stream->http_flags & NGHTTP2_HTTP_FLAG_EXPECT_FINAL_RESPONSE) == 0; } /* Returns nonzero if the |stream| is in reserved(remote) state */ static int state_reserved_remote(nghttp2_session *session, nghttp2_stream *stream) { return stream->state == NGHTTP2_STREAM_RESERVED && !nghttp2_session_is_my_stream_id(session, stream->stream_id); } /* Returns nonzero if the |stream| is in reserved(local) state */ static int state_reserved_local(nghttp2_session *session, nghttp2_stream *stream) { return stream->state == NGHTTP2_STREAM_RESERVED && nghttp2_session_is_my_stream_id(session, stream->stream_id); } /* * Checks whether received stream_id is valid. This function returns * 1 if it succeeds, or 0. */ static int session_is_new_peer_stream_id(nghttp2_session *session, int32_t stream_id) { return stream_id != 0 && !nghttp2_session_is_my_stream_id(session, stream_id) && session->last_recv_stream_id < stream_id; } static int session_detect_idle_stream(nghttp2_session *session, int32_t stream_id) { /* Assume that stream object with stream_id does not exist */ if (nghttp2_session_is_my_stream_id(session, stream_id)) { if (session->last_sent_stream_id < stream_id) { return 1; } return 0; } if (session_is_new_peer_stream_id(session, stream_id)) { return 1; } return 0; } static int check_ext_type_set(const uint8_t *ext_types, uint8_t type) { return (ext_types[type / 8] & (1 << (type & 0x7))) > 0; } static int session_call_error_callback(nghttp2_session *session, int lib_error_code, const char *fmt, ...) { size_t bufsize; va_list ap; char *buf; int rv; nghttp2_mem *mem; if (!session->callbacks.error_callback && !session->callbacks.error_callback2) { return 0; } mem = &session->mem; va_start(ap, fmt); rv = vsnprintf(NULL, 0, fmt, ap); va_end(ap); if (rv < 0) { return NGHTTP2_ERR_NOMEM; } bufsize = (size_t)(rv + 1); buf = nghttp2_mem_malloc(mem, bufsize); if (buf == NULL) { return NGHTTP2_ERR_NOMEM; } va_start(ap, fmt); rv = vsnprintf(buf, bufsize, fmt, ap); va_end(ap); if (rv < 0) { nghttp2_mem_free(mem, buf); /* vsnprintf may return error because of various things we can imagine, but typically we don't want to drop session just for debug callback. */ DEBUGF("error_callback: vsnprintf failed. The template was %s\n", fmt); return 0; } if (session->callbacks.error_callback2) { rv = session->callbacks.error_callback2(session, lib_error_code, buf, (size_t)rv, session->user_data); } else { rv = session->callbacks.error_callback(session, buf, (size_t)rv, session->user_data); } nghttp2_mem_free(mem, buf); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_terminate_session(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code, const char *reason) { int rv; const uint8_t *debug_data; size_t debug_datalen; if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) { return 0; } /* Ignore all incoming frames because we are going to tear down the session. 
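     Applications normally reach this path through the public wrappers
     defined below, e.g. (a hedged example; the error code is arbitrary):

       nghttp2_session_terminate_session(session, NGHTTP2_NO_ERROR);

     Once the GOAWAY has actually been sent, nghttp2_session_want_read()
     and nghttp2_session_want_write() start returning 0.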
*/ session->iframe.state = NGHTTP2_IB_IGN_ALL; if (reason == NULL) { debug_data = NULL; debug_datalen = 0; } else { debug_data = (const uint8_t *)reason; debug_datalen = strlen(reason); } rv = nghttp2_session_add_goaway(session, last_stream_id, error_code, debug_data, debug_datalen, NGHTTP2_GOAWAY_AUX_TERM_ON_SEND); if (rv != 0) { return rv; } session->goaway_flags |= NGHTTP2_GOAWAY_TERM_ON_SEND; return 0; } int nghttp2_session_terminate_session(nghttp2_session *session, uint32_t error_code) { return session_terminate_session(session, session->last_proc_stream_id, error_code, NULL); } int nghttp2_session_terminate_session2(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code) { return session_terminate_session(session, last_stream_id, error_code, NULL); } int nghttp2_session_terminate_session_with_reason(nghttp2_session *session, uint32_t error_code, const char *reason) { return session_terminate_session(session, session->last_proc_stream_id, error_code, reason); } int nghttp2_session_is_my_stream_id(nghttp2_session *session, int32_t stream_id) { int rem; if (stream_id == 0) { return 0; } rem = stream_id & 0x1; if (session->server) { return rem == 0; } return rem == 1; } nghttp2_stream *nghttp2_session_get_stream(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = (nghttp2_stream *)nghttp2_map_find(&session->streams, stream_id); if (stream == NULL || (stream->flags & NGHTTP2_STREAM_FLAG_CLOSED) || stream->state == NGHTTP2_STREAM_IDLE) { return NULL; } return stream; } nghttp2_stream *nghttp2_session_get_stream_raw(nghttp2_session *session, int32_t stream_id) { return (nghttp2_stream *)nghttp2_map_find(&session->streams, stream_id); } static void session_inbound_frame_reset(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_mem *mem = &session->mem; /* A bit risky code, since if this function is called from nghttp2_session_new(), we rely on the fact that iframe->frame.hd.type is 0, so that no free is performed. 
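     This is safe in practice: the session object is calloc()-ed at
     creation and NGHTTP2_DATA is defined as 0, so a zero-filled frame
     header is indistinguishable from a DATA frame, and the
     NGHTTP2_DATA case below frees nothing.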
*/ switch (iframe->frame.hd.type) { case NGHTTP2_DATA: break; case NGHTTP2_HEADERS: nghttp2_frame_headers_free(&iframe->frame.headers, mem); break; case NGHTTP2_PRIORITY: nghttp2_frame_priority_free(&iframe->frame.priority); break; case NGHTTP2_RST_STREAM: nghttp2_frame_rst_stream_free(&iframe->frame.rst_stream); break; case NGHTTP2_SETTINGS: nghttp2_frame_settings_free(&iframe->frame.settings, mem); nghttp2_mem_free(mem, iframe->iv); iframe->iv = NULL; iframe->niv = 0; iframe->max_niv = 0; break; case NGHTTP2_PUSH_PROMISE: nghttp2_frame_push_promise_free(&iframe->frame.push_promise, mem); break; case NGHTTP2_PING: nghttp2_frame_ping_free(&iframe->frame.ping); break; case NGHTTP2_GOAWAY: nghttp2_frame_goaway_free(&iframe->frame.goaway, mem); break; case NGHTTP2_WINDOW_UPDATE: nghttp2_frame_window_update_free(&iframe->frame.window_update); break; default: /* extension frame */ if (check_ext_type_set(session->user_recv_ext_types, iframe->frame.hd.type)) { nghttp2_frame_extension_free(&iframe->frame.ext); } else { switch (iframe->frame.hd.type) { case NGHTTP2_ALTSVC: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ALTSVC) == 0) { break; } nghttp2_frame_altsvc_free(&iframe->frame.ext, mem); break; case NGHTTP2_ORIGIN: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ORIGIN) == 0) { break; } nghttp2_frame_origin_free(&iframe->frame.ext, mem); break; } } break; } memset(&iframe->frame, 0, sizeof(nghttp2_frame)); memset(&iframe->ext_frame_payload, 0, sizeof(nghttp2_ext_frame_payload)); iframe->state = NGHTTP2_IB_READ_HEAD; nghttp2_buf_wrap_init(&iframe->sbuf, iframe->raw_sbuf, sizeof(iframe->raw_sbuf)); iframe->sbuf.mark += NGHTTP2_FRAME_HDLEN; nghttp2_buf_free(&iframe->lbuf, mem); nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); iframe->raw_lbuf = NULL; iframe->payloadleft = 0; iframe->padlen = 0; } static void init_settings(nghttp2_settings_storage *settings) { settings->header_table_size = NGHTTP2_HD_DEFAULT_MAX_BUFFER_SIZE; settings->enable_push = 1; settings->max_concurrent_streams = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; settings->initial_window_size = NGHTTP2_INITIAL_WINDOW_SIZE; settings->max_frame_size = NGHTTP2_MAX_FRAME_SIZE_MIN; settings->max_header_list_size = UINT32_MAX; } static void active_outbound_item_reset(nghttp2_active_outbound_item *aob, nghttp2_mem *mem) { DEBUGF("send: reset nghttp2_active_outbound_item\n"); DEBUGF("send: aob->item = %p\n", aob->item); nghttp2_outbound_item_free(aob->item, mem); nghttp2_mem_free(mem, aob->item); aob->item = NULL; nghttp2_bufs_reset(&aob->framebufs); aob->state = NGHTTP2_OB_POP_ITEM; } int nghttp2_enable_strict_preface = 1; static int session_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, int server, const nghttp2_option *option, nghttp2_mem *mem) { int rv; size_t nbuffer; size_t max_deflate_dynamic_table_size = NGHTTP2_HD_DEFAULT_MAX_DEFLATE_BUFFER_SIZE; if (mem == NULL) { mem = nghttp2_mem_default(); } *session_ptr = nghttp2_mem_calloc(mem, 1, sizeof(nghttp2_session)); if (*session_ptr == NULL) { rv = NGHTTP2_ERR_NOMEM; goto fail_session; } (*session_ptr)->mem = *mem; mem = &(*session_ptr)->mem; /* next_stream_id is initialized in either nghttp2_session_client_new2 or nghttp2_session_server_new2 */ nghttp2_stream_init(&(*session_ptr)->root, 0, NGHTTP2_STREAM_FLAG_NONE, NGHTTP2_STREAM_IDLE, NGHTTP2_DEFAULT_WEIGHT, 0, 0, NULL, mem); (*session_ptr)->remote_window_size = NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE; (*session_ptr)->recv_window_size = 0; (*session_ptr)->consumed_size = 
0; (*session_ptr)->recv_reduction = 0; (*session_ptr)->local_window_size = NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE; (*session_ptr)->goaway_flags = NGHTTP2_GOAWAY_NONE; (*session_ptr)->local_last_stream_id = (1u << 31) - 1; (*session_ptr)->remote_last_stream_id = (1u << 31) - 1; (*session_ptr)->pending_local_max_concurrent_stream = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; (*session_ptr)->pending_enable_push = 1; if (server) { (*session_ptr)->server = 1; } init_settings(&(*session_ptr)->remote_settings); init_settings(&(*session_ptr)->local_settings); (*session_ptr)->max_incoming_reserved_streams = NGHTTP2_MAX_INCOMING_RESERVED_STREAMS; /* Limit max outgoing concurrent streams to sensible value */ (*session_ptr)->remote_settings.max_concurrent_streams = 100; (*session_ptr)->max_send_header_block_length = NGHTTP2_MAX_HEADERSLEN; (*session_ptr)->max_outbound_ack = NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM; (*session_ptr)->max_settings = NGHTTP2_DEFAULT_MAX_SETTINGS; if (option) { if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_WINDOW_UPDATE) && option->no_auto_window_update) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE; } if (option->opt_set_mask & NGHTTP2_OPT_PEER_MAX_CONCURRENT_STREAMS) { (*session_ptr)->remote_settings.max_concurrent_streams = option->peer_max_concurrent_streams; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_RESERVED_REMOTE_STREAMS) { (*session_ptr)->max_incoming_reserved_streams = option->max_reserved_remote_streams; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_RECV_CLIENT_MAGIC) && option->no_recv_client_magic) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_HTTP_MESSAGING) && option->no_http_messaging) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_HTTP_MESSAGING; } if (option->opt_set_mask & NGHTTP2_OPT_USER_RECV_EXT_TYPES) { memcpy((*session_ptr)->user_recv_ext_types, option->user_recv_ext_types, sizeof((*session_ptr)->user_recv_ext_types)); } if (option->opt_set_mask & NGHTTP2_OPT_BUILTIN_RECV_EXT_TYPES) { (*session_ptr)->builtin_recv_ext_types = option->builtin_recv_ext_types; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_PING_ACK) && option->no_auto_ping_ack) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_AUTO_PING_ACK; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_SEND_HEADER_BLOCK_LENGTH) { (*session_ptr)->max_send_header_block_length = option->max_send_header_block_length; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_DEFLATE_DYNAMIC_TABLE_SIZE) { max_deflate_dynamic_table_size = option->max_deflate_dynamic_table_size; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_CLOSED_STREAMS) && option->no_closed_streams) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_CLOSED_STREAMS; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_OUTBOUND_ACK) { (*session_ptr)->max_outbound_ack = option->max_outbound_ack; } if ((option->opt_set_mask & NGHTTP2_OPT_MAX_SETTINGS) && option->max_settings) { (*session_ptr)->max_settings = option->max_settings; } } rv = nghttp2_hd_deflate_init2(&(*session_ptr)->hd_deflater, max_deflate_dynamic_table_size, mem); if (rv != 0) { goto fail_hd_deflater; } rv = nghttp2_hd_inflate_init(&(*session_ptr)->hd_inflater, mem); if (rv != 0) { goto fail_hd_inflater; } rv = nghttp2_map_init(&(*session_ptr)->streams, mem); if (rv != 0) { goto fail_map; } nbuffer = ((*session_ptr)->max_send_header_block_length + NGHTTP2_FRAMEBUF_CHUNKLEN - 1) / NGHTTP2_FRAMEBUF_CHUNKLEN; if (nbuffer == 0) { nbuffer = 1; } /* 1 for Pad Field. 
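     That is, each frame gets NGHTTP2_FRAME_HDLEN + 1 bytes of
     headroom: the frame header plus one byte for the optional Pad
     Length field.  The chunk count computed above is the ceiling
     division nbuffer = ceil(max_send_header_block_length /
     NGHTTP2_FRAMEBUF_CHUNKLEN) via the usual (x + d - 1) / d identity,
     clamped to at least 1.  As a worked example with illustrative
     numbers only: a 64 KiB header block limit and a 16 KiB chunk size
     give (65536 + 16384 - 1) / 16384 = 4 buffers.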
*/
  rv = nghttp2_bufs_init3(&(*session_ptr)->aob.framebufs,
                          NGHTTP2_FRAMEBUF_CHUNKLEN, nbuffer, 1,
                          NGHTTP2_FRAME_HDLEN + 1, mem);

  if (rv != 0) {
    goto fail_aob_framebuf;
  }

  active_outbound_item_reset(&(*session_ptr)->aob, mem);

  (*session_ptr)->callbacks = *callbacks;
  (*session_ptr)->user_data = user_data;

  session_inbound_frame_reset(*session_ptr);

  if (nghttp2_enable_strict_preface) {
    nghttp2_inbound_frame *iframe = &(*session_ptr)->iframe;

    if (server && ((*session_ptr)->opt_flags &
                   NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC) == 0) {
      iframe->state = NGHTTP2_IB_READ_CLIENT_MAGIC;
      iframe->payloadleft = NGHTTP2_CLIENT_MAGIC_LEN;
    } else {
      iframe->state = NGHTTP2_IB_READ_FIRST_SETTINGS;
    }

    if (!server) {
      (*session_ptr)->aob.state = NGHTTP2_OB_SEND_CLIENT_MAGIC;
      nghttp2_bufs_add(&(*session_ptr)->aob.framebufs, NGHTTP2_CLIENT_MAGIC,
                       NGHTTP2_CLIENT_MAGIC_LEN);
    }
  }

  return 0;

fail_aob_framebuf:
  nghttp2_map_free(&(*session_ptr)->streams);
fail_map:
  nghttp2_hd_inflate_free(&(*session_ptr)->hd_inflater);
fail_hd_inflater:
  nghttp2_hd_deflate_free(&(*session_ptr)->hd_deflater);
fail_hd_deflater:
  nghttp2_mem_free(mem, *session_ptr);
fail_session:
  return rv;
}

int nghttp2_session_client_new(nghttp2_session **session_ptr,
                               const nghttp2_session_callbacks *callbacks,
                               void *user_data) {
  return nghttp2_session_client_new3(session_ptr, callbacks, user_data, NULL,
                                     NULL);
}

int nghttp2_session_client_new2(nghttp2_session **session_ptr,
                                const nghttp2_session_callbacks *callbacks,
                                void *user_data, const nghttp2_option *option) {
  return nghttp2_session_client_new3(session_ptr, callbacks, user_data, option,
                                     NULL);
}

int nghttp2_session_client_new3(nghttp2_session **session_ptr,
                                const nghttp2_session_callbacks *callbacks,
                                void *user_data, const nghttp2_option *option,
                                nghttp2_mem *mem) {
  int rv;
  nghttp2_session *session;

  rv = session_new(&session, callbacks, user_data, 0, option, mem);

  if (rv != 0) {
    return rv;
  }
  /* IDs for use in client */
  session->next_stream_id = 1;

  *session_ptr = session;

  return 0;
}

int nghttp2_session_server_new(nghttp2_session **session_ptr,
                               const nghttp2_session_callbacks *callbacks,
                               void *user_data) {
  return nghttp2_session_server_new3(session_ptr, callbacks, user_data, NULL,
                                     NULL);
}

int nghttp2_session_server_new2(nghttp2_session **session_ptr,
                                const nghttp2_session_callbacks *callbacks,
                                void *user_data, const nghttp2_option *option) {
  return nghttp2_session_server_new3(session_ptr, callbacks, user_data, option,
                                     NULL);
}

int nghttp2_session_server_new3(nghttp2_session **session_ptr,
                                const nghttp2_session_callbacks *callbacks,
                                void *user_data, const nghttp2_option *option,
                                nghttp2_mem *mem) {
  int rv;
  nghttp2_session *session;

  rv = session_new(&session, callbacks, user_data, 1, option, mem);

  if (rv != 0) {
    return rv;
  }
  /* IDs for use in server */
  session->next_stream_id = 2;

  *session_ptr = session;

  return 0;
}

static int free_streams(nghttp2_map_entry *entry, void *ptr) {
  nghttp2_session *session;
  nghttp2_stream *stream;
  nghttp2_outbound_item *item;
  nghttp2_mem *mem;

  session = (nghttp2_session *)ptr;
  mem = &session->mem;
  stream = (nghttp2_stream *)entry;
  item = stream->item;

  if (item && !item->queued && item != session->aob.item) {
    nghttp2_outbound_item_free(item, mem);
    nghttp2_mem_free(mem, item);
  }

  nghttp2_stream_free(stream);
  nghttp2_mem_free(mem, stream);

  return 0;
}

static void ob_q_free(nghttp2_outbound_queue *q, nghttp2_mem *mem) {
  nghttp2_outbound_item *item, *next;
  for (item = q->head; item;) {
    next = item->qnext;
    nghttp2_outbound_item_free(item, mem);
    nghttp2_mem_free(mem, item);
    item = next;
  }
} static int inflight_settings_new(nghttp2_inflight_settings **settings_ptr, const nghttp2_settings_entry *iv, size_t niv, nghttp2_mem *mem) { *settings_ptr = nghttp2_mem_malloc(mem, sizeof(nghttp2_inflight_settings)); if (!*settings_ptr) { return NGHTTP2_ERR_NOMEM; } if (niv > 0) { (*settings_ptr)->iv = nghttp2_frame_iv_copy(iv, niv, mem); if (!(*settings_ptr)->iv) { nghttp2_mem_free(mem, *settings_ptr); return NGHTTP2_ERR_NOMEM; } } else { (*settings_ptr)->iv = NULL; } (*settings_ptr)->niv = niv; (*settings_ptr)->next = NULL; return 0; } static void inflight_settings_del(nghttp2_inflight_settings *settings, nghttp2_mem *mem) { if (!settings) { return; } nghttp2_mem_free(mem, settings->iv); nghttp2_mem_free(mem, settings); } void nghttp2_session_del(nghttp2_session *session) { nghttp2_mem *mem; nghttp2_inflight_settings *settings; if (session == NULL) { return; } mem = &session->mem; for (settings = session->inflight_settings_head; settings;) { nghttp2_inflight_settings *next = settings->next; inflight_settings_del(settings, mem); settings = next; } nghttp2_stream_free(&session->root); /* Have to free streams first, so that we can check stream->item->queued */ nghttp2_map_each_free(&session->streams, free_streams, session); nghttp2_map_free(&session->streams); ob_q_free(&session->ob_urgent, mem); ob_q_free(&session->ob_reg, mem); ob_q_free(&session->ob_syn, mem); active_outbound_item_reset(&session->aob, mem); session_inbound_frame_reset(session); nghttp2_hd_deflate_free(&session->hd_deflater); nghttp2_hd_inflate_free(&session->hd_inflater); nghttp2_bufs_free(&session->aob.framebufs); nghttp2_mem_free(mem, session); } int nghttp2_session_reprioritize_stream( nghttp2_session *session, nghttp2_stream *stream, const nghttp2_priority_spec *pri_spec_in) { int rv; nghttp2_stream *dep_stream = NULL; nghttp2_priority_spec pri_spec_default; const nghttp2_priority_spec *pri_spec = pri_spec_in; assert(pri_spec->stream_id != stream->stream_id); if (!nghttp2_stream_in_dep_tree(stream)) { return 0; } if (pri_spec->stream_id != 0) { dep_stream = nghttp2_session_get_stream_raw(session, pri_spec->stream_id); if (!dep_stream && session_detect_idle_stream(session, pri_spec->stream_id)) { nghttp2_priority_spec_default_init(&pri_spec_default); dep_stream = nghttp2_session_open_stream( session, pri_spec->stream_id, NGHTTP2_FLAG_NONE, &pri_spec_default, NGHTTP2_STREAM_IDLE, NULL); if (dep_stream == NULL) { return NGHTTP2_ERR_NOMEM; } } else if (!dep_stream || !nghttp2_stream_in_dep_tree(dep_stream)) { nghttp2_priority_spec_default_init(&pri_spec_default); pri_spec = &pri_spec_default; } } if (pri_spec->stream_id == 0) { dep_stream = &session->root; } else if (nghttp2_stream_dep_find_ancestor(dep_stream, stream)) { DEBUGF("stream: cycle detected, dep_stream(%p)=%d stream(%p)=%d\n", dep_stream, dep_stream->stream_id, stream, stream->stream_id); nghttp2_stream_dep_remove_subtree(dep_stream); rv = nghttp2_stream_dep_add_subtree(stream->dep_prev, dep_stream); if (rv != 0) { return rv; } } assert(dep_stream); if (dep_stream == stream->dep_prev && !pri_spec->exclusive) { /* This is minor optimization when just weight is changed. 
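     This path is hit, for example, when the peer re-sends PRIORITY
     with the same parent and no exclusive bit but a new weight.  A
     hypothetical client-side sketch that exercises it through the
     public API (assuming the stream currently depends on the
     connection root):

       nghttp2_priority_spec pri_spec;
       // same parent (0 = root), new weight 32, not exclusive
       nghttp2_priority_spec_init(&pri_spec, 0, 32, 0);
       nghttp2_submit_priority(session, NGHTTP2_FLAG_NONE, stream_id,
                               &pri_spec);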
*/
    nghttp2_stream_change_weight(stream, pri_spec->weight);

    return 0;
  }

  nghttp2_stream_dep_remove_subtree(stream);

  /* We have to update weight after removing stream from tree */
  stream->weight = pri_spec->weight;

  if (pri_spec->exclusive) {
    rv = nghttp2_stream_dep_insert_subtree(dep_stream, stream);
  } else {
    rv = nghttp2_stream_dep_add_subtree(dep_stream, stream);
  }

  if (rv != 0) {
    return rv;
  }

  return 0;
}

int nghttp2_session_add_item(nghttp2_session *session,
                             nghttp2_outbound_item *item) {
  /* TODO Return error if stream is not found for the frame requiring
     stream presence. */
  int rv = 0;
  nghttp2_stream *stream;
  nghttp2_frame *frame;

  frame = &item->frame;
  stream = nghttp2_session_get_stream(session, frame->hd.stream_id);

  switch (frame->hd.type) {
  case NGHTTP2_DATA:
    if (!stream) {
      return NGHTTP2_ERR_STREAM_CLOSED;
    }

    if (stream->item) {
      return NGHTTP2_ERR_DATA_EXIST;
    }

    rv = nghttp2_stream_attach_item(stream, item);

    if (rv != 0) {
      return rv;
    }

    return 0;
  case NGHTTP2_HEADERS:
    /* We push request HEADERS and push response HEADERS to
       dedicated queue because their transmission is affected by
       SETTINGS_MAX_CONCURRENT_STREAMS */
    /* TODO If 2 HEADERS are submitted for reserved stream, then
       both of them are queued into ob_syn, which is not
       desirable. */
    if (frame->headers.cat == NGHTTP2_HCAT_REQUEST ||
        (stream && stream->state == NGHTTP2_STREAM_RESERVED)) {
      nghttp2_outbound_queue_push(&session->ob_syn, item);
      item->queued = 1;
      return 0;
    }

    nghttp2_outbound_queue_push(&session->ob_reg, item);
    item->queued = 1;
    return 0;
  case NGHTTP2_SETTINGS:
  case NGHTTP2_PING:
    nghttp2_outbound_queue_push(&session->ob_urgent, item);
    item->queued = 1;
    return 0;
  case NGHTTP2_RST_STREAM:
    if (stream) {
      stream->state = NGHTTP2_STREAM_CLOSING;
    }
    nghttp2_outbound_queue_push(&session->ob_reg, item);
    item->queued = 1;
    return 0;
  case NGHTTP2_PUSH_PROMISE: {
    nghttp2_headers_aux_data *aux_data;
    nghttp2_priority_spec pri_spec;

    aux_data = &item->aux_data.headers;

    if (!stream) {
      return NGHTTP2_ERR_STREAM_CLOSED;
    }

    nghttp2_priority_spec_init(&pri_spec, stream->stream_id,
                               NGHTTP2_DEFAULT_WEIGHT, 0);

    if (!nghttp2_session_open_stream(
            session, frame->push_promise.promised_stream_id,
            NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_RESERVED,
            aux_data->stream_user_data)) {
      return NGHTTP2_ERR_NOMEM;
    }

    /* We don't have to call nghttp2_session_adjust_closed_stream()
       here, since stream->stream_id is local stream_id, and it does
       not affect closed stream count. */

    nghttp2_outbound_queue_push(&session->ob_reg, item);
    item->queued = 1;

    return 0;
  }
  case NGHTTP2_WINDOW_UPDATE:
    if (stream) {
      stream->window_update_queued = 1;
    } else if (frame->hd.stream_id == 0) {
      session->window_update_queued = 1;
    }
    nghttp2_outbound_queue_push(&session->ob_reg, item);
    item->queued = 1;
    return 0;
  default:
    nghttp2_outbound_queue_push(&session->ob_reg, item);
    item->queued = 1;
    return 0;
  }
}

int nghttp2_session_add_rst_stream(nghttp2_session *session, int32_t stream_id,
                                   uint32_t error_code) {
  int rv;
  nghttp2_outbound_item *item;
  nghttp2_frame *frame;
  nghttp2_stream *stream;
  nghttp2_mem *mem;

  mem = &session->mem;
  stream = nghttp2_session_get_stream(session, stream_id);
  if (stream && stream->state == NGHTTP2_STREAM_CLOSING) {
    return 0;
  }

  /* Cancel pending request HEADERS in ob_syn if this RST_STREAM
     refers to that stream.
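     Rather than emitting RST_STREAM for a request that has not even
     been sent, the queued item is merely marked canceled; the stream
     is then failed locally with the recorded error code when the item
     is popped.  A hypothetical application-side trigger for this path
     is a client aborting a request before the send loop has run:

       // request queued via nghttp2_submit_request(), then aborted
       // before nghttp2_session_send() has transmitted it
       nghttp2_submit_rst_stream(session, NGHTTP2_FLAG_NONE, stream_id,
                                 NGHTTP2_CANCEL);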
*/ if (!session->server && nghttp2_session_is_my_stream_id(session, stream_id) && nghttp2_outbound_queue_top(&session->ob_syn)) { nghttp2_headers_aux_data *aux_data; nghttp2_frame *headers_frame; headers_frame = &nghttp2_outbound_queue_top(&session->ob_syn)->frame; assert(headers_frame->hd.type == NGHTTP2_HEADERS); if (headers_frame->hd.stream_id <= stream_id && (uint32_t)stream_id < session->next_stream_id) { for (item = session->ob_syn.head; item; item = item->qnext) { aux_data = &item->aux_data.headers; if (item->frame.hd.stream_id < stream_id) { continue; } /* stream_id in ob_syn queue must be strictly increasing. If we found larger ID, then we can break here. */ if (item->frame.hd.stream_id > stream_id || aux_data->canceled) { break; } aux_data->error_code = error_code; aux_data->canceled = 1; return 0; } } } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_rst_stream_init(&frame->rst_stream, stream_id, error_code); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_rst_stream_free(&frame->rst_stream); nghttp2_mem_free(mem, item); return rv; } return 0; } nghttp2_stream *nghttp2_session_open_stream(nghttp2_session *session, int32_t stream_id, uint8_t flags, nghttp2_priority_spec *pri_spec_in, nghttp2_stream_state initial_state, void *stream_user_data) { int rv; nghttp2_stream *stream; nghttp2_stream *dep_stream = NULL; int stream_alloc = 0; nghttp2_priority_spec pri_spec_default; nghttp2_priority_spec *pri_spec = pri_spec_in; nghttp2_mem *mem; mem = &session->mem; stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream) { assert(stream->state == NGHTTP2_STREAM_IDLE); assert(nghttp2_stream_in_dep_tree(stream)); nghttp2_session_detach_idle_stream(session, stream); rv = nghttp2_stream_dep_remove(stream); if (rv != 0) { return NULL; } } else { stream = nghttp2_mem_malloc(mem, sizeof(nghttp2_stream)); if (stream == NULL) { return NULL; } stream_alloc = 1; } if (pri_spec->stream_id != 0) { dep_stream = nghttp2_session_get_stream_raw(session, pri_spec->stream_id); if (!dep_stream && session_detect_idle_stream(session, pri_spec->stream_id)) { /* Depends on idle stream, which does not exist in memory. Assign default priority for it. */ nghttp2_priority_spec_default_init(&pri_spec_default); dep_stream = nghttp2_session_open_stream( session, pri_spec->stream_id, NGHTTP2_FLAG_NONE, &pri_spec_default, NGHTTP2_STREAM_IDLE, NULL); if (dep_stream == NULL) { if (stream_alloc) { nghttp2_mem_free(mem, stream); } return NULL; } } else if (!dep_stream || !nghttp2_stream_in_dep_tree(dep_stream)) { /* If dep_stream is not part of dependency tree, stream will get default priority. This handles the case when pri_spec->stream_id == stream_id. This happens because we don't check pri_spec->stream_id against new stream ID in nghttp2_submit_request. This also handles the case when idle stream created by PRIORITY frame was opened. Somehow we first remove the idle stream from dependency tree. This is done to simplify code base, but ideally we should retain old dependency. But I'm not sure this adds values. 
*/ nghttp2_priority_spec_default_init(&pri_spec_default); pri_spec = &pri_spec_default; } } if (initial_state == NGHTTP2_STREAM_RESERVED) { flags |= NGHTTP2_STREAM_FLAG_PUSH; } if (stream_alloc) { nghttp2_stream_init(stream, stream_id, flags, initial_state, pri_spec->weight, (int32_t)session->remote_settings.initial_window_size, (int32_t)session->local_settings.initial_window_size, stream_user_data, mem); rv = nghttp2_map_insert(&session->streams, &stream->map_entry); if (rv != 0) { nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return NULL; } } else { stream->flags = flags; stream->state = initial_state; stream->weight = pri_spec->weight; stream->stream_user_data = stream_user_data; } switch (initial_state) { case NGHTTP2_STREAM_RESERVED: if (nghttp2_session_is_my_stream_id(session, stream_id)) { /* reserved (local) */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); } else { /* reserved (remote) */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); ++session->num_incoming_reserved_streams; } /* Reserved stream does not count in the concurrent streams limit. That is one of the DOS vector. */ break; case NGHTTP2_STREAM_IDLE: /* Idle stream does not count toward the concurrent streams limit. This is used as anchor node in dependency tree. */ nghttp2_session_keep_idle_stream(session, stream); break; default: if (nghttp2_session_is_my_stream_id(session, stream_id)) { ++session->num_outgoing_streams; } else { ++session->num_incoming_streams; } } if (pri_spec->stream_id == 0) { dep_stream = &session->root; } assert(dep_stream); if (pri_spec->exclusive) { rv = nghttp2_stream_dep_insert(dep_stream, stream); if (rv != 0) { return NULL; } } else { nghttp2_stream_dep_add(dep_stream, stream); } return stream; } int nghttp2_session_close_stream(nghttp2_session *session, int32_t stream_id, uint32_t error_code) { int rv; nghttp2_stream *stream; nghttp2_mem *mem; int is_my_stream_id; mem = &session->mem; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } DEBUGF("stream: stream(%p)=%d close\n", stream, stream->stream_id); if (stream->item) { nghttp2_outbound_item *item; item = stream->item; rv = nghttp2_stream_detach_item(stream); if (rv != 0) { return rv; } /* If item is queued, it will be deleted when it is popped (nghttp2_session_prep_frame() will fail). If session->aob.item points to this item, let active_outbound_item_reset() free the item. */ if (!item->queued && item != session->aob.item) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); } } /* We call on_stream_close_callback even if stream->state is NGHTTP2_STREAM_INITIAL. This will happen while sending request HEADERS, a local endpoint receives RST_STREAM for that stream. It may be PROTOCOL_ERROR, but without notifying stream closure will hang the stream in a local endpoint. 
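     Applications observe every closure through
     on_stream_close_callback.  A minimal hypothetical sketch of such
     a callback and its registration (application code, not part of
     this file):

       static int on_stream_close(nghttp2_session *session,
                                  int32_t stream_id, uint32_t error_code,
                                  void *user_data) {
         // release per-stream application state here
         fprintf(stderr, "stream %d closed, error_code=%u\n", stream_id,
                 error_code);
         return 0; // nonzero makes the library fail the whole session
       }

       nghttp2_session_callbacks_set_on_stream_close_callback(
           cbs, on_stream_close);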
*/ if (session->callbacks.on_stream_close_callback) { if (session->callbacks.on_stream_close_callback( session, stream_id, error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } is_my_stream_id = nghttp2_session_is_my_stream_id(session, stream_id); /* pushed streams which is not opened yet is not counted toward max concurrent limits */ if ((stream->flags & NGHTTP2_STREAM_FLAG_PUSH)) { if (!is_my_stream_id) { --session->num_incoming_reserved_streams; } } else { if (is_my_stream_id) { --session->num_outgoing_streams; } else { --session->num_incoming_streams; } } /* Closes both directions just in case they are not closed yet */ stream->flags |= NGHTTP2_STREAM_FLAG_CLOSED; if ((session->opt_flags & NGHTTP2_OPTMASK_NO_CLOSED_STREAMS) == 0 && session->server && !is_my_stream_id && nghttp2_stream_in_dep_tree(stream)) { /* On server side, retain stream at most MAX_CONCURRENT_STREAMS combined with the current active incoming streams to make dependency tree work better. */ nghttp2_session_keep_closed_stream(session, stream); } else { rv = nghttp2_session_destroy_stream(session, stream); if (rv != 0) { return rv; } } return 0; } int nghttp2_session_destroy_stream(nghttp2_session *session, nghttp2_stream *stream) { nghttp2_mem *mem; int rv; DEBUGF("stream: destroy closed stream(%p)=%d\n", stream, stream->stream_id); mem = &session->mem; if (nghttp2_stream_in_dep_tree(stream)) { rv = nghttp2_stream_dep_remove(stream); if (rv != 0) { return rv; } } nghttp2_map_remove(&session->streams, stream->stream_id); nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return 0; } void nghttp2_session_keep_closed_stream(nghttp2_session *session, nghttp2_stream *stream) { DEBUGF("stream: keep closed stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); if (session->closed_stream_tail) { session->closed_stream_tail->closed_next = stream; stream->closed_prev = session->closed_stream_tail; } else { session->closed_stream_head = stream; } session->closed_stream_tail = stream; ++session->num_closed_streams; } void nghttp2_session_keep_idle_stream(nghttp2_session *session, nghttp2_stream *stream) { DEBUGF("stream: keep idle stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); if (session->idle_stream_tail) { session->idle_stream_tail->closed_next = stream; stream->closed_prev = session->idle_stream_tail; } else { session->idle_stream_head = stream; } session->idle_stream_tail = stream; ++session->num_idle_streams; } void nghttp2_session_detach_idle_stream(nghttp2_session *session, nghttp2_stream *stream) { nghttp2_stream *prev_stream, *next_stream; DEBUGF("stream: detach idle stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); prev_stream = stream->closed_prev; next_stream = stream->closed_next; if (prev_stream) { prev_stream->closed_next = next_stream; } else { session->idle_stream_head = next_stream; } if (next_stream) { next_stream->closed_prev = prev_stream; } else { session->idle_stream_tail = prev_stream; } stream->closed_prev = NULL; stream->closed_next = NULL; --session->num_idle_streams; } int nghttp2_session_adjust_closed_stream(nghttp2_session *session) { size_t num_stream_max; int rv; if (session->local_settings.max_concurrent_streams == NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS) { num_stream_max = session->pending_local_max_concurrent_stream; } else { num_stream_max = session->local_settings.max_concurrent_streams; } DEBUGF("stream: adjusting kept closed streams num_closed_streams=%zu, " "num_incoming_streams=%zu, 
max_concurrent_streams=%zu\n",
         session->num_closed_streams, session->num_incoming_streams,
         num_stream_max);

  while (session->num_closed_streams > 0 &&
         session->num_closed_streams + session->num_incoming_streams >
             num_stream_max) {
    nghttp2_stream *head_stream;
    nghttp2_stream *next;

    head_stream = session->closed_stream_head;

    assert(head_stream);

    next = head_stream->closed_next;

    rv = nghttp2_session_destroy_stream(session, head_stream);
    if (rv != 0) {
      return rv;
    }

    /* head_stream is now freed */

    session->closed_stream_head = next;

    if (session->closed_stream_head) {
      session->closed_stream_head->closed_prev = NULL;
    } else {
      session->closed_stream_tail = NULL;
    }

    --session->num_closed_streams;
  }

  return 0;
}

int nghttp2_session_adjust_idle_stream(nghttp2_session *session) {
  size_t max;
  int rv;

  /* Make minimum number of idle streams 16, and maximum 100, which
     are arbitrarily chosen numbers. */
  max = nghttp2_min(
      100, nghttp2_max(
               16, nghttp2_min(session->local_settings.max_concurrent_streams,
                               session->pending_local_max_concurrent_stream)));

  DEBUGF("stream: adjusting kept idle streams num_idle_streams=%zu, max=%zu\n",
         session->num_idle_streams, max);

  while (session->num_idle_streams > max) {
    nghttp2_stream *head;
    nghttp2_stream *next;

    head = session->idle_stream_head;
    assert(head);

    next = head->closed_next;

    rv = nghttp2_session_destroy_stream(session, head);
    if (rv != 0) {
      return rv;
    }

    /* head is now destroyed */

    session->idle_stream_head = next;

    if (session->idle_stream_head) {
      session->idle_stream_head->closed_prev = NULL;
    } else {
      session->idle_stream_tail = NULL;
    }

    --session->num_idle_streams;
  }

  return 0;
}

/*
 * Closes stream with stream ID |stream_id| if both transmission and
 * reception of the stream were disallowed. The |error_code| indicates
 * the reason for the closure.
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_INVALID_ARGUMENT
 *     The stream is not found.
 * NGHTTP2_ERR_CALLBACK_FAILURE
 *     The callback function failed.
 */
int nghttp2_session_close_stream_if_shut_rdwr(nghttp2_session *session,
                                              nghttp2_stream *stream) {
  if ((stream->shut_flags & NGHTTP2_SHUT_RDWR) == NGHTTP2_SHUT_RDWR) {
    return nghttp2_session_close_stream(session, stream->stream_id,
                                        NGHTTP2_NO_ERROR);
  }
  return 0;
}

/*
 * Returns nonzero if local endpoint allows reception of new stream
 * from remote.
 */
static int session_allow_incoming_new_stream(nghttp2_session *session) {
  return (session->goaway_flags &
          (NGHTTP2_GOAWAY_TERM_ON_SEND | NGHTTP2_GOAWAY_SENT)) == 0;
}

/*
 * This function returns nonzero if session is closing.
 */
static int session_is_closing(nghttp2_session *session) {
  return (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) != 0 ||
         (nghttp2_session_want_read(session) == 0 &&
          nghttp2_session_want_write(session) == 0);
}

/*
 * Check that we can send a frame to the |stream|. This function
 * returns 0 if we can send a frame to the |stream|, or one of the
 * following negative error codes:
 *
 * NGHTTP2_ERR_STREAM_CLOSED
 *     The stream is already closed.
 * NGHTTP2_ERR_STREAM_SHUT_WR
 *     The stream is half-closed for transmission.
 * NGHTTP2_ERR_SESSION_CLOSING
 *     This session is closing.
*/ static int session_predicate_for_stream_send(nghttp2_session *session, nghttp2_stream *stream) { if (stream == NULL) { return NGHTTP2_ERR_STREAM_CLOSED; } if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } if (stream->shut_flags & NGHTTP2_SHUT_WR) { return NGHTTP2_ERR_STREAM_SHUT_WR; } return 0; } int nghttp2_session_check_request_allowed(nghttp2_session *session) { return !session->server && session->next_stream_id <= INT32_MAX && (session->goaway_flags & NGHTTP2_GOAWAY_RECV) == 0 && !session_is_closing(session); } /* * This function checks request HEADERS frame, which opens stream, can * be sent at this time. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because of GOAWAY: session is * going down or received last_stream_id is strictly less than * frame->hd.stream_id. * NGHTTP2_ERR_STREAM_CLOSING * request HEADERS was canceled by RST_STREAM while it is in queue. */ static int session_predicate_request_headers_send(nghttp2_session *session, nghttp2_outbound_item *item) { if (item->aux_data.headers.canceled) { return NGHTTP2_ERR_STREAM_CLOSING; } /* If we are terminating session (NGHTTP2_GOAWAY_TERM_ON_SEND), GOAWAY was received from peer, or session is about to close, new request is not allowed. */ if ((session->goaway_flags & NGHTTP2_GOAWAY_RECV) || session_is_closing(session)) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks HEADERS, which is the first frame from the * server, with the |stream| can be sent at this time. The |stream| * can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_INVALID_STREAM_ID * The stream ID is invalid. * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. * NGHTTP2_ERR_PROTO * Client side attempted to send response. */ static int session_predicate_response_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (!session->server) { return NGHTTP2_ERR_PROTO; } if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { return NGHTTP2_ERR_INVALID_STREAM_ID; } switch (stream->state) { case NGHTTP2_STREAM_OPENING: return 0; case NGHTTP2_STREAM_CLOSING: return NGHTTP2_ERR_STREAM_CLOSING; default: return NGHTTP2_ERR_INVALID_STREAM_STATE; } } /* * This function checks HEADERS for reserved stream can be sent. The * |stream| must be reserved state and the |session| is server side. * The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed. * NGHTTP2_ERR_STREAM_SHUT_WR * The stream is half-closed for transmission. * NGHTTP2_ERR_PROTO * The stream is not reserved state * NGHTTP2_ERR_STREAM_CLOSED * RST_STREAM was queued for this stream. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because GOAWAY is already sent or * received. 
* NGHTTP2_ERR_PROTO * Client side attempted to send push response. */ static int session_predicate_push_response_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; /* TODO Should disallow HEADERS if GOAWAY has already been issued? */ rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (!session->server) { return NGHTTP2_ERR_PROTO; } if (stream->state != NGHTTP2_STREAM_RESERVED) { return NGHTTP2_ERR_PROTO; } if (session->goaway_flags & NGHTTP2_GOAWAY_RECV) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks HEADERS, which is neither stream-opening nor * first response header, with the |stream| can be sent at this time. * The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. */ static int session_predicate_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); switch (stream->state) { case NGHTTP2_STREAM_OPENED: return 0; case NGHTTP2_STREAM_CLOSING: return NGHTTP2_ERR_STREAM_CLOSING; default: if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { return 0; } return NGHTTP2_ERR_INVALID_STREAM_STATE; } } /* * This function checks PUSH_PROMISE frame |frame| with the |stream| * can be sent at this time. The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because GOAWAY is already sent or * received. * NGHTTP2_ERR_PROTO * The client side attempts to send PUSH_PROMISE, or the server * sends PUSH_PROMISE for the stream not initiated by the client. * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_PUSH_DISABLED * The remote peer disabled reception of PUSH_PROMISE. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. */ static int session_predicate_push_promise_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; if (!session->server) { return NGHTTP2_ERR_PROTO; } rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (session->remote_settings.enable_push == 0) { return NGHTTP2_ERR_PUSH_DISABLED; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } if (session->goaway_flags & NGHTTP2_GOAWAY_RECV) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks WINDOW_UPDATE with the stream ID |stream_id| * can be sent at this time. Note that END_STREAM flag of the previous * frame does not affect the transmission of the WINDOW_UPDATE frame. 
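 *
 * For reference, applications typically submit WINDOW_UPDATE
 * themselves only when automatic window updates are disabled via
 * nghttp2_option_set_no_auto_window_update().  A hypothetical sketch
 * of that pattern:
 *
 *   // after the application has fully processed |n| bytes of DATA,
 *   // either let the library schedule the update:
 *   nghttp2_session_consume(session, stream_id, n);
 *   // or queue an explicit update for the stream:
 *   nghttp2_submit_window_update(session, NGHTTP2_FLAG_NONE, stream_id,
 *                                (int32_t)n);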
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_STREAM_CLOSED
 *     The stream is already closed or does not exist.
 * NGHTTP2_ERR_STREAM_CLOSING
 *     RST_STREAM was queued for this stream.
 * NGHTTP2_ERR_INVALID_STREAM_STATE
 *     The state of the stream is not valid.
 * NGHTTP2_ERR_SESSION_CLOSING
 *     This session is closing.
 */
static int session_predicate_window_update_send(nghttp2_session *session,
                                                int32_t stream_id) {
  nghttp2_stream *stream;

  if (session_is_closing(session)) {
    return NGHTTP2_ERR_SESSION_CLOSING;
  }

  if (stream_id == 0) {
    /* Connection-level window update */
    return 0;
  }
  stream = nghttp2_session_get_stream(session, stream_id);
  if (stream == NULL) {
    return NGHTTP2_ERR_STREAM_CLOSED;
  }
  if (stream->state == NGHTTP2_STREAM_CLOSING) {
    return NGHTTP2_ERR_STREAM_CLOSING;
  }
  if (state_reserved_local(session, stream)) {
    return NGHTTP2_ERR_INVALID_STREAM_STATE;
  }
  return 0;
}

static int session_predicate_altsvc_send(nghttp2_session *session,
                                         int32_t stream_id) {
  nghttp2_stream *stream;

  if (session_is_closing(session)) {
    return NGHTTP2_ERR_SESSION_CLOSING;
  }

  if (stream_id == 0) {
    return 0;
  }

  stream = nghttp2_session_get_stream(session, stream_id);
  if (stream == NULL) {
    return NGHTTP2_ERR_STREAM_CLOSED;
  }
  if (stream->state == NGHTTP2_STREAM_CLOSING) {
    return NGHTTP2_ERR_STREAM_CLOSING;
  }

  return 0;
}

static int session_predicate_origin_send(nghttp2_session *session) {
  if (session_is_closing(session)) {
    return NGHTTP2_ERR_SESSION_CLOSING;
  }
  return 0;
}

/* Take into account SETTINGS max frame size and both stream-level and
   connection-level flow control here */
static ssize_t
nghttp2_session_enforce_flow_control_limits(nghttp2_session *session,
                                            nghttp2_stream *stream,
                                            ssize_t requested_window_size) {
  DEBUGF("send: remote window size connection=%d, remote max frame size=%u, "
         "stream(id %d)=%d\n",
         session->remote_window_size, session->remote_settings.max_frame_size,
         stream->stream_id, stream->remote_window_size);

  return nghttp2_min(nghttp2_min(nghttp2_min(requested_window_size,
                                             stream->remote_window_size),
                                 session->remote_window_size),
                     (int32_t)session->remote_settings.max_frame_size);
}

/*
 * Returns the maximum length of next data read. If the
 * connection-level and/or stream-level flow control are enabled, the
 * return value takes into account those current window sizes. The
 * remote peer's SETTINGS_MAX_FRAME_SIZE is also taken into account.
 */
static size_t nghttp2_session_next_data_read(nghttp2_session *session,
                                             nghttp2_stream *stream) {
  ssize_t window_size;

  window_size = nghttp2_session_enforce_flow_control_limits(
      session, stream, NGHTTP2_DATA_PAYLOADLEN);

  DEBUGF("send: available window=%zd\n", window_size);

  return window_size > 0 ? (size_t)window_size : 0;
}

/*
 * This function checks whether DATA with the |stream| can be sent at
 * this time. The |stream| can be NULL.
 *
 * This function returns 0 if it succeeds, or one of the following
 * negative error codes:
 *
 * NGHTTP2_ERR_STREAM_CLOSED
 *     The stream is already closed or does not exist.
 * NGHTTP2_ERR_STREAM_SHUT_WR
 *     The transmission is not allowed for this stream (e.g., a frame
 *     with END_STREAM flag set has already been sent)
 * NGHTTP2_ERR_STREAM_CLOSING
 *     RST_STREAM was queued for this stream.
 * NGHTTP2_ERR_INVALID_STREAM_STATE
 *     The state of the stream is not valid.
 * NGHTTP2_ERR_SESSION_CLOSING
 *     This session is closing.
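 *
 * Note that for DATA the payload length is additionally bounded by
 * nghttp2_session_enforce_flow_control_limits() above: the minimum of
 * the requested length, the stream-level window, the connection-level
 * window, and the peer's SETTINGS_MAX_FRAME_SIZE.  Worked example
 * with illustrative numbers only: requesting 16384 bytes with a
 * stream window of 10000, a connection window of 30000, and a max
 * frame size of 16384 yields min(16384, 10000, 30000, 16384) = 10000
 * bytes for the next DATA payload.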
*/ static int nghttp2_session_predicate_data_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { /* Request body data */ /* If stream->state is NGHTTP2_STREAM_CLOSING, RST_STREAM was queued but not yet sent. In this case, we won't send DATA frames. */ if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } if (stream->state == NGHTTP2_STREAM_RESERVED) { return NGHTTP2_ERR_INVALID_STREAM_STATE; } return 0; } /* Response body data */ if (stream->state == NGHTTP2_STREAM_OPENED) { return 0; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } return NGHTTP2_ERR_INVALID_STREAM_STATE; } static ssize_t session_call_select_padding(nghttp2_session *session, const nghttp2_frame *frame, size_t max_payloadlen) { ssize_t rv; if (frame->hd.length >= max_payloadlen) { return (ssize_t)frame->hd.length; } if (session->callbacks.select_padding_callback) { size_t max_paddedlen; max_paddedlen = nghttp2_min(frame->hd.length + NGHTTP2_MAX_PADLEN, max_payloadlen); rv = session->callbacks.select_padding_callback( session, frame, max_paddedlen, session->user_data); if (rv < (ssize_t)frame->hd.length || rv > (ssize_t)max_paddedlen) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return rv; } return (ssize_t)frame->hd.length; } /* Add padding to HEADERS or PUSH_PROMISE. We use frame->headers.padlen in this function to use the fact that frame->push_promise has also padlen in the same position. */ static int session_headers_add_pad(nghttp2_session *session, nghttp2_frame *frame) { int rv; ssize_t padded_payloadlen; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; size_t padlen; size_t max_payloadlen; aob = &session->aob; framebufs = &aob->framebufs; max_payloadlen = nghttp2_min(NGHTTP2_MAX_PAYLOADLEN, frame->hd.length + NGHTTP2_MAX_PADLEN); padded_payloadlen = session_call_select_padding(session, frame, max_payloadlen); if (nghttp2_is_fatal((int)padded_payloadlen)) { return (int)padded_payloadlen; } padlen = (size_t)padded_payloadlen - frame->hd.length; DEBUGF("send: padding selected: payloadlen=%zd, padlen=%zu\n", padded_payloadlen, padlen); rv = nghttp2_frame_add_pad(framebufs, &frame->hd, padlen, 0); if (rv != 0) { return rv; } frame->headers.padlen = padlen; return 0; } static size_t session_estimate_headers_payload(nghttp2_session *session, const nghttp2_nv *nva, size_t nvlen, size_t additional) { return nghttp2_hd_deflate_bound(&session->hd_deflater, nva, nvlen) + additional; } static int session_pack_extension(nghttp2_session *session, nghttp2_bufs *bufs, nghttp2_frame *frame) { ssize_t rv; nghttp2_buf *buf; size_t buflen; size_t framelen; assert(session->callbacks.pack_extension_callback); buf = &bufs->head->buf; buflen = nghttp2_min(nghttp2_buf_avail(buf), NGHTTP2_MAX_PAYLOADLEN); rv = session->callbacks.pack_extension_callback(session, buf->last, buflen, frame, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return (int)rv; } if (rv < 0 || (size_t)rv > buflen) { return NGHTTP2_ERR_CALLBACK_FAILURE; } framelen = (size_t)rv; frame->hd.length = framelen; assert(buf->pos == buf->last); buf->last += framelen; buf->pos -= NGHTTP2_FRAME_HDLEN; nghttp2_frame_pack_frame_hd(buf->pos, &frame->hd); return 0; } /* * This function serializes frame for transmission. 
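 *
 * Serialization of non-builtin extension frames is delegated to the
 * application through pack_extension_callback (see
 * session_pack_extension() above).  A hypothetical sketch of such a
 * callback and its registration (application code, not part of this
 * file):
 *
 *   static ssize_t pack_ext(nghttp2_session *session, uint8_t *buf,
 *                           size_t len, const nghttp2_frame *frame,
 *                           void *user_data) {
 *     static const char payload[] = "example"; // made-up payload
 *     if (sizeof(payload) - 1 > len) {
 *       return NGHTTP2_ERR_CALLBACK_FAILURE;
 *     }
 *     memcpy(buf, payload, sizeof(payload) - 1);
 *     return (ssize_t)(sizeof(payload) - 1); // bytes written to |buf|
 *   }
 *
 *   nghttp2_session_callbacks_set_pack_extension_callback(cbs, pack_ext);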
* * This function returns 0 if it succeeds, or one of negative error * codes, including both fatal and non-fatal ones. */ static int session_prep_frame(nghttp2_session *session, nghttp2_outbound_item *item) { int rv; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; frame = &item->frame; switch (frame->hd.type) { case NGHTTP2_DATA: { size_t next_readmax; nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream) { assert(stream->item == item); } rv = nghttp2_session_predicate_data_send(session, stream); if (rv != 0) { // If stream was already closed, nghttp2_session_get_stream() // returns NULL, but item is still attached to the stream. // Search stream including closed again. stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (stream) { int rv2; rv2 = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv2)) { return rv2; } } return rv; } /* Assuming stream is not NULL */ assert(stream); next_readmax = nghttp2_session_next_data_read(session, stream); if (next_readmax == 0) { /* This must be true since we only pop DATA frame item from queue when session->remote_window_size > 0 */ assert(session->remote_window_size > 0); rv = nghttp2_stream_defer_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } session->aob.item = NULL; active_outbound_item_reset(&session->aob, mem); return NGHTTP2_ERR_DEFERRED; } rv = nghttp2_session_pack_data(session, &session->aob.framebufs, next_readmax, frame, &item->aux_data.data, stream); if (rv == NGHTTP2_ERR_PAUSE) { return rv; } if (rv == NGHTTP2_ERR_DEFERRED) { rv = nghttp2_stream_defer_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_USER); if (nghttp2_is_fatal(rv)) { return rv; } session->aob.item = NULL; active_outbound_item_reset(&session->aob, mem); return NGHTTP2_ERR_DEFERRED; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_add_rst_stream(session, frame->hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } if (rv != 0) { int rv2; rv2 = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv2)) { return rv2; } return rv; } return 0; } case NGHTTP2_HEADERS: { nghttp2_headers_aux_data *aux_data; size_t estimated_payloadlen; aux_data = &item->aux_data.headers; if (frame->headers.cat == NGHTTP2_HCAT_REQUEST) { /* initial HEADERS, which opens stream */ nghttp2_stream *stream; stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->headers.pri_spec, NGHTTP2_STREAM_INITIAL, aux_data->stream_user_data); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream() here, since we don't keep closed stream in client side */ rv = session_predicate_request_headers_send(session, item); if (rv != 0) { return rv; } if (session_enforce_http_messaging(session)) { nghttp2_http_record_request_method(stream, frame); } } else { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream && stream->state == NGHTTP2_STREAM_RESERVED) { rv = session_predicate_push_response_headers_send(session, stream); if (rv == 0) { frame->headers.cat = NGHTTP2_HCAT_PUSH_RESPONSE; if (aux_data->stream_user_data) { stream->stream_user_data = aux_data->stream_user_data; } } } else if (session_predicate_response_headers_send(session, stream) == 0) { 
frame->headers.cat = NGHTTP2_HCAT_RESPONSE; rv = 0; } else { frame->headers.cat = NGHTTP2_HCAT_HEADERS; rv = session_predicate_headers_send(session, stream); } if (rv != 0) { return rv; } } estimated_payloadlen = session_estimate_headers_payload( session, frame->headers.nva, frame->headers.nvlen, NGHTTP2_PRIORITY_SPECLEN); if (estimated_payloadlen > session->max_send_header_block_length) { return NGHTTP2_ERR_FRAME_SIZE_ERROR; } rv = nghttp2_frame_pack_headers(&session->aob.framebufs, &frame->headers, &session->hd_deflater); if (rv != 0) { return rv; } DEBUGF("send: before padding, HEADERS serialized in %zd bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); rv = session_headers_add_pad(session, frame); if (rv != 0) { return rv; } DEBUGF("send: HEADERS finally serialized in %zd bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); if (frame->headers.cat == NGHTTP2_HCAT_REQUEST) { assert(session->last_sent_stream_id < frame->hd.stream_id); session->last_sent_stream_id = frame->hd.stream_id; } return 0; } case NGHTTP2_PRIORITY: { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } /* PRIORITY frame can be sent at any time and to any stream ID. */ nghttp2_frame_pack_priority(&session->aob.framebufs, &frame->priority); /* Peer can send PRIORITY frame against idle stream to create "anchor" in dependency tree. Only client can do this in nghttp2. In nghttp2, only server retains non-active (closed or idle) streams in memory, so we don't open stream here. */ return 0; } case NGHTTP2_RST_STREAM: if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } nghttp2_frame_pack_rst_stream(&session->aob.framebufs, &frame->rst_stream); return 0; case NGHTTP2_SETTINGS: { if (frame->hd.flags & NGHTTP2_FLAG_ACK) { assert(session->obq_flood_counter_ > 0); --session->obq_flood_counter_; /* When session is about to close, don't send SETTINGS ACK. We are required to send SETTINGS without ACK though; for example, we have to send SETTINGS as a part of connection preface. */ if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } } rv = nghttp2_frame_pack_settings(&session->aob.framebufs, &frame->settings); if (rv != 0) { return rv; } return 0; } case NGHTTP2_PUSH_PROMISE: { nghttp2_stream *stream; size_t estimated_payloadlen; /* stream could be NULL if associated stream was already closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* predicate should fail if stream is NULL. 
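     In that case session_predicate_push_promise_send() returns
     NGHTTP2_ERR_STREAM_CLOSED via session_predicate_for_stream_send().
     For reference, a server reaches this code by pushing a promised
     stream through the public API; hypothetical sketch, where |nva|
     and |nvlen| describe the promised request:

       int32_t promised_id = nghttp2_submit_push_promise(
           session, NGHTTP2_FLAG_NONE, stream_id, nva, nvlen, NULL);
       if (promised_id > 0) {
         // later, submit the pushed response on |promised_id| with
         // nghttp2_submit_response()
       }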
*/ rv = session_predicate_push_promise_send(session, stream); if (rv != 0) { return rv; } assert(stream); estimated_payloadlen = session_estimate_headers_payload( session, frame->push_promise.nva, frame->push_promise.nvlen, 0); if (estimated_payloadlen > session->max_send_header_block_length) { return NGHTTP2_ERR_FRAME_SIZE_ERROR; } rv = nghttp2_frame_pack_push_promise( &session->aob.framebufs, &frame->push_promise, &session->hd_deflater); if (rv != 0) { return rv; } rv = session_headers_add_pad(session, frame); if (rv != 0) { return rv; } assert(session->last_sent_stream_id + 2 <= frame->push_promise.promised_stream_id); session->last_sent_stream_id = frame->push_promise.promised_stream_id; return 0; } case NGHTTP2_PING: if (frame->hd.flags & NGHTTP2_FLAG_ACK) { assert(session->obq_flood_counter_ > 0); --session->obq_flood_counter_; } /* PING frame is allowed to be sent unless termination GOAWAY is sent */ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) { return NGHTTP2_ERR_SESSION_CLOSING; } nghttp2_frame_pack_ping(&session->aob.framebufs, &frame->ping); return 0; case NGHTTP2_GOAWAY: rv = nghttp2_frame_pack_goaway(&session->aob.framebufs, &frame->goaway); if (rv != 0) { return rv; } session->local_last_stream_id = frame->goaway.last_stream_id; return 0; case NGHTTP2_WINDOW_UPDATE: rv = session_predicate_window_update_send(session, frame->hd.stream_id); if (rv != 0) { return rv; } nghttp2_frame_pack_window_update(&session->aob.framebufs, &frame->window_update); return 0; case NGHTTP2_CONTINUATION: /* We never handle CONTINUATION here. */ assert(0); return 0; default: { nghttp2_ext_aux_data *aux_data; /* extension frame */ aux_data = &item->aux_data.ext; if (aux_data->builtin == 0) { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } return session_pack_extension(session, &session->aob.framebufs, frame); } switch (frame->hd.type) { case NGHTTP2_ALTSVC: rv = session_predicate_altsvc_send(session, frame->hd.stream_id); if (rv != 0) { return rv; } nghttp2_frame_pack_altsvc(&session->aob.framebufs, &frame->ext); return 0; case NGHTTP2_ORIGIN: rv = session_predicate_origin_send(session); if (rv != 0) { return rv; } rv = nghttp2_frame_pack_origin(&session->aob.framebufs, &frame->ext); if (rv != 0) { return rv; } return 0; default: /* Unreachable here */ assert(0); return 0; } } } } nghttp2_outbound_item * nghttp2_session_get_next_ob_item(nghttp2_session *session) { if (nghttp2_outbound_queue_top(&session->ob_urgent)) { return nghttp2_outbound_queue_top(&session->ob_urgent); } if (nghttp2_outbound_queue_top(&session->ob_reg)) { return nghttp2_outbound_queue_top(&session->ob_reg); } if (!session_is_outgoing_concurrent_streams_max(session)) { if (nghttp2_outbound_queue_top(&session->ob_syn)) { return nghttp2_outbound_queue_top(&session->ob_syn); } } if (session->remote_window_size > 0) { return nghttp2_stream_next_outbound_item(&session->root); } return NULL; } nghttp2_outbound_item * nghttp2_session_pop_next_ob_item(nghttp2_session *session) { nghttp2_outbound_item *item; item = nghttp2_outbound_queue_top(&session->ob_urgent); if (item) { nghttp2_outbound_queue_pop(&session->ob_urgent); item->queued = 0; return item; } item = nghttp2_outbound_queue_top(&session->ob_reg); if (item) { nghttp2_outbound_queue_pop(&session->ob_reg); item->queued = 0; return item; } if (!session_is_outgoing_concurrent_streams_max(session)) { item = nghttp2_outbound_queue_top(&session->ob_syn); if (item) { nghttp2_outbound_queue_pop(&session->ob_syn); item->queued = 0; return item; } } if 
(session->remote_window_size > 0) { return nghttp2_stream_next_outbound_item(&session->root); } return NULL; } static int session_call_before_frame_send(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.before_frame_send_callback) { rv = session->callbacks.before_frame_send_callback(session, frame, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_frame_send(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.on_frame_send_callback) { rv = session->callbacks.on_frame_send_callback(session, frame, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int find_stream_on_goaway_func(nghttp2_map_entry *entry, void *ptr) { nghttp2_close_stream_on_goaway_arg *arg; nghttp2_stream *stream; arg = (nghttp2_close_stream_on_goaway_arg *)ptr; stream = (nghttp2_stream *)entry; if (nghttp2_session_is_my_stream_id(arg->session, stream->stream_id)) { if (arg->incoming) { return 0; } } else if (!arg->incoming) { return 0; } if (stream->state != NGHTTP2_STREAM_IDLE && (stream->flags & NGHTTP2_STREAM_FLAG_CLOSED) == 0 && stream->stream_id > arg->last_stream_id) { /* We are collecting streams to close because we cannot call nghttp2_session_close_stream() inside nghttp2_map_each(). Reuse closed_next member.. bad choice? */ assert(stream->closed_next == NULL); assert(stream->closed_prev == NULL); if (arg->head) { stream->closed_next = arg->head; arg->head = stream; } else { arg->head = stream; } } return 0; } /* Closes non-idle and non-closed streams whose stream ID > last_stream_id. If incoming is nonzero, we are going to close incoming streams. Otherwise, close outgoing streams. */ static int session_close_stream_on_goaway(nghttp2_session *session, int32_t last_stream_id, int incoming) { int rv; nghttp2_stream *stream, *next_stream; nghttp2_close_stream_on_goaway_arg arg = {session, NULL, last_stream_id, incoming}; rv = nghttp2_map_each(&session->streams, find_stream_on_goaway_func, &arg); assert(rv == 0); stream = arg.head; while (stream) { next_stream = stream->closed_next; stream->closed_next = NULL; rv = nghttp2_session_close_stream(session, stream->stream_id, NGHTTP2_REFUSED_STREAM); /* stream may be deleted here */ stream = next_stream; if (nghttp2_is_fatal(rv)) { /* Clean up closed_next member just in case */ while (stream) { next_stream = stream->closed_next; stream->closed_next = NULL; stream = next_stream; } return rv; } } return 0; } static void reschedule_stream(nghttp2_stream *stream) { stream->last_writelen = stream->item->frame.hd.length; nghttp2_stream_reschedule(stream); } static int session_update_stream_consumed_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size); static int session_update_connection_consumed_size(nghttp2_session *session, size_t delta_size); /* * Called after a frame is sent. This function runs * on_frame_send_callback and handles stream closure upon END_STREAM * or RST_STREAM. This function does not reset session->aob. It is a * responsibility of session_after_frame_sent2. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. 
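 *
 * The on_frame_send_callback invoked from here is the application's
 * hook for observing every outgoing frame. A minimal hypothetical
 * sketch:
 *
 *   static int on_frame_send(nghttp2_session *session,
 *                            const nghttp2_frame *frame, void *user_data) {
 *     fprintf(stderr, "sent frame type=%u stream=%d\n", frame->hd.type,
 *             frame->hd.stream_id);
 *     return 0;
 *   }
 *
 *   nghttp2_session_callbacks_set_on_frame_send_callback(cbs,
 *                                                        on_frame_send);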
*/ static int session_after_frame_sent1(nghttp2_session *session) { int rv; nghttp2_active_outbound_item *aob = &session->aob; nghttp2_outbound_item *item = aob->item; nghttp2_bufs *framebufs = &aob->framebufs; nghttp2_frame *frame; nghttp2_stream *stream; frame = &item->frame; if (frame->hd.type == NGHTTP2_DATA) { nghttp2_data_aux_data *aux_data; aux_data = &item->aux_data.data; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* We update flow control window after a frame was completely sent. This is possible because we choose payload length not to exceed the window */ session->remote_window_size -= (int32_t)frame->hd.length; if (stream) { stream->remote_window_size -= (int32_t)frame->hd.length; } if (stream && aux_data->eof) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } /* Call on_frame_send_callback after nghttp2_stream_detach_item(), so that application can issue nghttp2_submit_data() in the callback. */ if (session->callbacks.on_frame_send_callback) { rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } } if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { int stream_closed; stream_closed = (stream->shut_flags & NGHTTP2_SHUT_RDWR) == NGHTTP2_SHUT_RDWR; nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* stream may be NULL if it was closed */ if (stream_closed) { stream = NULL; } } return 0; } if (session->callbacks.on_frame_send_callback) { rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* non-DATA frame */ if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_PUSH_PROMISE) { if (nghttp2_bufs_next_present(framebufs)) { DEBUGF("send: CONTINUATION exists, just return\n"); return 0; } } rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } switch (frame->hd.type) { case NGHTTP2_HEADERS: { nghttp2_headers_aux_data *aux_data; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } switch (frame->headers.cat) { case NGHTTP2_HCAT_REQUEST: { stream->state = NGHTTP2_STREAM_OPENING; if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); } rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; if (aux_data->data_prd.read_callback) { /* nghttp2_submit_data() makes a copy of aux_data->data_prd */ rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, frame->hd.stream_id, &aux_data->data_prd); if (nghttp2_is_fatal(rv)) { return rv; } /* TODO nghttp2_submit_data() may fail if stream has already DATA frame item. We might have to handle it here. 
*/ } return 0; } case NGHTTP2_HCAT_PUSH_RESPONSE: stream->flags = (uint8_t)(stream->flags & ~NGHTTP2_STREAM_FLAG_PUSH); ++session->num_outgoing_streams; /* Fall through */ case NGHTTP2_HCAT_RESPONSE: stream->state = NGHTTP2_STREAM_OPENED; /* Fall through */ case NGHTTP2_HCAT_HEADERS: if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); } rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; if (aux_data->data_prd.read_callback) { rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, frame->hd.stream_id, &aux_data->data_prd); if (nghttp2_is_fatal(rv)) { return rv; } /* TODO nghttp2_submit_data() may fail if stream has already DATA frame item. We might have to handle it here. */ } return 0; default: /* Unreachable */ assert(0); return 0; } } case NGHTTP2_PRIORITY: if (session->server) { return 0; } stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (!stream) { if (!session_detect_idle_stream(session, frame->hd.stream_id)) { return 0; } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_FLAG_NONE, &frame->priority.pri_spec, NGHTTP2_STREAM_IDLE, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } } else { rv = nghttp2_session_reprioritize_stream(session, stream, &frame->priority.pri_spec); if (nghttp2_is_fatal(rv)) { return rv; } } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } return 0; case NGHTTP2_RST_STREAM: rv = nghttp2_session_close_stream(session, frame->hd.stream_id, frame->rst_stream.error_code); if (nghttp2_is_fatal(rv)) { return rv; } return 0; case NGHTTP2_GOAWAY: { nghttp2_goaway_aux_data *aux_data; aux_data = &item->aux_data.goaway; if ((aux_data->flags & NGHTTP2_GOAWAY_AUX_SHUTDOWN_NOTICE) == 0) { if (aux_data->flags & NGHTTP2_GOAWAY_AUX_TERM_ON_SEND) { session->goaway_flags |= NGHTTP2_GOAWAY_TERM_SENT; } session->goaway_flags |= NGHTTP2_GOAWAY_SENT; rv = session_close_stream_on_goaway(session, frame->goaway.last_stream_id, 1); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } case NGHTTP2_WINDOW_UPDATE: if (frame->hd.stream_id == 0) { session->window_update_queued = 0; if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { rv = session_update_connection_consumed_size(session, 0); } else { rv = nghttp2_session_update_recv_connection_window_size(session, 0); } if (nghttp2_is_fatal(rv)) { return rv; } return 0; } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } stream->window_update_queued = 0; /* We don't have to send WINDOW_UPDATE if END_STREAM from peer is seen. */ if (stream->shut_flags & NGHTTP2_SHUT_RD) { return 0; } if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { rv = session_update_stream_consumed_size(session, stream, 0); } else { rv = nghttp2_session_update_recv_stream_window_size(session, stream, 0, 1); } if (nghttp2_is_fatal(rv)) { return rv; } return 0; default: return 0; } } /* * Called after a frame is sent and session_after_frame_sent1. This * function is responsible for resetting session->aob. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed.
*/ static int session_after_frame_sent2(nghttp2_session *session) { int rv; nghttp2_active_outbound_item *aob = &session->aob; nghttp2_outbound_item *item = aob->item; nghttp2_bufs *framebufs = &aob->framebufs; nghttp2_frame *frame; nghttp2_mem *mem; nghttp2_stream *stream; nghttp2_data_aux_data *aux_data; mem = &session->mem; frame = &item->frame; if (frame->hd.type != NGHTTP2_DATA) { if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_PUSH_PROMISE) { if (nghttp2_bufs_next_present(framebufs)) { framebufs->cur = framebufs->cur->next; DEBUGF("send: next CONTINUATION frame, %zu bytes\n", nghttp2_buf_len(&framebufs->cur->buf)); return 0; } } active_outbound_item_reset(&session->aob, mem); return 0; } /* DATA frame */ aux_data = &item->aux_data.data; /* On EOF, we have already detached data. Please note that application may issue nghttp2_submit_data() in on_frame_send_callback (call from session_after_frame_sent1), which attach data to stream. We don't want to detach it. */ if (aux_data->eof) { active_outbound_item_reset(aob, mem); return 0; } /* Reset no_copy here because next write may not use this. */ aux_data->no_copy = 0; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* If session is closed or RST_STREAM was queued, we won't send further data. */ if (nghttp2_session_predicate_data_send(session, stream) != 0) { if (stream) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } } active_outbound_item_reset(aob, mem); return 0; } aob->item = NULL; active_outbound_item_reset(&session->aob, mem); return 0; } static int session_call_send_data(nghttp2_session *session, nghttp2_outbound_item *item, nghttp2_bufs *framebufs) { int rv; nghttp2_buf *buf; size_t length; nghttp2_frame *frame; nghttp2_data_aux_data *aux_data; buf = &framebufs->cur->buf; frame = &item->frame; length = frame->hd.length - frame->data.padlen; aux_data = &item->aux_data.data; rv = session->callbacks.send_data_callback(session, frame, buf->pos, length, &aux_data->data_prd.source, session->user_data); switch (rv) { case 0: case NGHTTP2_ERR_WOULDBLOCK: case NGHTTP2_ERR_PAUSE: case NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE: return rv; default: return NGHTTP2_ERR_CALLBACK_FAILURE; } } static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, const uint8_t **data_ptr, int fast_cb) { int rv; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; nghttp2_mem *mem; mem = &session->mem; aob = &session->aob; framebufs = &aob->framebufs; /* We may have idle streams more than we expect (e.g., nghttp2_session_change_stream_priority() or nghttp2_session_create_idle_stream()). Adjust them here. */ rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } for (;;) { switch (aob->state) { case NGHTTP2_OB_POP_ITEM: { nghttp2_outbound_item *item; item = nghttp2_session_pop_next_ob_item(session); if (item == NULL) { return 0; } rv = session_prep_frame(session, item); if (rv == NGHTTP2_ERR_PAUSE) { return 0; } if (rv == NGHTTP2_ERR_DEFERRED) { DEBUGF("send: frame transmission deferred\n"); break; } if (rv < 0) { int32_t opened_stream_id = 0; uint32_t error_code = NGHTTP2_INTERNAL_ERROR; DEBUGF("send: frame preparation failed with %s\n", nghttp2_strerror(rv)); /* TODO If the error comes from compressor, the connection must be closed. 
*/ if (item->frame.hd.type != NGHTTP2_DATA && session->callbacks.on_frame_not_send_callback && is_non_fatal(rv)) { nghttp2_frame *frame = &item->frame; /* The library is responsible for the transmission of WINDOW_UPDATE frame, so we don't call error callback for it. */ if (frame->hd.type != NGHTTP2_WINDOW_UPDATE && session->callbacks.on_frame_not_send_callback( session, frame, rv, session->user_data) != 0) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); return NGHTTP2_ERR_CALLBACK_FAILURE; } } /* We have to close stream opened by failed request HEADERS or PUSH_PROMISE. */ switch (item->frame.hd.type) { case NGHTTP2_HEADERS: if (item->frame.headers.cat == NGHTTP2_HCAT_REQUEST) { opened_stream_id = item->frame.hd.stream_id; if (item->aux_data.headers.canceled) { error_code = item->aux_data.headers.error_code; } else { /* Set error_code to REFUSED_STREAM so that application can send request again. */ error_code = NGHTTP2_REFUSED_STREAM; } } break; case NGHTTP2_PUSH_PROMISE: opened_stream_id = item->frame.push_promise.promised_stream_id; break; } if (opened_stream_id) { /* careful not to override rv */ int rv2; rv2 = nghttp2_session_close_stream(session, opened_stream_id, error_code); if (nghttp2_is_fatal(rv2)) { return rv2; } } nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); active_outbound_item_reset(aob, mem); if (rv == NGHTTP2_ERR_HEADER_COMP) { /* If a header compression error occurred, we should terminate the connection. */ rv = nghttp2_session_terminate_session(session, NGHTTP2_INTERNAL_ERROR); } if (nghttp2_is_fatal(rv)) { return rv; } break; } aob->item = item; nghttp2_bufs_rewind(framebufs); if (item->frame.hd.type != NGHTTP2_DATA) { nghttp2_frame *frame; frame = &item->frame; DEBUGF("send: next frame: payloadlen=%zu, type=%u, flags=0x%02x, " "stream_id=%d\n", frame->hd.length, frame->hd.type, frame->hd.flags, frame->hd.stream_id); rv = session_call_before_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (rv == NGHTTP2_ERR_CANCEL) { int32_t opened_stream_id = 0; uint32_t error_code = NGHTTP2_INTERNAL_ERROR; if (session->callbacks.on_frame_not_send_callback) { if (session->callbacks.on_frame_not_send_callback( session, frame, rv, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } /* We have to close stream opened by canceled request HEADERS or PUSH_PROMISE. */ switch (item->frame.hd.type) { case NGHTTP2_HEADERS: if (item->frame.headers.cat == NGHTTP2_HCAT_REQUEST) { opened_stream_id = item->frame.hd.stream_id; /* We don't have to check item->aux_data.headers.canceled since it has already been checked. */ /* Set error_code to REFUSED_STREAM so that application can send request again.
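
   A hypothetical client-side sketch (helper names app_ctx and
   app_resubmit_request() are ours, not part of nghttp2): the library
   reports REFUSED_STREAM here precisely because the request was never
   transmitted, so an application may safely retry it from its
   on_stream_close_callback:

     static int on_stream_close(nghttp2_session *session,
                                int32_t stream_id, uint32_t error_code,
                                void *user_data) {
       app_ctx *app = user_data; // hypothetical application context
       if (error_code == NGHTTP2_REFUSED_STREAM) {
         return app_resubmit_request(app, stream_id); // app-defined
       }
       return 0;
     }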
*/ error_code = NGHTTP2_REFUSED_STREAM; } break; case NGHTTP2_PUSH_PROMISE: opened_stream_id = item->frame.push_promise.promised_stream_id; break; } if (opened_stream_id) { /* careful not to override rv */ int rv2; rv2 = nghttp2_session_close_stream(session, opened_stream_id, error_code); if (nghttp2_is_fatal(rv2)) { return rv2; } } active_outbound_item_reset(aob, mem); break; } } else { DEBUGF("send: next frame: DATA\n"); if (item->aux_data.data.no_copy) { aob->state = NGHTTP2_OB_SEND_NO_COPY; break; } } DEBUGF("send: start transmitting frame type=%u, length=%zd\n", framebufs->cur->buf.pos[3], framebufs->cur->buf.last - framebufs->cur->buf.pos); aob->state = NGHTTP2_OB_SEND_DATA; break; } case NGHTTP2_OB_SEND_DATA: { size_t datalen; nghttp2_buf *buf; buf = &framebufs->cur->buf; if (buf->pos == buf->last) { DEBUGF("send: end transmission of a frame\n"); /* Frame has been completely sent */ if (fast_cb) { rv = session_after_frame_sent2(session); } else { rv = session_after_frame_sent1(session); if (rv < 0) { /* FATAL */ assert(nghttp2_is_fatal(rv)); return rv; } rv = session_after_frame_sent2(session); } if (rv < 0) { /* FATAL */ assert(nghttp2_is_fatal(rv)); return rv; } /* We have already adjusted the next state */ break; } *data_ptr = buf->pos; datalen = nghttp2_buf_len(buf); /* We increment the offset here. If send_callback does not send everything, we will adjust it. */ buf->pos += datalen; return (ssize_t)datalen; } case NGHTTP2_OB_SEND_NO_COPY: { nghttp2_stream *stream; nghttp2_frame *frame; int pause; DEBUGF("send: no copy DATA\n"); frame = &aob->item->frame; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream == NULL) { DEBUGF("send: no copy DATA cancelled because stream was closed\n"); active_outbound_item_reset(aob, mem); break; } rv = session_call_send_data(session, aob->item, framebufs); if (nghttp2_is_fatal(rv)) { return rv; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_add_rst_stream(session, frame->hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } active_outbound_item_reset(aob, mem); break; } if (rv == NGHTTP2_ERR_WOULDBLOCK) { return 0; } pause = (rv == NGHTTP2_ERR_PAUSE); rv = session_after_frame_sent1(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return rv; } rv = session_after_frame_sent2(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return rv; } /* We have already adjusted the next state */ if (pause) { return 0; } break; } case NGHTTP2_OB_SEND_CLIENT_MAGIC: { size_t datalen; nghttp2_buf *buf; buf = &framebufs->cur->buf; if (buf->pos == buf->last) { DEBUGF("send: end transmission of client magic\n"); active_outbound_item_reset(aob, mem); break; } *data_ptr = buf->pos; datalen = nghttp2_buf_len(buf); buf->pos += datalen; return (ssize_t)datalen; } } } } ssize_t nghttp2_session_mem_send(nghttp2_session *session, const uint8_t **data_ptr) { int rv; ssize_t len; *data_ptr = NULL; len = nghttp2_session_mem_send_internal(session, data_ptr, 1); if (len <= 0) { return len; } if (session->aob.item) { /* We have to call session_after_frame_sent1 here to handle stream closure upon transmission of frames. Otherwise, END_STREAM may reach the client before we call nghttp2_session_mem_send again, and we may end up accepting more incoming streams than allowed.
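
   A minimal caller-side sketch (hypothetical application code;
   write_all() and fd are ours, not part of nghttp2): drain the
   session repeatedly and hand each chunk to the transport:

     const uint8_t *data;
     ssize_t datalen;
     while ((datalen = nghttp2_session_mem_send(session, &data)) > 0) {
       if (write_all(fd, data, (size_t)datalen) != 0) { // app-defined I/O
         break;
       }
     }
     // datalen < 0 here is one of the fatal NGHTTP2_ERR_* codes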
*/ rv = session_after_frame_sent1(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return (ssize_t)rv; } } return len; } int nghttp2_session_send(nghttp2_session *session) { const uint8_t *data = NULL; ssize_t datalen; ssize_t sentlen; nghttp2_bufs *framebufs; framebufs = &session->aob.framebufs; for (;;) { datalen = nghttp2_session_mem_send_internal(session, &data, 0); if (datalen <= 0) { return (int)datalen; } sentlen = session->callbacks.send_callback(session, data, (size_t)datalen, 0, session->user_data); if (sentlen < 0) { if (sentlen == NGHTTP2_ERR_WOULDBLOCK) { /* Transmission canceled. Rewind the offset */ framebufs->cur->buf.pos -= datalen; return 0; } return NGHTTP2_ERR_CALLBACK_FAILURE; } /* Rewind the offset to the amount of unsent bytes */ framebufs->cur->buf.pos -= datalen - sentlen; } } static ssize_t session_recv(nghttp2_session *session, uint8_t *buf, size_t len) { ssize_t rv; rv = session->callbacks.recv_callback(session, buf, len, 0, session->user_data); if (rv > 0) { if ((size_t)rv > len) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } else if (rv < 0 && rv != NGHTTP2_ERR_WOULDBLOCK && rv != NGHTTP2_ERR_EOF) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return rv; } static int session_call_on_begin_frame(nghttp2_session *session, const nghttp2_frame_hd *hd) { int rv; if (session->callbacks.on_begin_frame_callback) { rv = session->callbacks.on_begin_frame_callback(session, hd, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_frame_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.on_frame_recv_callback) { rv = session->callbacks.on_frame_recv_callback(session, frame, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_begin_headers(nghttp2_session *session, nghttp2_frame *frame) { int rv; DEBUGF("recv: call on_begin_headers callback stream_id=%d\n", frame->hd.stream_id); if (session->callbacks.on_begin_headers_callback) { rv = session->callbacks.on_begin_headers_callback(session, frame, session->user_data); if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_header(nghttp2_session *session, const nghttp2_frame *frame, const nghttp2_hd_nv *nv) { int rv = 0; if (session->callbacks.on_header_callback2) { rv = session->callbacks.on_header_callback2( session, frame, nv->name, nv->value, nv->flags, session->user_data); } else if (session->callbacks.on_header_callback) { rv = session->callbacks.on_header_callback( session, frame, nv->name->base, nv->name->len, nv->value->base, nv->value->len, nv->flags, session->user_data); } if (rv == NGHTTP2_ERR_PAUSE || rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_call_on_invalid_header(nghttp2_session *session, const nghttp2_frame *frame, const nghttp2_hd_nv *nv) { int rv; if (session->callbacks.on_invalid_header_callback2) { rv = session->callbacks.on_invalid_header_callback2( session, frame, nv->name, nv->value, nv->flags, session->user_data); } else if (session->callbacks.on_invalid_header_callback) { rv = session->callbacks.on_invalid_header_callback( session, frame, nv->name->base, nv->name->len, nv->value->base, nv->value->len, nv->flags, session->user_data); } else { return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } if (rv == NGHTTP2_ERR_PAUSE || rv == 
NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_call_on_extension_chunk_recv_callback(nghttp2_session *session, const uint8_t *data, size_t len) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; if (session->callbacks.on_extension_chunk_recv_callback) { rv = session->callbacks.on_extension_chunk_recv_callback( session, &frame->hd, data, len, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_unpack_extension_callback(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; void *payload = NULL; rv = session->callbacks.unpack_extension_callback( session, &payload, &frame->hd, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } frame->ext.payload = payload; return 0; } /* * Handles frame size error. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. */ static int session_handle_frame_size_error(nghttp2_session *session) { /* TODO Currently no callback is called for this error, because we call this callback before reading any payload */ return nghttp2_session_terminate_session(session, NGHTTP2_FRAME_SIZE_ERROR); } static uint32_t get_error_code_from_lib_error_code(int lib_error_code) { switch (lib_error_code) { case NGHTTP2_ERR_STREAM_CLOSED: return NGHTTP2_STREAM_CLOSED; case NGHTTP2_ERR_HEADER_COMP: return NGHTTP2_COMPRESSION_ERROR; case NGHTTP2_ERR_FRAME_SIZE_ERROR: return NGHTTP2_FRAME_SIZE_ERROR; case NGHTTP2_ERR_FLOW_CONTROL: return NGHTTP2_FLOW_CONTROL_ERROR; case NGHTTP2_ERR_REFUSED_STREAM: return NGHTTP2_REFUSED_STREAM; case NGHTTP2_ERR_PROTO: case NGHTTP2_ERR_HTTP_HEADER: case NGHTTP2_ERR_HTTP_MESSAGING: return NGHTTP2_PROTOCOL_ERROR; default: return NGHTTP2_INTERNAL_ERROR; } } /* * Calls on_invalid_frame_recv_callback if it is set to |session|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_CALLBACK_FAILURE * User defined callback function fails. 
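 *
 * A minimal sketch of wiring the callback up (hypothetical application
 * code; log_invalid_frame() is ours, not part of nghttp2):
 *
 *   static int on_invalid_frame(nghttp2_session *session,
 *                               const nghttp2_frame *frame,
 *                               int lib_error_code, void *user_data) {
 *     log_invalid_frame(frame->hd.type, lib_error_code); // app-defined
 *     return 0; // returning nonzero fails the session fatally
 *   }
 *
 *   nghttp2_session_callbacks_set_on_invalid_frame_recv_callback(
 *       callbacks, on_invalid_frame);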
*/ static int session_call_on_invalid_frame_recv_callback(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code) { if (session->callbacks.on_invalid_frame_recv_callback) { if (session->callbacks.on_invalid_frame_recv_callback( session, frame, lib_error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_handle_invalid_stream2(nghttp2_session *session, int32_t stream_id, nghttp2_frame *frame, int lib_error_code) { int rv; rv = nghttp2_session_add_rst_stream( session, stream_id, get_error_code_from_lib_error_code(lib_error_code)); if (rv != 0) { return rv; } if (session->callbacks.on_invalid_frame_recv_callback) { if (session->callbacks.on_invalid_frame_recv_callback( session, frame, lib_error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_handle_invalid_stream(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code) { return session_handle_invalid_stream2(session, frame->hd.stream_id, frame, lib_error_code); } static int session_inflate_handle_invalid_stream(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code) { int rv; rv = session_handle_invalid_stream(session, frame, lib_error_code); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } /* * Handles an invalid frame which causes a connection error. */ static int session_handle_invalid_connection(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code, const char *reason) { if (session->callbacks.on_invalid_frame_recv_callback) { if (session->callbacks.on_invalid_frame_recv_callback( session, frame, lib_error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return nghttp2_session_terminate_session_with_reason( session, get_error_code_from_lib_error_code(lib_error_code), reason); } static int session_inflate_handle_invalid_connection(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code, const char *reason) { int rv; rv = session_handle_invalid_connection(session, frame, lib_error_code, reason); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } /* * Inflates the header block in the memory pointed to by |in| with |inlen| * bytes. If this function returns NGHTTP2_ERR_PAUSE, the caller must * call this function again, until it returns 0 or one of the negative * error codes. If |call_header_cb| is zero, the on_header_callback * is not invoked and the function never returns NGHTTP2_ERR_PAUSE. If * the given |in| is the last chunk of the header block, |final| must * be nonzero. If the header block is successfully processed (which is * indicated by the return value 0, NGHTTP2_ERR_PAUSE or * NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE), the number of processed * input bytes is assigned to |*readlen_ptr|. * * This function returns 0 if it succeeds, or one of the negative error * codes: * * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. * NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE * The callback returns this error code, indicating that this * stream should be RST_STREAMed. * NGHTTP2_ERR_NOMEM * Out of memory.
* NGHTTP2_ERR_PAUSE * The callback function returned NGHTTP2_ERR_PAUSE * NGHTTP2_ERR_HEADER_COMP * Header decompression failed */ static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame, size_t *readlen_ptr, uint8_t *in, size_t inlen, int final, int call_header_cb) { ssize_t proclen; int rv; int inflate_flags; nghttp2_hd_nv nv; nghttp2_stream *stream; nghttp2_stream *subject_stream; int trailer = 0; *readlen_ptr = 0; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { subject_stream = nghttp2_session_get_stream( session, frame->push_promise.promised_stream_id); } else { subject_stream = stream; trailer = session_trailer_headers(session, stream, frame); } DEBUGF("recv: decoding header block %zu bytes\n", inlen); for (;;) { inflate_flags = 0; proclen = nghttp2_hd_inflate_hd_nv(&session->hd_inflater, &nv, &inflate_flags, in, inlen, final); if (nghttp2_is_fatal((int)proclen)) { return (int)proclen; } if (proclen < 0) { if (session->iframe.state == NGHTTP2_IB_READ_HEADER_BLOCK) { if (subject_stream && subject_stream->state != NGHTTP2_STREAM_CLOSING) { /* Adding RST_STREAM here is very important. It prevents from invoking subsequent callbacks for the same stream ID. */ rv = nghttp2_session_add_rst_stream( session, subject_stream->stream_id, NGHTTP2_COMPRESSION_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } } } rv = nghttp2_session_terminate_session(session, NGHTTP2_COMPRESSION_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_HEADER_COMP; } in += proclen; inlen -= (size_t)proclen; *readlen_ptr += (size_t)proclen; DEBUGF("recv: proclen=%zd\n", proclen); if (call_header_cb && (inflate_flags & NGHTTP2_HD_INFLATE_EMIT)) { rv = 0; if (subject_stream) { if (session_enforce_http_messaging(session)) { rv = nghttp2_http_on_header(session, subject_stream, frame, &nv, trailer); if (rv == NGHTTP2_ERR_IGN_HTTP_HEADER) { /* Don't overwrite rv here */ int rv2; rv2 = session_call_on_invalid_header(session, frame, &nv); if (rv2 == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = NGHTTP2_ERR_HTTP_HEADER; } else { if (rv2 != 0) { return rv2; } /* header is ignored */ DEBUGF("recv: HTTP ignored: type=%u, id=%d, header %.*s: %.*s\n", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); rv2 = session_call_error_callback( session, NGHTTP2_ERR_HTTP_HEADER, "Ignoring received invalid HTTP header field: frame type: " "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); if (nghttp2_is_fatal(rv2)) { return rv2; } } } if (rv == NGHTTP2_ERR_HTTP_HEADER) { DEBUGF("recv: HTTP error: type=%u, id=%d, header %.*s: %.*s\n", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); rv = session_call_error_callback( session, NGHTTP2_ERR_HTTP_HEADER, "Invalid HTTP header field was received: frame type: " "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); if (nghttp2_is_fatal(rv)) { return rv; } rv = session_handle_invalid_stream2(session, subject_stream->stream_id, frame, NGHTTP2_ERR_HTTP_HEADER); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } } if (rv == 0) { rv = session_call_on_header(session, frame, &nv); /* This handles NGHTTP2_ERR_PAUSE and NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE as 
well */ if (rv != 0) { return rv; } } } } if (inflate_flags & NGHTTP2_HD_INFLATE_FINAL) { nghttp2_hd_inflate_end_headers(&session->hd_inflater); break; } if ((inflate_flags & NGHTTP2_HD_INFLATE_EMIT) == 0 && inlen == 0) { break; } } return 0; } /* * Call this function when a HEADERS frame was completely received. * * This function returns 0 if it succeeds, or one of the negative error * codes: * * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. * NGHTTP2_ERR_NOMEM * Out of memory. */ static int session_end_stream_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv; if ((frame->hd.flags & NGHTTP2_FLAG_END_STREAM) == 0) { return 0; } nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } static int session_after_header_block_received(nghttp2_session *session) { int rv = 0; nghttp2_frame *frame = &session->iframe.frame; nghttp2_stream *stream; /* We don't call on_frame_recv_callback if stream has been closed already or being closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } if (session_enforce_http_messaging(session)) { if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { nghttp2_stream *subject_stream; subject_stream = nghttp2_session_get_stream( session, frame->push_promise.promised_stream_id); if (subject_stream) { rv = nghttp2_http_on_request_headers(subject_stream, frame); } } else { assert(frame->hd.type == NGHTTP2_HEADERS); switch (frame->headers.cat) { case NGHTTP2_HCAT_REQUEST: rv = nghttp2_http_on_request_headers(stream, frame); break; case NGHTTP2_HCAT_RESPONSE: case NGHTTP2_HCAT_PUSH_RESPONSE: rv = nghttp2_http_on_response_headers(stream); break; case NGHTTP2_HCAT_HEADERS: if (stream->http_flags & NGHTTP2_HTTP_FLAG_EXPECT_FINAL_RESPONSE) { assert(!session->server); rv = nghttp2_http_on_response_headers(stream); } else { rv = nghttp2_http_on_trailer_headers(stream, frame); } break; default: assert(0); } if (rv == 0 && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { rv = nghttp2_http_on_remote_end_stream(stream); } } if (rv != 0) { int32_t stream_id; if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { stream_id = frame->push_promise.promised_stream_id; } else { stream_id = frame->hd.stream_id; } rv = session_handle_invalid_stream2(session, stream_id, frame, NGHTTP2_ERR_HTTP_MESSAGING); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.type == NGHTTP2_HEADERS && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); /* Don't call nghttp2_session_close_stream_if_shut_rdwr because RST_STREAM has been submitted. */ } return 0; } } rv = session_call_on_frame_received(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.type != NGHTTP2_HEADERS) { return 0; } return session_end_stream_headers_received(session, frame, stream); } int nghttp2_session_on_request_headers_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: stream_id == 0"); } /* If a client receives an idle stream from a server, it is invalid regardless of whether the stream ID is even or odd. This is because a client is not expected to receive a request from a server.
*/ if (!session->server) { if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: client received request"); } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } assert(session->server); if (!session_is_new_peer_stream_id(session, frame->hd.stream_id)) { if (frame->hd.stream_id == 0 || nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: invalid stream_id"); } /* RFC 7540 says if an endpoint receives a HEADERS with invalid * stream ID (e.g, numerically smaller than previous), it MUST * issue connection error with error code PROTOCOL_ERROR. It is a * bit hard to detect this, since we cannot remember all streams * we observed so far. * * You might imagine this is really easy. But no. HTTP/2 is * asynchronous protocol, and usually client and server do not * share the complete picture of open/closed stream status. For * example, after server sends RST_STREAM for a stream, client may * send trailer HEADERS for that stream. If naive server detects * that, and issued connection error, then it is a bug of server * implementation since client is not wrong if it did not get * RST_STREAM when it issued trailer HEADERS. * * At the moment, we are very conservative here. We only use * connection error if stream ID refers idle stream, or we are * sure that stream is half-closed(remote) or closed. Otherwise * we just ignore HEADERS for now. */ stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (stream && (stream->shut_flags & NGHTTP2_SHUT_RD)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } session->last_recv_stream_id = frame->hd.stream_id; if (session_is_incoming_concurrent_streams_max(session)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: max concurrent streams exceeded"); } if (!session_allow_incoming_new_stream(session)) { /* We just ignore stream after GOAWAY was sent */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (frame->headers.pri_spec.stream_id == frame->hd.stream_id) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: depend on itself"); } if (session_is_incoming_concurrent_streams_pending_max(session)) { return session_inflate_handle_invalid_stream(session, frame, NGHTTP2_ERR_REFUSED_STREAM); } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->headers.pri_spec, NGHTTP2_STREAM_OPENING, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } rv = nghttp2_session_adjust_closed_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } session->last_proc_stream_id = session->last_recv_stream_id; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_response_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv; /* This function is only called if stream->state == NGHTTP2_STREAM_OPENING and stream_id is local side initiated. 
*/ assert(stream->state == NGHTTP2_STREAM_OPENING && nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)); if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "response HEADERS: stream_id == 0"); } if (stream->shut_flags & NGHTTP2_SHUT_RD) { /* half closed (remote): from the spec: If an endpoint receives additional frames for a stream that is in this state it MUST respond with a stream error (Section 5.4.2) of type STREAM_CLOSED. We go further, and make it connection error. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } stream->state = NGHTTP2_STREAM_OPENED; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_push_response_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv = 0; assert(stream->state == NGHTTP2_STREAM_RESERVED); if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "push response HEADERS: stream_id == 0"); } if (session->server) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "HEADERS: no HEADERS allowed from client in reserved state"); } if (session_is_incoming_concurrent_streams_max(session)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "push response HEADERS: max concurrent streams exceeded"); } if (!session_allow_incoming_new_stream(session)) { /* We don't accept new stream after GOAWAY was sent. */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (session_is_incoming_concurrent_streams_pending_max(session)) { return session_inflate_handle_invalid_stream(session, frame, NGHTTP2_ERR_REFUSED_STREAM); } nghttp2_stream_promise_fulfilled(stream); if (!nghttp2_session_is_my_stream_id(session, stream->stream_id)) { --session->num_incoming_reserved_streams; } ++session->num_incoming_streams; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv = 0; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "HEADERS: stream_id == 0"); } if ((stream->shut_flags & NGHTTP2_SHUT_RD)) { /* half closed (remote): from the spec: If an endpoint receives additional frames for a stream that is in this state it MUST respond with a stream error (Section 5.4.2) of type STREAM_CLOSED. we go further, and make it connection error. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } if (nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { if (stream->state == NGHTTP2_STREAM_OPENED) { rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } /* If this is remote peer initiated stream, it is OK unless it has sent END_STREAM frame already. But if stream is in NGHTTP2_STREAM_CLOSING, we discard the frame. This is a race condition. 
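
   For example: we (a server) submit RST_STREAM on client-initiated
   stream 1, which moves it into NGHTTP2_STREAM_CLOSING locally, while
   the client's trailer HEADERS for stream 1 is already in flight.
   That HEADERS is not a peer error, so it is ignored via
   NGHTTP2_ERR_IGN_HEADER_BLOCK below rather than treated as a
   protocol violation.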
*/ if (stream->state != NGHTTP2_STREAM_CLOSING) { rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } static int session_process_headers_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_stream *stream; rv = nghttp2_frame_unpack_headers_payload(&frame->headers, iframe->sbuf.pos); if (rv != 0) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: could not unpack"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { frame->headers.cat = NGHTTP2_HCAT_REQUEST; return nghttp2_session_on_request_headers_received(session, frame); } if (stream->state == NGHTTP2_STREAM_RESERVED) { frame->headers.cat = NGHTTP2_HCAT_PUSH_RESPONSE; return nghttp2_session_on_push_response_headers_received(session, frame, stream); } if (stream->state == NGHTTP2_STREAM_OPENING && nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { frame->headers.cat = NGHTTP2_HCAT_RESPONSE; return nghttp2_session_on_response_headers_received(session, frame, stream); } frame->headers.cat = NGHTTP2_HCAT_HEADERS; return nghttp2_session_on_headers_received(session, frame, stream); } int nghttp2_session_on_priority_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "PRIORITY: stream_id == 0"); } if (frame->priority.pri_spec.stream_id == frame->hd.stream_id) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "depend on itself"); } if (!session->server) { /* Re-prioritization works only in server */ return session_call_on_frame_received(session, frame); } stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (!stream) { /* PRIORITY against idle stream can create anchor node in dependency tree. 
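
   For example (RFC 7540, section 5.3.4): a client may send PRIORITY
   for stream 7 (depending on stream 0 with weight 201) before ever
   sending HEADERS on that stream. The server then opens stream 7
   below in NGHTTP2_STREAM_IDLE state purely as a grouping node that
   later streams can name as a parent in the dependency tree.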
*/ if (!session_detect_idle_stream(session, frame->hd.stream_id)) { return 0; } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->priority.pri_spec, NGHTTP2_STREAM_IDLE, NULL); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } } else { rv = nghttp2_session_reprioritize_stream(session, stream, &frame->priority.pri_spec); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } } return session_call_on_frame_received(session, frame); } static int session_process_priority_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_priority_payload(&frame->priority, iframe->sbuf.pos); return nghttp2_session_on_priority_received(session, frame); } int nghttp2_session_on_rst_stream_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "RST_STREAM: stream_id == 0"); } if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "RST_STREAM: stream in idle"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream) { /* We may use stream->shut_flags for strict error checking. */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); } rv = session_call_on_frame_received(session, frame); if (rv != 0) { return rv; } rv = nghttp2_session_close_stream(session, frame->hd.stream_id, frame->rst_stream.error_code); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } static int session_process_rst_stream_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_rst_stream_payload(&frame->rst_stream, iframe->sbuf.pos); return nghttp2_session_on_rst_stream_received(session, frame); } static int update_remote_initial_window_size_func(nghttp2_map_entry *entry, void *ptr) { int rv; nghttp2_update_window_size_arg *arg; nghttp2_stream *stream; arg = (nghttp2_update_window_size_arg *)ptr; stream = (nghttp2_stream *)entry; rv = nghttp2_stream_update_remote_initial_window_size( stream, arg->new_window_size, arg->old_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(arg->session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } /* If window size gets positive, push deferred DATA frame to outbound queue. */ if (stream->remote_window_size > 0 && nghttp2_stream_check_deferred_by_flow_control(stream)) { rv = nghttp2_stream_resume_deferred_item( stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* * Updates the remote initial window size of all active streams. If * error occurs, all streams may not be updated. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. 
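 *
 * Worked example: if the peer lowers SETTINGS_INITIAL_WINDOW_SIZE from
 * the default 65535 to 16384, every stream's send window is adjusted
 * by the delta 16384 - 65535 = -49151 (RFC 7540, section 6.9.2). A
 * stream that still had 30000 bytes of window ends up at
 * 30000 - 49151 = -19151, and its DATA stays deferred until
 * WINDOW_UPDATEs bring the window back above zero.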
*/ static int session_update_remote_initial_window_size(nghttp2_session *session, int32_t new_initial_window_size) { nghttp2_update_window_size_arg arg; arg.session = session; arg.new_window_size = new_initial_window_size; arg.old_window_size = (int32_t)session->remote_settings.initial_window_size; return nghttp2_map_each(&session->streams, update_remote_initial_window_size_func, &arg); } static int update_local_initial_window_size_func(nghttp2_map_entry *entry, void *ptr) { int rv; nghttp2_update_window_size_arg *arg; nghttp2_stream *stream; arg = (nghttp2_update_window_size_arg *)ptr; stream = (nghttp2_stream *)entry; rv = nghttp2_stream_update_local_initial_window_size( stream, arg->new_window_size, arg->old_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(arg->session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } if (!(arg->session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && stream->window_update_queued == 0 && nghttp2_should_send_window_update(stream->local_window_size, stream->recv_window_size)) { rv = nghttp2_session_add_window_update(arg->session, NGHTTP2_FLAG_NONE, stream->stream_id, stream->recv_window_size); if (rv != 0) { return rv; } stream->recv_window_size = 0; } return 0; } /* * Updates the local initial window size of all active streams. If an * error occurs, not all streams may be updated. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. */ static int session_update_local_initial_window_size(nghttp2_session *session, int32_t new_initial_window_size, int32_t old_initial_window_size) { nghttp2_update_window_size_arg arg; arg.session = session; arg.new_window_size = new_initial_window_size; arg.old_window_size = old_initial_window_size; return nghttp2_map_each(&session->streams, update_local_initial_window_size_func, &arg); } /* * Apply SETTINGS values |iv| having |niv| elements to the local * settings. We assume that all values in |iv| are correct, since we * validated them in nghttp2_session_add_settings() already. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_HEADER_COMP * The header table size is out of range * NGHTTP2_ERR_NOMEM * Out of memory */ int nghttp2_session_update_local_settings(nghttp2_session *session, nghttp2_settings_entry *iv, size_t niv) { int rv; size_t i; int32_t new_initial_window_size = -1; uint32_t header_table_size = 0; uint32_t min_header_table_size = UINT32_MAX; uint8_t header_table_size_seen = 0; /* For NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, use the value last seen. For NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, use both the minimum value and the value last seen.
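
   For example, if a single SETTINGS frame carried
   SETTINGS_HEADER_TABLE_SIZE = 4096, then 0, then 8192, the inflater
   below is first shrunk to the minimum (0) and then grown to the
   value last seen (8192), mirroring the dynamic table size updates
   the peer's encoder is obliged to signal (RFC 7541, section 4.2).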
*/ for (i = 0; i < niv; ++i) { switch (iv[i].settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: header_table_size_seen = 1; header_table_size = iv[i].value; min_header_table_size = nghttp2_min(min_header_table_size, iv[i].value); break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: new_initial_window_size = (int32_t)iv[i].value; break; } } if (header_table_size_seen) { if (min_header_table_size < header_table_size) { rv = nghttp2_hd_inflate_change_table_size(&session->hd_inflater, min_header_table_size); if (rv != 0) { return rv; } } rv = nghttp2_hd_inflate_change_table_size(&session->hd_inflater, header_table_size); if (rv != 0) { return rv; } } if (new_initial_window_size != -1) { rv = session_update_local_initial_window_size( session, new_initial_window_size, (int32_t)session->local_settings.initial_window_size); if (rv != 0) { return rv; } } for (i = 0; i < niv; ++i) { switch (iv[i].settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: session->local_settings.header_table_size = iv[i].value; break; case NGHTTP2_SETTINGS_ENABLE_PUSH: session->local_settings.enable_push = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: session->local_settings.max_concurrent_streams = iv[i].value; break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: session->local_settings.initial_window_size = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: session->local_settings.max_frame_size = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: session->local_settings.max_header_list_size = iv[i].value; break; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: session->local_settings.enable_connect_protocol = iv[i].value; break; } } return 0; } int nghttp2_session_on_settings_received(nghttp2_session *session, nghttp2_frame *frame, int noack) { int rv; size_t i; nghttp2_mem *mem; nghttp2_inflight_settings *settings; mem = &session->mem; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: stream_id != 0"); } if (frame->hd.flags & NGHTTP2_FLAG_ACK) { if (frame->settings.niv != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FRAME_SIZE_ERROR, "SETTINGS: ACK and payload != 0"); } settings = session->inflight_settings_head; if (!settings) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: unexpected ACK"); } rv = nghttp2_session_update_local_settings(session, settings->iv, settings->niv); session->inflight_settings_head = settings->next; inflight_settings_del(settings, mem); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } return session_handle_invalid_connection(session, frame, rv, NULL); } return session_call_on_frame_received(session, frame); } if (!session->remote_settings_received) { session->remote_settings.max_concurrent_streams = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; session->remote_settings_received = 1; } for (i = 0; i < frame->settings.niv; ++i) { nghttp2_settings_entry *entry = &frame->settings.iv[i]; switch (entry->settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: rv = nghttp2_hd_deflate_change_table_size(&session->hd_deflater, entry->value); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } else { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_HEADER_COMP, NULL); } } session->remote_settings.header_table_size = entry->value; break; case NGHTTP2_SETTINGS_ENABLE_PUSH: if (entry->value != 0 && entry->value != 1) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: 
invalid SETTINGS_ENABLE_PUSH"); } if (!session->server && entry->value != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: server attempted to enable push"); } session->remote_settings.enable_push = entry->value; break; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: session->remote_settings.max_concurrent_streams = entry->value; break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: /* Update the initial window size of all the active streams */ /* Check that initial_window_size < (1u << 31) */ if (entry->value > NGHTTP2_MAX_WINDOW_SIZE) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FLOW_CONTROL, "SETTINGS: too large SETTINGS_INITIAL_WINDOW_SIZE"); } rv = session_update_remote_initial_window_size(session, (int32_t)entry->value); if (nghttp2_is_fatal(rv)) { return rv; } if (rv != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FLOW_CONTROL, NULL); } session->remote_settings.initial_window_size = entry->value; break; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: if (entry->value < NGHTTP2_MAX_FRAME_SIZE_MIN || entry->value > NGHTTP2_MAX_FRAME_SIZE_MAX) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: invalid SETTINGS_MAX_FRAME_SIZE"); } session->remote_settings.max_frame_size = entry->value; break; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: session->remote_settings.max_header_list_size = entry->value; break; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: if (entry->value != 0 && entry->value != 1) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: invalid SETTINGS_ENABLE_CONNECT_PROTOCOL"); } if (!session->server && session->remote_settings.enable_connect_protocol && entry->value == 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: server attempted to disable " "SETTINGS_ENABLE_CONNECT_PROTOCOL"); } session->remote_settings.enable_connect_protocol = entry->value; break; } } if (!noack && !session_is_closing(session)) { rv = nghttp2_session_add_settings(session, NGHTTP2_FLAG_ACK, NULL, 0); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_INTERNAL, NULL); } } return session_call_on_frame_received(session, frame); } static int session_process_settings_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; size_t i; nghttp2_settings_entry min_header_size_entry; if (iframe->max_niv) { min_header_size_entry = iframe->iv[iframe->max_niv - 1]; if (min_header_size_entry.value < UINT32_MAX) { /* If we have a smaller value, then we must have SETTINGS_HEADER_TABLE_SIZE in i < iframe->niv */ for (i = 0; i < iframe->niv; ++i) { if (iframe->iv[i].settings_id == NGHTTP2_SETTINGS_HEADER_TABLE_SIZE) { break; } } assert(i < iframe->niv); if (min_header_size_entry.value != iframe->iv[i].value) { iframe->iv[iframe->niv++] = iframe->iv[i]; iframe->iv[i] = min_header_size_entry; } } } nghttp2_frame_unpack_settings_payload(&frame->settings, iframe->iv, iframe->niv); iframe->iv = NULL; iframe->niv = 0; iframe->max_niv = 0; return nghttp2_session_on_settings_received(session, frame, 0 /* ACK */); } int nghttp2_session_on_push_promise_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; nghttp2_stream *promised_stream; nghttp2_priority_spec pri_spec; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session,
frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: stream_id == 0"); } if (session->server || session->local_settings.enable_push == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: push disabled"); } if (!nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: invalid stream_id"); } if (!session_allow_incoming_new_stream(session)) { /* We just discard PUSH_PROMISE after GOAWAY was sent */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (!session_is_new_peer_stream_id(session, frame->push_promise.promised_stream_id)) { /* The spec says if an endpoint receives a PUSH_PROMISE with illegal stream ID is subject to a connection error of type PROTOCOL_ERROR. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: invalid promised_stream_id"); } if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: stream in idle"); } session->last_recv_stream_id = frame->push_promise.promised_stream_id; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING || !session->pending_enable_push || session->num_incoming_reserved_streams >= session->max_incoming_reserved_streams) { /* Currently, client does not retain closed stream, so we don't check NGHTTP2_SHUT_RD condition here. */ rv = nghttp2_session_add_rst_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_CANCEL); if (rv != 0) { return rv; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (stream->shut_flags & NGHTTP2_SHUT_RD) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "PUSH_PROMISE: stream closed"); } nghttp2_priority_spec_init(&pri_spec, stream->stream_id, NGHTTP2_DEFAULT_WEIGHT, 0); promised_stream = nghttp2_session_open_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_RESERVED, NULL); if (!promised_stream) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream(), since we don't keep closed stream in client side */ session->last_proc_stream_id = session->last_recv_stream_id; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } static int session_process_push_promise_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; rv = nghttp2_frame_unpack_push_promise_payload(&frame->push_promise, iframe->sbuf.pos); if (rv != 0) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: could not unpack"); } return nghttp2_session_on_push_promise_received(session, frame); } int nghttp2_session_on_ping_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "PING: stream_id != 0"); } if ((session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_PING_ACK) == 0 && (frame->hd.flags & NGHTTP2_FLAG_ACK) == 0 && !session_is_closing(session)) { /* Peer sent ping, so ping it back */ rv = nghttp2_session_add_ping(session, NGHTTP2_FLAG_ACK, frame->ping.opaque_data); if (rv != 0) { return rv; } } return session_call_on_frame_received(session, frame); } static int 
session_process_ping_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_ping_payload(&frame->ping, iframe->sbuf.pos); return nghttp2_session_on_ping_received(session, frame); } int nghttp2_session_on_goaway_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "GOAWAY: stream_id != 0"); } /* Spec says Endpoints MUST NOT increase the value they send in the last stream identifier. */ if ((frame->goaway.last_stream_id > 0 && !nghttp2_session_is_my_stream_id(session, frame->goaway.last_stream_id)) || session->remote_last_stream_id < frame->goaway.last_stream_id) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "GOAWAY: invalid last_stream_id"); } session->goaway_flags |= NGHTTP2_GOAWAY_RECV; session->remote_last_stream_id = frame->goaway.last_stream_id; rv = session_call_on_frame_received(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } return session_close_stream_on_goaway(session, frame->goaway.last_stream_id, 0); } static int session_process_goaway_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_goaway_payload(&frame->goaway, iframe->sbuf.pos, iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf)); nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); return nghttp2_session_on_goaway_received(session, frame); } static int session_on_connection_window_update_received(nghttp2_session *session, nghttp2_frame *frame) { /* Handle connection-level flow control */ if (frame->window_update.window_size_increment == 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "WINDOW_UPDATE: window_size_increment == 0"); } if (NGHTTP2_MAX_WINDOW_SIZE - frame->window_update.window_size_increment < session->remote_window_size) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_FLOW_CONTROL, NULL); } session->remote_window_size += frame->window_update.window_size_increment; return session_call_on_frame_received(session, frame); } static int session_on_stream_window_update_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "WINDOW_UPDATE to idle stream"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } if (state_reserved_remote(session, stream)) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "WINDOW_UPDATE to reserved stream"); } if (frame->window_update.window_size_increment == 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "WINDOW_UPDATE: window_size_increment == 0"); } if (NGHTTP2_MAX_WINDOW_SIZE - frame->window_update.window_size_increment < stream->remote_window_size) { return session_handle_invalid_stream(session, frame, NGHTTP2_ERR_FLOW_CONTROL); } stream->remote_window_size += frame->window_update.window_size_increment; if (stream->remote_window_size > 0 && nghttp2_stream_check_deferred_by_flow_control(stream)) { rv = nghttp2_stream_resume_deferred_item( stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } } return session_call_on_frame_received(session, frame); } int
nghttp2_session_on_window_update_received(nghttp2_session *session, nghttp2_frame *frame) { if (frame->hd.stream_id == 0) { return session_on_connection_window_update_received(session, frame); } else { return session_on_stream_window_update_received(session, frame); } } static int session_process_window_update_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_window_update_payload(&frame->window_update, iframe->sbuf.pos); return nghttp2_session_on_window_update_received(session, frame); } int nghttp2_session_on_altsvc_received(nghttp2_session *session, nghttp2_frame *frame) { nghttp2_ext_altsvc *altsvc; nghttp2_stream *stream; altsvc = frame->ext.payload; /* session->server case has been excluded */ if (frame->hd.stream_id == 0) { if (altsvc->origin_len == 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } } else { if (altsvc->origin_len > 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } } if (altsvc->field_value_len == 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } return session_call_on_frame_received(session, frame); } int nghttp2_session_on_origin_received(nghttp2_session *session, nghttp2_frame *frame) { return session_call_on_frame_received(session, frame); } static int session_process_altsvc_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_altsvc_payload( &frame->ext, nghttp2_get_uint16(iframe->sbuf.pos), iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf)); /* nghttp2_frame_unpack_altsvc_payload steals buffer from iframe->lbuf */ nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); return nghttp2_session_on_altsvc_received(session, frame); } static int session_process_origin_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_mem *mem = &session->mem; int rv; rv = nghttp2_frame_unpack_origin_payload(&frame->ext, iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf), mem); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } /* Ignore ORIGIN frame which cannot be parsed. */ return 0; } return nghttp2_session_on_origin_received(session, frame); } static int session_process_extension_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; rv = session_call_unpack_extension_callback(session); if (nghttp2_is_fatal(rv)) { return rv; } /* This handles the case where rv == NGHTTP2_ERR_CANCEL as well */ if (rv != 0) { return 0; } return session_call_on_frame_received(session, frame); } int nghttp2_session_on_data_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; nghttp2_stream *stream; /* We don't call on_frame_recv_callback if stream has been closed already or being closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING) { /* This should be treated as stream error, but it results in lots of RST_STREAM. So just ignore frame against nonexistent stream for now. 
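     Note that ignored DATA still takes part in connection-level flow
     control (see the NGHTTP2_IB_IGN_DATA state below), so frames we
     drop here do not leak the connection window.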
*/ return 0; } if (session_enforce_http_messaging(session) && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { if (nghttp2_http_on_remote_end_stream(stream) != 0) { rv = nghttp2_session_add_rst_stream(session, stream->stream_id, NGHTTP2_PROTOCOL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); /* Don't call nghttp2_session_close_stream_if_shut_rdwr because RST_STREAM has been submitted. */ return 0; } } rv = session_call_on_frame_received(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* For errors, this function only returns FATAL error. */ static int session_process_data_frame(nghttp2_session *session) { int rv; nghttp2_frame *public_data_frame = &session->iframe.frame; rv = nghttp2_session_on_data_received(session, public_data_frame); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } /* * Now we have SETTINGS synchronization, flow control error can be * detected strictly. If DATA frame is received with length > 0 and * current received window size + delta length is strictly larger than * local window size, it is subject to FLOW_CONTROL_ERROR, so return * -1. Note that local_window_size is calculated after SETTINGS ACK is * received from peer, so peer must honor this limit. If the resulting * recv_window_size is strictly larger than NGHTTP2_MAX_WINDOW_SIZE, * return -1 too. */ static int adjust_recv_window_size(int32_t *recv_window_size_ptr, size_t delta, int32_t local_window_size) { if (*recv_window_size_ptr > local_window_size - (int32_t)delta || *recv_window_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - (int32_t)delta) { return -1; } *recv_window_size_ptr += (int32_t)delta; return 0; } int nghttp2_session_update_recv_stream_window_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size, int send_window_update) { int rv; rv = adjust_recv_window_size(&stream->recv_window_size, delta_size, stream->local_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } /* We don't have to send WINDOW_UPDATE if the data received is the last chunk in the incoming stream. */ /* We have to use local_settings here because it is the constraint the remote endpoint should honor. 
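     In practice, nghttp2_should_send_window_update() fires once
     roughly half of the advertised window has been consumed, which
     amortizes the cost of WINDOW_UPDATE frames.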
*/ if (send_window_update && !(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && stream->window_update_queued == 0 && nghttp2_should_send_window_update(stream->local_window_size, stream->recv_window_size)) { rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, stream->stream_id, stream->recv_window_size); if (rv != 0) { return rv; } stream->recv_window_size = 0; } return 0; } int nghttp2_session_update_recv_connection_window_size(nghttp2_session *session, size_t delta_size) { int rv; rv = adjust_recv_window_size(&session->recv_window_size, delta_size, session->local_window_size); if (rv != 0) { return nghttp2_session_terminate_session(session, NGHTTP2_FLOW_CONTROL_ERROR); } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && session->window_update_queued == 0 && nghttp2_should_send_window_update(session->local_window_size, session->recv_window_size)) { /* Use stream ID 0 to update connection-level flow control window */ rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, 0, session->recv_window_size); if (rv != 0) { return rv; } session->recv_window_size = 0; } return 0; } static int session_update_consumed_size(nghttp2_session *session, int32_t *consumed_size_ptr, int32_t *recv_window_size_ptr, uint8_t window_update_queued, int32_t stream_id, size_t delta_size, int32_t local_window_size) { int32_t recv_size; int rv; if ((size_t)*consumed_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - delta_size) { return nghttp2_session_terminate_session(session, NGHTTP2_FLOW_CONTROL_ERROR); } *consumed_size_ptr += (int32_t)delta_size; if (window_update_queued == 0) { /* recv_window_size may be smaller than consumed_size, because it may be decreased by negative value with nghttp2_submit_window_update(). */ recv_size = nghttp2_min(*consumed_size_ptr, *recv_window_size_ptr); if (nghttp2_should_send_window_update(local_window_size, recv_size)) { rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, stream_id, recv_size); if (rv != 0) { return rv; } *recv_window_size_ptr -= recv_size; *consumed_size_ptr -= recv_size; } } return 0; } static int session_update_stream_consumed_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size) { return session_update_consumed_size( session, &stream->consumed_size, &stream->recv_window_size, stream->window_update_queued, stream->stream_id, delta_size, stream->local_window_size); } static int session_update_connection_consumed_size(nghttp2_session *session, size_t delta_size) { return session_update_consumed_size( session, &session->consumed_size, &session->recv_window_size, session->window_update_queued, 0, delta_size, session->local_window_size); } /* * Checks that we can receive the DATA frame for stream, which is * indicated by |session->iframe.frame.hd.stream_id|. If it is a * connection error situation, GOAWAY frame will be issued by this * function. * * If the DATA frame is allowed, returns 0. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_IGN_PAYLOAD * The reception of DATA frame is connection error; or should be * ignored. * NGHTTP2_ERR_NOMEM * Out of memory. 
*/ static int session_on_data_received_fail_fast(nghttp2_session *session) { int rv; nghttp2_stream *stream; nghttp2_inbound_frame *iframe; int32_t stream_id; const char *failure_reason; uint32_t error_code = NGHTTP2_PROTOCOL_ERROR; iframe = &session->iframe; stream_id = iframe->frame.hd.stream_id; if (stream_id == 0) { /* The spec says that if a DATA frame is received whose stream ID is 0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. */ failure_reason = "DATA: stream_id == 0"; goto fail; } if (session_detect_idle_stream(session, stream_id)) { failure_reason = "DATA: stream in idle"; error_code = NGHTTP2_PROTOCOL_ERROR; goto fail; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream && (stream->shut_flags & NGHTTP2_SHUT_RD)) { failure_reason = "DATA: stream closed"; error_code = NGHTTP2_STREAM_CLOSED; goto fail; } return NGHTTP2_ERR_IGN_PAYLOAD; } if (stream->shut_flags & NGHTTP2_SHUT_RD) { failure_reason = "DATA: stream in half-closed(remote)"; error_code = NGHTTP2_STREAM_CLOSED; goto fail; } if (nghttp2_session_is_my_stream_id(session, stream_id)) { if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_IGN_PAYLOAD; } if (stream->state != NGHTTP2_STREAM_OPENED) { failure_reason = "DATA: stream not opened"; goto fail; } return 0; } if (stream->state == NGHTTP2_STREAM_RESERVED) { failure_reason = "DATA: stream in reserved"; goto fail; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_IGN_PAYLOAD; } return 0; fail: rv = nghttp2_session_terminate_session_with_reason(session, error_code, failure_reason); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_IGN_PAYLOAD; } static size_t inbound_frame_payload_readlen(nghttp2_inbound_frame *iframe, const uint8_t *in, const uint8_t *last) { return nghttp2_min((size_t)(last - in), iframe->payloadleft); } /* * Resets iframe->sbuf and advance its mark pointer by |left| bytes. */ static void inbound_frame_set_mark(nghttp2_inbound_frame *iframe, size_t left) { nghttp2_buf_reset(&iframe->sbuf); iframe->sbuf.mark += left; } static size_t inbound_frame_buf_read(nghttp2_inbound_frame *iframe, const uint8_t *in, const uint8_t *last) { size_t readlen; readlen = nghttp2_min((size_t)(last - in), nghttp2_buf_mark_avail(&iframe->sbuf)); iframe->sbuf.last = nghttp2_cpymem(iframe->sbuf.last, in, readlen); return readlen; } /* * Unpacks SETTINGS entry in iframe->sbuf. 
 */
static void inbound_frame_set_settings_entry(nghttp2_inbound_frame *iframe) {
  nghttp2_settings_entry iv;
  nghttp2_settings_entry *min_header_table_size_entry;
  size_t i;

  nghttp2_frame_unpack_settings_entry(&iv, iframe->sbuf.pos);

  switch (iv.settings_id) {
  case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE:
  case NGHTTP2_SETTINGS_ENABLE_PUSH:
  case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS:
  case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE:
  case NGHTTP2_SETTINGS_MAX_FRAME_SIZE:
  case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE:
  case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
    break;
  default:
    DEBUGF("recv: unknown settings id=0x%02x\n", iv.settings_id);

    iframe->iv[iframe->niv++] = iv;

    return;
  }

  for (i = 0; i < iframe->niv; ++i) {
    if (iframe->iv[i].settings_id == iv.settings_id) {
      iframe->iv[i] = iv;
      break;
    }
  }

  if (i == iframe->niv) {
    iframe->iv[iframe->niv++] = iv;
  }

  if (iv.settings_id == NGHTTP2_SETTINGS_HEADER_TABLE_SIZE) {
    /* Keep track of minimum value of SETTINGS_HEADER_TABLE_SIZE */
    min_header_table_size_entry = &iframe->iv[iframe->max_niv - 1];

    if (iv.value < min_header_table_size_entry->value) {
      min_header_table_size_entry->value = iv.value;
    }
  }
}

/*
 * Checks the PADDED flag and sets iframe->sbuf to read the Pad Length
 * field accordingly.  If padding is set, this function returns 1.  If
 * no padding is set, this function returns 0.  On error, returns -1.
 */
static int inbound_frame_handle_pad(nghttp2_inbound_frame *iframe,
                                    nghttp2_frame_hd *hd) {
  if (hd->flags & NGHTTP2_FLAG_PADDED) {
    if (hd->length < 1) {
      return -1;
    }
    inbound_frame_set_mark(iframe, 1);
    return 1;
  }
  DEBUGF("recv: no padding in payload\n");
  return 0;
}

/*
 * Computes the amount of padding from the Pad Length field.  This
 * function returns the calculated length if it succeeds, or -1.
 */
static ssize_t inbound_frame_compute_pad(nghttp2_inbound_frame *iframe) {
  size_t padlen;

  /* 1 for Pad Length field */
  padlen = (size_t)(iframe->sbuf.pos[0] + 1);

  DEBUGF("recv: padlen=%zu\n", padlen);

  /* We cannot use iframe->frame.hd.length because of CONTINUATION */
  if (padlen - 1 > iframe->payloadleft) {
    return -1;
  }

  iframe->padlen = padlen;

  return (ssize_t)padlen;
}

/*
 * This function returns the effective payload length in the data of
 * length |readlen| when the remaining payload is |payloadleft|.  The
 * |payloadleft| does not include |readlen|.  If padding was started
 * strictly before this data chunk, this function returns -1.
 */
static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe,
                                               size_t payloadleft,
                                               size_t readlen) {
  size_t trail_padlen =
      nghttp2_frame_trail_padlen(&iframe->frame, iframe->padlen);

  if (trail_padlen > payloadleft) {
    size_t padlen;
    padlen = trail_padlen - payloadleft;
    if (readlen < padlen) {
      return -1;
    }
    return (ssize_t)(readlen - padlen);
  }
  return (ssize_t)(readlen);
}

ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in,
                                 size_t inlen) {
  const uint8_t *first = in, *last = in + inlen;
  nghttp2_inbound_frame *iframe = &session->iframe;
  size_t readlen;
  ssize_t padlen;
  int rv;
  int busy = 0;
  nghttp2_frame_hd cont_hd;
  nghttp2_stream *stream;
  size_t pri_fieldlen;
  nghttp2_mem *mem;

  DEBUGF("recv: connection recv_window_size=%d, local_window=%d\n",
         session->recv_window_size, session->local_window_size);

  mem = &session->mem;

  /* We may have more idle streams than we expect (e.g.,
     nghttp2_session_change_stream_priority() or
     nghttp2_session_create_idle_stream()).  Adjust them here.
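     nghttp2_session_adjust_idle_stream() closes the oldest idle
     streams until their number falls back under the session's limit.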
*/ rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } if (!nghttp2_session_want_read(session)) { return (ssize_t)inlen; } for (;;) { switch (iframe->state) { case NGHTTP2_IB_READ_CLIENT_MAGIC: readlen = nghttp2_min(inlen, iframe->payloadleft); if (memcmp(&NGHTTP2_CLIENT_MAGIC[NGHTTP2_CLIENT_MAGIC_LEN - iframe->payloadleft], in, readlen) != 0) { return NGHTTP2_ERR_BAD_CLIENT_MAGIC; } iframe->payloadleft -= readlen; in += readlen; if (iframe->payloadleft == 0) { session_inbound_frame_reset(session); iframe->state = NGHTTP2_IB_READ_FIRST_SETTINGS; } break; case NGHTTP2_IB_READ_FIRST_SETTINGS: DEBUGF("recv: [IB_READ_FIRST_SETTINGS]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } if (iframe->sbuf.pos[3] != NGHTTP2_SETTINGS || (iframe->sbuf.pos[4] & NGHTTP2_FLAG_ACK)) { rv = session_call_error_callback( session, NGHTTP2_ERR_SETTINGS_EXPECTED, "Remote peer returned unexpected data while we expected " "SETTINGS frame. Perhaps, peer does not support HTTP/2 " "properly."); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "SETTINGS expected"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->state = NGHTTP2_IB_READ_HEAD; /* Fall through */ case NGHTTP2_IB_READ_HEAD: { int on_begin_frame_called = 0; DEBUGF("recv: [IB_READ_HEAD]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } nghttp2_frame_unpack_frame_hd(&iframe->frame.hd, iframe->sbuf.pos); iframe->payloadleft = iframe->frame.hd.length; DEBUGF("recv: payloadlen=%zu, type=%u, flags=0x%02x, stream_id=%d\n", iframe->frame.hd.length, iframe->frame.hd.type, iframe->frame.hd.flags, iframe->frame.hd.stream_id); if (iframe->frame.hd.length > session->local_settings.max_frame_size) { DEBUGF("recv: length is too large %zu > %u\n", iframe->frame.hd.length, session->local_settings.max_frame_size); rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_FRAME_SIZE_ERROR, "too large frame size"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } switch (iframe->frame.hd.type) { case NGHTTP2_DATA: { DEBUGF("recv: DATA\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_STREAM | NGHTTP2_FLAG_PADDED); /* Check stream is open. If it is not open or closing, ignore payload. 
*/ busy = 1; rv = session_on_data_received_fail_fast(session); if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_IGN_PAYLOAD) { DEBUGF("recv: DATA not allowed stream_id=%d\n", iframe->frame.hd.stream_id); iframe->state = NGHTTP2_IB_IGN_DATA; break; } if (nghttp2_is_fatal(rv)) { return rv; } rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "DATA: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_PAD_DATA; break; } iframe->state = NGHTTP2_IB_READ_DATA; break; } case NGHTTP2_HEADERS: DEBUGF("recv: HEADERS\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_STREAM | NGHTTP2_FLAG_END_HEADERS | NGHTTP2_FLAG_PADDED | NGHTTP2_FLAG_PRIORITY); rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_NBYTE; break; } pri_fieldlen = nghttp2_frame_priority_len(iframe->frame.hd.flags); if (pri_fieldlen > 0) { if (iframe->payloadleft < pri_fieldlen) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, pri_fieldlen); break; } /* Call on_begin_frame_callback here because session_process_headers_frame() may call on_begin_headers_callback */ rv = session_call_on_begin_frame(session, &iframe->frame.hd); if (nghttp2_is_fatal(rv)) { return rv; } on_begin_frame_called = 1; rv = session_process_headers_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PRIORITY: DEBUGF("recv: PRIORITY\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft != NGHTTP2_PRIORITY_SPECLEN) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, NGHTTP2_PRIORITY_SPECLEN); break; case NGHTTP2_RST_STREAM: case NGHTTP2_WINDOW_UPDATE: #ifdef DEBUGBUILD switch (iframe->frame.hd.type) { case NGHTTP2_RST_STREAM: DEBUGF("recv: RST_STREAM\n"); break; case NGHTTP2_WINDOW_UPDATE: DEBUGF("recv: WINDOW_UPDATE\n"); break; } #endif /* DEBUGBUILD */ iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft != 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; case NGHTTP2_SETTINGS: DEBUGF("recv: SETTINGS\n"); iframe->frame.hd.flags &= NGHTTP2_FLAG_ACK; if ((iframe->frame.hd.length % NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH) || ((iframe->frame.hd.flags & NGHTTP2_FLAG_ACK) && iframe->payloadleft > 0)) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_SETTINGS; if (iframe->payloadleft) { nghttp2_settings_entry *min_header_table_size_entry; /* We allocate iv with 
           one additional entry, to store the minimum header table
           size. */
        iframe->max_niv =
            iframe->frame.hd.length / NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH + 1;

        if (iframe->max_niv - 1 > session->max_settings) {
          rv = nghttp2_session_terminate_session_with_reason(
              session, NGHTTP2_ENHANCE_YOUR_CALM,
              "SETTINGS: too many setting entries");
          if (nghttp2_is_fatal(rv)) {
            return rv;
          }
          return (ssize_t)inlen;
        }

        iframe->iv = nghttp2_mem_malloc(mem, sizeof(nghttp2_settings_entry) *
                                                 iframe->max_niv);

        if (!iframe->iv) {
          return NGHTTP2_ERR_NOMEM;
        }

        min_header_table_size_entry = &iframe->iv[iframe->max_niv - 1];
        min_header_table_size_entry->settings_id =
            NGHTTP2_SETTINGS_HEADER_TABLE_SIZE;
        min_header_table_size_entry->value = UINT32_MAX;

        inbound_frame_set_mark(iframe, NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH);
        break;
      }

      busy = 1;

      inbound_frame_set_mark(iframe, 0);

      break;
    case NGHTTP2_PUSH_PROMISE:
      DEBUGF("recv: PUSH_PROMISE\n");

      iframe->frame.hd.flags &=
          (NGHTTP2_FLAG_END_HEADERS | NGHTTP2_FLAG_PADDED);

      rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd);
      if (rv < 0) {
        rv = nghttp2_session_terminate_session_with_reason(
            session, NGHTTP2_PROTOCOL_ERROR,
            "PUSH_PROMISE: insufficient padding space");
        if (nghttp2_is_fatal(rv)) {
          return rv;
        }
        return (ssize_t)inlen;
      }

      if (rv == 1) {
        iframe->state = NGHTTP2_IB_READ_NBYTE;
        break;
      }

      if (iframe->payloadleft < 4) {
        busy = 1;
        iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR;
        break;
      }

      iframe->state = NGHTTP2_IB_READ_NBYTE;

      inbound_frame_set_mark(iframe, 4);

      break;
    case NGHTTP2_PING:
      DEBUGF("recv: PING\n");

      iframe->frame.hd.flags &= NGHTTP2_FLAG_ACK;

      if (iframe->payloadleft != 8) {
        busy = 1;
        iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR;
        break;
      }

      iframe->state = NGHTTP2_IB_READ_NBYTE;
      inbound_frame_set_mark(iframe, 8);

      break;
    case NGHTTP2_GOAWAY:
      DEBUGF("recv: GOAWAY\n");

      iframe->frame.hd.flags = NGHTTP2_FLAG_NONE;

      if (iframe->payloadleft < 8) {
        busy = 1;
        iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR;
        break;
      }

      iframe->state = NGHTTP2_IB_READ_NBYTE;
      inbound_frame_set_mark(iframe, 8);

      break;
    case NGHTTP2_CONTINUATION:
      DEBUGF("recv: unexpected CONTINUATION\n");

      /* Receiving CONTINUATION in this state is subject to a
         connection error of type PROTOCOL_ERROR */
      rv = nghttp2_session_terminate_session_with_reason(
          session, NGHTTP2_PROTOCOL_ERROR, "CONTINUATION: unexpected");
      if (nghttp2_is_fatal(rv)) {
        return rv;
      }

      return (ssize_t)inlen;
    default:
      DEBUGF("recv: extension frame\n");

      if (check_ext_type_set(session->user_recv_ext_types,
                             iframe->frame.hd.type)) {
        if (!session->callbacks.unpack_extension_callback) {
          /* Silently ignore unknown frame type.
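             The payload is drained in the NGHTTP2_IB_IGN_PAYLOAD
             state below so that frame boundaries stay in sync.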
*/ busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } busy = 1; iframe->state = NGHTTP2_IB_READ_EXTENSION_PAYLOAD; break; } else { switch (iframe->frame.hd.type) { case NGHTTP2_ALTSVC: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ALTSVC) == 0) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } DEBUGF("recv: ALTSVC\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; iframe->frame.ext.payload = &iframe->ext_frame_payload.altsvc; if (session->server) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } if (iframe->payloadleft < 2) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } busy = 1; iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 2); break; case NGHTTP2_ORIGIN: if (!(session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ORIGIN)) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } DEBUGF("recv: ORIGIN\n"); iframe->frame.ext.payload = &iframe->ext_frame_payload.origin; if (session->server || iframe->frame.hd.stream_id || (iframe->frame.hd.flags & 0xf0)) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, iframe->payloadleft); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, iframe->payloadleft); } else { busy = 1; } iframe->state = NGHTTP2_IB_READ_ORIGIN_PAYLOAD; break; default: busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } } } if (!on_begin_frame_called) { switch (iframe->state) { case NGHTTP2_IB_IGN_HEADER_BLOCK: case NGHTTP2_IB_IGN_PAYLOAD: case NGHTTP2_IB_FRAME_SIZE_ERROR: case NGHTTP2_IB_IGN_DATA: case NGHTTP2_IB_IGN_ALL: break; default: rv = session_call_on_begin_frame(session, &iframe->frame.hd); if (nghttp2_is_fatal(rv)) { return rv; } } } break; } case NGHTTP2_IB_READ_NBYTE: DEBUGF("recv: [IB_READ_NBYTE]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; iframe->payloadleft -= readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zd\n", readlen, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } switch (iframe->frame.hd.type) { case NGHTTP2_HEADERS: if (iframe->padlen == 0 && (iframe->frame.hd.flags & NGHTTP2_FLAG_PADDED)) { pri_fieldlen = nghttp2_frame_priority_len(iframe->frame.hd.flags); padlen = inbound_frame_compute_pad(iframe); if (padlen < 0 || (size_t)padlen + pri_fieldlen > 1 + iframe->payloadleft) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.headers.padlen = (size_t)padlen; if (pri_fieldlen > 0) { if (iframe->payloadleft < pri_fieldlen) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, pri_fieldlen); break; } else { /* Truncate buffers used for padding spec */ inbound_frame_set_mark(iframe, 0); } } rv = session_process_headers_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = 
NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PRIORITY: rv = session_process_priority_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_RST_STREAM: rv = session_process_rst_stream_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_PUSH_PROMISE: if (iframe->padlen == 0 && (iframe->frame.hd.flags & NGHTTP2_FLAG_PADDED)) { padlen = inbound_frame_compute_pad(iframe); if (padlen < 0 || (size_t)padlen + 4 /* promised stream id */ > 1 + iframe->payloadleft) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.push_promise.padlen = (size_t)padlen; if (iframe->payloadleft < 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; } rv = session_process_push_promise_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.push_promise.promised_stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PING: rv = session_process_ping_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_GOAWAY: { size_t debuglen; /* 8 is Last-stream-ID + Error Code */ debuglen = iframe->frame.hd.length - 8; if (debuglen > 0) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, debuglen); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, debuglen); } busy = 1; iframe->state = NGHTTP2_IB_READ_GOAWAY_DEBUG; break; } case NGHTTP2_WINDOW_UPDATE: rv = session_process_window_update_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_ALTSVC: { size_t origin_len; origin_len = nghttp2_get_uint16(iframe->sbuf.pos); DEBUGF("recv: origin_len=%zu\n", origin_len); if (origin_len > iframe->payloadleft) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } if (iframe->frame.hd.length > 2) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, iframe->frame.hd.length - 2); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, iframe->frame.hd.length); } busy = 1; iframe->state = NGHTTP2_IB_READ_ALTSVC_PAYLOAD; break; } default: /* This is unknown frame */ session_inbound_frame_reset(session); break; } break; case NGHTTP2_IB_READ_HEADER_BLOCK: case NGHTTP2_IB_IGN_HEADER_BLOCK: { ssize_t data_readlen; size_t trail_padlen; int final; #ifdef DEBUGBUILD if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { DEBUGF("recv: [IB_READ_HEADER_BLOCK]\n"); } else { DEBUGF("recv: [IB_IGN_HEADER_BLOCK]\n"); } 
#endif /* DEBUGBUILD */ readlen = inbound_frame_payload_readlen(iframe, in, last); DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft - readlen); data_readlen = inbound_frame_effective_readlen( iframe, iframe->payloadleft - readlen, readlen); if (data_readlen == -1) { /* everything is padding */ data_readlen = 0; } trail_padlen = nghttp2_frame_trail_padlen(&iframe->frame, iframe->padlen); final = (iframe->frame.hd.flags & NGHTTP2_FLAG_END_HEADERS) && iframe->payloadleft - (size_t)data_readlen == trail_padlen; if (data_readlen > 0 || (data_readlen == 0 && final)) { size_t hd_proclen = 0; DEBUGF("recv: block final=%d\n", final); rv = inflate_header_block(session, &iframe->frame, &hd_proclen, (uint8_t *)in, (size_t)data_readlen, final, iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_PAUSE) { in += hd_proclen; iframe->payloadleft -= hd_proclen; return in - first; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { /* The application says no more headers. We decompress the rest of the header block but not invoke on_header_callback and on_frame_recv_callback. */ in += hd_proclen; iframe->payloadleft -= hd_proclen; /* Use promised stream ID for PUSH_PROMISE */ rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.type == NGHTTP2_PUSH_PROMISE ? iframe->frame.push_promise.promised_stream_id : iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } in += readlen; iframe->payloadleft -= readlen; if (rv == NGHTTP2_ERR_HEADER_COMP) { /* GOAWAY is already issued */ if (iframe->payloadleft == 0) { session_inbound_frame_reset(session); } else { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; } break; } } else { in += readlen; iframe->payloadleft -= readlen; } if (iframe->payloadleft) { break; } if ((iframe->frame.hd.flags & NGHTTP2_FLAG_END_HEADERS) == 0) { inbound_frame_set_mark(iframe, NGHTTP2_FRAME_HDLEN); iframe->padlen = 0; if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_EXPECT_CONTINUATION; } else { iframe->state = NGHTTP2_IB_IGN_CONTINUATION; } } else { if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { rv = session_after_header_block_received(session); if (nghttp2_is_fatal(rv)) { return rv; } } session_inbound_frame_reset(session); } break; } case NGHTTP2_IB_IGN_PAYLOAD: DEBUGF("recv: [IB_IGN_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { break; } switch (iframe->frame.hd.type) { case NGHTTP2_HEADERS: case NGHTTP2_PUSH_PROMISE: case NGHTTP2_CONTINUATION: /* Mark inflater bad so that we won't perform further decoding */ session->hd_inflater.ctx.bad = 1; break; default: break; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_FRAME_SIZE_ERROR: DEBUGF("recv: [IB_FRAME_SIZE_ERROR]\n"); rv = session_handle_frame_size_error(session); if (nghttp2_is_fatal(rv)) { return rv; } assert(iframe->state == NGHTTP2_IB_IGN_ALL); return (ssize_t)inlen; case NGHTTP2_IB_READ_SETTINGS: DEBUGF("recv: [IB_READ_SETTINGS]\n"); readlen = inbound_frame_buf_read(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if 
(nghttp2_buf_mark_avail(&iframe->sbuf)) { break; } if (readlen > 0) { inbound_frame_set_settings_entry(iframe); } if (iframe->payloadleft) { inbound_frame_set_mark(iframe, NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH); break; } rv = session_process_settings_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_GOAWAY_DEBUG: DEBUGF("recv: [IB_READ_GOAWAY_DEBUG]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_goaway_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_EXPECT_CONTINUATION: case NGHTTP2_IB_IGN_CONTINUATION: #ifdef DEBUGBUILD if (iframe->state == NGHTTP2_IB_EXPECT_CONTINUATION) { fprintf(stderr, "recv: [IB_EXPECT_CONTINUATION]\n"); } else { fprintf(stderr, "recv: [IB_IGN_CONTINUATION]\n"); } #endif /* DEBUGBUILD */ readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } nghttp2_frame_unpack_frame_hd(&cont_hd, iframe->sbuf.pos); iframe->payloadleft = cont_hd.length; DEBUGF("recv: payloadlen=%zu, type=%u, flags=0x%02x, stream_id=%d\n", cont_hd.length, cont_hd.type, cont_hd.flags, cont_hd.stream_id); if (cont_hd.type != NGHTTP2_CONTINUATION || cont_hd.stream_id != iframe->frame.hd.stream_id) { DEBUGF("recv: expected stream_id=%d, type=%d, but got stream_id=%d, " "type=%u\n", iframe->frame.hd.stream_id, NGHTTP2_CONTINUATION, cont_hd.stream_id, cont_hd.type); rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "unexpected non-CONTINUATION frame or stream_id is invalid"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } /* CONTINUATION won't bear NGHTTP2_PADDED flag */ iframe->frame.hd.flags = (uint8_t)( iframe->frame.hd.flags | (cont_hd.flags & NGHTTP2_FLAG_END_HEADERS)); iframe->frame.hd.length += cont_hd.length; busy = 1; if (iframe->state == NGHTTP2_IB_EXPECT_CONTINUATION) { iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; rv = session_call_on_begin_frame(session, &cont_hd); if (nghttp2_is_fatal(rv)) { return rv; } } else { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; } break; case NGHTTP2_IB_READ_PAD_DATA: DEBUGF("recv: [IB_READ_PAD_DATA]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; iframe->payloadleft -= readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zu\n", readlen, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } /* Pad Length field is subject to flow control */ rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } /* Pad Length field is consumed immediately */ rv = nghttp2_session_consume(session, iframe->frame.hd.stream_id, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } stream = nghttp2_session_get_stream(session, iframe->frame.hd.stream_id); if (stream) { 
rv = nghttp2_session_update_recv_stream_window_size( session, stream, readlen, iframe->payloadleft || (iframe->frame.hd.flags & NGHTTP2_FLAG_END_STREAM) == 0); if (nghttp2_is_fatal(rv)) { return rv; } } busy = 1; padlen = inbound_frame_compute_pad(iframe); if (padlen < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "DATA: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.data.padlen = (size_t)padlen; iframe->state = NGHTTP2_IB_READ_DATA; break; case NGHTTP2_IB_READ_DATA: stream = nghttp2_session_get_stream(session, iframe->frame.hd.stream_id); if (!stream) { busy = 1; iframe->state = NGHTTP2_IB_IGN_DATA; break; } DEBUGF("recv: [IB_READ_DATA]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { ssize_t data_readlen; rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } rv = nghttp2_session_update_recv_stream_window_size( session, stream, readlen, iframe->payloadleft || (iframe->frame.hd.flags & NGHTTP2_FLAG_END_STREAM) == 0); if (nghttp2_is_fatal(rv)) { return rv; } data_readlen = inbound_frame_effective_readlen( iframe, iframe->payloadleft, readlen); if (data_readlen == -1) { /* everything is padding */ data_readlen = 0; } padlen = (ssize_t)readlen - data_readlen; if (padlen > 0) { /* Padding is considered as "consumed" immediately */ rv = nghttp2_session_consume(session, iframe->frame.hd.stream_id, (size_t)padlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } } DEBUGF("recv: data_readlen=%zd\n", data_readlen); if (data_readlen > 0) { if (session_enforce_http_messaging(session)) { if (nghttp2_http_on_data_chunk(stream, (size_t)data_readlen) != 0) { if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { /* Consume all data for connection immediately here */ rv = session_update_connection_consumed_size( session, (size_t)data_readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_DATA) { return (ssize_t)inlen; } } rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_PROTOCOL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; iframe->state = NGHTTP2_IB_IGN_DATA; break; } } if (session->callbacks.on_data_chunk_recv_callback) { rv = session->callbacks.on_data_chunk_recv_callback( session, iframe->frame.hd.flags, iframe->frame.hd.stream_id, in - readlen, (size_t)data_readlen, session->user_data); if (rv == NGHTTP2_ERR_PAUSE) { return in - first; } if (nghttp2_is_fatal(rv)) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } } } if (iframe->payloadleft) { break; } rv = session_process_data_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_IGN_DATA: DEBUGF("recv: [IB_IGN_DATA]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { /* Update connection-level flow control window for ignored DATA frame too */ rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return 
(ssize_t)inlen; } if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { /* Ignored DATA is considered as "consumed" immediately. */ rv = session_update_connection_consumed_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } } } if (iframe->payloadleft) { break; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_IGN_ALL: return (ssize_t)inlen; case NGHTTP2_IB_READ_EXTENSION_PAYLOAD: DEBUGF("recv: [IB_READ_EXTENSION_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { rv = session_call_on_extension_chunk_recv_callback( session, in - readlen, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (rv != 0) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } } if (iframe->payloadleft > 0) { break; } rv = session_process_extension_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_ALTSVC_PAYLOAD: DEBUGF("recv: [IB_READ_ALTSVC_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_altsvc_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_ORIGIN_PAYLOAD: DEBUGF("recv: [IB_READ_ORIGIN_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_origin_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; } if (!busy && in == last) { break; } busy = 0; } assert(in == last); return in - first; } int nghttp2_session_recv(nghttp2_session *session) { uint8_t buf[NGHTTP2_INBOUND_BUFFER_LENGTH]; while (1) { ssize_t readlen; readlen = session_recv(session, buf, sizeof(buf)); if (readlen > 0) { ssize_t proclen = nghttp2_session_mem_recv(session, buf, (size_t)readlen); if (proclen < 0) { return (int)proclen; } assert(proclen == readlen); } else if (readlen == 0 || readlen == NGHTTP2_ERR_WOULDBLOCK) { return 0; } else if (readlen == NGHTTP2_ERR_EOF) { return NGHTTP2_ERR_EOF; } else if (readlen < 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } } /* * Returns the number of active streams, which includes streams in * reserved state. */ static size_t session_get_num_active_streams(nghttp2_session *session) { return nghttp2_map_size(&session->streams) - session->num_closed_streams - session->num_idle_streams; } int nghttp2_session_want_read(nghttp2_session *session) { size_t num_active_streams; /* If this flag is set, we don't want to read. The application should drop the connection. 
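     Event loops typically poll the socket for readability only while
     this predicate returns nonzero.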
  */
  if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_SENT) {
    return 0;
  }

  num_active_streams = session_get_num_active_streams(session);

  /* Unless termination GOAWAY is sent or received, we always want to
     read incoming frames. */
  if (num_active_streams > 0) {
    return 1;
  }

  /* If there are no active streams and GOAWAY has been sent or
     received, we are done with this session. */
  return (session->goaway_flags &
          (NGHTTP2_GOAWAY_SENT | NGHTTP2_GOAWAY_RECV)) == 0;
}

int nghttp2_session_want_write(nghttp2_session *session) {
  /* If this flag is set, we don't want to write any data.  The
     application should drop the connection. */
  if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_SENT) {
    return 0;
  }

  /*
   * Unless termination GOAWAY is sent or received, we want to write
   * frames if there are pending ones.  If a pending frame is a
   * request/push response HEADERS and the concurrent stream limit is
   * reached, we don't want to write it.
   */
  return session->aob.item ||
         nghttp2_outbound_queue_top(&session->ob_urgent) ||
         nghttp2_outbound_queue_top(&session->ob_reg) ||
         (!nghttp2_pq_empty(&session->root.obq) &&
          session->remote_window_size > 0) ||
         (nghttp2_outbound_queue_top(&session->ob_syn) &&
          !session_is_outgoing_concurrent_streams_max(session));
}

int nghttp2_session_add_ping(nghttp2_session *session, uint8_t flags,
                             const uint8_t *opaque_data) {
  int rv;
  nghttp2_outbound_item *item;
  nghttp2_frame *frame;
  nghttp2_mem *mem;

  mem = &session->mem;

  if ((flags & NGHTTP2_FLAG_ACK) &&
      session->obq_flood_counter_ >= session->max_outbound_ack) {
    return NGHTTP2_ERR_FLOODED;
  }

  item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item));
  if (item == NULL) {
    return NGHTTP2_ERR_NOMEM;
  }

  nghttp2_outbound_item_init(item);

  frame = &item->frame;

  nghttp2_frame_ping_init(&frame->ping, flags, opaque_data);

  rv = nghttp2_session_add_item(session, item);

  if (rv != 0) {
    nghttp2_frame_ping_free(&frame->ping);
    nghttp2_mem_free(mem, item);
    return rv;
  }

  if (flags & NGHTTP2_FLAG_ACK) {
    ++session->obq_flood_counter_;
  }

  return 0;
}

int nghttp2_session_add_goaway(nghttp2_session *session,
                               int32_t last_stream_id, uint32_t error_code,
                               const uint8_t *opaque_data,
                               size_t opaque_data_len, uint8_t aux_flags) {
  int rv;
  nghttp2_outbound_item *item;
  nghttp2_frame *frame;
  uint8_t *opaque_data_copy = NULL;
  nghttp2_goaway_aux_data *aux_data;
  nghttp2_mem *mem;

  mem = &session->mem;

  if (nghttp2_session_is_my_stream_id(session, last_stream_id)) {
    return NGHTTP2_ERR_INVALID_ARGUMENT;
  }

  if (opaque_data_len) {
    if (opaque_data_len + 8 > NGHTTP2_MAX_PAYLOADLEN) {
      return NGHTTP2_ERR_INVALID_ARGUMENT;
    }
    opaque_data_copy = nghttp2_mem_malloc(mem, opaque_data_len);
    if (opaque_data_copy == NULL) {
      return NGHTTP2_ERR_NOMEM;
    }
    memcpy(opaque_data_copy, opaque_data, opaque_data_len);
  }

  item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item));
  if (item == NULL) {
    nghttp2_mem_free(mem, opaque_data_copy);
    return NGHTTP2_ERR_NOMEM;
  }

  nghttp2_outbound_item_init(item);

  frame = &item->frame;

  /* last_stream_id must not be increased from the value previously
     sent */
  last_stream_id = nghttp2_min(last_stream_id, session->local_last_stream_id);

  nghttp2_frame_goaway_init(&frame->goaway, last_stream_id, error_code,
                            opaque_data_copy, opaque_data_len);

  aux_data = &item->aux_data.goaway;
  aux_data->flags = aux_flags;

  rv = nghttp2_session_add_item(session, item);
  if (rv != 0) {
    nghttp2_frame_goaway_free(&frame->goaway, mem);
    nghttp2_mem_free(mem, item);
    return rv;
  }
  return 0;
}
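/* Illustrative sketch (excluded from compilation): the usual way the
   two predicates above drive an application's event loop, together
   with the public submit calls that end up in
   nghttp2_session_add_ping() and nghttp2_session_add_goaway().
   do_graceful_shutdown() is a hypothetical name; the last_stream_id
   choice shown is typical for the server side. */
#if 0
static int do_graceful_shutdown(nghttp2_session *session) {
  int rv;

  /* Probe liveness; the ACK comes back through
     nghttp2_session_on_ping_received(). */
  rv = nghttp2_submit_ping(session, NGHTTP2_FLAG_NONE, NULL);
  if (rv != 0) {
    return rv;
  }

  /* Announce shutdown; streams above last_stream_id are safe for the
     peer to retry elsewhere. */
  rv = nghttp2_submit_goaway(session, NGHTTP2_FLAG_NONE,
                             nghttp2_session_get_last_proc_stream_id(session),
                             NGHTTP2_NO_ERROR, NULL, 0);
  if (rv != 0) {
    return rv;
  }

  /* Keep running the loop until both directions are finished. */
  while (nghttp2_session_want_read(session) ||
         nghttp2_session_want_write(session)) {
    /* ... poll the socket, then call nghttp2_session_mem_recv() and
       nghttp2_session_send() as readiness allows ... */
    break; /* placeholder so the sketch terminates */
  }

  return 0;
}
#endif /* 0 */

int nghttp2_session_add_window_update(nghttp2_session *session, uint8_t flags,
                                      int32_t stream_id,
                                      int32_t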
window_size_increment) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_window_update_init(&frame->window_update, flags, stream_id, window_size_increment); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_window_update_free(&frame->window_update); nghttp2_mem_free(mem, item); return rv; } return 0; } static void session_append_inflight_settings(nghttp2_session *session, nghttp2_inflight_settings *settings) { nghttp2_inflight_settings **i; for (i = &session->inflight_settings_head; *i; i = &(*i)->next) ; *i = settings; } int nghttp2_session_add_settings(nghttp2_session *session, uint8_t flags, const nghttp2_settings_entry *iv, size_t niv) { nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_settings_entry *iv_copy; size_t i; int rv; nghttp2_mem *mem; nghttp2_inflight_settings *inflight_settings = NULL; mem = &session->mem; if (flags & NGHTTP2_FLAG_ACK) { if (niv != 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (session->obq_flood_counter_ >= session->max_outbound_ack) { return NGHTTP2_ERR_FLOODED; } } if (!nghttp2_iv_check(iv, niv)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } if (niv > 0) { iv_copy = nghttp2_frame_iv_copy(iv, niv, mem); if (iv_copy == NULL) { nghttp2_mem_free(mem, item); return NGHTTP2_ERR_NOMEM; } } else { iv_copy = NULL; } if ((flags & NGHTTP2_FLAG_ACK) == 0) { rv = inflight_settings_new(&inflight_settings, iv, niv, mem); if (rv != 0) { assert(nghttp2_is_fatal(rv)); nghttp2_mem_free(mem, iv_copy); nghttp2_mem_free(mem, item); return rv; } } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_settings_init(&frame->settings, flags, iv_copy, niv); rv = nghttp2_session_add_item(session, item); if (rv != 0) { /* The only expected error is fatal one */ assert(nghttp2_is_fatal(rv)); inflight_settings_del(inflight_settings, mem); nghttp2_frame_settings_free(&frame->settings, mem); nghttp2_mem_free(mem, item); return rv; } if (flags & NGHTTP2_FLAG_ACK) { ++session->obq_flood_counter_; } else { session_append_inflight_settings(session, inflight_settings); } /* Extract NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS and ENABLE_PUSH here. We use it to refuse the incoming stream and PUSH_PROMISE with RST_STREAM. 
  */
  for (i = niv; i > 0; --i) {
    if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS) {
      session->pending_local_max_concurrent_stream = iv[i - 1].value;
      break;
    }
  }

  for (i = niv; i > 0; --i) {
    if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_ENABLE_PUSH) {
      session->pending_enable_push = (uint8_t)iv[i - 1].value;
      break;
    }
  }

  for (i = niv; i > 0; --i) {
    if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL) {
      session->pending_enable_connect_protocol = (uint8_t)iv[i - 1].value;
      break;
    }
  }

  return 0;
}

int nghttp2_session_pack_data(nghttp2_session *session, nghttp2_bufs *bufs,
                              size_t datamax, nghttp2_frame *frame,
                              nghttp2_data_aux_data *aux_data,
                              nghttp2_stream *stream) {
  int rv;
  uint32_t data_flags;
  ssize_t payloadlen;
  ssize_t padded_payloadlen;
  nghttp2_buf *buf;
  size_t max_payloadlen;

  assert(bufs->head == bufs->cur);

  buf = &bufs->cur->buf;

  if (session->callbacks.read_length_callback) {
    payloadlen = session->callbacks.read_length_callback(
        session, frame->hd.type, stream->stream_id,
        session->remote_window_size, stream->remote_window_size,
        session->remote_settings.max_frame_size, session->user_data);

    DEBUGF("send: read_length_callback=%zd\n", payloadlen);

    payloadlen = nghttp2_session_enforce_flow_control_limits(session, stream,
                                                             payloadlen);

    DEBUGF("send: read_length_callback after flow control=%zd\n", payloadlen);

    if (payloadlen <= 0) {
      return NGHTTP2_ERR_CALLBACK_FAILURE;
    }

    if ((size_t)payloadlen > nghttp2_buf_avail(buf)) {
      /* Resize the current buffer(s).  The +1 in the buffer size is
         for the possible padding field. */
      rv = nghttp2_bufs_realloc(&session->aob.framebufs,
                                (size_t)(NGHTTP2_FRAME_HDLEN + 1 + payloadlen));

      if (rv != 0) {
        DEBUGF("send: realloc buffer failed rv=%d", rv);
        /* If reallocation failed, old buffers are still intact.  So
           use safe limit. */
        payloadlen = (ssize_t)datamax;

        DEBUGF("send: use safe limit payloadlen=%zd", payloadlen);
      } else {
        assert(&session->aob.framebufs == bufs);

        buf = &bufs->cur->buf;
      }
    }
    datamax = (size_t)payloadlen;
  }

  /* Current max DATA length is less than buffer chunk size */
  assert(nghttp2_buf_avail(buf) >= datamax);

  data_flags = NGHTTP2_DATA_FLAG_NONE;
  payloadlen = aux_data->data_prd.read_callback(
      session, frame->hd.stream_id, buf->pos, datamax, &data_flags,
      &aux_data->data_prd.source, session->user_data);

  if (payloadlen == NGHTTP2_ERR_DEFERRED ||
      payloadlen == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE ||
      payloadlen == NGHTTP2_ERR_PAUSE) {
    DEBUGF("send: DATA postponed due to %s\n",
           nghttp2_strerror((int)payloadlen));

    return (int)payloadlen;
  }

  if (payloadlen < 0 || datamax < (size_t)payloadlen) {
    /* This is the error code when the callback failed.
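       A read_callback that claims more than |datamax| bytes is also
       treated as failure so that the frame buffer cannot overflow.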
*/ return NGHTTP2_ERR_CALLBACK_FAILURE; } buf->last = buf->pos + payloadlen; buf->pos -= NGHTTP2_FRAME_HDLEN; /* Clear flags, because this may contain previous flags of previous DATA */ frame->hd.flags = NGHTTP2_FLAG_NONE; if (data_flags & NGHTTP2_DATA_FLAG_EOF) { aux_data->eof = 1; /* If NGHTTP2_DATA_FLAG_NO_END_STREAM is set, don't set NGHTTP2_FLAG_END_STREAM */ if ((aux_data->flags & NGHTTP2_FLAG_END_STREAM) && (data_flags & NGHTTP2_DATA_FLAG_NO_END_STREAM) == 0) { frame->hd.flags |= NGHTTP2_FLAG_END_STREAM; } } if (data_flags & NGHTTP2_DATA_FLAG_NO_COPY) { if (session->callbacks.send_data_callback == NULL) { DEBUGF("NGHTTP2_DATA_FLAG_NO_COPY requires send_data_callback set\n"); return NGHTTP2_ERR_CALLBACK_FAILURE; } aux_data->no_copy = 1; } frame->hd.length = (size_t)payloadlen; frame->data.padlen = 0; max_payloadlen = nghttp2_min(datamax, frame->hd.length + NGHTTP2_MAX_PADLEN); padded_payloadlen = session_call_select_padding(session, frame, max_payloadlen); if (nghttp2_is_fatal((int)padded_payloadlen)) { return (int)padded_payloadlen; } frame->data.padlen = (size_t)(padded_payloadlen - payloadlen); nghttp2_frame_pack_frame_hd(buf->pos, &frame->hd); rv = nghttp2_frame_add_pad(bufs, &frame->hd, frame->data.padlen, aux_data->no_copy); if (rv != 0) { return rv; } reschedule_stream(stream); if (frame->hd.length == 0 && (data_flags & NGHTTP2_DATA_FLAG_EOF) && (data_flags & NGHTTP2_DATA_FLAG_NO_END_STREAM)) { /* DATA payload length is 0, and DATA frame does not bear END_STREAM. In this case, there is no point to send 0 length DATA frame. */ return NGHTTP2_ERR_CANCEL; } return 0; } void *nghttp2_session_get_stream_user_data(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream) { return stream->stream_user_data; } else { return NULL; } } int nghttp2_session_set_stream_user_data(nghttp2_session *session, int32_t stream_id, void *stream_user_data) { nghttp2_stream *stream; nghttp2_frame *frame; nghttp2_outbound_item *item; stream = nghttp2_session_get_stream(session, stream_id); if (stream) { stream->stream_user_data = stream_user_data; return 0; } if (session->server || !nghttp2_session_is_my_stream_id(session, stream_id) || !nghttp2_outbound_queue_top(&session->ob_syn)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } frame = &nghttp2_outbound_queue_top(&session->ob_syn)->frame; assert(frame->hd.type == NGHTTP2_HEADERS); if (frame->hd.stream_id > stream_id || (uint32_t)stream_id >= session->next_stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } for (item = session->ob_syn.head; item; item = item->qnext) { if (item->frame.hd.stream_id < stream_id) { continue; } if (item->frame.hd.stream_id > stream_id) { break; } item->aux_data.headers.stream_user_data = stream_user_data; return 0; } return NGHTTP2_ERR_INVALID_ARGUMENT; } int nghttp2_session_resume_data(nghttp2_session *session, int32_t stream_id) { int rv; nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL || !nghttp2_stream_check_deferred_item(stream)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } rv = nghttp2_stream_resume_deferred_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_USER); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } size_t nghttp2_session_get_outbound_queue_size(nghttp2_session *session) { return nghttp2_outbound_queue_size(&session->ob_urgent) + nghttp2_outbound_queue_size(&session->ob_reg) + nghttp2_outbound_queue_size(&session->ob_syn); /* TODO account for item attached to stream */ } int32_t 
nghttp2_session_get_stream_effective_recv_data_length(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } return stream->recv_window_size < 0 ? 0 : stream->recv_window_size; } int32_t nghttp2_session_get_stream_effective_local_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } return stream->local_window_size; } int32_t nghttp2_session_get_stream_local_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; int32_t size; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } size = stream->local_window_size - stream->recv_window_size; /* size could be negative if local endpoint reduced SETTINGS_INITIAL_WINDOW_SIZE */ if (size < 0) { return 0; } return size; } int32_t nghttp2_session_get_effective_recv_data_length(nghttp2_session *session) { return session->recv_window_size < 0 ? 0 : session->recv_window_size; } int32_t nghttp2_session_get_effective_local_window_size(nghttp2_session *session) { return session->local_window_size; } int32_t nghttp2_session_get_local_window_size(nghttp2_session *session) { return session->local_window_size - session->recv_window_size; } int32_t nghttp2_session_get_stream_remote_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } /* stream->remote_window_size can be negative when SETTINGS_INITIAL_WINDOW_SIZE is changed. */ return nghttp2_max(0, stream->remote_window_size); } int32_t nghttp2_session_get_remote_window_size(nghttp2_session *session) { return session->remote_window_size; } uint32_t nghttp2_session_get_remote_settings(nghttp2_session *session, nghttp2_settings_id id) { switch (id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: return session->remote_settings.header_table_size; case NGHTTP2_SETTINGS_ENABLE_PUSH: return session->remote_settings.enable_push; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: return session->remote_settings.max_concurrent_streams; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: return session->remote_settings.initial_window_size; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: return session->remote_settings.max_frame_size; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: return session->remote_settings.max_header_list_size; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: return session->remote_settings.enable_connect_protocol; } assert(0); abort(); /* if NDEBUG is set */ } uint32_t nghttp2_session_get_local_settings(nghttp2_session *session, nghttp2_settings_id id) { switch (id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: return session->local_settings.header_table_size; case NGHTTP2_SETTINGS_ENABLE_PUSH: return session->local_settings.enable_push; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: return session->local_settings.max_concurrent_streams; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: return session->local_settings.initial_window_size; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: return session->local_settings.max_frame_size; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: return session->local_settings.max_header_list_size; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: return session->local_settings.enable_connect_protocol; } assert(0); abort(); /* if NDEBUG is set */ } static int nghttp2_session_upgrade_internal(nghttp2_session *session, 
const uint8_t *settings_payload, size_t settings_payloadlen, void *stream_user_data) { nghttp2_stream *stream; nghttp2_frame frame; nghttp2_settings_entry *iv; size_t niv; int rv; nghttp2_priority_spec pri_spec; nghttp2_mem *mem; mem = &session->mem; if ((!session->server && session->next_stream_id != 1) || (session->server && session->last_recv_stream_id >= 1)) { return NGHTTP2_ERR_PROTO; } if (settings_payloadlen % NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH) { return NGHTTP2_ERR_INVALID_ARGUMENT; } /* SETTINGS frame contains too many settings */ if (settings_payloadlen / NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH > session->max_settings) { return NGHTTP2_ERR_TOO_MANY_SETTINGS; } rv = nghttp2_frame_unpack_settings_payload2(&iv, &niv, settings_payload, settings_payloadlen, mem); if (rv != 0) { return rv; } if (session->server) { nghttp2_frame_hd_init(&frame.hd, settings_payloadlen, NGHTTP2_SETTINGS, NGHTTP2_FLAG_NONE, 0); frame.settings.iv = iv; frame.settings.niv = niv; rv = nghttp2_session_on_settings_received(session, &frame, 1 /* No ACK */); } else { rv = nghttp2_submit_settings(session, NGHTTP2_FLAG_NONE, iv, niv); } nghttp2_mem_free(mem, iv); if (rv != 0) { return rv; } nghttp2_priority_spec_default_init(&pri_spec); stream = nghttp2_session_open_stream( session, 1, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_OPENING, session->server ? NULL : stream_user_data); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream(), since this should be the first stream to be opened. */ if (session->server) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); session->last_recv_stream_id = 1; session->last_proc_stream_id = 1; } else { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); session->last_sent_stream_id = 1; session->next_stream_id += 2; } return 0; } int nghttp2_session_upgrade(nghttp2_session *session, const uint8_t *settings_payload, size_t settings_payloadlen, void *stream_user_data) { int rv; nghttp2_stream *stream; rv = nghttp2_session_upgrade_internal(session, settings_payload, settings_payloadlen, stream_user_data); if (rv != 0) { return rv; } stream = nghttp2_session_get_stream(session, 1); assert(stream); /* We have no information about the request header fields when the Upgrade happened. So we don't know the request method here. If the request method is HEAD, we are in trouble, because we may have a nonzero content-length header field in the response headers, and we are going to check it against the actual DATA frames, but we may get a mismatch because a HEAD response body must be empty. For this reason, nghttp2_session_upgrade() was deprecated in favor of nghttp2_session_upgrade2(), which has the |head_request| parameter to indicate whether the request method is HEAD or not.
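As an illustration (not part of the original comment): a client that sent "GET / HTTP/1.1" with "Connection: Upgrade, HTTP2-Settings" would, upon receiving "101 Switching Protocols", call nghttp2_session_upgrade2(session, settings_payload, settings_payloadlen, 0, NULL), where |settings_payload| is the base64url-decoded value of the HTTP2-Settings header field it sent, and |head_request| is 0 because the request method was GET.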
*/ stream->http_flags |= NGHTTP2_HTTP_FLAG_METH_UPGRADE_WORKAROUND; return 0; } int nghttp2_session_upgrade2(nghttp2_session *session, const uint8_t *settings_payload, size_t settings_payloadlen, int head_request, void *stream_user_data) { int rv; nghttp2_stream *stream; rv = nghttp2_session_upgrade_internal(session, settings_payload, settings_payloadlen, stream_user_data); if (rv != 0) { return rv; } stream = nghttp2_session_get_stream(session, 1); assert(stream); if (head_request) { stream->http_flags |= NGHTTP2_HTTP_FLAG_METH_HEAD; } return 0; } int nghttp2_session_get_stream_local_close(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return -1; } return (stream->shut_flags & NGHTTP2_SHUT_WR) != 0; } int nghttp2_session_get_stream_remote_close(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return -1; } return (stream->shut_flags & NGHTTP2_SHUT_RD) != 0; } int nghttp2_session_consume(nghttp2_session *session, int32_t stream_id, size_t size) { int rv; nghttp2_stream *stream; if (stream_id == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } rv = session_update_connection_consumed_size(session, size); if (nghttp2_is_fatal(rv)) { return rv; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return 0; } rv = session_update_stream_consumed_size(session, stream, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_consume_connection(nghttp2_session *session, size_t size) { int rv; if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } rv = session_update_connection_consumed_size(session, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_consume_stream(nghttp2_session *session, int32_t stream_id, size_t size) { int rv; nghttp2_stream *stream; if (stream_id == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return 0; } rv = session_update_stream_consumed_size(session, stream, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_set_next_stream_id(nghttp2_session *session, int32_t next_stream_id) { if (next_stream_id <= 0 || session->next_stream_id > (uint32_t)next_stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (session->server) { if (next_stream_id % 2) { return NGHTTP2_ERR_INVALID_ARGUMENT; } } else if (next_stream_id % 2 == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } session->next_stream_id = (uint32_t)next_stream_id; return 0; } uint32_t nghttp2_session_get_next_stream_id(nghttp2_session *session) { return session->next_stream_id; } int32_t nghttp2_session_get_last_proc_stream_id(nghttp2_session *session) { return session->last_proc_stream_id; } nghttp2_stream *nghttp2_session_find_stream(nghttp2_session *session, int32_t stream_id) { if (stream_id == 0) { return &session->root; } return nghttp2_session_get_stream_raw(session, stream_id); } nghttp2_stream *nghttp2_session_get_root_stream(nghttp2_session *session) { return &session->root; } int nghttp2_session_check_server_session(nghttp2_session *session) { return session->server; } int nghttp2_session_change_stream_priority( 
nghttp2_session *session, int32_t stream_id, const nghttp2_priority_spec *pri_spec) { int rv; nghttp2_stream *stream; nghttp2_priority_spec pri_spec_copy; if (stream_id == 0 || stream_id == pri_spec->stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } stream = nghttp2_session_get_stream_raw(session, stream_id); if (!stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } pri_spec_copy = *pri_spec; nghttp2_priority_spec_normalize_weight(&pri_spec_copy); rv = nghttp2_session_reprioritize_stream(session, stream, &pri_spec_copy); if (nghttp2_is_fatal(rv)) { return rv; } /* We intentionally don't call nghttp2_session_adjust_idle_stream(), so that idle streams created by this function, as well as existing ones, are kept for the application. We will adjust the number of idle streams when nghttp2_session_mem_send or nghttp2_session_mem_recv is called. */ return 0; } int nghttp2_session_create_idle_stream(nghttp2_session *session, int32_t stream_id, const nghttp2_priority_spec *pri_spec) { nghttp2_stream *stream; nghttp2_priority_spec pri_spec_copy; if (stream_id == 0 || stream_id == pri_spec->stream_id || !session_detect_idle_stream(session, stream_id)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } pri_spec_copy = *pri_spec; nghttp2_priority_spec_normalize_weight(&pri_spec_copy); stream = nghttp2_session_open_stream(session, stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec_copy, NGHTTP2_STREAM_IDLE, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } /* We intentionally don't call nghttp2_session_adjust_idle_stream(), so that idle streams created by this function, as well as existing ones, are kept for the application. We will adjust the number of idle streams when nghttp2_session_mem_send or nghttp2_session_mem_recv is called. */ return 0; } size_t nghttp2_session_get_hd_inflate_dynamic_table_size(nghttp2_session *session) { return nghttp2_hd_inflate_get_dynamic_table_size(&session->hd_inflater); } size_t nghttp2_session_get_hd_deflate_dynamic_table_size(nghttp2_session *session) { return nghttp2_hd_deflate_get_dynamic_table_size(&session->hd_deflater); } void nghttp2_session_set_user_data(nghttp2_session *session, void *user_data) { session->user_data = user_data; }
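/* Illustrative sketch (not part of the original nghttp2 source): how an
   application would drive the manual flow-control path guarded by
   NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE in nghttp2_session_consume() above.
   The |callbacks| object and the byte count |nconsumed| are assumed to come
   from the surrounding application. */
static int example_consume_after_processing(nghttp2_session **session_ptr,
                                            nghttp2_session_callbacks *callbacks,
                                            void *user_data, int32_t stream_id,
                                            size_t nconsumed) {
  nghttp2_option *option;
  int rv;

  /* Opt out of automatic WINDOW_UPDATE; nghttp2_session_consume() returns
     NGHTTP2_ERR_INVALID_STATE unless this option is set. */
  rv = nghttp2_option_new(&option);
  if (rv != 0) {
    return rv;
  }
  nghttp2_option_set_no_auto_window_update(option, 1);

  rv = nghttp2_session_client_new2(session_ptr, callbacks, user_data, option);
  nghttp2_option_del(option);
  if (rv != 0) {
    return rv;
  }

  /* ... later, after the application has actually processed |nconsumed|
     bytes of DATA received on |stream_id| ... */
  return nghttp2_session_consume(*session_ptr, stream_id, nconsumed);
}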
./CrossVul/dataset_final_sorted/CWE-707/c/bad_3937_0
crossvul-cpp_data_good_3936_3
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2012 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "nghttp2_helper.h" #include <assert.h> #include <string.h> #include "nghttp2_net.h" void nghttp2_put_uint16be(uint8_t *buf, uint16_t n) { uint16_t x = htons(n); memcpy(buf, &x, sizeof(uint16_t)); } void nghttp2_put_uint32be(uint8_t *buf, uint32_t n) { uint32_t x = htonl(n); memcpy(buf, &x, sizeof(uint32_t)); } uint16_t nghttp2_get_uint16(const uint8_t *data) { uint16_t n; memcpy(&n, data, sizeof(uint16_t)); return ntohs(n); } uint32_t nghttp2_get_uint32(const uint8_t *data) { uint32_t n; memcpy(&n, data, sizeof(uint32_t)); return ntohl(n); } /* Generated by gendowncasetbl.py */ static const uint8_t DOWNCASE_TBL[] = { 0 /* NUL */, 1 /* SOH */, 2 /* STX */, 3 /* ETX */, 4 /* EOT */, 5 /* ENQ */, 6 /* ACK */, 7 /* BEL */, 8 /* BS */, 9 /* HT */, 10 /* LF */, 11 /* VT */, 12 /* FF */, 13 /* CR */, 14 /* SO */, 15 /* SI */, 16 /* DLE */, 17 /* DC1 */, 18 /* DC2 */, 19 /* DC3 */, 20 /* DC4 */, 21 /* NAK */, 22 /* SYN */, 23 /* ETB */, 24 /* CAN */, 25 /* EM */, 26 /* SUB */, 27 /* ESC */, 28 /* FS */, 29 /* GS */, 30 /* RS */, 31 /* US */, 32 /* SPC */, 33 /* ! */, 34 /* " */, 35 /* # */, 36 /* $ */, 37 /* % */, 38 /* & */, 39 /* ' */, 40 /* ( */, 41 /* ) */, 42 /* * */, 43 /* + */, 44 /* , */, 45 /* - */, 46 /* . */, 47 /* / */, 48 /* 0 */, 49 /* 1 */, 50 /* 2 */, 51 /* 3 */, 52 /* 4 */, 53 /* 5 */, 54 /* 6 */, 55 /* 7 */, 56 /* 8 */, 57 /* 9 */, 58 /* : */, 59 /* ; */, 60 /* < */, 61 /* = */, 62 /* > */, 63 /* ? 
*/, 64 /* @ */, 97 /* A */, 98 /* B */, 99 /* C */, 100 /* D */, 101 /* E */, 102 /* F */, 103 /* G */, 104 /* H */, 105 /* I */, 106 /* J */, 107 /* K */, 108 /* L */, 109 /* M */, 110 /* N */, 111 /* O */, 112 /* P */, 113 /* Q */, 114 /* R */, 115 /* S */, 116 /* T */, 117 /* U */, 118 /* V */, 119 /* W */, 120 /* X */, 121 /* Y */, 122 /* Z */, 91 /* [ */, 92 /* \ */, 93 /* ] */, 94 /* ^ */, 95 /* _ */, 96 /* ` */, 97 /* a */, 98 /* b */, 99 /* c */, 100 /* d */, 101 /* e */, 102 /* f */, 103 /* g */, 104 /* h */, 105 /* i */, 106 /* j */, 107 /* k */, 108 /* l */, 109 /* m */, 110 /* n */, 111 /* o */, 112 /* p */, 113 /* q */, 114 /* r */, 115 /* s */, 116 /* t */, 117 /* u */, 118 /* v */, 119 /* w */, 120 /* x */, 121 /* y */, 122 /* z */, 123 /* { */, 124 /* | */, 125 /* } */, 126 /* ~ */, 127 /* DEL */, 128 /* 0x80 */, 129 /* 0x81 */, 130 /* 0x82 */, 131 /* 0x83 */, 132 /* 0x84 */, 133 /* 0x85 */, 134 /* 0x86 */, 135 /* 0x87 */, 136 /* 0x88 */, 137 /* 0x89 */, 138 /* 0x8a */, 139 /* 0x8b */, 140 /* 0x8c */, 141 /* 0x8d */, 142 /* 0x8e */, 143 /* 0x8f */, 144 /* 0x90 */, 145 /* 0x91 */, 146 /* 0x92 */, 147 /* 0x93 */, 148 /* 0x94 */, 149 /* 0x95 */, 150 /* 0x96 */, 151 /* 0x97 */, 152 /* 0x98 */, 153 /* 0x99 */, 154 /* 0x9a */, 155 /* 0x9b */, 156 /* 0x9c */, 157 /* 0x9d */, 158 /* 0x9e */, 159 /* 0x9f */, 160 /* 0xa0 */, 161 /* 0xa1 */, 162 /* 0xa2 */, 163 /* 0xa3 */, 164 /* 0xa4 */, 165 /* 0xa5 */, 166 /* 0xa6 */, 167 /* 0xa7 */, 168 /* 0xa8 */, 169 /* 0xa9 */, 170 /* 0xaa */, 171 /* 0xab */, 172 /* 0xac */, 173 /* 0xad */, 174 /* 0xae */, 175 /* 0xaf */, 176 /* 0xb0 */, 177 /* 0xb1 */, 178 /* 0xb2 */, 179 /* 0xb3 */, 180 /* 0xb4 */, 181 /* 0xb5 */, 182 /* 0xb6 */, 183 /* 0xb7 */, 184 /* 0xb8 */, 185 /* 0xb9 */, 186 /* 0xba */, 187 /* 0xbb */, 188 /* 0xbc */, 189 /* 0xbd */, 190 /* 0xbe */, 191 /* 0xbf */, 192 /* 0xc0 */, 193 /* 0xc1 */, 194 /* 0xc2 */, 195 /* 0xc3 */, 196 /* 0xc4 */, 197 /* 0xc5 */, 198 /* 0xc6 */, 199 /* 0xc7 */, 200 /* 0xc8 */, 201 /* 0xc9 */, 202 /* 0xca */, 203 /* 0xcb */, 204 /* 0xcc */, 205 /* 0xcd */, 206 /* 0xce */, 207 /* 0xcf */, 208 /* 0xd0 */, 209 /* 0xd1 */, 210 /* 0xd2 */, 211 /* 0xd3 */, 212 /* 0xd4 */, 213 /* 0xd5 */, 214 /* 0xd6 */, 215 /* 0xd7 */, 216 /* 0xd8 */, 217 /* 0xd9 */, 218 /* 0xda */, 219 /* 0xdb */, 220 /* 0xdc */, 221 /* 0xdd */, 222 /* 0xde */, 223 /* 0xdf */, 224 /* 0xe0 */, 225 /* 0xe1 */, 226 /* 0xe2 */, 227 /* 0xe3 */, 228 /* 0xe4 */, 229 /* 0xe5 */, 230 /* 0xe6 */, 231 /* 0xe7 */, 232 /* 0xe8 */, 233 /* 0xe9 */, 234 /* 0xea */, 235 /* 0xeb */, 236 /* 0xec */, 237 /* 0xed */, 238 /* 0xee */, 239 /* 0xef */, 240 /* 0xf0 */, 241 /* 0xf1 */, 242 /* 0xf2 */, 243 /* 0xf3 */, 244 /* 0xf4 */, 245 /* 0xf5 */, 246 /* 0xf6 */, 247 /* 0xf7 */, 248 /* 0xf8 */, 249 /* 0xf9 */, 250 /* 0xfa */, 251 /* 0xfb */, 252 /* 0xfc */, 253 /* 0xfd */, 254 /* 0xfe */, 255 /* 0xff */, }; void nghttp2_downcase(uint8_t *s, size_t len) { size_t i; for (i = 0; i < len; ++i) { s[i] = DOWNCASE_TBL[s[i]]; } } /* * local_window_size * ^ * * | * recv_window_size * | * * ^ * | * * | * 0+++++++++ * | * * \ * | * * | This rage is hidden in flow control. But it must be * v * * / kept in order to restore it when window size is enlarged. 
* recv_reduction * (+ for negative direction) * * recv_window_size could be negative if we decrease * local_window_size more than recv_window_size: * * local_window_size * ^ * * | * * | * * 0++++++++ * | * ^ recv_window_size (negative) * | * | * v * * * recv_reduction */ int nghttp2_adjust_local_window_size(int32_t *local_window_size_ptr, int32_t *recv_window_size_ptr, int32_t *recv_reduction_ptr, int32_t *delta_ptr) { if (*delta_ptr > 0) { int32_t recv_reduction_delta; int32_t delta; int32_t new_recv_window_size = nghttp2_max(0, *recv_window_size_ptr) - *delta_ptr; if (new_recv_window_size >= 0) { *recv_window_size_ptr = new_recv_window_size; return 0; } delta = -new_recv_window_size; /* The delta size is strictly more than received bytes. Increase local_window_size by that difference |delta|. */ if (*local_window_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - delta) { return NGHTTP2_ERR_FLOW_CONTROL; } *local_window_size_ptr += delta; /* If there is recv_reduction due to earlier window_size reduction, we have to adjust it too. */ recv_reduction_delta = nghttp2_min(*recv_reduction_ptr, delta); *recv_reduction_ptr -= recv_reduction_delta; if (*recv_window_size_ptr < 0) { *recv_window_size_ptr += recv_reduction_delta; } else { /* If *recv_window_size_ptr > 0, then those bytes are going to be returned to the remote peer (by WINDOW_UPDATE with the adjusted *delta_ptr), so it is effectively 0 now. We set it to recv_reduction_delta, because the caller does not take it into account in *delta_ptr. */ *recv_window_size_ptr = recv_reduction_delta; } /* recv_reduction_delta must be paid from *delta_ptr, since it was added in window size reduction (see below). */ *delta_ptr -= recv_reduction_delta; return 0; } if (*local_window_size_ptr + *delta_ptr < 0 || *recv_window_size_ptr < INT32_MIN - *delta_ptr || *recv_reduction_ptr > INT32_MAX + *delta_ptr) { return NGHTTP2_ERR_FLOW_CONTROL; } /* Decreasing the local window size. Note that we achieve this without notifying the remote peer. To do this, we cut recv_window_size by -delta. This means that we don't send WINDOW_UPDATE for -delta bytes. */ *local_window_size_ptr += *delta_ptr; *recv_window_size_ptr += *delta_ptr; *recv_reduction_ptr -= *delta_ptr; *delta_ptr = 0; return 0; } int nghttp2_increase_local_window_size(int32_t *local_window_size_ptr, int32_t *recv_window_size_ptr, int32_t *recv_reduction_ptr, int32_t *delta_ptr) { int32_t recv_reduction_delta; int32_t delta; delta = *delta_ptr; assert(delta >= 0); /* The delta size is strictly more than received bytes. Increase local_window_size by that difference |delta|. */ if (*local_window_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - delta) { return NGHTTP2_ERR_FLOW_CONTROL; } *local_window_size_ptr += delta; /* If there is recv_reduction due to earlier window_size reduction, we have to adjust it too. */ recv_reduction_delta = nghttp2_min(*recv_reduction_ptr, delta); *recv_reduction_ptr -= recv_reduction_delta; *recv_window_size_ptr += recv_reduction_delta; /* recv_reduction_delta must be paid from *delta_ptr, since it was added in window size reduction (see the decreasing branch of nghttp2_adjust_local_window_size above).
*/ *delta_ptr -= recv_reduction_delta; return 0; } int nghttp2_should_send_window_update(int32_t local_window_size, int32_t recv_window_size) { return recv_window_size > 0 && recv_window_size >= local_window_size / 2; } const char *nghttp2_strerror(int error_code) { switch (error_code) { case 0: return "Success"; case NGHTTP2_ERR_INVALID_ARGUMENT: return "Invalid argument"; case NGHTTP2_ERR_BUFFER_ERROR: return "Out of buffer space"; case NGHTTP2_ERR_UNSUPPORTED_VERSION: return "Unsupported SPDY version"; case NGHTTP2_ERR_WOULDBLOCK: return "Operation would block"; case NGHTTP2_ERR_PROTO: return "Protocol error"; case NGHTTP2_ERR_INVALID_FRAME: return "Invalid frame octets"; case NGHTTP2_ERR_EOF: return "EOF"; case NGHTTP2_ERR_DEFERRED: return "Data transfer deferred"; case NGHTTP2_ERR_STREAM_ID_NOT_AVAILABLE: return "No more Stream ID available"; case NGHTTP2_ERR_STREAM_CLOSED: return "Stream was already closed or invalid"; case NGHTTP2_ERR_STREAM_CLOSING: return "Stream is closing"; case NGHTTP2_ERR_STREAM_SHUT_WR: return "The transmission is not allowed for this stream"; case NGHTTP2_ERR_INVALID_STREAM_ID: return "Stream ID is invalid"; case NGHTTP2_ERR_INVALID_STREAM_STATE: return "Invalid stream state"; case NGHTTP2_ERR_DEFERRED_DATA_EXIST: return "Another DATA frame has already been deferred"; case NGHTTP2_ERR_START_STREAM_NOT_ALLOWED: return "request HEADERS is not allowed"; case NGHTTP2_ERR_GOAWAY_ALREADY_SENT: return "GOAWAY has already been sent"; case NGHTTP2_ERR_INVALID_HEADER_BLOCK: return "Invalid header block"; case NGHTTP2_ERR_INVALID_STATE: return "Invalid state"; case NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE: return "The user callback function failed due to the temporal error"; case NGHTTP2_ERR_FRAME_SIZE_ERROR: return "The length of the frame is invalid"; case NGHTTP2_ERR_HEADER_COMP: return "Header compression/decompression error"; case NGHTTP2_ERR_FLOW_CONTROL: return "Flow control error"; case NGHTTP2_ERR_INSUFF_BUFSIZE: return "Insufficient buffer size given to function"; case NGHTTP2_ERR_PAUSE: return "Callback was paused by the application"; case NGHTTP2_ERR_TOO_MANY_INFLIGHT_SETTINGS: return "Too many inflight SETTINGS"; case NGHTTP2_ERR_PUSH_DISABLED: return "Server push is disabled by peer"; case NGHTTP2_ERR_DATA_EXIST: return "DATA or HEADERS frame has already been submitted for the stream"; case NGHTTP2_ERR_SESSION_CLOSING: return "The current session is closing"; case NGHTTP2_ERR_HTTP_HEADER: return "Invalid HTTP header field was received"; case NGHTTP2_ERR_HTTP_MESSAGING: return "Violation in HTTP messaging rule"; case NGHTTP2_ERR_REFUSED_STREAM: return "Stream was refused"; case NGHTTP2_ERR_INTERNAL: return "Internal error"; case NGHTTP2_ERR_CANCEL: return "Cancel"; case NGHTTP2_ERR_SETTINGS_EXPECTED: return "When a local endpoint expects to receive SETTINGS frame, it " "receives an other type of frame"; case NGHTTP2_ERR_NOMEM: return "Out of memory"; case NGHTTP2_ERR_CALLBACK_FAILURE: return "The user callback function failed"; case NGHTTP2_ERR_BAD_CLIENT_MAGIC: return "Received bad client magic byte string"; case NGHTTP2_ERR_FLOODED: return "Flooding was detected in this HTTP/2 session, and it must be " "closed"; case NGHTTP2_ERR_TOO_MANY_SETTINGS: return "SETTINGS frame contained more than the maximum allowed entries"; default: return "Unknown error code"; } } /* Generated by gennmchartbl.py */ static const int VALID_HD_NAME_CHARS[] = { 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 0 /* EOT */, 0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 0 /* 
HT */, 0 /* LF */, 0 /* VT */, 0 /* FF */, 0 /* CR */, 0 /* SO */, 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* DC2 */, 0 /* DC3 */, 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, 0 /* RS */, 0 /* US */, 0 /* SPC */, 1 /* ! */, 0 /* " */, 1 /* # */, 1 /* $ */, 1 /* % */, 1 /* & */, 1 /* ' */, 0 /* ( */, 0 /* ) */, 1 /* * */, 1 /* + */, 0 /* , */, 1 /* - */, 1 /* . */, 0 /* / */, 1 /* 0 */, 1 /* 1 */, 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 1 /* ^ */, 1 /* _ */, 1 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, 1 /* x */, 1 /* y */, 1 /* z */, 0 /* { */, 1 /* | */, 0 /* } */, 1 /* ~ */, 0 /* DEL */, 0 /* 0x80 */, 0 /* 0x81 */, 0 /* 0x82 */, 0 /* 0x83 */, 0 /* 0x84 */, 0 /* 0x85 */, 0 /* 0x86 */, 0 /* 0x87 */, 0 /* 0x88 */, 0 /* 0x89 */, 0 /* 0x8a */, 0 /* 0x8b */, 0 /* 0x8c */, 0 /* 0x8d */, 0 /* 0x8e */, 0 /* 0x8f */, 0 /* 0x90 */, 0 /* 0x91 */, 0 /* 0x92 */, 0 /* 0x93 */, 0 /* 0x94 */, 0 /* 0x95 */, 0 /* 0x96 */, 0 /* 0x97 */, 0 /* 0x98 */, 0 /* 0x99 */, 0 /* 0x9a */, 0 /* 0x9b */, 0 /* 0x9c */, 0 /* 0x9d */, 0 /* 0x9e */, 0 /* 0x9f */, 0 /* 0xa0 */, 0 /* 0xa1 */, 0 /* 0xa2 */, 0 /* 0xa3 */, 0 /* 0xa4 */, 0 /* 0xa5 */, 0 /* 0xa6 */, 0 /* 0xa7 */, 0 /* 0xa8 */, 0 /* 0xa9 */, 0 /* 0xaa */, 0 /* 0xab */, 0 /* 0xac */, 0 /* 0xad */, 0 /* 0xae */, 0 /* 0xaf */, 0 /* 0xb0 */, 0 /* 0xb1 */, 0 /* 0xb2 */, 0 /* 0xb3 */, 0 /* 0xb4 */, 0 /* 0xb5 */, 0 /* 0xb6 */, 0 /* 0xb7 */, 0 /* 0xb8 */, 0 /* 0xb9 */, 0 /* 0xba */, 0 /* 0xbb */, 0 /* 0xbc */, 0 /* 0xbd */, 0 /* 0xbe */, 0 /* 0xbf */, 0 /* 0xc0 */, 0 /* 0xc1 */, 0 /* 0xc2 */, 0 /* 0xc3 */, 0 /* 0xc4 */, 0 /* 0xc5 */, 0 /* 0xc6 */, 0 /* 0xc7 */, 0 /* 0xc8 */, 0 /* 0xc9 */, 0 /* 0xca */, 0 /* 0xcb */, 0 /* 0xcc */, 0 /* 0xcd */, 0 /* 0xce */, 0 /* 0xcf */, 0 /* 0xd0 */, 0 /* 0xd1 */, 0 /* 0xd2 */, 0 /* 0xd3 */, 0 /* 0xd4 */, 0 /* 0xd5 */, 0 /* 0xd6 */, 0 /* 0xd7 */, 0 /* 0xd8 */, 0 /* 0xd9 */, 0 /* 0xda */, 0 /* 0xdb */, 0 /* 0xdc */, 0 /* 0xdd */, 0 /* 0xde */, 0 /* 0xdf */, 0 /* 0xe0 */, 0 /* 0xe1 */, 0 /* 0xe2 */, 0 /* 0xe3 */, 0 /* 0xe4 */, 0 /* 0xe5 */, 0 /* 0xe6 */, 0 /* 0xe7 */, 0 /* 0xe8 */, 0 /* 0xe9 */, 0 /* 0xea */, 0 /* 0xeb */, 0 /* 0xec */, 0 /* 0xed */, 0 /* 0xee */, 0 /* 0xef */, 0 /* 0xf0 */, 0 /* 0xf1 */, 0 /* 0xf2 */, 0 /* 0xf3 */, 0 /* 0xf4 */, 0 /* 0xf5 */, 0 /* 0xf6 */, 0 /* 0xf7 */, 0 /* 0xf8 */, 0 /* 0xf9 */, 0 /* 0xfa */, 0 /* 0xfb */, 0 /* 0xfc */, 0 /* 0xfd */, 0 /* 0xfe */, 0 /* 0xff */ }; int nghttp2_check_header_name(const uint8_t *name, size_t len) { const uint8_t *last; if (len == 0) { return 0; } if (*name == ':') { if (len == 1) { return 0; } ++name; --len; } for (last = name + len; name != last; ++name) { if (!VALID_HD_NAME_CHARS[*name]) { return 0; } } return 1; } /* Generated by genvchartbl.py */ static const int VALID_HD_VALUE_CHARS[] = { 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 
0 /* EOT */, 0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 1 /* HT */, 0 /* LF */, 0 /* VT */, 0 /* FF */, 0 /* CR */, 0 /* SO */, 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* DC2 */, 0 /* DC3 */, 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, 0 /* RS */, 0 /* US */, 1 /* SPC */, 1 /* ! */, 1 /* " */, 1 /* # */, 1 /* $ */, 1 /* % */, 1 /* & */, 1 /* ' */, 1 /* ( */, 1 /* ) */, 1 /* * */, 1 /* + */, 1 /* , */, 1 /* - */, 1 /* . */, 1 /* / */, 1 /* 0 */, 1 /* 1 */, 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 1 /* : */, 1 /* ; */, 1 /* < */, 1 /* = */, 1 /* > */, 1 /* ? */, 1 /* @ */, 1 /* A */, 1 /* B */, 1 /* C */, 1 /* D */, 1 /* E */, 1 /* F */, 1 /* G */, 1 /* H */, 1 /* I */, 1 /* J */, 1 /* K */, 1 /* L */, 1 /* M */, 1 /* N */, 1 /* O */, 1 /* P */, 1 /* Q */, 1 /* R */, 1 /* S */, 1 /* T */, 1 /* U */, 1 /* V */, 1 /* W */, 1 /* X */, 1 /* Y */, 1 /* Z */, 1 /* [ */, 1 /* \ */, 1 /* ] */, 1 /* ^ */, 1 /* _ */, 1 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, 1 /* x */, 1 /* y */, 1 /* z */, 1 /* { */, 1 /* | */, 1 /* } */, 1 /* ~ */, 0 /* DEL */, 1 /* 0x80 */, 1 /* 0x81 */, 1 /* 0x82 */, 1 /* 0x83 */, 1 /* 0x84 */, 1 /* 0x85 */, 1 /* 0x86 */, 1 /* 0x87 */, 1 /* 0x88 */, 1 /* 0x89 */, 1 /* 0x8a */, 1 /* 0x8b */, 1 /* 0x8c */, 1 /* 0x8d */, 1 /* 0x8e */, 1 /* 0x8f */, 1 /* 0x90 */, 1 /* 0x91 */, 1 /* 0x92 */, 1 /* 0x93 */, 1 /* 0x94 */, 1 /* 0x95 */, 1 /* 0x96 */, 1 /* 0x97 */, 1 /* 0x98 */, 1 /* 0x99 */, 1 /* 0x9a */, 1 /* 0x9b */, 1 /* 0x9c */, 1 /* 0x9d */, 1 /* 0x9e */, 1 /* 0x9f */, 1 /* 0xa0 */, 1 /* 0xa1 */, 1 /* 0xa2 */, 1 /* 0xa3 */, 1 /* 0xa4 */, 1 /* 0xa5 */, 1 /* 0xa6 */, 1 /* 0xa7 */, 1 /* 0xa8 */, 1 /* 0xa9 */, 1 /* 0xaa */, 1 /* 0xab */, 1 /* 0xac */, 1 /* 0xad */, 1 /* 0xae */, 1 /* 0xaf */, 1 /* 0xb0 */, 1 /* 0xb1 */, 1 /* 0xb2 */, 1 /* 0xb3 */, 1 /* 0xb4 */, 1 /* 0xb5 */, 1 /* 0xb6 */, 1 /* 0xb7 */, 1 /* 0xb8 */, 1 /* 0xb9 */, 1 /* 0xba */, 1 /* 0xbb */, 1 /* 0xbc */, 1 /* 0xbd */, 1 /* 0xbe */, 1 /* 0xbf */, 1 /* 0xc0 */, 1 /* 0xc1 */, 1 /* 0xc2 */, 1 /* 0xc3 */, 1 /* 0xc4 */, 1 /* 0xc5 */, 1 /* 0xc6 */, 1 /* 0xc7 */, 1 /* 0xc8 */, 1 /* 0xc9 */, 1 /* 0xca */, 1 /* 0xcb */, 1 /* 0xcc */, 1 /* 0xcd */, 1 /* 0xce */, 1 /* 0xcf */, 1 /* 0xd0 */, 1 /* 0xd1 */, 1 /* 0xd2 */, 1 /* 0xd3 */, 1 /* 0xd4 */, 1 /* 0xd5 */, 1 /* 0xd6 */, 1 /* 0xd7 */, 1 /* 0xd8 */, 1 /* 0xd9 */, 1 /* 0xda */, 1 /* 0xdb */, 1 /* 0xdc */, 1 /* 0xdd */, 1 /* 0xde */, 1 /* 0xdf */, 1 /* 0xe0 */, 1 /* 0xe1 */, 1 /* 0xe2 */, 1 /* 0xe3 */, 1 /* 0xe4 */, 1 /* 0xe5 */, 1 /* 0xe6 */, 1 /* 0xe7 */, 1 /* 0xe8 */, 1 /* 0xe9 */, 1 /* 0xea */, 1 /* 0xeb */, 1 /* 0xec */, 1 /* 0xed */, 1 /* 0xee */, 1 /* 0xef */, 1 /* 0xf0 */, 1 /* 0xf1 */, 1 /* 0xf2 */, 1 /* 0xf3 */, 1 /* 0xf4 */, 1 /* 0xf5 */, 1 /* 0xf6 */, 1 /* 0xf7 */, 1 /* 0xf8 */, 1 /* 0xf9 */, 1 /* 0xfa */, 1 /* 0xfb */, 1 /* 0xfc */, 1 /* 0xfd */, 1 /* 0xfe */, 1 /* 0xff */ }; int nghttp2_check_header_value(const uint8_t *value, size_t len) { const uint8_t *last; for (last = value + len; value != last; ++value) { if (!VALID_HD_VALUE_CHARS[*value]) { return 0; } } return 1; } /* Generated by genauthroitychartbl.py */ static char VALID_AUTHORITY_CHARS[] = { 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 0 /* EOT */, 
0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 0 /* HT */, 0 /* LF */, 0 /* VT */, 0 /* FF */, 0 /* CR */, 0 /* SO */, 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* DC2 */, 0 /* DC3 */, 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, 0 /* RS */, 0 /* US */, 0 /* SPC */, 1 /* ! */, 0 /* " */, 0 /* # */, 1 /* $ */, 1 /* % */, 1 /* & */, 1 /* ' */, 1 /* ( */, 1 /* ) */, 1 /* * */, 1 /* + */, 1 /* , */, 1 /* - */, 1 /* . */, 0 /* / */, 1 /* 0 */, 1 /* 1 */, 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 1 /* : */, 1 /* ; */, 0 /* < */, 1 /* = */, 0 /* > */, 0 /* ? */, 1 /* @ */, 1 /* A */, 1 /* B */, 1 /* C */, 1 /* D */, 1 /* E */, 1 /* F */, 1 /* G */, 1 /* H */, 1 /* I */, 1 /* J */, 1 /* K */, 1 /* L */, 1 /* M */, 1 /* N */, 1 /* O */, 1 /* P */, 1 /* Q */, 1 /* R */, 1 /* S */, 1 /* T */, 1 /* U */, 1 /* V */, 1 /* W */, 1 /* X */, 1 /* Y */, 1 /* Z */, 1 /* [ */, 0 /* \ */, 1 /* ] */, 0 /* ^ */, 1 /* _ */, 0 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, 1 /* x */, 1 /* y */, 1 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 1 /* ~ */, 0 /* DEL */, 0 /* 0x80 */, 0 /* 0x81 */, 0 /* 0x82 */, 0 /* 0x83 */, 0 /* 0x84 */, 0 /* 0x85 */, 0 /* 0x86 */, 0 /* 0x87 */, 0 /* 0x88 */, 0 /* 0x89 */, 0 /* 0x8a */, 0 /* 0x8b */, 0 /* 0x8c */, 0 /* 0x8d */, 0 /* 0x8e */, 0 /* 0x8f */, 0 /* 0x90 */, 0 /* 0x91 */, 0 /* 0x92 */, 0 /* 0x93 */, 0 /* 0x94 */, 0 /* 0x95 */, 0 /* 0x96 */, 0 /* 0x97 */, 0 /* 0x98 */, 0 /* 0x99 */, 0 /* 0x9a */, 0 /* 0x9b */, 0 /* 0x9c */, 0 /* 0x9d */, 0 /* 0x9e */, 0 /* 0x9f */, 0 /* 0xa0 */, 0 /* 0xa1 */, 0 /* 0xa2 */, 0 /* 0xa3 */, 0 /* 0xa4 */, 0 /* 0xa5 */, 0 /* 0xa6 */, 0 /* 0xa7 */, 0 /* 0xa8 */, 0 /* 0xa9 */, 0 /* 0xaa */, 0 /* 0xab */, 0 /* 0xac */, 0 /* 0xad */, 0 /* 0xae */, 0 /* 0xaf */, 0 /* 0xb0 */, 0 /* 0xb1 */, 0 /* 0xb2 */, 0 /* 0xb3 */, 0 /* 0xb4 */, 0 /* 0xb5 */, 0 /* 0xb6 */, 0 /* 0xb7 */, 0 /* 0xb8 */, 0 /* 0xb9 */, 0 /* 0xba */, 0 /* 0xbb */, 0 /* 0xbc */, 0 /* 0xbd */, 0 /* 0xbe */, 0 /* 0xbf */, 0 /* 0xc0 */, 0 /* 0xc1 */, 0 /* 0xc2 */, 0 /* 0xc3 */, 0 /* 0xc4 */, 0 /* 0xc5 */, 0 /* 0xc6 */, 0 /* 0xc7 */, 0 /* 0xc8 */, 0 /* 0xc9 */, 0 /* 0xca */, 0 /* 0xcb */, 0 /* 0xcc */, 0 /* 0xcd */, 0 /* 0xce */, 0 /* 0xcf */, 0 /* 0xd0 */, 0 /* 0xd1 */, 0 /* 0xd2 */, 0 /* 0xd3 */, 0 /* 0xd4 */, 0 /* 0xd5 */, 0 /* 0xd6 */, 0 /* 0xd7 */, 0 /* 0xd8 */, 0 /* 0xd9 */, 0 /* 0xda */, 0 /* 0xdb */, 0 /* 0xdc */, 0 /* 0xdd */, 0 /* 0xde */, 0 /* 0xdf */, 0 /* 0xe0 */, 0 /* 0xe1 */, 0 /* 0xe2 */, 0 /* 0xe3 */, 0 /* 0xe4 */, 0 /* 0xe5 */, 0 /* 0xe6 */, 0 /* 0xe7 */, 0 /* 0xe8 */, 0 /* 0xe9 */, 0 /* 0xea */, 0 /* 0xeb */, 0 /* 0xec */, 0 /* 0xed */, 0 /* 0xee */, 0 /* 0xef */, 0 /* 0xf0 */, 0 /* 0xf1 */, 0 /* 0xf2 */, 0 /* 0xf3 */, 0 /* 0xf4 */, 0 /* 0xf5 */, 0 /* 0xf6 */, 0 /* 0xf7 */, 0 /* 0xf8 */, 0 /* 0xf9 */, 0 /* 0xfa */, 0 /* 0xfb */, 0 /* 0xfc */, 0 /* 0xfd */, 0 /* 0xfe */, 0 /* 0xff */ }; int nghttp2_check_authority(const uint8_t *value, size_t len) { const uint8_t *last; for (last = value + len; value != last; ++value) { if (!VALID_AUTHORITY_CHARS[*value]) { return 0; } } return 1; } uint8_t *nghttp2_cpymem(uint8_t *dest, const void *src, size_t len) { if (len == 0) { return dest; } memcpy(dest, src, len); return dest + len; } const char 
*nghttp2_http2_strerror(uint32_t error_code) { switch (error_code) { case NGHTTP2_NO_ERROR: return "NO_ERROR"; case NGHTTP2_PROTOCOL_ERROR: return "PROTOCOL_ERROR"; case NGHTTP2_INTERNAL_ERROR: return "INTERNAL_ERROR"; case NGHTTP2_FLOW_CONTROL_ERROR: return "FLOW_CONTROL_ERROR"; case NGHTTP2_SETTINGS_TIMEOUT: return "SETTINGS_TIMEOUT"; case NGHTTP2_STREAM_CLOSED: return "STREAM_CLOSED"; case NGHTTP2_FRAME_SIZE_ERROR: return "FRAME_SIZE_ERROR"; case NGHTTP2_REFUSED_STREAM: return "REFUSED_STREAM"; case NGHTTP2_CANCEL: return "CANCEL"; case NGHTTP2_COMPRESSION_ERROR: return "COMPRESSION_ERROR"; case NGHTTP2_CONNECT_ERROR: return "CONNECT_ERROR"; case NGHTTP2_ENHANCE_YOUR_CALM: return "ENHANCE_YOUR_CALM"; case NGHTTP2_INADEQUATE_SECURITY: return "INADEQUATE_SECURITY"; case NGHTTP2_HTTP_1_1_REQUIRED: return "HTTP_1_1_REQUIRED"; default: return "unknown"; } }
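/* Illustrative sketch (not part of the original nghttp2 source): how the
   validators above compose when sanitizing a header field before it is fed
   to the HPACK deflater. The field is lower-cased first, since
   nghttp2_check_header_name() rejects upper-case letters by design. */
static int example_sanitize_header_field(uint8_t *name, size_t namelen,
                                         const uint8_t *value,
                                         size_t valuelen) {
  /* HTTP/2 requires lower-case field names; DOWNCASE_TBL leaves
     non-alphabetic octets untouched. */
  nghttp2_downcase(name, namelen);

  if (!nghttp2_check_header_name(name, namelen)) {
    return 0; /* e.g. empty name, lone ':', or an illegal octet */
  }
  if (!nghttp2_check_header_value(value, valuelen)) {
    return 0; /* e.g. embedded CR, LF, or NUL */
  }
  return 1;
}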
./CrossVul/dataset_final_sorted/CWE-707/c/good_3936_3
crossvul-cpp_data_bad_3936_3
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2012 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "nghttp2_helper.h" #include <assert.h> #include <string.h> #include "nghttp2_net.h" void nghttp2_put_uint16be(uint8_t *buf, uint16_t n) { uint16_t x = htons(n); memcpy(buf, &x, sizeof(uint16_t)); } void nghttp2_put_uint32be(uint8_t *buf, uint32_t n) { uint32_t x = htonl(n); memcpy(buf, &x, sizeof(uint32_t)); } uint16_t nghttp2_get_uint16(const uint8_t *data) { uint16_t n; memcpy(&n, data, sizeof(uint16_t)); return ntohs(n); } uint32_t nghttp2_get_uint32(const uint8_t *data) { uint32_t n; memcpy(&n, data, sizeof(uint32_t)); return ntohl(n); } /* Generated by gendowncasetbl.py */ static const uint8_t DOWNCASE_TBL[] = { 0 /* NUL */, 1 /* SOH */, 2 /* STX */, 3 /* ETX */, 4 /* EOT */, 5 /* ENQ */, 6 /* ACK */, 7 /* BEL */, 8 /* BS */, 9 /* HT */, 10 /* LF */, 11 /* VT */, 12 /* FF */, 13 /* CR */, 14 /* SO */, 15 /* SI */, 16 /* DLE */, 17 /* DC1 */, 18 /* DC2 */, 19 /* DC3 */, 20 /* DC4 */, 21 /* NAK */, 22 /* SYN */, 23 /* ETB */, 24 /* CAN */, 25 /* EM */, 26 /* SUB */, 27 /* ESC */, 28 /* FS */, 29 /* GS */, 30 /* RS */, 31 /* US */, 32 /* SPC */, 33 /* ! */, 34 /* " */, 35 /* # */, 36 /* $ */, 37 /* % */, 38 /* & */, 39 /* ' */, 40 /* ( */, 41 /* ) */, 42 /* * */, 43 /* + */, 44 /* , */, 45 /* - */, 46 /* . */, 47 /* / */, 48 /* 0 */, 49 /* 1 */, 50 /* 2 */, 51 /* 3 */, 52 /* 4 */, 53 /* 5 */, 54 /* 6 */, 55 /* 7 */, 56 /* 8 */, 57 /* 9 */, 58 /* : */, 59 /* ; */, 60 /* < */, 61 /* = */, 62 /* > */, 63 /* ? 
*/, 64 /* @ */, 97 /* A */, 98 /* B */, 99 /* C */, 100 /* D */, 101 /* E */, 102 /* F */, 103 /* G */, 104 /* H */, 105 /* I */, 106 /* J */, 107 /* K */, 108 /* L */, 109 /* M */, 110 /* N */, 111 /* O */, 112 /* P */, 113 /* Q */, 114 /* R */, 115 /* S */, 116 /* T */, 117 /* U */, 118 /* V */, 119 /* W */, 120 /* X */, 121 /* Y */, 122 /* Z */, 91 /* [ */, 92 /* \ */, 93 /* ] */, 94 /* ^ */, 95 /* _ */, 96 /* ` */, 97 /* a */, 98 /* b */, 99 /* c */, 100 /* d */, 101 /* e */, 102 /* f */, 103 /* g */, 104 /* h */, 105 /* i */, 106 /* j */, 107 /* k */, 108 /* l */, 109 /* m */, 110 /* n */, 111 /* o */, 112 /* p */, 113 /* q */, 114 /* r */, 115 /* s */, 116 /* t */, 117 /* u */, 118 /* v */, 119 /* w */, 120 /* x */, 121 /* y */, 122 /* z */, 123 /* { */, 124 /* | */, 125 /* } */, 126 /* ~ */, 127 /* DEL */, 128 /* 0x80 */, 129 /* 0x81 */, 130 /* 0x82 */, 131 /* 0x83 */, 132 /* 0x84 */, 133 /* 0x85 */, 134 /* 0x86 */, 135 /* 0x87 */, 136 /* 0x88 */, 137 /* 0x89 */, 138 /* 0x8a */, 139 /* 0x8b */, 140 /* 0x8c */, 141 /* 0x8d */, 142 /* 0x8e */, 143 /* 0x8f */, 144 /* 0x90 */, 145 /* 0x91 */, 146 /* 0x92 */, 147 /* 0x93 */, 148 /* 0x94 */, 149 /* 0x95 */, 150 /* 0x96 */, 151 /* 0x97 */, 152 /* 0x98 */, 153 /* 0x99 */, 154 /* 0x9a */, 155 /* 0x9b */, 156 /* 0x9c */, 157 /* 0x9d */, 158 /* 0x9e */, 159 /* 0x9f */, 160 /* 0xa0 */, 161 /* 0xa1 */, 162 /* 0xa2 */, 163 /* 0xa3 */, 164 /* 0xa4 */, 165 /* 0xa5 */, 166 /* 0xa6 */, 167 /* 0xa7 */, 168 /* 0xa8 */, 169 /* 0xa9 */, 170 /* 0xaa */, 171 /* 0xab */, 172 /* 0xac */, 173 /* 0xad */, 174 /* 0xae */, 175 /* 0xaf */, 176 /* 0xb0 */, 177 /* 0xb1 */, 178 /* 0xb2 */, 179 /* 0xb3 */, 180 /* 0xb4 */, 181 /* 0xb5 */, 182 /* 0xb6 */, 183 /* 0xb7 */, 184 /* 0xb8 */, 185 /* 0xb9 */, 186 /* 0xba */, 187 /* 0xbb */, 188 /* 0xbc */, 189 /* 0xbd */, 190 /* 0xbe */, 191 /* 0xbf */, 192 /* 0xc0 */, 193 /* 0xc1 */, 194 /* 0xc2 */, 195 /* 0xc3 */, 196 /* 0xc4 */, 197 /* 0xc5 */, 198 /* 0xc6 */, 199 /* 0xc7 */, 200 /* 0xc8 */, 201 /* 0xc9 */, 202 /* 0xca */, 203 /* 0xcb */, 204 /* 0xcc */, 205 /* 0xcd */, 206 /* 0xce */, 207 /* 0xcf */, 208 /* 0xd0 */, 209 /* 0xd1 */, 210 /* 0xd2 */, 211 /* 0xd3 */, 212 /* 0xd4 */, 213 /* 0xd5 */, 214 /* 0xd6 */, 215 /* 0xd7 */, 216 /* 0xd8 */, 217 /* 0xd9 */, 218 /* 0xda */, 219 /* 0xdb */, 220 /* 0xdc */, 221 /* 0xdd */, 222 /* 0xde */, 223 /* 0xdf */, 224 /* 0xe0 */, 225 /* 0xe1 */, 226 /* 0xe2 */, 227 /* 0xe3 */, 228 /* 0xe4 */, 229 /* 0xe5 */, 230 /* 0xe6 */, 231 /* 0xe7 */, 232 /* 0xe8 */, 233 /* 0xe9 */, 234 /* 0xea */, 235 /* 0xeb */, 236 /* 0xec */, 237 /* 0xed */, 238 /* 0xee */, 239 /* 0xef */, 240 /* 0xf0 */, 241 /* 0xf1 */, 242 /* 0xf2 */, 243 /* 0xf3 */, 244 /* 0xf4 */, 245 /* 0xf5 */, 246 /* 0xf6 */, 247 /* 0xf7 */, 248 /* 0xf8 */, 249 /* 0xf9 */, 250 /* 0xfa */, 251 /* 0xfb */, 252 /* 0xfc */, 253 /* 0xfd */, 254 /* 0xfe */, 255 /* 0xff */, }; void nghttp2_downcase(uint8_t *s, size_t len) { size_t i; for (i = 0; i < len; ++i) { s[i] = DOWNCASE_TBL[s[i]]; } } /* * local_window_size * ^ * * | * recv_window_size * | * * ^ * | * * | * 0+++++++++ * | * * \ * | * * | This rage is hidden in flow control. But it must be * v * * / kept in order to restore it when window size is enlarged. 
* recv_reduction * (+ for negative direction) * * recv_window_size could be negative if we decrease * local_window_size more than recv_window_size: * * local_window_size * ^ * * | * * | * * 0++++++++ * | * ^ recv_window_size (negative) * | * | * v * * * recv_reduction */ int nghttp2_adjust_local_window_size(int32_t *local_window_size_ptr, int32_t *recv_window_size_ptr, int32_t *recv_reduction_ptr, int32_t *delta_ptr) { if (*delta_ptr > 0) { int32_t recv_reduction_delta; int32_t delta; int32_t new_recv_window_size = nghttp2_max(0, *recv_window_size_ptr) - *delta_ptr; if (new_recv_window_size >= 0) { *recv_window_size_ptr = new_recv_window_size; return 0; } delta = -new_recv_window_size; /* The delta size is strictly more than received bytes. Increase local_window_size by that difference |delta|. */ if (*local_window_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - delta) { return NGHTTP2_ERR_FLOW_CONTROL; } *local_window_size_ptr += delta; /* If there is recv_reduction due to earlier window_size reduction, we have to adjust it too. */ recv_reduction_delta = nghttp2_min(*recv_reduction_ptr, delta); *recv_reduction_ptr -= recv_reduction_delta; if (*recv_window_size_ptr < 0) { *recv_window_size_ptr += recv_reduction_delta; } else { /* If *recv_window_size_ptr > 0, then those bytes are going to be returned to the remote peer (by WINDOW_UPDATE with the adjusted *delta_ptr), so it is effectively 0 now. We set it to recv_reduction_delta, because the caller does not take it into account in *delta_ptr. */ *recv_window_size_ptr = recv_reduction_delta; } /* recv_reduction_delta must be paid from *delta_ptr, since it was added in window size reduction (see below). */ *delta_ptr -= recv_reduction_delta; return 0; } if (*local_window_size_ptr + *delta_ptr < 0 || *recv_window_size_ptr < INT32_MIN - *delta_ptr || *recv_reduction_ptr > INT32_MAX + *delta_ptr) { return NGHTTP2_ERR_FLOW_CONTROL; } /* Decreasing the local window size. Note that we achieve this without notifying the remote peer. To do this, we cut recv_window_size by -delta. This means that we don't send WINDOW_UPDATE for -delta bytes. */ *local_window_size_ptr += *delta_ptr; *recv_window_size_ptr += *delta_ptr; *recv_reduction_ptr -= *delta_ptr; *delta_ptr = 0; return 0; } int nghttp2_increase_local_window_size(int32_t *local_window_size_ptr, int32_t *recv_window_size_ptr, int32_t *recv_reduction_ptr, int32_t *delta_ptr) { int32_t recv_reduction_delta; int32_t delta; delta = *delta_ptr; assert(delta >= 0); /* The delta size is strictly more than received bytes. Increase local_window_size by that difference |delta|. */ if (*local_window_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - delta) { return NGHTTP2_ERR_FLOW_CONTROL; } *local_window_size_ptr += delta; /* If there is recv_reduction due to earlier window_size reduction, we have to adjust it too. */ recv_reduction_delta = nghttp2_min(*recv_reduction_ptr, delta); *recv_reduction_ptr -= recv_reduction_delta; *recv_window_size_ptr += recv_reduction_delta; /* recv_reduction_delta must be paid from *delta_ptr, since it was added in window size reduction (see the decreasing branch of nghttp2_adjust_local_window_size above).
*/ *delta_ptr -= recv_reduction_delta; return 0; } int nghttp2_should_send_window_update(int32_t local_window_size, int32_t recv_window_size) { return recv_window_size > 0 && recv_window_size >= local_window_size / 2; } const char *nghttp2_strerror(int error_code) { switch (error_code) { case 0: return "Success"; case NGHTTP2_ERR_INVALID_ARGUMENT: return "Invalid argument"; case NGHTTP2_ERR_BUFFER_ERROR: return "Out of buffer space"; case NGHTTP2_ERR_UNSUPPORTED_VERSION: return "Unsupported SPDY version"; case NGHTTP2_ERR_WOULDBLOCK: return "Operation would block"; case NGHTTP2_ERR_PROTO: return "Protocol error"; case NGHTTP2_ERR_INVALID_FRAME: return "Invalid frame octets"; case NGHTTP2_ERR_EOF: return "EOF"; case NGHTTP2_ERR_DEFERRED: return "Data transfer deferred"; case NGHTTP2_ERR_STREAM_ID_NOT_AVAILABLE: return "No more Stream ID available"; case NGHTTP2_ERR_STREAM_CLOSED: return "Stream was already closed or invalid"; case NGHTTP2_ERR_STREAM_CLOSING: return "Stream is closing"; case NGHTTP2_ERR_STREAM_SHUT_WR: return "The transmission is not allowed for this stream"; case NGHTTP2_ERR_INVALID_STREAM_ID: return "Stream ID is invalid"; case NGHTTP2_ERR_INVALID_STREAM_STATE: return "Invalid stream state"; case NGHTTP2_ERR_DEFERRED_DATA_EXIST: return "Another DATA frame has already been deferred"; case NGHTTP2_ERR_START_STREAM_NOT_ALLOWED: return "request HEADERS is not allowed"; case NGHTTP2_ERR_GOAWAY_ALREADY_SENT: return "GOAWAY has already been sent"; case NGHTTP2_ERR_INVALID_HEADER_BLOCK: return "Invalid header block"; case NGHTTP2_ERR_INVALID_STATE: return "Invalid state"; case NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE: return "The user callback function failed due to the temporal error"; case NGHTTP2_ERR_FRAME_SIZE_ERROR: return "The length of the frame is invalid"; case NGHTTP2_ERR_HEADER_COMP: return "Header compression/decompression error"; case NGHTTP2_ERR_FLOW_CONTROL: return "Flow control error"; case NGHTTP2_ERR_INSUFF_BUFSIZE: return "Insufficient buffer size given to function"; case NGHTTP2_ERR_PAUSE: return "Callback was paused by the application"; case NGHTTP2_ERR_TOO_MANY_INFLIGHT_SETTINGS: return "Too many inflight SETTINGS"; case NGHTTP2_ERR_PUSH_DISABLED: return "Server push is disabled by peer"; case NGHTTP2_ERR_DATA_EXIST: return "DATA or HEADERS frame has already been submitted for the stream"; case NGHTTP2_ERR_SESSION_CLOSING: return "The current session is closing"; case NGHTTP2_ERR_HTTP_HEADER: return "Invalid HTTP header field was received"; case NGHTTP2_ERR_HTTP_MESSAGING: return "Violation in HTTP messaging rule"; case NGHTTP2_ERR_REFUSED_STREAM: return "Stream was refused"; case NGHTTP2_ERR_INTERNAL: return "Internal error"; case NGHTTP2_ERR_CANCEL: return "Cancel"; case NGHTTP2_ERR_SETTINGS_EXPECTED: return "When a local endpoint expects to receive SETTINGS frame, it " "receives an other type of frame"; case NGHTTP2_ERR_NOMEM: return "Out of memory"; case NGHTTP2_ERR_CALLBACK_FAILURE: return "The user callback function failed"; case NGHTTP2_ERR_BAD_CLIENT_MAGIC: return "Received bad client magic byte string"; case NGHTTP2_ERR_FLOODED: return "Flooding was detected in this HTTP/2 session, and it must be " "closed"; default: return "Unknown error code"; } } /* Generated by gennmchartbl.py */ static const int VALID_HD_NAME_CHARS[] = { 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 0 /* EOT */, 0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 0 /* HT */, 0 /* LF */, 0 /* VT */, 0 /* FF */, 0 /* CR */, 0 /* SO */, 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* 
DC2 */, 0 /* DC3 */, 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, 0 /* RS */, 0 /* US */, 0 /* SPC */, 1 /* ! */, 0 /* " */, 1 /* # */, 1 /* $ */, 1 /* % */, 1 /* & */, 1 /* ' */, 0 /* ( */, 0 /* ) */, 1 /* * */, 1 /* + */, 0 /* , */, 1 /* - */, 1 /* . */, 0 /* / */, 1 /* 0 */, 1 /* 1 */, 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 0 /* : */, 0 /* ; */, 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 1 /* ^ */, 1 /* _ */, 1 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, 1 /* x */, 1 /* y */, 1 /* z */, 0 /* { */, 1 /* | */, 0 /* } */, 1 /* ~ */, 0 /* DEL */, 0 /* 0x80 */, 0 /* 0x81 */, 0 /* 0x82 */, 0 /* 0x83 */, 0 /* 0x84 */, 0 /* 0x85 */, 0 /* 0x86 */, 0 /* 0x87 */, 0 /* 0x88 */, 0 /* 0x89 */, 0 /* 0x8a */, 0 /* 0x8b */, 0 /* 0x8c */, 0 /* 0x8d */, 0 /* 0x8e */, 0 /* 0x8f */, 0 /* 0x90 */, 0 /* 0x91 */, 0 /* 0x92 */, 0 /* 0x93 */, 0 /* 0x94 */, 0 /* 0x95 */, 0 /* 0x96 */, 0 /* 0x97 */, 0 /* 0x98 */, 0 /* 0x99 */, 0 /* 0x9a */, 0 /* 0x9b */, 0 /* 0x9c */, 0 /* 0x9d */, 0 /* 0x9e */, 0 /* 0x9f */, 0 /* 0xa0 */, 0 /* 0xa1 */, 0 /* 0xa2 */, 0 /* 0xa3 */, 0 /* 0xa4 */, 0 /* 0xa5 */, 0 /* 0xa6 */, 0 /* 0xa7 */, 0 /* 0xa8 */, 0 /* 0xa9 */, 0 /* 0xaa */, 0 /* 0xab */, 0 /* 0xac */, 0 /* 0xad */, 0 /* 0xae */, 0 /* 0xaf */, 0 /* 0xb0 */, 0 /* 0xb1 */, 0 /* 0xb2 */, 0 /* 0xb3 */, 0 /* 0xb4 */, 0 /* 0xb5 */, 0 /* 0xb6 */, 0 /* 0xb7 */, 0 /* 0xb8 */, 0 /* 0xb9 */, 0 /* 0xba */, 0 /* 0xbb */, 0 /* 0xbc */, 0 /* 0xbd */, 0 /* 0xbe */, 0 /* 0xbf */, 0 /* 0xc0 */, 0 /* 0xc1 */, 0 /* 0xc2 */, 0 /* 0xc3 */, 0 /* 0xc4 */, 0 /* 0xc5 */, 0 /* 0xc6 */, 0 /* 0xc7 */, 0 /* 0xc8 */, 0 /* 0xc9 */, 0 /* 0xca */, 0 /* 0xcb */, 0 /* 0xcc */, 0 /* 0xcd */, 0 /* 0xce */, 0 /* 0xcf */, 0 /* 0xd0 */, 0 /* 0xd1 */, 0 /* 0xd2 */, 0 /* 0xd3 */, 0 /* 0xd4 */, 0 /* 0xd5 */, 0 /* 0xd6 */, 0 /* 0xd7 */, 0 /* 0xd8 */, 0 /* 0xd9 */, 0 /* 0xda */, 0 /* 0xdb */, 0 /* 0xdc */, 0 /* 0xdd */, 0 /* 0xde */, 0 /* 0xdf */, 0 /* 0xe0 */, 0 /* 0xe1 */, 0 /* 0xe2 */, 0 /* 0xe3 */, 0 /* 0xe4 */, 0 /* 0xe5 */, 0 /* 0xe6 */, 0 /* 0xe7 */, 0 /* 0xe8 */, 0 /* 0xe9 */, 0 /* 0xea */, 0 /* 0xeb */, 0 /* 0xec */, 0 /* 0xed */, 0 /* 0xee */, 0 /* 0xef */, 0 /* 0xf0 */, 0 /* 0xf1 */, 0 /* 0xf2 */, 0 /* 0xf3 */, 0 /* 0xf4 */, 0 /* 0xf5 */, 0 /* 0xf6 */, 0 /* 0xf7 */, 0 /* 0xf8 */, 0 /* 0xf9 */, 0 /* 0xfa */, 0 /* 0xfb */, 0 /* 0xfc */, 0 /* 0xfd */, 0 /* 0xfe */, 0 /* 0xff */ }; int nghttp2_check_header_name(const uint8_t *name, size_t len) { const uint8_t *last; if (len == 0) { return 0; } if (*name == ':') { if (len == 1) { return 0; } ++name; --len; } for (last = name + len; name != last; ++name) { if (!VALID_HD_NAME_CHARS[*name]) { return 0; } } return 1; } /* Generated by genvchartbl.py */ static const int VALID_HD_VALUE_CHARS[] = { 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 0 /* EOT */, 0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 1 /* HT */, 0 /* LF */, 0 /* VT */, 0 /* FF 
*/, 0 /* CR */, 0 /* SO */, 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* DC2 */, 0 /* DC3 */, 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, 0 /* RS */, 0 /* US */, 1 /* SPC */, 1 /* ! */, 1 /* " */, 1 /* # */, 1 /* $ */, 1 /* % */, 1 /* & */, 1 /* ' */, 1 /* ( */, 1 /* ) */, 1 /* * */, 1 /* + */, 1 /* , */, 1 /* - */, 1 /* . */, 1 /* / */, 1 /* 0 */, 1 /* 1 */, 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 1 /* : */, 1 /* ; */, 1 /* < */, 1 /* = */, 1 /* > */, 1 /* ? */, 1 /* @ */, 1 /* A */, 1 /* B */, 1 /* C */, 1 /* D */, 1 /* E */, 1 /* F */, 1 /* G */, 1 /* H */, 1 /* I */, 1 /* J */, 1 /* K */, 1 /* L */, 1 /* M */, 1 /* N */, 1 /* O */, 1 /* P */, 1 /* Q */, 1 /* R */, 1 /* S */, 1 /* T */, 1 /* U */, 1 /* V */, 1 /* W */, 1 /* X */, 1 /* Y */, 1 /* Z */, 1 /* [ */, 1 /* \ */, 1 /* ] */, 1 /* ^ */, 1 /* _ */, 1 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, 1 /* x */, 1 /* y */, 1 /* z */, 1 /* { */, 1 /* | */, 1 /* } */, 1 /* ~ */, 0 /* DEL */, 1 /* 0x80 */, 1 /* 0x81 */, 1 /* 0x82 */, 1 /* 0x83 */, 1 /* 0x84 */, 1 /* 0x85 */, 1 /* 0x86 */, 1 /* 0x87 */, 1 /* 0x88 */, 1 /* 0x89 */, 1 /* 0x8a */, 1 /* 0x8b */, 1 /* 0x8c */, 1 /* 0x8d */, 1 /* 0x8e */, 1 /* 0x8f */, 1 /* 0x90 */, 1 /* 0x91 */, 1 /* 0x92 */, 1 /* 0x93 */, 1 /* 0x94 */, 1 /* 0x95 */, 1 /* 0x96 */, 1 /* 0x97 */, 1 /* 0x98 */, 1 /* 0x99 */, 1 /* 0x9a */, 1 /* 0x9b */, 1 /* 0x9c */, 1 /* 0x9d */, 1 /* 0x9e */, 1 /* 0x9f */, 1 /* 0xa0 */, 1 /* 0xa1 */, 1 /* 0xa2 */, 1 /* 0xa3 */, 1 /* 0xa4 */, 1 /* 0xa5 */, 1 /* 0xa6 */, 1 /* 0xa7 */, 1 /* 0xa8 */, 1 /* 0xa9 */, 1 /* 0xaa */, 1 /* 0xab */, 1 /* 0xac */, 1 /* 0xad */, 1 /* 0xae */, 1 /* 0xaf */, 1 /* 0xb0 */, 1 /* 0xb1 */, 1 /* 0xb2 */, 1 /* 0xb3 */, 1 /* 0xb4 */, 1 /* 0xb5 */, 1 /* 0xb6 */, 1 /* 0xb7 */, 1 /* 0xb8 */, 1 /* 0xb9 */, 1 /* 0xba */, 1 /* 0xbb */, 1 /* 0xbc */, 1 /* 0xbd */, 1 /* 0xbe */, 1 /* 0xbf */, 1 /* 0xc0 */, 1 /* 0xc1 */, 1 /* 0xc2 */, 1 /* 0xc3 */, 1 /* 0xc4 */, 1 /* 0xc5 */, 1 /* 0xc6 */, 1 /* 0xc7 */, 1 /* 0xc8 */, 1 /* 0xc9 */, 1 /* 0xca */, 1 /* 0xcb */, 1 /* 0xcc */, 1 /* 0xcd */, 1 /* 0xce */, 1 /* 0xcf */, 1 /* 0xd0 */, 1 /* 0xd1 */, 1 /* 0xd2 */, 1 /* 0xd3 */, 1 /* 0xd4 */, 1 /* 0xd5 */, 1 /* 0xd6 */, 1 /* 0xd7 */, 1 /* 0xd8 */, 1 /* 0xd9 */, 1 /* 0xda */, 1 /* 0xdb */, 1 /* 0xdc */, 1 /* 0xdd */, 1 /* 0xde */, 1 /* 0xdf */, 1 /* 0xe0 */, 1 /* 0xe1 */, 1 /* 0xe2 */, 1 /* 0xe3 */, 1 /* 0xe4 */, 1 /* 0xe5 */, 1 /* 0xe6 */, 1 /* 0xe7 */, 1 /* 0xe8 */, 1 /* 0xe9 */, 1 /* 0xea */, 1 /* 0xeb */, 1 /* 0xec */, 1 /* 0xed */, 1 /* 0xee */, 1 /* 0xef */, 1 /* 0xf0 */, 1 /* 0xf1 */, 1 /* 0xf2 */, 1 /* 0xf3 */, 1 /* 0xf4 */, 1 /* 0xf5 */, 1 /* 0xf6 */, 1 /* 0xf7 */, 1 /* 0xf8 */, 1 /* 0xf9 */, 1 /* 0xfa */, 1 /* 0xfb */, 1 /* 0xfc */, 1 /* 0xfd */, 1 /* 0xfe */, 1 /* 0xff */ }; int nghttp2_check_header_value(const uint8_t *value, size_t len) { const uint8_t *last; for (last = value + len; value != last; ++value) { if (!VALID_HD_VALUE_CHARS[*value]) { return 0; } } return 1; } /* Generated by genauthroitychartbl.py */ static char VALID_AUTHORITY_CHARS[] = { 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 0 /* EOT */, 0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 0 /* HT */, 0 /* LF */, 0 /* VT */, 0 /* FF */, 0 /* CR 
*/, 0 /* SO */, 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* DC2 */, 0 /* DC3 */, 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, 0 /* RS */, 0 /* US */, 0 /* SPC */, 1 /* ! */, 0 /* " */, 0 /* # */, 1 /* $ */, 1 /* % */, 1 /* & */, 1 /* ' */, 1 /* ( */, 1 /* ) */, 1 /* * */, 1 /* + */, 1 /* , */, 1 /* - */, 1 /* . */, 0 /* / */, 1 /* 0 */, 1 /* 1 */, 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 1 /* : */, 1 /* ; */, 0 /* < */, 1 /* = */, 0 /* > */, 0 /* ? */, 1 /* @ */, 1 /* A */, 1 /* B */, 1 /* C */, 1 /* D */, 1 /* E */, 1 /* F */, 1 /* G */, 1 /* H */, 1 /* I */, 1 /* J */, 1 /* K */, 1 /* L */, 1 /* M */, 1 /* N */, 1 /* O */, 1 /* P */, 1 /* Q */, 1 /* R */, 1 /* S */, 1 /* T */, 1 /* U */, 1 /* V */, 1 /* W */, 1 /* X */, 1 /* Y */, 1 /* Z */, 1 /* [ */, 0 /* \ */, 1 /* ] */, 0 /* ^ */, 1 /* _ */, 0 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, 1 /* x */, 1 /* y */, 1 /* z */, 0 /* { */, 0 /* | */, 0 /* } */, 1 /* ~ */, 0 /* DEL */, 0 /* 0x80 */, 0 /* 0x81 */, 0 /* 0x82 */, 0 /* 0x83 */, 0 /* 0x84 */, 0 /* 0x85 */, 0 /* 0x86 */, 0 /* 0x87 */, 0 /* 0x88 */, 0 /* 0x89 */, 0 /* 0x8a */, 0 /* 0x8b */, 0 /* 0x8c */, 0 /* 0x8d */, 0 /* 0x8e */, 0 /* 0x8f */, 0 /* 0x90 */, 0 /* 0x91 */, 0 /* 0x92 */, 0 /* 0x93 */, 0 /* 0x94 */, 0 /* 0x95 */, 0 /* 0x96 */, 0 /* 0x97 */, 0 /* 0x98 */, 0 /* 0x99 */, 0 /* 0x9a */, 0 /* 0x9b */, 0 /* 0x9c */, 0 /* 0x9d */, 0 /* 0x9e */, 0 /* 0x9f */, 0 /* 0xa0 */, 0 /* 0xa1 */, 0 /* 0xa2 */, 0 /* 0xa3 */, 0 /* 0xa4 */, 0 /* 0xa5 */, 0 /* 0xa6 */, 0 /* 0xa7 */, 0 /* 0xa8 */, 0 /* 0xa9 */, 0 /* 0xaa */, 0 /* 0xab */, 0 /* 0xac */, 0 /* 0xad */, 0 /* 0xae */, 0 /* 0xaf */, 0 /* 0xb0 */, 0 /* 0xb1 */, 0 /* 0xb2 */, 0 /* 0xb3 */, 0 /* 0xb4 */, 0 /* 0xb5 */, 0 /* 0xb6 */, 0 /* 0xb7 */, 0 /* 0xb8 */, 0 /* 0xb9 */, 0 /* 0xba */, 0 /* 0xbb */, 0 /* 0xbc */, 0 /* 0xbd */, 0 /* 0xbe */, 0 /* 0xbf */, 0 /* 0xc0 */, 0 /* 0xc1 */, 0 /* 0xc2 */, 0 /* 0xc3 */, 0 /* 0xc4 */, 0 /* 0xc5 */, 0 /* 0xc6 */, 0 /* 0xc7 */, 0 /* 0xc8 */, 0 /* 0xc9 */, 0 /* 0xca */, 0 /* 0xcb */, 0 /* 0xcc */, 0 /* 0xcd */, 0 /* 0xce */, 0 /* 0xcf */, 0 /* 0xd0 */, 0 /* 0xd1 */, 0 /* 0xd2 */, 0 /* 0xd3 */, 0 /* 0xd4 */, 0 /* 0xd5 */, 0 /* 0xd6 */, 0 /* 0xd7 */, 0 /* 0xd8 */, 0 /* 0xd9 */, 0 /* 0xda */, 0 /* 0xdb */, 0 /* 0xdc */, 0 /* 0xdd */, 0 /* 0xde */, 0 /* 0xdf */, 0 /* 0xe0 */, 0 /* 0xe1 */, 0 /* 0xe2 */, 0 /* 0xe3 */, 0 /* 0xe4 */, 0 /* 0xe5 */, 0 /* 0xe6 */, 0 /* 0xe7 */, 0 /* 0xe8 */, 0 /* 0xe9 */, 0 /* 0xea */, 0 /* 0xeb */, 0 /* 0xec */, 0 /* 0xed */, 0 /* 0xee */, 0 /* 0xef */, 0 /* 0xf0 */, 0 /* 0xf1 */, 0 /* 0xf2 */, 0 /* 0xf3 */, 0 /* 0xf4 */, 0 /* 0xf5 */, 0 /* 0xf6 */, 0 /* 0xf7 */, 0 /* 0xf8 */, 0 /* 0xf9 */, 0 /* 0xfa */, 0 /* 0xfb */, 0 /* 0xfc */, 0 /* 0xfd */, 0 /* 0xfe */, 0 /* 0xff */ }; int nghttp2_check_authority(const uint8_t *value, size_t len) { const uint8_t *last; for (last = value + len; value != last; ++value) { if (!VALID_AUTHORITY_CHARS[*value]) { return 0; } } return 1; } uint8_t *nghttp2_cpymem(uint8_t *dest, const void *src, size_t len) { if (len == 0) { return dest; } memcpy(dest, src, len); return dest + len; } const char *nghttp2_http2_strerror(uint32_t error_code) { switch (error_code) { case NGHTTP2_NO_ERROR: return "NO_ERROR"; 
case NGHTTP2_PROTOCOL_ERROR: return "PROTOCOL_ERROR"; case NGHTTP2_INTERNAL_ERROR: return "INTERNAL_ERROR"; case NGHTTP2_FLOW_CONTROL_ERROR: return "FLOW_CONTROL_ERROR"; case NGHTTP2_SETTINGS_TIMEOUT: return "SETTINGS_TIMEOUT"; case NGHTTP2_STREAM_CLOSED: return "STREAM_CLOSED"; case NGHTTP2_FRAME_SIZE_ERROR: return "FRAME_SIZE_ERROR"; case NGHTTP2_REFUSED_STREAM: return "REFUSED_STREAM"; case NGHTTP2_CANCEL: return "CANCEL"; case NGHTTP2_COMPRESSION_ERROR: return "COMPRESSION_ERROR"; case NGHTTP2_CONNECT_ERROR: return "CONNECT_ERROR"; case NGHTTP2_ENHANCE_YOUR_CALM: return "ENHANCE_YOUR_CALM"; case NGHTTP2_INADEQUATE_SECURITY: return "INADEQUATE_SECURITY"; case NGHTTP2_HTTP_1_1_REQUIRED: return "HTTP_1_1_REQUIRED"; default: return "unknown"; } }
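/*
 * Illustrative usage sketch -- not part of the library.  The function
 * name and the sample inputs are hypothetical, and the block is
 * compiled out with #if 0 so it cannot affect a build.  It exercises
 * the two validators and the error-code stringifier defined above.
 */
#if 0
#include <stdio.h>

static void helper_usage_example(void) {
  static const uint8_t value[] = "gzip, deflate";       /* all bytes valid */
  static const uint8_t authority[] = "example.org:443"; /* all bytes valid */

  /* Both checks walk the lookup tables above: the result is 1 only if
     every byte of the input is marked acceptable. */
  printf("value ok: %d\n",
         nghttp2_check_header_value(value, sizeof(value) - 1));
  printf("authority ok: %d\n",
         nghttp2_check_authority(authority, sizeof(authority) - 1));

  /* Codes outside the switch in nghttp2_http2_strerror() map to
     "unknown". */
  printf("0x1 -> %s\n", nghttp2_http2_strerror(NGHTTP2_PROTOCOL_ERROR));
}
#endif /* usage sketch */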
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2012 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "nghttp2_session.h" #include <string.h> #include <stddef.h> #include <stdio.h> #include <assert.h> #include <stdarg.h> #include "nghttp2_helper.h" #include "nghttp2_net.h" #include "nghttp2_priority_spec.h" #include "nghttp2_option.h" #include "nghttp2_http.h" #include "nghttp2_pq.h" #include "nghttp2_debug.h" /* * Returns non-zero if the number of outgoing opened streams is larger * than or equal to * remote_settings.max_concurrent_streams. */ static int session_is_outgoing_concurrent_streams_max(nghttp2_session *session) { return session->remote_settings.max_concurrent_streams <= session->num_outgoing_streams; } /* * Returns non-zero if the number of incoming opened streams is larger * than or equal to * local_settings.max_concurrent_streams. */ static int session_is_incoming_concurrent_streams_max(nghttp2_session *session) { return session->local_settings.max_concurrent_streams <= session->num_incoming_streams; } /* * Returns non-zero if the number of incoming opened streams is larger * than or equal to * session->pending_local_max_concurrent_stream. */ static int session_is_incoming_concurrent_streams_pending_max(nghttp2_session *session) { return session->pending_local_max_concurrent_stream <= session->num_incoming_streams; } /* * Returns non-zero if |lib_error| is non-fatal error. */ static int is_non_fatal(int lib_error_code) { return lib_error_code < 0 && lib_error_code > NGHTTP2_ERR_FATAL; } int nghttp2_is_fatal(int lib_error_code) { return lib_error_code < NGHTTP2_ERR_FATAL; } static int session_enforce_http_messaging(nghttp2_session *session) { return (session->opt_flags & NGHTTP2_OPTMASK_NO_HTTP_MESSAGING) == 0; } /* * Returns nonzero if |frame| is trailer headers. 
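 *
 * On the server side, any HEADERS categorized NGHTTP2_HCAT_HEADERS on
 * an existing stream is a trailer block.  On the client side it counts
 * as a trailer only after the final (non-1xx) response header block
 * has arrived, i.e. once NGHTTP2_HTTP_FLAG_EXPECT_FINAL_RESPONSE is
 * cleared.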
*/ static int session_trailer_headers(nghttp2_session *session, nghttp2_stream *stream, nghttp2_frame *frame) { if (!stream || frame->hd.type != NGHTTP2_HEADERS) { return 0; } if (session->server) { return frame->headers.cat == NGHTTP2_HCAT_HEADERS; } return frame->headers.cat == NGHTTP2_HCAT_HEADERS && (stream->http_flags & NGHTTP2_HTTP_FLAG_EXPECT_FINAL_RESPONSE) == 0; } /* Returns nonzero if the |stream| is in reserved(remote) state */ static int state_reserved_remote(nghttp2_session *session, nghttp2_stream *stream) { return stream->state == NGHTTP2_STREAM_RESERVED && !nghttp2_session_is_my_stream_id(session, stream->stream_id); } /* Returns nonzero if the |stream| is in reserved(local) state */ static int state_reserved_local(nghttp2_session *session, nghttp2_stream *stream) { return stream->state == NGHTTP2_STREAM_RESERVED && nghttp2_session_is_my_stream_id(session, stream->stream_id); } /* * Checks whether received stream_id is valid. This function returns * 1 if it succeeds, or 0. */ static int session_is_new_peer_stream_id(nghttp2_session *session, int32_t stream_id) { return stream_id != 0 && !nghttp2_session_is_my_stream_id(session, stream_id) && session->last_recv_stream_id < stream_id; } static int session_detect_idle_stream(nghttp2_session *session, int32_t stream_id) { /* Assume that stream object with stream_id does not exist */ if (nghttp2_session_is_my_stream_id(session, stream_id)) { if (session->last_sent_stream_id < stream_id) { return 1; } return 0; } if (session_is_new_peer_stream_id(session, stream_id)) { return 1; } return 0; } static int check_ext_type_set(const uint8_t *ext_types, uint8_t type) { return (ext_types[type / 8] & (1 << (type & 0x7))) > 0; } static int session_call_error_callback(nghttp2_session *session, int lib_error_code, const char *fmt, ...) { size_t bufsize; va_list ap; char *buf; int rv; nghttp2_mem *mem; if (!session->callbacks.error_callback && !session->callbacks.error_callback2) { return 0; } mem = &session->mem; va_start(ap, fmt); rv = vsnprintf(NULL, 0, fmt, ap); va_end(ap); if (rv < 0) { return NGHTTP2_ERR_NOMEM; } bufsize = (size_t)(rv + 1); buf = nghttp2_mem_malloc(mem, bufsize); if (buf == NULL) { return NGHTTP2_ERR_NOMEM; } va_start(ap, fmt); rv = vsnprintf(buf, bufsize, fmt, ap); va_end(ap); if (rv < 0) { nghttp2_mem_free(mem, buf); /* vsnprintf may return error because of various things we can imagine, but typically we don't want to drop session just for debug callback. */ DEBUGF("error_callback: vsnprintf failed. The template was %s\n", fmt); return 0; } if (session->callbacks.error_callback2) { rv = session->callbacks.error_callback2(session, lib_error_code, buf, (size_t)rv, session->user_data); } else { rv = session->callbacks.error_callback(session, buf, (size_t)rv, session->user_data); } nghttp2_mem_free(mem, buf); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_terminate_session(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code, const char *reason) { int rv; const uint8_t *debug_data; size_t debug_datalen; if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) { return 0; } /* Ignore all incoming frames because we are going to tear down the session. 
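     From this point the inbound parser stays in NGHTTP2_IB_IGN_ALL and
     discards everything it reads; the outbound side still flushes its
     queue, notably the termination GOAWAY added below.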
*/ session->iframe.state = NGHTTP2_IB_IGN_ALL; if (reason == NULL) { debug_data = NULL; debug_datalen = 0; } else { debug_data = (const uint8_t *)reason; debug_datalen = strlen(reason); } rv = nghttp2_session_add_goaway(session, last_stream_id, error_code, debug_data, debug_datalen, NGHTTP2_GOAWAY_AUX_TERM_ON_SEND); if (rv != 0) { return rv; } session->goaway_flags |= NGHTTP2_GOAWAY_TERM_ON_SEND; return 0; } int nghttp2_session_terminate_session(nghttp2_session *session, uint32_t error_code) { return session_terminate_session(session, session->last_proc_stream_id, error_code, NULL); } int nghttp2_session_terminate_session2(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code) { return session_terminate_session(session, last_stream_id, error_code, NULL); } int nghttp2_session_terminate_session_with_reason(nghttp2_session *session, uint32_t error_code, const char *reason) { return session_terminate_session(session, session->last_proc_stream_id, error_code, reason); } int nghttp2_session_is_my_stream_id(nghttp2_session *session, int32_t stream_id) { int rem; if (stream_id == 0) { return 0; } rem = stream_id & 0x1; if (session->server) { return rem == 0; } return rem == 1; } nghttp2_stream *nghttp2_session_get_stream(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = (nghttp2_stream *)nghttp2_map_find(&session->streams, stream_id); if (stream == NULL || (stream->flags & NGHTTP2_STREAM_FLAG_CLOSED) || stream->state == NGHTTP2_STREAM_IDLE) { return NULL; } return stream; } nghttp2_stream *nghttp2_session_get_stream_raw(nghttp2_session *session, int32_t stream_id) { return (nghttp2_stream *)nghttp2_map_find(&session->streams, stream_id); } static void session_inbound_frame_reset(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_mem *mem = &session->mem; /* A bit risky code, since if this function is called from nghttp2_session_new(), we rely on the fact that iframe->frame.hd.type is 0, so that no free is performed. 
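     (Type 0 is NGHTTP2_DATA, whose case in the switch below is an
     empty break, so a zero-initialized frame header is indeed safe.)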
*/ switch (iframe->frame.hd.type) { case NGHTTP2_DATA: break; case NGHTTP2_HEADERS: nghttp2_frame_headers_free(&iframe->frame.headers, mem); break; case NGHTTP2_PRIORITY: nghttp2_frame_priority_free(&iframe->frame.priority); break; case NGHTTP2_RST_STREAM: nghttp2_frame_rst_stream_free(&iframe->frame.rst_stream); break; case NGHTTP2_SETTINGS: nghttp2_frame_settings_free(&iframe->frame.settings, mem); nghttp2_mem_free(mem, iframe->iv); iframe->iv = NULL; iframe->niv = 0; iframe->max_niv = 0; break; case NGHTTP2_PUSH_PROMISE: nghttp2_frame_push_promise_free(&iframe->frame.push_promise, mem); break; case NGHTTP2_PING: nghttp2_frame_ping_free(&iframe->frame.ping); break; case NGHTTP2_GOAWAY: nghttp2_frame_goaway_free(&iframe->frame.goaway, mem); break; case NGHTTP2_WINDOW_UPDATE: nghttp2_frame_window_update_free(&iframe->frame.window_update); break; default: /* extension frame */ if (check_ext_type_set(session->user_recv_ext_types, iframe->frame.hd.type)) { nghttp2_frame_extension_free(&iframe->frame.ext); } else { switch (iframe->frame.hd.type) { case NGHTTP2_ALTSVC: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ALTSVC) == 0) { break; } nghttp2_frame_altsvc_free(&iframe->frame.ext, mem); break; case NGHTTP2_ORIGIN: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ORIGIN) == 0) { break; } nghttp2_frame_origin_free(&iframe->frame.ext, mem); break; } } break; } memset(&iframe->frame, 0, sizeof(nghttp2_frame)); memset(&iframe->ext_frame_payload, 0, sizeof(nghttp2_ext_frame_payload)); iframe->state = NGHTTP2_IB_READ_HEAD; nghttp2_buf_wrap_init(&iframe->sbuf, iframe->raw_sbuf, sizeof(iframe->raw_sbuf)); iframe->sbuf.mark += NGHTTP2_FRAME_HDLEN; nghttp2_buf_free(&iframe->lbuf, mem); nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); iframe->raw_lbuf = NULL; iframe->payloadleft = 0; iframe->padlen = 0; } static void init_settings(nghttp2_settings_storage *settings) { settings->header_table_size = NGHTTP2_HD_DEFAULT_MAX_BUFFER_SIZE; settings->enable_push = 1; settings->max_concurrent_streams = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; settings->initial_window_size = NGHTTP2_INITIAL_WINDOW_SIZE; settings->max_frame_size = NGHTTP2_MAX_FRAME_SIZE_MIN; settings->max_header_list_size = UINT32_MAX; } static void active_outbound_item_reset(nghttp2_active_outbound_item *aob, nghttp2_mem *mem) { DEBUGF("send: reset nghttp2_active_outbound_item\n"); DEBUGF("send: aob->item = %p\n", aob->item); nghttp2_outbound_item_free(aob->item, mem); nghttp2_mem_free(mem, aob->item); aob->item = NULL; nghttp2_bufs_reset(&aob->framebufs); aob->state = NGHTTP2_OB_POP_ITEM; } int nghttp2_enable_strict_preface = 1; static int session_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, int server, const nghttp2_option *option, nghttp2_mem *mem) { int rv; size_t nbuffer; size_t max_deflate_dynamic_table_size = NGHTTP2_HD_DEFAULT_MAX_DEFLATE_BUFFER_SIZE; if (mem == NULL) { mem = nghttp2_mem_default(); } *session_ptr = nghttp2_mem_calloc(mem, 1, sizeof(nghttp2_session)); if (*session_ptr == NULL) { rv = NGHTTP2_ERR_NOMEM; goto fail_session; } (*session_ptr)->mem = *mem; mem = &(*session_ptr)->mem; /* next_stream_id is initialized in either nghttp2_session_client_new2 or nghttp2_session_server_new2 */ nghttp2_stream_init(&(*session_ptr)->root, 0, NGHTTP2_STREAM_FLAG_NONE, NGHTTP2_STREAM_IDLE, NGHTTP2_DEFAULT_WEIGHT, 0, 0, NULL, mem); (*session_ptr)->remote_window_size = NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE; (*session_ptr)->recv_window_size = 0; (*session_ptr)->consumed_size = 
0; (*session_ptr)->recv_reduction = 0; (*session_ptr)->local_window_size = NGHTTP2_INITIAL_CONNECTION_WINDOW_SIZE; (*session_ptr)->goaway_flags = NGHTTP2_GOAWAY_NONE; (*session_ptr)->local_last_stream_id = (1u << 31) - 1; (*session_ptr)->remote_last_stream_id = (1u << 31) - 1; (*session_ptr)->pending_local_max_concurrent_stream = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; (*session_ptr)->pending_enable_push = 1; if (server) { (*session_ptr)->server = 1; } init_settings(&(*session_ptr)->remote_settings); init_settings(&(*session_ptr)->local_settings); (*session_ptr)->max_incoming_reserved_streams = NGHTTP2_MAX_INCOMING_RESERVED_STREAMS; /* Limit max outgoing concurrent streams to sensible value */ (*session_ptr)->remote_settings.max_concurrent_streams = 100; (*session_ptr)->max_send_header_block_length = NGHTTP2_MAX_HEADERSLEN; (*session_ptr)->max_outbound_ack = NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM; (*session_ptr)->max_settings = NGHTTP2_DEFAULT_MAX_SETTINGS; if (option) { if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_WINDOW_UPDATE) && option->no_auto_window_update) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE; } if (option->opt_set_mask & NGHTTP2_OPT_PEER_MAX_CONCURRENT_STREAMS) { (*session_ptr)->remote_settings.max_concurrent_streams = option->peer_max_concurrent_streams; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_RESERVED_REMOTE_STREAMS) { (*session_ptr)->max_incoming_reserved_streams = option->max_reserved_remote_streams; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_RECV_CLIENT_MAGIC) && option->no_recv_client_magic) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_HTTP_MESSAGING) && option->no_http_messaging) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_HTTP_MESSAGING; } if (option->opt_set_mask & NGHTTP2_OPT_USER_RECV_EXT_TYPES) { memcpy((*session_ptr)->user_recv_ext_types, option->user_recv_ext_types, sizeof((*session_ptr)->user_recv_ext_types)); } if (option->opt_set_mask & NGHTTP2_OPT_BUILTIN_RECV_EXT_TYPES) { (*session_ptr)->builtin_recv_ext_types = option->builtin_recv_ext_types; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_PING_ACK) && option->no_auto_ping_ack) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_AUTO_PING_ACK; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_SEND_HEADER_BLOCK_LENGTH) { (*session_ptr)->max_send_header_block_length = option->max_send_header_block_length; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_DEFLATE_DYNAMIC_TABLE_SIZE) { max_deflate_dynamic_table_size = option->max_deflate_dynamic_table_size; } if ((option->opt_set_mask & NGHTTP2_OPT_NO_CLOSED_STREAMS) && option->no_closed_streams) { (*session_ptr)->opt_flags |= NGHTTP2_OPTMASK_NO_CLOSED_STREAMS; } if (option->opt_set_mask & NGHTTP2_OPT_MAX_OUTBOUND_ACK) { (*session_ptr)->max_outbound_ack = option->max_outbound_ack; } if ((option->opt_set_mask & NGHTTP2_OPT_MAX_SETTINGS) && option->max_settings) { (*session_ptr)->max_settings = option->max_settings; } } rv = nghttp2_hd_deflate_init2(&(*session_ptr)->hd_deflater, max_deflate_dynamic_table_size, mem); if (rv != 0) { goto fail_hd_deflater; } rv = nghttp2_hd_inflate_init(&(*session_ptr)->hd_inflater, mem); if (rv != 0) { goto fail_hd_inflater; } rv = nghttp2_map_init(&(*session_ptr)->streams, mem); if (rv != 0) { goto fail_map; } nbuffer = ((*session_ptr)->max_send_header_block_length + NGHTTP2_FRAMEBUF_CHUNKLEN - 1) / NGHTTP2_FRAMEBUF_CHUNKLEN; if (nbuffer == 0) { nbuffer = 1; } /* 1 for Pad Field. 
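     The buffer offset passed below, NGHTTP2_FRAME_HDLEN + 1, reserves
     headroom for the 9-byte frame header plus this one extra byte, so
     the optional Pad Length field can be prepended in place without
     moving the payload.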
*/ rv = nghttp2_bufs_init3(&(*session_ptr)->aob.framebufs, NGHTTP2_FRAMEBUF_CHUNKLEN, nbuffer, 1, NGHTTP2_FRAME_HDLEN + 1, mem); if (rv != 0) { goto fail_aob_framebuf; } active_outbound_item_reset(&(*session_ptr)->aob, mem); (*session_ptr)->callbacks = *callbacks; (*session_ptr)->user_data = user_data; session_inbound_frame_reset(*session_ptr); if (nghttp2_enable_strict_preface) { nghttp2_inbound_frame *iframe = &(*session_ptr)->iframe; if (server && ((*session_ptr)->opt_flags & NGHTTP2_OPTMASK_NO_RECV_CLIENT_MAGIC) == 0) { iframe->state = NGHTTP2_IB_READ_CLIENT_MAGIC; iframe->payloadleft = NGHTTP2_CLIENT_MAGIC_LEN; } else { iframe->state = NGHTTP2_IB_READ_FIRST_SETTINGS; } if (!server) { (*session_ptr)->aob.state = NGHTTP2_OB_SEND_CLIENT_MAGIC; nghttp2_bufs_add(&(*session_ptr)->aob.framebufs, NGHTTP2_CLIENT_MAGIC, NGHTTP2_CLIENT_MAGIC_LEN); } } return 0; fail_aob_framebuf: nghttp2_map_free(&(*session_ptr)->streams); fail_map: nghttp2_hd_inflate_free(&(*session_ptr)->hd_inflater); fail_hd_inflater: nghttp2_hd_deflate_free(&(*session_ptr)->hd_deflater); fail_hd_deflater: nghttp2_mem_free(mem, *session_ptr); fail_session: return rv; } int nghttp2_session_client_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data) { return nghttp2_session_client_new3(session_ptr, callbacks, user_data, NULL, NULL); } int nghttp2_session_client_new2(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option) { return nghttp2_session_client_new3(session_ptr, callbacks, user_data, option, NULL); } int nghttp2_session_client_new3(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option, nghttp2_mem *mem) { int rv; nghttp2_session *session; rv = session_new(&session, callbacks, user_data, 0, option, mem); if (rv != 0) { return rv; } /* IDs for use in client */ session->next_stream_id = 1; *session_ptr = session; return 0; } int nghttp2_session_server_new(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data) { return nghttp2_session_server_new3(session_ptr, callbacks, user_data, NULL, NULL); } int nghttp2_session_server_new2(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option) { return nghttp2_session_server_new3(session_ptr, callbacks, user_data, option, NULL); } int nghttp2_session_server_new3(nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks, void *user_data, const nghttp2_option *option, nghttp2_mem *mem) { int rv; nghttp2_session *session; rv = session_new(&session, callbacks, user_data, 1, option, mem); if (rv != 0) { return rv; } /* IDs for use in client */ session->next_stream_id = 2; *session_ptr = session; return 0; } static int free_streams(nghttp2_map_entry *entry, void *ptr) { nghttp2_session *session; nghttp2_stream *stream; nghttp2_outbound_item *item; nghttp2_mem *mem; session = (nghttp2_session *)ptr; mem = &session->mem; stream = (nghttp2_stream *)entry; item = stream->item; if (item && !item->queued && item != session->aob.item) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); } nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return 0; } static void ob_q_free(nghttp2_outbound_queue *q, nghttp2_mem *mem) { nghttp2_outbound_item *item, *next; for (item = q->head; item;) { next = item->qnext; nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); item = next; } 
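  /* The queue's head/tail pointers are deliberately not reset here;
     this helper is used while tearing the session down (see
     nghttp2_session_del below), where the queue is never touched
     again. */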
} static int inflight_settings_new(nghttp2_inflight_settings **settings_ptr, const nghttp2_settings_entry *iv, size_t niv, nghttp2_mem *mem) { *settings_ptr = nghttp2_mem_malloc(mem, sizeof(nghttp2_inflight_settings)); if (!*settings_ptr) { return NGHTTP2_ERR_NOMEM; } if (niv > 0) { (*settings_ptr)->iv = nghttp2_frame_iv_copy(iv, niv, mem); if (!(*settings_ptr)->iv) { nghttp2_mem_free(mem, *settings_ptr); return NGHTTP2_ERR_NOMEM; } } else { (*settings_ptr)->iv = NULL; } (*settings_ptr)->niv = niv; (*settings_ptr)->next = NULL; return 0; } static void inflight_settings_del(nghttp2_inflight_settings *settings, nghttp2_mem *mem) { if (!settings) { return; } nghttp2_mem_free(mem, settings->iv); nghttp2_mem_free(mem, settings); } void nghttp2_session_del(nghttp2_session *session) { nghttp2_mem *mem; nghttp2_inflight_settings *settings; if (session == NULL) { return; } mem = &session->mem; for (settings = session->inflight_settings_head; settings;) { nghttp2_inflight_settings *next = settings->next; inflight_settings_del(settings, mem); settings = next; } nghttp2_stream_free(&session->root); /* Have to free streams first, so that we can check stream->item->queued */ nghttp2_map_each_free(&session->streams, free_streams, session); nghttp2_map_free(&session->streams); ob_q_free(&session->ob_urgent, mem); ob_q_free(&session->ob_reg, mem); ob_q_free(&session->ob_syn, mem); active_outbound_item_reset(&session->aob, mem); session_inbound_frame_reset(session); nghttp2_hd_deflate_free(&session->hd_deflater); nghttp2_hd_inflate_free(&session->hd_inflater); nghttp2_bufs_free(&session->aob.framebufs); nghttp2_mem_free(mem, session); } int nghttp2_session_reprioritize_stream( nghttp2_session *session, nghttp2_stream *stream, const nghttp2_priority_spec *pri_spec_in) { int rv; nghttp2_stream *dep_stream = NULL; nghttp2_priority_spec pri_spec_default; const nghttp2_priority_spec *pri_spec = pri_spec_in; assert(pri_spec->stream_id != stream->stream_id); if (!nghttp2_stream_in_dep_tree(stream)) { return 0; } if (pri_spec->stream_id != 0) { dep_stream = nghttp2_session_get_stream_raw(session, pri_spec->stream_id); if (!dep_stream && session_detect_idle_stream(session, pri_spec->stream_id)) { nghttp2_priority_spec_default_init(&pri_spec_default); dep_stream = nghttp2_session_open_stream( session, pri_spec->stream_id, NGHTTP2_FLAG_NONE, &pri_spec_default, NGHTTP2_STREAM_IDLE, NULL); if (dep_stream == NULL) { return NGHTTP2_ERR_NOMEM; } } else if (!dep_stream || !nghttp2_stream_in_dep_tree(dep_stream)) { nghttp2_priority_spec_default_init(&pri_spec_default); pri_spec = &pri_spec_default; } } if (pri_spec->stream_id == 0) { dep_stream = &session->root; } else if (nghttp2_stream_dep_find_ancestor(dep_stream, stream)) { DEBUGF("stream: cycle detected, dep_stream(%p)=%d stream(%p)=%d\n", dep_stream, dep_stream->stream_id, stream, stream->stream_id); nghttp2_stream_dep_remove_subtree(dep_stream); rv = nghttp2_stream_dep_add_subtree(stream->dep_prev, dep_stream); if (rv != 0) { return rv; } } assert(dep_stream); if (dep_stream == stream->dep_prev && !pri_spec->exclusive) { /* This is minor optimization when just weight is changed. 
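     When the dependency parent is unchanged and exclusivity is not
     requested, only the weight needs updating; detaching and
     re-inserting the subtree is skipped entirely.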
*/ nghttp2_stream_change_weight(stream, pri_spec->weight); return 0; } nghttp2_stream_dep_remove_subtree(stream); /* We have to update weight after removing stream from tree */ stream->weight = pri_spec->weight; if (pri_spec->exclusive) { rv = nghttp2_stream_dep_insert_subtree(dep_stream, stream); } else { rv = nghttp2_stream_dep_add_subtree(dep_stream, stream); } if (rv != 0) { return rv; } return 0; } int nghttp2_session_add_item(nghttp2_session *session, nghttp2_outbound_item *item) { /* TODO Return error if stream is not found for the frame requiring stream presence. */ int rv = 0; nghttp2_stream *stream; nghttp2_frame *frame; frame = &item->frame; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); switch (frame->hd.type) { case NGHTTP2_DATA: if (!stream) { return NGHTTP2_ERR_STREAM_CLOSED; } if (stream->item) { return NGHTTP2_ERR_DATA_EXIST; } rv = nghttp2_stream_attach_item(stream, item); if (rv != 0) { return rv; } return 0; case NGHTTP2_HEADERS: /* We push request HEADERS and push response HEADERS to dedicated queue because their transmission is affected by SETTINGS_MAX_CONCURRENT_STREAMS */ /* TODO If 2 HEADERS are submitted for reserved stream, then both of them are queued into ob_syn, which is not desirable. */ if (frame->headers.cat == NGHTTP2_HCAT_REQUEST || (stream && stream->state == NGHTTP2_STREAM_RESERVED)) { nghttp2_outbound_queue_push(&session->ob_syn, item); item->queued = 1; return 0; ; } nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; case NGHTTP2_SETTINGS: case NGHTTP2_PING: nghttp2_outbound_queue_push(&session->ob_urgent, item); item->queued = 1; return 0; case NGHTTP2_RST_STREAM: if (stream) { stream->state = NGHTTP2_STREAM_CLOSING; } nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; case NGHTTP2_PUSH_PROMISE: { nghttp2_headers_aux_data *aux_data; nghttp2_priority_spec pri_spec; aux_data = &item->aux_data.headers; if (!stream) { return NGHTTP2_ERR_STREAM_CLOSED; } nghttp2_priority_spec_init(&pri_spec, stream->stream_id, NGHTTP2_DEFAULT_WEIGHT, 0); if (!nghttp2_session_open_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_RESERVED, aux_data->stream_user_data)) { return NGHTTP2_ERR_NOMEM; } /* We don't have to call nghttp2_session_adjust_closed_stream() here, since stream->stream_id is local stream_id, and it does not affect closed stream count. */ nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; } case NGHTTP2_WINDOW_UPDATE: if (stream) { stream->window_update_queued = 1; } else if (frame->hd.stream_id == 0) { session->window_update_queued = 1; } nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; default: nghttp2_outbound_queue_push(&session->ob_reg, item); item->queued = 1; return 0; } } int nghttp2_session_add_rst_stream(nghttp2_session *session, int32_t stream_id, uint32_t error_code) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_stream *stream; nghttp2_mem *mem; mem = &session->mem; stream = nghttp2_session_get_stream(session, stream_id); if (stream && stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } /* Cancel pending request HEADERS in ob_syn if this RST_STREAM refers to that stream. 
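     A request whose HEADERS is still sitting in ob_syn has never been
     sent, so no RST_STREAM needs to go on the wire: the queued item is
     marked canceled and carries the error code, and its transmission
     fails locally once the item is popped.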
*/ if (!session->server && nghttp2_session_is_my_stream_id(session, stream_id) && nghttp2_outbound_queue_top(&session->ob_syn)) { nghttp2_headers_aux_data *aux_data; nghttp2_frame *headers_frame; headers_frame = &nghttp2_outbound_queue_top(&session->ob_syn)->frame; assert(headers_frame->hd.type == NGHTTP2_HEADERS); if (headers_frame->hd.stream_id <= stream_id && (uint32_t)stream_id < session->next_stream_id) { for (item = session->ob_syn.head; item; item = item->qnext) { aux_data = &item->aux_data.headers; if (item->frame.hd.stream_id < stream_id) { continue; } /* stream_id in ob_syn queue must be strictly increasing. If we found larger ID, then we can break here. */ if (item->frame.hd.stream_id > stream_id || aux_data->canceled) { break; } aux_data->error_code = error_code; aux_data->canceled = 1; return 0; } } } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_rst_stream_init(&frame->rst_stream, stream_id, error_code); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_rst_stream_free(&frame->rst_stream); nghttp2_mem_free(mem, item); return rv; } return 0; } nghttp2_stream *nghttp2_session_open_stream(nghttp2_session *session, int32_t stream_id, uint8_t flags, nghttp2_priority_spec *pri_spec_in, nghttp2_stream_state initial_state, void *stream_user_data) { int rv; nghttp2_stream *stream; nghttp2_stream *dep_stream = NULL; int stream_alloc = 0; nghttp2_priority_spec pri_spec_default; nghttp2_priority_spec *pri_spec = pri_spec_in; nghttp2_mem *mem; mem = &session->mem; stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream) { assert(stream->state == NGHTTP2_STREAM_IDLE); assert(nghttp2_stream_in_dep_tree(stream)); nghttp2_session_detach_idle_stream(session, stream); rv = nghttp2_stream_dep_remove(stream); if (rv != 0) { return NULL; } } else { stream = nghttp2_mem_malloc(mem, sizeof(nghttp2_stream)); if (stream == NULL) { return NULL; } stream_alloc = 1; } if (pri_spec->stream_id != 0) { dep_stream = nghttp2_session_get_stream_raw(session, pri_spec->stream_id); if (!dep_stream && session_detect_idle_stream(session, pri_spec->stream_id)) { /* Depends on idle stream, which does not exist in memory. Assign default priority for it. */ nghttp2_priority_spec_default_init(&pri_spec_default); dep_stream = nghttp2_session_open_stream( session, pri_spec->stream_id, NGHTTP2_FLAG_NONE, &pri_spec_default, NGHTTP2_STREAM_IDLE, NULL); if (dep_stream == NULL) { if (stream_alloc) { nghttp2_mem_free(mem, stream); } return NULL; } } else if (!dep_stream || !nghttp2_stream_in_dep_tree(dep_stream)) { /* If dep_stream is not part of dependency tree, stream will get default priority. This handles the case when pri_spec->stream_id == stream_id. This happens because we don't check pri_spec->stream_id against new stream ID in nghttp2_submit_request. This also handles the case when idle stream created by PRIORITY frame was opened. Somehow we first remove the idle stream from dependency tree. This is done to simplify code base, but ideally we should retain old dependency. But I'm not sure this adds values. 
*/ nghttp2_priority_spec_default_init(&pri_spec_default); pri_spec = &pri_spec_default; } } if (initial_state == NGHTTP2_STREAM_RESERVED) { flags |= NGHTTP2_STREAM_FLAG_PUSH; } if (stream_alloc) { nghttp2_stream_init(stream, stream_id, flags, initial_state, pri_spec->weight, (int32_t)session->remote_settings.initial_window_size, (int32_t)session->local_settings.initial_window_size, stream_user_data, mem); rv = nghttp2_map_insert(&session->streams, &stream->map_entry); if (rv != 0) { nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return NULL; } } else { stream->flags = flags; stream->state = initial_state; stream->weight = pri_spec->weight; stream->stream_user_data = stream_user_data; } switch (initial_state) { case NGHTTP2_STREAM_RESERVED: if (nghttp2_session_is_my_stream_id(session, stream_id)) { /* reserved (local) */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); } else { /* reserved (remote) */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); ++session->num_incoming_reserved_streams; } /* Reserved stream does not count in the concurrent streams limit. That is one of the DOS vector. */ break; case NGHTTP2_STREAM_IDLE: /* Idle stream does not count toward the concurrent streams limit. This is used as anchor node in dependency tree. */ nghttp2_session_keep_idle_stream(session, stream); break; default: if (nghttp2_session_is_my_stream_id(session, stream_id)) { ++session->num_outgoing_streams; } else { ++session->num_incoming_streams; } } if (pri_spec->stream_id == 0) { dep_stream = &session->root; } assert(dep_stream); if (pri_spec->exclusive) { rv = nghttp2_stream_dep_insert(dep_stream, stream); if (rv != 0) { return NULL; } } else { nghttp2_stream_dep_add(dep_stream, stream); } return stream; } int nghttp2_session_close_stream(nghttp2_session *session, int32_t stream_id, uint32_t error_code) { int rv; nghttp2_stream *stream; nghttp2_mem *mem; int is_my_stream_id; mem = &session->mem; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } DEBUGF("stream: stream(%p)=%d close\n", stream, stream->stream_id); if (stream->item) { nghttp2_outbound_item *item; item = stream->item; rv = nghttp2_stream_detach_item(stream); if (rv != 0) { return rv; } /* If item is queued, it will be deleted when it is popped (nghttp2_session_prep_frame() will fail). If session->aob.item points to this item, let active_outbound_item_reset() free the item. */ if (!item->queued && item != session->aob.item) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); } } /* We call on_stream_close_callback even if stream->state is NGHTTP2_STREAM_INITIAL. This will happen while sending request HEADERS, a local endpoint receives RST_STREAM for that stream. It may be PROTOCOL_ERROR, but without notifying stream closure will hang the stream in a local endpoint. 
*/ if (session->callbacks.on_stream_close_callback) { if (session->callbacks.on_stream_close_callback( session, stream_id, error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } is_my_stream_id = nghttp2_session_is_my_stream_id(session, stream_id); /* pushed streams which is not opened yet is not counted toward max concurrent limits */ if ((stream->flags & NGHTTP2_STREAM_FLAG_PUSH)) { if (!is_my_stream_id) { --session->num_incoming_reserved_streams; } } else { if (is_my_stream_id) { --session->num_outgoing_streams; } else { --session->num_incoming_streams; } } /* Closes both directions just in case they are not closed yet */ stream->flags |= NGHTTP2_STREAM_FLAG_CLOSED; if ((session->opt_flags & NGHTTP2_OPTMASK_NO_CLOSED_STREAMS) == 0 && session->server && !is_my_stream_id && nghttp2_stream_in_dep_tree(stream)) { /* On server side, retain stream at most MAX_CONCURRENT_STREAMS combined with the current active incoming streams to make dependency tree work better. */ nghttp2_session_keep_closed_stream(session, stream); } else { rv = nghttp2_session_destroy_stream(session, stream); if (rv != 0) { return rv; } } return 0; } int nghttp2_session_destroy_stream(nghttp2_session *session, nghttp2_stream *stream) { nghttp2_mem *mem; int rv; DEBUGF("stream: destroy closed stream(%p)=%d\n", stream, stream->stream_id); mem = &session->mem; if (nghttp2_stream_in_dep_tree(stream)) { rv = nghttp2_stream_dep_remove(stream); if (rv != 0) { return rv; } } nghttp2_map_remove(&session->streams, stream->stream_id); nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); return 0; } void nghttp2_session_keep_closed_stream(nghttp2_session *session, nghttp2_stream *stream) { DEBUGF("stream: keep closed stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); if (session->closed_stream_tail) { session->closed_stream_tail->closed_next = stream; stream->closed_prev = session->closed_stream_tail; } else { session->closed_stream_head = stream; } session->closed_stream_tail = stream; ++session->num_closed_streams; } void nghttp2_session_keep_idle_stream(nghttp2_session *session, nghttp2_stream *stream) { DEBUGF("stream: keep idle stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); if (session->idle_stream_tail) { session->idle_stream_tail->closed_next = stream; stream->closed_prev = session->idle_stream_tail; } else { session->idle_stream_head = stream; } session->idle_stream_tail = stream; ++session->num_idle_streams; } void nghttp2_session_detach_idle_stream(nghttp2_session *session, nghttp2_stream *stream) { nghttp2_stream *prev_stream, *next_stream; DEBUGF("stream: detach idle stream(%p)=%d, state=%d\n", stream, stream->stream_id, stream->state); prev_stream = stream->closed_prev; next_stream = stream->closed_next; if (prev_stream) { prev_stream->closed_next = next_stream; } else { session->idle_stream_head = next_stream; } if (next_stream) { next_stream->closed_prev = prev_stream; } else { session->idle_stream_tail = prev_stream; } stream->closed_prev = NULL; stream->closed_next = NULL; --session->num_idle_streams; } int nghttp2_session_adjust_closed_stream(nghttp2_session *session) { size_t num_stream_max; int rv; if (session->local_settings.max_concurrent_streams == NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS) { num_stream_max = session->pending_local_max_concurrent_stream; } else { num_stream_max = session->local_settings.max_concurrent_streams; } DEBUGF("stream: adjusting kept closed streams num_closed_streams=%zu, " "num_incoming_streams=%zu, 
max_concurrent_streams=%zu\n", session->num_closed_streams, session->num_incoming_streams, num_stream_max); while (session->num_closed_streams > 0 && session->num_closed_streams + session->num_incoming_streams > num_stream_max) { nghttp2_stream *head_stream; nghttp2_stream *next; head_stream = session->closed_stream_head; assert(head_stream); next = head_stream->closed_next; rv = nghttp2_session_destroy_stream(session, head_stream); if (rv != 0) { return rv; } /* head_stream is now freed */ session->closed_stream_head = next; if (session->closed_stream_head) { session->closed_stream_head->closed_prev = NULL; } else { session->closed_stream_tail = NULL; } --session->num_closed_streams; } return 0; } int nghttp2_session_adjust_idle_stream(nghttp2_session *session) { size_t max; int rv; /* Make minimum number of idle streams 16, and maximum 100, which are arbitrary chosen numbers. */ max = nghttp2_min( 100, nghttp2_max( 16, nghttp2_min(session->local_settings.max_concurrent_streams, session->pending_local_max_concurrent_stream))); DEBUGF("stream: adjusting kept idle streams num_idle_streams=%zu, max=%zu\n", session->num_idle_streams, max); while (session->num_idle_streams > max) { nghttp2_stream *head; nghttp2_stream *next; head = session->idle_stream_head; assert(head); next = head->closed_next; rv = nghttp2_session_destroy_stream(session, head); if (rv != 0) { return rv; } /* head is now destroyed */ session->idle_stream_head = next; if (session->idle_stream_head) { session->idle_stream_head->closed_prev = NULL; } else { session->idle_stream_tail = NULL; } --session->num_idle_streams; } return 0; } /* * Closes stream with stream ID |stream_id| if both transmission and * reception of the stream were disallowed. The |error_code| indicates * the reason of the closure. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_INVALID_ARGUMENT * The stream is not found. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. */ int nghttp2_session_close_stream_if_shut_rdwr(nghttp2_session *session, nghttp2_stream *stream) { if ((stream->shut_flags & NGHTTP2_SHUT_RDWR) == NGHTTP2_SHUT_RDWR) { return nghttp2_session_close_stream(session, stream->stream_id, NGHTTP2_NO_ERROR); } return 0; } /* * Returns nonzero if local endpoint allows reception of new stream * from remote. */ static int session_allow_incoming_new_stream(nghttp2_session *session) { return (session->goaway_flags & (NGHTTP2_GOAWAY_TERM_ON_SEND | NGHTTP2_GOAWAY_SENT)) == 0; } /* * This function returns nonzero if session is closing. */ static int session_is_closing(nghttp2_session *session) { return (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) != 0 || (nghttp2_session_want_read(session) == 0 && nghttp2_session_want_write(session) == 0); } /* * Check that we can send a frame to the |stream|. This function * returns 0 if we can send a frame to the |frame|, or one of the * following negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed. * NGHTTP2_ERR_STREAM_SHUT_WR * The stream is half-closed for transmission. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. 
*/ static int session_predicate_for_stream_send(nghttp2_session *session, nghttp2_stream *stream) { if (stream == NULL) { return NGHTTP2_ERR_STREAM_CLOSED; } if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } if (stream->shut_flags & NGHTTP2_SHUT_WR) { return NGHTTP2_ERR_STREAM_SHUT_WR; } return 0; } int nghttp2_session_check_request_allowed(nghttp2_session *session) { return !session->server && session->next_stream_id <= INT32_MAX && (session->goaway_flags & NGHTTP2_GOAWAY_RECV) == 0 && !session_is_closing(session); } /* * This function checks request HEADERS frame, which opens stream, can * be sent at this time. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because of GOAWAY: session is * going down or received last_stream_id is strictly less than * frame->hd.stream_id. * NGHTTP2_ERR_STREAM_CLOSING * request HEADERS was canceled by RST_STREAM while it is in queue. */ static int session_predicate_request_headers_send(nghttp2_session *session, nghttp2_outbound_item *item) { if (item->aux_data.headers.canceled) { return NGHTTP2_ERR_STREAM_CLOSING; } /* If we are terminating session (NGHTTP2_GOAWAY_TERM_ON_SEND), GOAWAY was received from peer, or session is about to close, new request is not allowed. */ if ((session->goaway_flags & NGHTTP2_GOAWAY_RECV) || session_is_closing(session)) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks HEADERS, which is the first frame from the * server, with the |stream| can be sent at this time. The |stream| * can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_INVALID_STREAM_ID * The stream ID is invalid. * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. * NGHTTP2_ERR_PROTO * Client side attempted to send response. */ static int session_predicate_response_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (!session->server) { return NGHTTP2_ERR_PROTO; } if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { return NGHTTP2_ERR_INVALID_STREAM_ID; } switch (stream->state) { case NGHTTP2_STREAM_OPENING: return 0; case NGHTTP2_STREAM_CLOSING: return NGHTTP2_ERR_STREAM_CLOSING; default: return NGHTTP2_ERR_INVALID_STREAM_STATE; } } /* * This function checks HEADERS for reserved stream can be sent. The * |stream| must be reserved state and the |session| is server side. * The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed. * NGHTTP2_ERR_STREAM_SHUT_WR * The stream is half-closed for transmission. * NGHTTP2_ERR_PROTO * The stream is not reserved state * NGHTTP2_ERR_STREAM_CLOSED * RST_STREAM was queued for this stream. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because GOAWAY is already sent or * received. 
* NGHTTP2_ERR_PROTO * Client side attempted to send push response. */ static int session_predicate_push_response_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; /* TODO Should disallow HEADERS if GOAWAY has already been issued? */ rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (!session->server) { return NGHTTP2_ERR_PROTO; } if (stream->state != NGHTTP2_STREAM_RESERVED) { return NGHTTP2_ERR_PROTO; } if (session->goaway_flags & NGHTTP2_GOAWAY_RECV) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks HEADERS, which is neither stream-opening nor * first response header, with the |stream| can be sent at this time. * The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. */ static int session_predicate_headers_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); switch (stream->state) { case NGHTTP2_STREAM_OPENED: return 0; case NGHTTP2_STREAM_CLOSING: return NGHTTP2_ERR_STREAM_CLOSING; default: if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { return 0; } return NGHTTP2_ERR_INVALID_STREAM_STATE; } } /* * This function checks PUSH_PROMISE frame |frame| with the |stream| * can be sent at this time. The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_START_STREAM_NOT_ALLOWED * New stream cannot be created because GOAWAY is already sent or * received. * NGHTTP2_ERR_PROTO * The client side attempts to send PUSH_PROMISE, or the server * sends PUSH_PROMISE for the stream not initiated by the client. * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_PUSH_DISABLED * The remote peer disabled reception of PUSH_PROMISE. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. */ static int session_predicate_push_promise_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; if (!session->server) { return NGHTTP2_ERR_PROTO; } rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (session->remote_settings.enable_push == 0) { return NGHTTP2_ERR_PUSH_DISABLED; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } if (session->goaway_flags & NGHTTP2_GOAWAY_RECV) { return NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } return 0; } /* * This function checks WINDOW_UPDATE with the stream ID |stream_id| * can be sent at this time. Note that END_STREAM flag of the previous * frame does not affect the transmission of the WINDOW_UPDATE frame. 
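 * WINDOW_UPDATE enlarges the local receive window, so it stays
 * meaningful even on a stream that is half-closed (local): the peer
 * may still be sending data in the other direction.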
* * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. */ static int session_predicate_window_update_send(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } if (stream_id == 0) { /* Connection-level window update */ return 0; } stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return NGHTTP2_ERR_STREAM_CLOSED; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } if (state_reserved_local(session, stream)) { return NGHTTP2_ERR_INVALID_STREAM_STATE; } return 0; } static int session_predicate_altsvc_send(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } if (stream_id == 0) { return 0; } stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return NGHTTP2_ERR_STREAM_CLOSED; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } return 0; } static int session_predicate_origin_send(nghttp2_session *session) { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } return 0; } /* Take into account settings max frame size and both connection-level flow control here */ static ssize_t nghttp2_session_enforce_flow_control_limits(nghttp2_session *session, nghttp2_stream *stream, ssize_t requested_window_size) { DEBUGF("send: remote windowsize connection=%d, remote maxframsize=%u, " "stream(id %d)=%d\n", session->remote_window_size, session->remote_settings.max_frame_size, stream->stream_id, stream->remote_window_size); return nghttp2_min(nghttp2_min(nghttp2_min(requested_window_size, stream->remote_window_size), session->remote_window_size), (int32_t)session->remote_settings.max_frame_size); } /* * Returns the maximum length of next data read. If the * connection-level and/or stream-wise flow control are enabled, the * return value takes into account those current window sizes. The remote * settings for max frame size is also taken into account. */ static size_t nghttp2_session_next_data_read(nghttp2_session *session, nghttp2_stream *stream) { ssize_t window_size; window_size = nghttp2_session_enforce_flow_control_limits( session, stream, NGHTTP2_DATA_PAYLOADLEN); DEBUGF("send: available window=%zd\n", window_size); return window_size > 0 ? (size_t)window_size : 0; } /* * This function checks DATA with the |stream| can be sent at this * time. The |stream| can be NULL. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_STREAM_CLOSED * The stream is already closed or does not exist. * NGHTTP2_ERR_STREAM_SHUT_WR * The transmission is not allowed for this stream (e.g., a frame * with END_STREAM flag set has already sent) * NGHTTP2_ERR_STREAM_CLOSING * RST_STREAM was queued for this stream. * NGHTTP2_ERR_INVALID_STREAM_STATE * The state of the stream is not valid. * NGHTTP2_ERR_SESSION_CLOSING * This session is closing. 
*/ static int nghttp2_session_predicate_data_send(nghttp2_session *session, nghttp2_stream *stream) { int rv; rv = session_predicate_for_stream_send(session, stream); if (rv != 0) { return rv; } assert(stream); if (nghttp2_session_is_my_stream_id(session, stream->stream_id)) { /* Request body data */ /* If stream->state is NGHTTP2_STREAM_CLOSING, RST_STREAM was queued but not yet sent. In this case, we won't send DATA frames. */ if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } if (stream->state == NGHTTP2_STREAM_RESERVED) { return NGHTTP2_ERR_INVALID_STREAM_STATE; } return 0; } /* Response body data */ if (stream->state == NGHTTP2_STREAM_OPENED) { return 0; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_STREAM_CLOSING; } return NGHTTP2_ERR_INVALID_STREAM_STATE; } static ssize_t session_call_select_padding(nghttp2_session *session, const nghttp2_frame *frame, size_t max_payloadlen) { ssize_t rv; if (frame->hd.length >= max_payloadlen) { return (ssize_t)frame->hd.length; } if (session->callbacks.select_padding_callback) { size_t max_paddedlen; max_paddedlen = nghttp2_min(frame->hd.length + NGHTTP2_MAX_PADLEN, max_payloadlen); rv = session->callbacks.select_padding_callback( session, frame, max_paddedlen, session->user_data); if (rv < (ssize_t)frame->hd.length || rv > (ssize_t)max_paddedlen) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return rv; } return (ssize_t)frame->hd.length; } /* Add padding to HEADERS or PUSH_PROMISE. We use frame->headers.padlen in this function to use the fact that frame->push_promise has also padlen in the same position. */ static int session_headers_add_pad(nghttp2_session *session, nghttp2_frame *frame) { int rv; ssize_t padded_payloadlen; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; size_t padlen; size_t max_payloadlen; aob = &session->aob; framebufs = &aob->framebufs; max_payloadlen = nghttp2_min(NGHTTP2_MAX_PAYLOADLEN, frame->hd.length + NGHTTP2_MAX_PADLEN); padded_payloadlen = session_call_select_padding(session, frame, max_payloadlen); if (nghttp2_is_fatal((int)padded_payloadlen)) { return (int)padded_payloadlen; } padlen = (size_t)padded_payloadlen - frame->hd.length; DEBUGF("send: padding selected: payloadlen=%zd, padlen=%zu\n", padded_payloadlen, padlen); rv = nghttp2_frame_add_pad(framebufs, &frame->hd, padlen, 0); if (rv != 0) { return rv; } frame->headers.padlen = padlen; return 0; } static size_t session_estimate_headers_payload(nghttp2_session *session, const nghttp2_nv *nva, size_t nvlen, size_t additional) { return nghttp2_hd_deflate_bound(&session->hd_deflater, nva, nvlen) + additional; } static int session_pack_extension(nghttp2_session *session, nghttp2_bufs *bufs, nghttp2_frame *frame) { ssize_t rv; nghttp2_buf *buf; size_t buflen; size_t framelen; assert(session->callbacks.pack_extension_callback); buf = &bufs->head->buf; buflen = nghttp2_min(nghttp2_buf_avail(buf), NGHTTP2_MAX_PAYLOADLEN); rv = session->callbacks.pack_extension_callback(session, buf->last, buflen, frame, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return (int)rv; } if (rv < 0 || (size_t)rv > buflen) { return NGHTTP2_ERR_CALLBACK_FAILURE; } framelen = (size_t)rv; frame->hd.length = framelen; assert(buf->pos == buf->last); buf->last += framelen; buf->pos -= NGHTTP2_FRAME_HDLEN; nghttp2_frame_pack_frame_hd(buf->pos, &frame->hd); return 0; } /* * This function serializes frame for transmission. 
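 * Serialization means running the per-type send predicate and, on
 * success, packing the frame into session->aob.framebufs.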
* * This function returns 0 if it succeeds, or one of negative error * codes, including both fatal and non-fatal ones. */ static int session_prep_frame(nghttp2_session *session, nghttp2_outbound_item *item) { int rv; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; frame = &item->frame; switch (frame->hd.type) { case NGHTTP2_DATA: { size_t next_readmax; nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream) { assert(stream->item == item); } rv = nghttp2_session_predicate_data_send(session, stream); if (rv != 0) { // If stream was already closed, nghttp2_session_get_stream() // returns NULL, but item is still attached to the stream. // Search stream including closed again. stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (stream) { int rv2; rv2 = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv2)) { return rv2; } } return rv; } /* Assuming stream is not NULL */ assert(stream); next_readmax = nghttp2_session_next_data_read(session, stream); if (next_readmax == 0) { /* This must be true since we only pop DATA frame item from queue when session->remote_window_size > 0 */ assert(session->remote_window_size > 0); rv = nghttp2_stream_defer_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } session->aob.item = NULL; active_outbound_item_reset(&session->aob, mem); return NGHTTP2_ERR_DEFERRED; } rv = nghttp2_session_pack_data(session, &session->aob.framebufs, next_readmax, frame, &item->aux_data.data, stream); if (rv == NGHTTP2_ERR_PAUSE) { return rv; } if (rv == NGHTTP2_ERR_DEFERRED) { rv = nghttp2_stream_defer_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_USER); if (nghttp2_is_fatal(rv)) { return rv; } session->aob.item = NULL; active_outbound_item_reset(&session->aob, mem); return NGHTTP2_ERR_DEFERRED; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_add_rst_stream(session, frame->hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } if (rv != 0) { int rv2; rv2 = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv2)) { return rv2; } return rv; } return 0; } case NGHTTP2_HEADERS: { nghttp2_headers_aux_data *aux_data; size_t estimated_payloadlen; aux_data = &item->aux_data.headers; if (frame->headers.cat == NGHTTP2_HCAT_REQUEST) { /* initial HEADERS, which opens stream */ nghttp2_stream *stream; stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->headers.pri_spec, NGHTTP2_STREAM_INITIAL, aux_data->stream_user_data); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream() here, since we don't keep closed stream in client side */ rv = session_predicate_request_headers_send(session, item); if (rv != 0) { return rv; } if (session_enforce_http_messaging(session)) { nghttp2_http_record_request_method(stream, frame); } } else { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream && stream->state == NGHTTP2_STREAM_RESERVED) { rv = session_predicate_push_response_headers_send(session, stream); if (rv == 0) { frame->headers.cat = NGHTTP2_HCAT_PUSH_RESPONSE; if (aux_data->stream_user_data) { stream->stream_user_data = aux_data->stream_user_data; } } } else if (session_predicate_response_headers_send(session, stream) == 0) { 
frame->headers.cat = NGHTTP2_HCAT_RESPONSE; rv = 0; } else { frame->headers.cat = NGHTTP2_HCAT_HEADERS; rv = session_predicate_headers_send(session, stream); } if (rv != 0) { return rv; } } estimated_payloadlen = session_estimate_headers_payload( session, frame->headers.nva, frame->headers.nvlen, NGHTTP2_PRIORITY_SPECLEN); if (estimated_payloadlen > session->max_send_header_block_length) { return NGHTTP2_ERR_FRAME_SIZE_ERROR; } rv = nghttp2_frame_pack_headers(&session->aob.framebufs, &frame->headers, &session->hd_deflater); if (rv != 0) { return rv; } DEBUGF("send: before padding, HEADERS serialized in %zd bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); rv = session_headers_add_pad(session, frame); if (rv != 0) { return rv; } DEBUGF("send: HEADERS finally serialized in %zd bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); if (frame->headers.cat == NGHTTP2_HCAT_REQUEST) { assert(session->last_sent_stream_id < frame->hd.stream_id); session->last_sent_stream_id = frame->hd.stream_id; } return 0; } case NGHTTP2_PRIORITY: { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } /* PRIORITY frame can be sent at any time and to any stream ID. */ nghttp2_frame_pack_priority(&session->aob.framebufs, &frame->priority); /* Peer can send PRIORITY frame against idle stream to create "anchor" in dependency tree. Only client can do this in nghttp2. In nghttp2, only server retains non-active (closed or idle) streams in memory, so we don't open stream here. */ return 0; } case NGHTTP2_RST_STREAM: if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } nghttp2_frame_pack_rst_stream(&session->aob.framebufs, &frame->rst_stream); return 0; case NGHTTP2_SETTINGS: { if (frame->hd.flags & NGHTTP2_FLAG_ACK) { assert(session->obq_flood_counter_ > 0); --session->obq_flood_counter_; /* When session is about to close, don't send SETTINGS ACK. We are required to send SETTINGS without ACK though; for example, we have to send SETTINGS as a part of connection preface. */ if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } } rv = nghttp2_frame_pack_settings(&session->aob.framebufs, &frame->settings); if (rv != 0) { return rv; } return 0; } case NGHTTP2_PUSH_PROMISE: { nghttp2_stream *stream; size_t estimated_payloadlen; /* stream could be NULL if associated stream was already closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* predicate should fail if stream is NULL. 
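     (session_predicate_for_stream_send(), called inside the predicate,
     maps a NULL stream to NGHTTP2_ERR_STREAM_CLOSED, which justifies
     the assert(stream) after the check.)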
*/ rv = session_predicate_push_promise_send(session, stream); if (rv != 0) { return rv; } assert(stream); estimated_payloadlen = session_estimate_headers_payload( session, frame->push_promise.nva, frame->push_promise.nvlen, 0); if (estimated_payloadlen > session->max_send_header_block_length) { return NGHTTP2_ERR_FRAME_SIZE_ERROR; } rv = nghttp2_frame_pack_push_promise( &session->aob.framebufs, &frame->push_promise, &session->hd_deflater); if (rv != 0) { return rv; } rv = session_headers_add_pad(session, frame); if (rv != 0) { return rv; } assert(session->last_sent_stream_id + 2 <= frame->push_promise.promised_stream_id); session->last_sent_stream_id = frame->push_promise.promised_stream_id; return 0; } case NGHTTP2_PING: if (frame->hd.flags & NGHTTP2_FLAG_ACK) { assert(session->obq_flood_counter_ > 0); --session->obq_flood_counter_; } /* PING frame is allowed to be sent unless termination GOAWAY is sent */ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_ON_SEND) { return NGHTTP2_ERR_SESSION_CLOSING; } nghttp2_frame_pack_ping(&session->aob.framebufs, &frame->ping); return 0; case NGHTTP2_GOAWAY: rv = nghttp2_frame_pack_goaway(&session->aob.framebufs, &frame->goaway); if (rv != 0) { return rv; } session->local_last_stream_id = frame->goaway.last_stream_id; return 0; case NGHTTP2_WINDOW_UPDATE: rv = session_predicate_window_update_send(session, frame->hd.stream_id); if (rv != 0) { return rv; } nghttp2_frame_pack_window_update(&session->aob.framebufs, &frame->window_update); return 0; case NGHTTP2_CONTINUATION: /* We never handle CONTINUATION here. */ assert(0); return 0; default: { nghttp2_ext_aux_data *aux_data; /* extension frame */ aux_data = &item->aux_data.ext; if (aux_data->builtin == 0) { if (session_is_closing(session)) { return NGHTTP2_ERR_SESSION_CLOSING; } return session_pack_extension(session, &session->aob.framebufs, frame); } switch (frame->hd.type) { case NGHTTP2_ALTSVC: rv = session_predicate_altsvc_send(session, frame->hd.stream_id); if (rv != 0) { return rv; } nghttp2_frame_pack_altsvc(&session->aob.framebufs, &frame->ext); return 0; case NGHTTP2_ORIGIN: rv = session_predicate_origin_send(session); if (rv != 0) { return rv; } rv = nghttp2_frame_pack_origin(&session->aob.framebufs, &frame->ext); if (rv != 0) { return rv; } return 0; default: /* Unreachable here */ assert(0); return 0; } } } } nghttp2_outbound_item * nghttp2_session_get_next_ob_item(nghttp2_session *session) { if (nghttp2_outbound_queue_top(&session->ob_urgent)) { return nghttp2_outbound_queue_top(&session->ob_urgent); } if (nghttp2_outbound_queue_top(&session->ob_reg)) { return nghttp2_outbound_queue_top(&session->ob_reg); } if (!session_is_outgoing_concurrent_streams_max(session)) { if (nghttp2_outbound_queue_top(&session->ob_syn)) { return nghttp2_outbound_queue_top(&session->ob_syn); } } if (session->remote_window_size > 0) { return nghttp2_stream_next_outbound_item(&session->root); } return NULL; } nghttp2_outbound_item * nghttp2_session_pop_next_ob_item(nghttp2_session *session) { nghttp2_outbound_item *item; item = nghttp2_outbound_queue_top(&session->ob_urgent); if (item) { nghttp2_outbound_queue_pop(&session->ob_urgent); item->queued = 0; return item; } item = nghttp2_outbound_queue_top(&session->ob_reg); if (item) { nghttp2_outbound_queue_pop(&session->ob_reg); item->queued = 0; return item; } if (!session_is_outgoing_concurrent_streams_max(session)) { item = nghttp2_outbound_queue_top(&session->ob_syn); if (item) { nghttp2_outbound_queue_pop(&session->ob_syn); item->queued = 0; return item; } } if 
(session->remote_window_size > 0) { return nghttp2_stream_next_outbound_item(&session->root); } return NULL; } static int session_call_before_frame_send(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.before_frame_send_callback) { rv = session->callbacks.before_frame_send_callback(session, frame, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_frame_send(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.on_frame_send_callback) { rv = session->callbacks.on_frame_send_callback(session, frame, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int find_stream_on_goaway_func(nghttp2_map_entry *entry, void *ptr) { nghttp2_close_stream_on_goaway_arg *arg; nghttp2_stream *stream; arg = (nghttp2_close_stream_on_goaway_arg *)ptr; stream = (nghttp2_stream *)entry; if (nghttp2_session_is_my_stream_id(arg->session, stream->stream_id)) { if (arg->incoming) { return 0; } } else if (!arg->incoming) { return 0; } if (stream->state != NGHTTP2_STREAM_IDLE && (stream->flags & NGHTTP2_STREAM_FLAG_CLOSED) == 0 && stream->stream_id > arg->last_stream_id) { /* We are collecting streams to close because we cannot call nghttp2_session_close_stream() inside nghttp2_map_each(). Reuse closed_next member.. bad choice? */ assert(stream->closed_next == NULL); assert(stream->closed_prev == NULL); if (arg->head) { stream->closed_next = arg->head; arg->head = stream; } else { arg->head = stream; } } return 0; } /* Closes non-idle and non-closed streams whose stream ID > last_stream_id. If incoming is nonzero, we are going to close incoming streams. Otherwise, close outgoing streams. */ static int session_close_stream_on_goaway(nghttp2_session *session, int32_t last_stream_id, int incoming) { int rv; nghttp2_stream *stream, *next_stream; nghttp2_close_stream_on_goaway_arg arg = {session, NULL, last_stream_id, incoming}; rv = nghttp2_map_each(&session->streams, find_stream_on_goaway_func, &arg); assert(rv == 0); stream = arg.head; while (stream) { next_stream = stream->closed_next; stream->closed_next = NULL; rv = nghttp2_session_close_stream(session, stream->stream_id, NGHTTP2_REFUSED_STREAM); /* stream may be deleted here */ stream = next_stream; if (nghttp2_is_fatal(rv)) { /* Clean up closed_next member just in case */ while (stream) { next_stream = stream->closed_next; stream->closed_next = NULL; stream = next_stream; } return rv; } } return 0; } static void reschedule_stream(nghttp2_stream *stream) { stream->last_writelen = stream->item->frame.hd.length; nghttp2_stream_reschedule(stream); } static int session_update_stream_consumed_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size); static int session_update_connection_consumed_size(nghttp2_session *session, size_t delta_size); /* * Called after a frame is sent. This function runs * on_frame_send_callback and handles stream closure upon END_STREAM * or RST_STREAM. This function does not reset session->aob. It is a * responsibility of session_after_frame_sent2. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. 
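 *
 * For example, after a DATA frame carrying N bytes of payload
 * (including any padding) has been sent, both
 * session->remote_window_size and stream->remote_window_size are
 * decreased by N below; they are replenished only by WINDOW_UPDATE
 * from the peer.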
*/ static int session_after_frame_sent1(nghttp2_session *session) { int rv; nghttp2_active_outbound_item *aob = &session->aob; nghttp2_outbound_item *item = aob->item; nghttp2_bufs *framebufs = &aob->framebufs; nghttp2_frame *frame; nghttp2_stream *stream; frame = &item->frame; if (frame->hd.type == NGHTTP2_DATA) { nghttp2_data_aux_data *aux_data; aux_data = &item->aux_data.data; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* We update flow control window after a frame was completely sent. This is possible because we choose payload length not to exceed the window */ session->remote_window_size -= (int32_t)frame->hd.length; if (stream) { stream->remote_window_size -= (int32_t)frame->hd.length; } if (stream && aux_data->eof) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } /* Call on_frame_send_callback after nghttp2_stream_detach_item(), so that application can issue nghttp2_submit_data() in the callback. */ if (session->callbacks.on_frame_send_callback) { rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } } if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { int stream_closed; stream_closed = (stream->shut_flags & NGHTTP2_SHUT_RDWR) == NGHTTP2_SHUT_RDWR; nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* stream may be NULL if it was closed */ if (stream_closed) { stream = NULL; } } return 0; } if (session->callbacks.on_frame_send_callback) { rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* non-DATA frame */ if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_PUSH_PROMISE) { if (nghttp2_bufs_next_present(framebufs)) { DEBUGF("send: CONTINUATION exists, just return\n"); return 0; } } rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } switch (frame->hd.type) { case NGHTTP2_HEADERS: { nghttp2_headers_aux_data *aux_data; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } switch (frame->headers.cat) { case NGHTTP2_HCAT_REQUEST: { stream->state = NGHTTP2_STREAM_OPENING; if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); } rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; if (aux_data->data_prd.read_callback) { /* nghttp2_submit_data() makes a copy of aux_data->data_prd */ rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, frame->hd.stream_id, &aux_data->data_prd); if (nghttp2_is_fatal(rv)) { return rv; } /* TODO nghttp2_submit_data() may fail if stream has already DATA frame item. We might have to handle it here. 
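(It would fail with the non-fatal NGHTTP2_ERR_DATA_EXIST, which the nghttp2_is_fatal() check above currently lets pass.)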
*/ } return 0; } case NGHTTP2_HCAT_PUSH_RESPONSE: stream->flags = (uint8_t)(stream->flags & ~NGHTTP2_STREAM_FLAG_PUSH); ++session->num_outgoing_streams; /* Fall through */ case NGHTTP2_HCAT_RESPONSE: stream->state = NGHTTP2_STREAM_OPENED; /* Fall through */ case NGHTTP2_HCAT_HEADERS: if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); } rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; if (aux_data->data_prd.read_callback) { rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, frame->hd.stream_id, &aux_data->data_prd); if (nghttp2_is_fatal(rv)) { return rv; } /* TODO nghttp2_submit_data() may fail if stream has already DATA frame item. We might have to handle it here. */ } return 0; default: /* Unreachable */ assert(0); return 0; } } case NGHTTP2_PRIORITY: if (session->server) { return 0; } stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (!stream) { if (!session_detect_idle_stream(session, frame->hd.stream_id)) { return 0; } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->priority.pri_spec, NGHTTP2_STREAM_IDLE, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } } else { rv = nghttp2_session_reprioritize_stream(session, stream, &frame->priority.pri_spec); if (nghttp2_is_fatal(rv)) { return rv; } } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } return 0; case NGHTTP2_RST_STREAM: rv = nghttp2_session_close_stream(session, frame->hd.stream_id, frame->rst_stream.error_code); if (nghttp2_is_fatal(rv)) { return rv; } return 0; case NGHTTP2_GOAWAY: { nghttp2_goaway_aux_data *aux_data; aux_data = &item->aux_data.goaway; if ((aux_data->flags & NGHTTP2_GOAWAY_AUX_SHUTDOWN_NOTICE) == 0) { if (aux_data->flags & NGHTTP2_GOAWAY_AUX_TERM_ON_SEND) { session->goaway_flags |= NGHTTP2_GOAWAY_TERM_SENT; } session->goaway_flags |= NGHTTP2_GOAWAY_SENT; rv = session_close_stream_on_goaway(session, frame->goaway.last_stream_id, 1); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } case NGHTTP2_WINDOW_UPDATE: if (frame->hd.stream_id == 0) { session->window_update_queued = 0; if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { rv = session_update_connection_consumed_size(session, 0); } else { rv = nghttp2_session_update_recv_connection_window_size(session, 0); } if (nghttp2_is_fatal(rv)) { return rv; } return 0; } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } stream->window_update_queued = 0; /* We don't have to send WINDOW_UPDATE if END_STREAM from peer is seen. */ if (stream->shut_flags & NGHTTP2_SHUT_RD) { return 0; } if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { rv = session_update_stream_consumed_size(session, stream, 0); } else { rv = nghttp2_session_update_recv_stream_window_size(session, stream, 0, 1); } if (nghttp2_is_fatal(rv)) { return rv; } return 0; default: return 0; } } /* * Called after a frame is sent and after session_after_frame_sent1. This * function is responsible for resetting session->aob. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed.
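 *
 * Unlike session_after_frame_sent1, this function invokes no
 * application callbacks: it only advances framebufs->cur when a
 * CONTINUATION is still pending, and otherwise resets session->aob.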
*/ static int session_after_frame_sent2(nghttp2_session *session) { int rv; nghttp2_active_outbound_item *aob = &session->aob; nghttp2_outbound_item *item = aob->item; nghttp2_bufs *framebufs = &aob->framebufs; nghttp2_frame *frame; nghttp2_mem *mem; nghttp2_stream *stream; nghttp2_data_aux_data *aux_data; mem = &session->mem; frame = &item->frame; if (frame->hd.type != NGHTTP2_DATA) { if (frame->hd.type == NGHTTP2_HEADERS || frame->hd.type == NGHTTP2_PUSH_PROMISE) { if (nghttp2_bufs_next_present(framebufs)) { framebufs->cur = framebufs->cur->next; DEBUGF("send: next CONTINUATION frame, %zu bytes\n", nghttp2_buf_len(&framebufs->cur->buf)); return 0; } } active_outbound_item_reset(&session->aob, mem); return 0; } /* DATA frame */ aux_data = &item->aux_data.data; /* On EOF, we have already detached data. Please note that application may issue nghttp2_submit_data() in on_frame_send_callback (call from session_after_frame_sent1), which attach data to stream. We don't want to detach it. */ if (aux_data->eof) { active_outbound_item_reset(aob, mem); return 0; } /* Reset no_copy here because next write may not use this. */ aux_data->no_copy = 0; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); /* If session is closed or RST_STREAM was queued, we won't send further data. */ if (nghttp2_session_predicate_data_send(session, stream) != 0) { if (stream) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } } active_outbound_item_reset(aob, mem); return 0; } aob->item = NULL; active_outbound_item_reset(&session->aob, mem); return 0; } static int session_call_send_data(nghttp2_session *session, nghttp2_outbound_item *item, nghttp2_bufs *framebufs) { int rv; nghttp2_buf *buf; size_t length; nghttp2_frame *frame; nghttp2_data_aux_data *aux_data; buf = &framebufs->cur->buf; frame = &item->frame; length = frame->hd.length - frame->data.padlen; aux_data = &item->aux_data.data; rv = session->callbacks.send_data_callback(session, frame, buf->pos, length, &aux_data->data_prd.source, session->user_data); switch (rv) { case 0: case NGHTTP2_ERR_WOULDBLOCK: case NGHTTP2_ERR_PAUSE: case NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE: return rv; default: return NGHTTP2_ERR_CALLBACK_FAILURE; } } static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, const uint8_t **data_ptr, int fast_cb) { int rv; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; nghttp2_mem *mem; mem = &session->mem; aob = &session->aob; framebufs = &aob->framebufs; /* We may have idle streams more than we expect (e.g., nghttp2_session_change_stream_priority() or nghttp2_session_create_idle_stream()). Adjust them here. */ rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } for (;;) { switch (aob->state) { case NGHTTP2_OB_POP_ITEM: { nghttp2_outbound_item *item; item = nghttp2_session_pop_next_ob_item(session); if (item == NULL) { return 0; } rv = session_prep_frame(session, item); if (rv == NGHTTP2_ERR_PAUSE) { return 0; } if (rv == NGHTTP2_ERR_DEFERRED) { DEBUGF("send: frame transmission deferred\n"); break; } if (rv < 0) { int32_t opened_stream_id = 0; uint32_t error_code = NGHTTP2_INTERNAL_ERROR; DEBUGF("send: frame preparation failed with %s\n", nghttp2_strerror(rv)); /* TODO If the error comes from compressor, the connection must be closed. 
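(The NGHTTP2_ERR_HEADER_COMP case below does exactly that, terminating the session with NGHTTP2_INTERNAL_ERROR.)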
*/ if (item->frame.hd.type != NGHTTP2_DATA && session->callbacks.on_frame_not_send_callback && is_non_fatal(rv)) { nghttp2_frame *frame = &item->frame; /* The library is responsible for the transmission of WINDOW_UPDATE frame, so we don't call error callback for it. */ if (frame->hd.type != NGHTTP2_WINDOW_UPDATE && session->callbacks.on_frame_not_send_callback( session, frame, rv, session->user_data) != 0) { nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); return NGHTTP2_ERR_CALLBACK_FAILURE; } } /* We have to close stream opened by failed request HEADERS or PUSH_PROMISE. */ switch (item->frame.hd.type) { case NGHTTP2_HEADERS: if (item->frame.headers.cat == NGHTTP2_HCAT_REQUEST) { opened_stream_id = item->frame.hd.stream_id; if (item->aux_data.headers.canceled) { error_code = item->aux_data.headers.error_code; } else { /* Set error_code to REFUSED_STREAM so that application can send request again. */ error_code = NGHTTP2_REFUSED_STREAM; } } break; case NGHTTP2_PUSH_PROMISE: opened_stream_id = item->frame.push_promise.promised_stream_id; break; } if (opened_stream_id) { /* careful not to override rv */ int rv2; rv2 = nghttp2_session_close_stream(session, opened_stream_id, error_code); if (nghttp2_is_fatal(rv2)) { return rv2; } } nghttp2_outbound_item_free(item, mem); nghttp2_mem_free(mem, item); active_outbound_item_reset(aob, mem); if (rv == NGHTTP2_ERR_HEADER_COMP) { /* If a header compression error occurred, we should terminate the connection. */ rv = nghttp2_session_terminate_session(session, NGHTTP2_INTERNAL_ERROR); } if (nghttp2_is_fatal(rv)) { return rv; } break; } aob->item = item; nghttp2_bufs_rewind(framebufs); if (item->frame.hd.type != NGHTTP2_DATA) { nghttp2_frame *frame; frame = &item->frame; DEBUGF("send: next frame: payloadlen=%zu, type=%u, flags=0x%02x, " "stream_id=%d\n", frame->hd.length, frame->hd.type, frame->hd.flags, frame->hd.stream_id); rv = session_call_before_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (rv == NGHTTP2_ERR_CANCEL) { int32_t opened_stream_id = 0; uint32_t error_code = NGHTTP2_INTERNAL_ERROR; if (session->callbacks.on_frame_not_send_callback) { if (session->callbacks.on_frame_not_send_callback( session, frame, rv, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } /* We have to close stream opened by canceled request HEADERS or PUSH_PROMISE. */ switch (item->frame.hd.type) { case NGHTTP2_HEADERS: if (item->frame.headers.cat == NGHTTP2_HCAT_REQUEST) { opened_stream_id = item->frame.hd.stream_id; /* We don't have to check item->aux_data.headers.canceled since it has already been checked. */ /* Set error_code to REFUSED_STREAM so that application can send request again.
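Reporting REFUSED_STREAM through on_stream_close_callback tells the application that the request was never processed, so it is safe to retry it on another stream or connection.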
*/ error_code = NGHTTP2_REFUSED_STREAM; } break; case NGHTTP2_PUSH_PROMISE: opened_stream_id = item->frame.push_promise.promised_stream_id; break; } if (opened_stream_id) { /* careful not to override rv */ int rv2; rv2 = nghttp2_session_close_stream(session, opened_stream_id, error_code); if (nghttp2_is_fatal(rv2)) { return rv2; } } active_outbound_item_reset(aob, mem); break; } } else { DEBUGF("send: next frame: DATA\n"); if (item->aux_data.data.no_copy) { aob->state = NGHTTP2_OB_SEND_NO_COPY; break; } } DEBUGF("send: start transmitting frame type=%u, length=%zd\n", framebufs->cur->buf.pos[3], framebufs->cur->buf.last - framebufs->cur->buf.pos); aob->state = NGHTTP2_OB_SEND_DATA; break; } case NGHTTP2_OB_SEND_DATA: { size_t datalen; nghttp2_buf *buf; buf = &framebufs->cur->buf; if (buf->pos == buf->last) { DEBUGF("send: end transmission of a frame\n"); /* Frame has completely sent */ if (fast_cb) { rv = session_after_frame_sent2(session); } else { rv = session_after_frame_sent1(session); if (rv < 0) { /* FATAL */ assert(nghttp2_is_fatal(rv)); return rv; } rv = session_after_frame_sent2(session); } if (rv < 0) { /* FATAL */ assert(nghttp2_is_fatal(rv)); return rv; } /* We have already adjusted the next state */ break; } *data_ptr = buf->pos; datalen = nghttp2_buf_len(buf); /* We increment the offset here. If send_callback does not send everything, we will adjust it. */ buf->pos += datalen; return (ssize_t)datalen; } case NGHTTP2_OB_SEND_NO_COPY: { nghttp2_stream *stream; nghttp2_frame *frame; int pause; DEBUGF("send: no copy DATA\n"); frame = &aob->item->frame; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream == NULL) { DEBUGF("send: no copy DATA cancelled because stream was closed\n"); active_outbound_item_reset(aob, mem); break; } rv = session_call_send_data(session, aob->item, framebufs); if (nghttp2_is_fatal(rv)) { return rv; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_stream_detach_item(stream); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_add_rst_stream(session, frame->hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } active_outbound_item_reset(aob, mem); break; } if (rv == NGHTTP2_ERR_WOULDBLOCK) { return 0; } pause = (rv == NGHTTP2_ERR_PAUSE); rv = session_after_frame_sent1(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return rv; } rv = session_after_frame_sent2(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return rv; } /* We have already adjusted the next state */ if (pause) { return 0; } break; } case NGHTTP2_OB_SEND_CLIENT_MAGIC: { size_t datalen; nghttp2_buf *buf; buf = &framebufs->cur->buf; if (buf->pos == buf->last) { DEBUGF("send: end transmission of client magic\n"); active_outbound_item_reset(aob, mem); break; } *data_ptr = buf->pos; datalen = nghttp2_buf_len(buf); buf->pos += datalen; return (ssize_t)datalen; } } } } ssize_t nghttp2_session_mem_send(nghttp2_session *session, const uint8_t **data_ptr) { int rv; ssize_t len; *data_ptr = NULL; len = nghttp2_session_mem_send_internal(session, data_ptr, 1); if (len <= 0) { return len; } if (session->aob.item) { /* We have to call session_after_frame_sent1 here to handle stream closure upon transmission of frames. Otherwise, END_STREAM may be reached to client before we call nghttp2_session_mem_send again and we may get exceeding number of incoming streams. 
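In other words, on this fast path only the aob bookkeeping of session_after_frame_sent2 ran inside nghttp2_session_mem_send_internal; the stream-closure work of session_after_frame_sent1 runs here, as soon as the serialized bytes are handed to the caller.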
*/ rv = session_after_frame_sent1(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); return (ssize_t)rv; } } return len; } int nghttp2_session_send(nghttp2_session *session) { const uint8_t *data = NULL; ssize_t datalen; ssize_t sentlen; nghttp2_bufs *framebufs; framebufs = &session->aob.framebufs; for (;;) { datalen = nghttp2_session_mem_send_internal(session, &data, 0); if (datalen <= 0) { return (int)datalen; } sentlen = session->callbacks.send_callback(session, data, (size_t)datalen, 0, session->user_data); if (sentlen < 0) { if (sentlen == NGHTTP2_ERR_WOULDBLOCK) { /* Transmission canceled. Rewind the offset */ framebufs->cur->buf.pos -= datalen; return 0; } return NGHTTP2_ERR_CALLBACK_FAILURE; } /* Rewind the offset to the amount of unsent bytes */ framebufs->cur->buf.pos -= datalen - sentlen; } } static ssize_t session_recv(nghttp2_session *session, uint8_t *buf, size_t len) { ssize_t rv; rv = session->callbacks.recv_callback(session, buf, len, 0, session->user_data); if (rv > 0) { if ((size_t)rv > len) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } else if (rv < 0 && rv != NGHTTP2_ERR_WOULDBLOCK && rv != NGHTTP2_ERR_EOF) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return rv; } static int session_call_on_begin_frame(nghttp2_session *session, const nghttp2_frame_hd *hd) { int rv; if (session->callbacks.on_begin_frame_callback) { rv = session->callbacks.on_begin_frame_callback(session, hd, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_frame_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (session->callbacks.on_frame_recv_callback) { rv = session->callbacks.on_frame_recv_callback(session, frame, session->user_data); if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_begin_headers(nghttp2_session *session, nghttp2_frame *frame) { int rv; DEBUGF("recv: call on_begin_headers callback stream_id=%d\n", frame->hd.stream_id); if (session->callbacks.on_begin_headers_callback) { rv = session->callbacks.on_begin_headers_callback(session, frame, session->user_data); if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_on_header(nghttp2_session *session, const nghttp2_frame *frame, const nghttp2_hd_nv *nv) { int rv = 0; if (session->callbacks.on_header_callback2) { rv = session->callbacks.on_header_callback2( session, frame, nv->name, nv->value, nv->flags, session->user_data); } else if (session->callbacks.on_header_callback) { rv = session->callbacks.on_header_callback( session, frame, nv->name->base, nv->name->len, nv->value->base, nv->value->len, nv->flags, session->user_data); } if (rv == NGHTTP2_ERR_PAUSE || rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_call_on_invalid_header(nghttp2_session *session, const nghttp2_frame *frame, const nghttp2_hd_nv *nv) { int rv; if (session->callbacks.on_invalid_header_callback2) { rv = session->callbacks.on_invalid_header_callback2( session, frame, nv->name, nv->value, nv->flags, session->user_data); } else if (session->callbacks.on_invalid_header_callback) { rv = session->callbacks.on_invalid_header_callback( session, frame, nv->name->base, nv->name->len, nv->value->base, nv->value->len, nv->flags, session->user_data); } else { return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } if (rv == NGHTTP2_ERR_PAUSE || rv == 
NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } return 0; } static int session_call_on_extension_chunk_recv_callback(nghttp2_session *session, const uint8_t *data, size_t len) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; if (session->callbacks.on_extension_chunk_recv_callback) { rv = session->callbacks.on_extension_chunk_recv_callback( session, &frame->hd, data, len, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_call_unpack_extension_callback(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; void *payload = NULL; rv = session->callbacks.unpack_extension_callback( session, &payload, &frame->hd, session->user_data); if (rv == NGHTTP2_ERR_CANCEL) { return rv; } if (rv != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } frame->ext.payload = payload; return 0; } /* * Handles frame size error. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. */ static int session_handle_frame_size_error(nghttp2_session *session) { /* TODO Currently no callback is called for this error, because we call this callback before reading any payload */ return nghttp2_session_terminate_session(session, NGHTTP2_FRAME_SIZE_ERROR); } static uint32_t get_error_code_from_lib_error_code(int lib_error_code) { switch (lib_error_code) { case NGHTTP2_ERR_STREAM_CLOSED: return NGHTTP2_STREAM_CLOSED; case NGHTTP2_ERR_HEADER_COMP: return NGHTTP2_COMPRESSION_ERROR; case NGHTTP2_ERR_FRAME_SIZE_ERROR: return NGHTTP2_FRAME_SIZE_ERROR; case NGHTTP2_ERR_FLOW_CONTROL: return NGHTTP2_FLOW_CONTROL_ERROR; case NGHTTP2_ERR_REFUSED_STREAM: return NGHTTP2_REFUSED_STREAM; case NGHTTP2_ERR_PROTO: case NGHTTP2_ERR_HTTP_HEADER: case NGHTTP2_ERR_HTTP_MESSAGING: return NGHTTP2_PROTOCOL_ERROR; default: return NGHTTP2_INTERNAL_ERROR; } } /* * Calls on_invalid_frame_recv_callback if it is set to |session|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_CALLBACK_FAILURE * User defined callback function fails. 
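 *
 * Unlike session_handle_invalid_stream2 below, this helper only
 * notifies the application; it does not queue RST_STREAM.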
*/ static int session_call_on_invalid_frame_recv_callback(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code) { if (session->callbacks.on_invalid_frame_recv_callback) { if (session->callbacks.on_invalid_frame_recv_callback( session, frame, lib_error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_handle_invalid_stream2(nghttp2_session *session, int32_t stream_id, nghttp2_frame *frame, int lib_error_code) { int rv; rv = nghttp2_session_add_rst_stream( session, stream_id, get_error_code_from_lib_error_code(lib_error_code)); if (rv != 0) { return rv; } if (session->callbacks.on_invalid_frame_recv_callback) { if (session->callbacks.on_invalid_frame_recv_callback( session, frame, lib_error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return 0; } static int session_handle_invalid_stream(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code) { return session_handle_invalid_stream2(session, frame->hd.stream_id, frame, lib_error_code); } static int session_inflate_handle_invalid_stream(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code) { int rv; rv = session_handle_invalid_stream(session, frame, lib_error_code); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } /* * Handles invalid frame which causes connection error. */ static int session_handle_invalid_connection(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code, const char *reason) { if (session->callbacks.on_invalid_frame_recv_callback) { if (session->callbacks.on_invalid_frame_recv_callback( session, frame, lib_error_code, session->user_data) != 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } return nghttp2_session_terminate_session_with_reason( session, get_error_code_from_lib_error_code(lib_error_code), reason); } static int session_inflate_handle_invalid_connection(nghttp2_session *session, nghttp2_frame *frame, int lib_error_code, const char *reason) { int rv; rv = session_handle_invalid_connection(session, frame, lib_error_code, reason); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } /* * Inflates header block in the memory pointed by |in| with |inlen| * bytes. If this function returns NGHTTP2_ERR_PAUSE, the caller must * call this function again, until it returns 0 or one of negative * error code. If |call_header_cb| is zero, the on_header_callback * are not invoked and the function never return NGHTTP2_ERR_PAUSE. If * the given |in| is the last chunk of header block, the |final| must * be nonzero. If header block is successfully processed (which is * indicated by the return value 0, NGHTTP2_ERR_PAUSE or * NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE), the number of processed * input bytes is assigned to the |*readlen_ptr|. * * This function return 0 if it succeeds, or one of the negative error * codes: * * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. * NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE * The callback returns this error code, indicating that this * stream should be RST_STREAMed. * NGHTTP2_ERR_NOMEM * Out of memory. 
* NGHTTP2_ERR_PAUSE * The callback function returned NGHTTP2_ERR_PAUSE * NGHTTP2_ERR_HEADER_COMP * Header decompression failed */ static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame, size_t *readlen_ptr, uint8_t *in, size_t inlen, int final, int call_header_cb) { ssize_t proclen; int rv; int inflate_flags; nghttp2_hd_nv nv; nghttp2_stream *stream; nghttp2_stream *subject_stream; int trailer = 0; *readlen_ptr = 0; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { subject_stream = nghttp2_session_get_stream( session, frame->push_promise.promised_stream_id); } else { subject_stream = stream; trailer = session_trailer_headers(session, stream, frame); } DEBUGF("recv: decoding header block %zu bytes\n", inlen); for (;;) { inflate_flags = 0; proclen = nghttp2_hd_inflate_hd_nv(&session->hd_inflater, &nv, &inflate_flags, in, inlen, final); if (nghttp2_is_fatal((int)proclen)) { return (int)proclen; } if (proclen < 0) { if (session->iframe.state == NGHTTP2_IB_READ_HEADER_BLOCK) { if (subject_stream && subject_stream->state != NGHTTP2_STREAM_CLOSING) { /* Adding RST_STREAM here is very important. It prevents from invoking subsequent callbacks for the same stream ID. */ rv = nghttp2_session_add_rst_stream( session, subject_stream->stream_id, NGHTTP2_COMPRESSION_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } } } rv = nghttp2_session_terminate_session(session, NGHTTP2_COMPRESSION_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_HEADER_COMP; } in += proclen; inlen -= (size_t)proclen; *readlen_ptr += (size_t)proclen; DEBUGF("recv: proclen=%zd\n", proclen); if (call_header_cb && (inflate_flags & NGHTTP2_HD_INFLATE_EMIT)) { rv = 0; if (subject_stream) { if (session_enforce_http_messaging(session)) { rv = nghttp2_http_on_header(session, subject_stream, frame, &nv, trailer); if (rv == NGHTTP2_ERR_IGN_HTTP_HEADER) { /* Don't overwrite rv here */ int rv2; rv2 = session_call_on_invalid_header(session, frame, &nv); if (rv2 == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = NGHTTP2_ERR_HTTP_HEADER; } else { if (rv2 != 0) { return rv2; } /* header is ignored */ DEBUGF("recv: HTTP ignored: type=%u, id=%d, header %.*s: %.*s\n", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); rv2 = session_call_error_callback( session, NGHTTP2_ERR_HTTP_HEADER, "Ignoring received invalid HTTP header field: frame type: " "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); if (nghttp2_is_fatal(rv2)) { return rv2; } } } if (rv == NGHTTP2_ERR_HTTP_HEADER) { DEBUGF("recv: HTTP error: type=%u, id=%d, header %.*s: %.*s\n", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); rv = session_call_error_callback( session, NGHTTP2_ERR_HTTP_HEADER, "Invalid HTTP header field was received: frame type: " "%u, stream: %d, name: [%.*s], value: [%.*s]", frame->hd.type, frame->hd.stream_id, (int)nv.name->len, nv.name->base, (int)nv.value->len, nv.value->base); if (nghttp2_is_fatal(rv)) { return rv; } rv = session_handle_invalid_stream2(session, subject_stream->stream_id, frame, NGHTTP2_ERR_HTTP_HEADER); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE; } } if (rv == 0) { rv = session_call_on_header(session, frame, &nv); /* This handles NGHTTP2_ERR_PAUSE and NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE as 
well */ if (rv != 0) { return rv; } } } } if (inflate_flags & NGHTTP2_HD_INFLATE_FINAL) { nghttp2_hd_inflate_end_headers(&session->hd_inflater); break; } if ((inflate_flags & NGHTTP2_HD_INFLATE_EMIT) == 0 && inlen == 0) { break; } } return 0; } /* * Call this function when HEADERS frame was completely received. * * This function returns 0 if it succeeds, or one of negative error * codes: * * NGHTTP2_ERR_CALLBACK_FAILURE * The callback function failed. * NGHTTP2_ERR_NOMEM * Out of memory. */ static int session_end_stream_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv; if ((frame->hd.flags & NGHTTP2_FLAG_END_STREAM) == 0) { return 0; } nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } static int session_after_header_block_received(nghttp2_session *session) { int rv = 0; nghttp2_frame *frame = &session->iframe.frame; nghttp2_stream *stream; /* We don't call on_frame_recv_callback if stream has been closed already or being closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } if (session_enforce_http_messaging(session)) { if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { nghttp2_stream *subject_stream; subject_stream = nghttp2_session_get_stream( session, frame->push_promise.promised_stream_id); if (subject_stream) { rv = nghttp2_http_on_request_headers(subject_stream, frame); } } else { assert(frame->hd.type == NGHTTP2_HEADERS); switch (frame->headers.cat) { case NGHTTP2_HCAT_REQUEST: rv = nghttp2_http_on_request_headers(stream, frame); break; case NGHTTP2_HCAT_RESPONSE: case NGHTTP2_HCAT_PUSH_RESPONSE: rv = nghttp2_http_on_response_headers(stream); break; case NGHTTP2_HCAT_HEADERS: if (stream->http_flags & NGHTTP2_HTTP_FLAG_EXPECT_FINAL_RESPONSE) { assert(!session->server); rv = nghttp2_http_on_response_headers(stream); } else { rv = nghttp2_http_on_trailer_headers(stream, frame); } break; default: assert(0); } if (rv == 0 && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { rv = nghttp2_http_on_remote_end_stream(stream); } } if (rv != 0) { int32_t stream_id; if (frame->hd.type == NGHTTP2_PUSH_PROMISE) { stream_id = frame->push_promise.promised_stream_id; } else { stream_id = frame->hd.stream_id; } rv = session_handle_invalid_stream2(session, stream_id, frame, NGHTTP2_ERR_HTTP_MESSAGING); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.type == NGHTTP2_HEADERS && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); /* Don't call nghttp2_session_close_stream_if_shut_rdwr because RST_STREAM has been submitted. */ } return 0; } } rv = session_call_on_frame_received(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.type != NGHTTP2_HEADERS) { return 0; } return session_end_stream_headers_received(session, frame, stream); } int nghttp2_session_on_request_headers_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: stream_id == 0"); } /* If client receives idle stream from server, it is invalid regardless stream ID is even or odd. This is because client is not expected to receive request from server. 
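Server-initiated (even-numbered) streams only ever reach a client via PUSH_PROMISE, which reserves them before any HEADERS arrive, so HEADERS on an idle stream at the client is always an error.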
*/ if (!session->server) { if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: client received request"); } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } assert(session->server); if (!session_is_new_peer_stream_id(session, frame->hd.stream_id)) { if (frame->hd.stream_id == 0 || nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: invalid stream_id"); } /* RFC 7540 says if an endpoint receives a HEADERS with invalid * stream ID (e.g, numerically smaller than previous), it MUST * issue connection error with error code PROTOCOL_ERROR. It is a * bit hard to detect this, since we cannot remember all streams * we observed so far. * * You might imagine this is really easy. But no. HTTP/2 is * asynchronous protocol, and usually client and server do not * share the complete picture of open/closed stream status. For * example, after server sends RST_STREAM for a stream, client may * send trailer HEADERS for that stream. If naive server detects * that, and issued connection error, then it is a bug of server * implementation since client is not wrong if it did not get * RST_STREAM when it issued trailer HEADERS. * * At the moment, we are very conservative here. We only use * connection error if stream ID refers idle stream, or we are * sure that stream is half-closed(remote) or closed. Otherwise * we just ignore HEADERS for now. */ stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (stream && (stream->shut_flags & NGHTTP2_SHUT_RD)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } session->last_recv_stream_id = frame->hd.stream_id; if (session_is_incoming_concurrent_streams_max(session)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: max concurrent streams exceeded"); } if (!session_allow_incoming_new_stream(session)) { /* We just ignore stream after GOAWAY was sent */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (frame->headers.pri_spec.stream_id == frame->hd.stream_id) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "request HEADERS: depend on itself"); } if (session_is_incoming_concurrent_streams_pending_max(session)) { return session_inflate_handle_invalid_stream(session, frame, NGHTTP2_ERR_REFUSED_STREAM); } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->headers.pri_spec, NGHTTP2_STREAM_OPENING, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } rv = nghttp2_session_adjust_closed_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } session->last_proc_stream_id = session->last_recv_stream_id; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_response_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv; /* This function is only called if stream->state == NGHTTP2_STREAM_OPENING and stream_id is local side initiated. 
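Both conditions are established by the dispatch in session_process_headers_frame and re-checked by the assert below.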
*/ assert(stream->state == NGHTTP2_STREAM_OPENING && nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)); if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "response HEADERS: stream_id == 0"); } if (stream->shut_flags & NGHTTP2_SHUT_RD) { /* half closed (remote): from the spec: If an endpoint receives additional frames for a stream that is in this state it MUST respond with a stream error (Section 5.4.2) of type STREAM_CLOSED. We go further, and make it connection error. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } stream->state = NGHTTP2_STREAM_OPENED; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_push_response_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv = 0; assert(stream->state == NGHTTP2_STREAM_RESERVED); if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "push response HEADERS: stream_id == 0"); } if (session->server) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "HEADERS: no HEADERS allowed from client in reserved state"); } if (session_is_incoming_concurrent_streams_max(session)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "push response HEADERS: max concurrent streams exceeded"); } if (!session_allow_incoming_new_stream(session)) { /* We don't accept new stream after GOAWAY was sent. */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (session_is_incoming_concurrent_streams_pending_max(session)) { return session_inflate_handle_invalid_stream(session, frame, NGHTTP2_ERR_REFUSED_STREAM); } nghttp2_stream_promise_fulfilled(stream); if (!nghttp2_session_is_my_stream_id(session, stream->stream_id)) { --session->num_incoming_reserved_streams; } ++session->num_incoming_streams; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } int nghttp2_session_on_headers_received(nghttp2_session *session, nghttp2_frame *frame, nghttp2_stream *stream) { int rv = 0; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "HEADERS: stream_id == 0"); } if ((stream->shut_flags & NGHTTP2_SHUT_RD)) { /* half closed (remote): from the spec: If an endpoint receives additional frames for a stream that is in this state it MUST respond with a stream error (Section 5.4.2) of type STREAM_CLOSED. we go further, and make it connection error. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "HEADERS: stream closed"); } if (nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { if (stream->state == NGHTTP2_STREAM_OPENED) { rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } /* If this is remote peer initiated stream, it is OK unless it has sent END_STREAM frame already. But if stream is in NGHTTP2_STREAM_CLOSING, we discard the frame. This is a race condition. 
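For example, we may have just queued RST_STREAM for this stream (moving it to NGHTTP2_STREAM_CLOSING) while the peer's HEADERS were already in flight; silently ignoring them keeps that race benign.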
*/ if (stream->state != NGHTTP2_STREAM_CLOSING) { rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } static int session_process_headers_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_stream *stream; rv = nghttp2_frame_unpack_headers_payload(&frame->headers, iframe->sbuf.pos); if (rv != 0) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: could not unpack"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { frame->headers.cat = NGHTTP2_HCAT_REQUEST; return nghttp2_session_on_request_headers_received(session, frame); } if (stream->state == NGHTTP2_STREAM_RESERVED) { frame->headers.cat = NGHTTP2_HCAT_PUSH_RESPONSE; return nghttp2_session_on_push_response_headers_received(session, frame, stream); } if (stream->state == NGHTTP2_STREAM_OPENING && nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { frame->headers.cat = NGHTTP2_HCAT_RESPONSE; return nghttp2_session_on_response_headers_received(session, frame, stream); } frame->headers.cat = NGHTTP2_HCAT_HEADERS; return nghttp2_session_on_headers_received(session, frame, stream); } int nghttp2_session_on_priority_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "PRIORITY: stream_id == 0"); } if (frame->priority.pri_spec.stream_id == frame->hd.stream_id) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "depend on itself"); } if (!session->server) { /* Re-prioritization works only in server */ return session_call_on_frame_received(session, frame); } stream = nghttp2_session_get_stream_raw(session, frame->hd.stream_id); if (!stream) { /* PRIORITY against idle stream can create anchor node in dependency tree. 
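The stream is opened below in NGHTTP2_STREAM_IDLE state purely to record the priority; nghttp2_session_adjust_idle_stream() then bounds how many such anchors we retain.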
*/ if (!session_detect_idle_stream(session, frame->hd.stream_id)) { return 0; } stream = nghttp2_session_open_stream( session, frame->hd.stream_id, NGHTTP2_STREAM_FLAG_NONE, &frame->priority.pri_spec, NGHTTP2_STREAM_IDLE, NULL); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } } else { rv = nghttp2_session_reprioritize_stream(session, stream, &frame->priority.pri_spec); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } } return session_call_on_frame_received(session, frame); } static int session_process_priority_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_priority_payload(&frame->priority, iframe->sbuf.pos); return nghttp2_session_on_priority_received(session, frame); } int nghttp2_session_on_rst_stream_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (frame->hd.stream_id == 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "RST_STREAM: stream_id == 0"); } if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "RST_STREAM: stream in idle"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (stream) { /* We may use stream->shut_flags for strict error checking. */ nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); } rv = session_call_on_frame_received(session, frame); if (rv != 0) { return rv; } rv = nghttp2_session_close_stream(session, frame->hd.stream_id, frame->rst_stream.error_code); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } static int session_process_rst_stream_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_rst_stream_payload(&frame->rst_stream, iframe->sbuf.pos); return nghttp2_session_on_rst_stream_received(session, frame); } static int update_remote_initial_window_size_func(nghttp2_map_entry *entry, void *ptr) { int rv; nghttp2_update_window_size_arg *arg; nghttp2_stream *stream; arg = (nghttp2_update_window_size_arg *)ptr; stream = (nghttp2_stream *)entry; rv = nghttp2_stream_update_remote_initial_window_size( stream, arg->new_window_size, arg->old_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(arg->session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } /* If window size gets positive, push deferred DATA frame to outbound queue. */ if (stream->remote_window_size > 0 && nghttp2_stream_check_deferred_by_flow_control(stream)) { rv = nghttp2_stream_resume_deferred_item( stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* * Updates the remote initial window size of all active streams. If * error occurs, all streams may not be updated. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. 
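 *
 * Per RFC 7540 section 6.9.2, each stream's window moves by the delta
 * between the new and old initial sizes. For example, with an old
 * initial size of 65535 and 1000 bytes already sent on a stream
 * (remote_window_size == 64535), lowering the initial size to 1000
 * leaves that stream with 64535 + (1000 - 65535) == 0 bytes of
 * window.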
*/ static int session_update_remote_initial_window_size(nghttp2_session *session, int32_t new_initial_window_size) { nghttp2_update_window_size_arg arg; arg.session = session; arg.new_window_size = new_initial_window_size; arg.old_window_size = (int32_t)session->remote_settings.initial_window_size; return nghttp2_map_each(&session->streams, update_remote_initial_window_size_func, &arg); } static int update_local_initial_window_size_func(nghttp2_map_entry *entry, void *ptr) { int rv; nghttp2_update_window_size_arg *arg; nghttp2_stream *stream; arg = (nghttp2_update_window_size_arg *)ptr; stream = (nghttp2_stream *)entry; rv = nghttp2_stream_update_local_initial_window_size( stream, arg->new_window_size, arg->old_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(arg->session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } if (!(arg->session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && stream->window_update_queued == 0 && nghttp2_should_send_window_update(stream->local_window_size, stream->recv_window_size)) { rv = nghttp2_session_add_window_update(arg->session, NGHTTP2_FLAG_NONE, stream->stream_id, stream->recv_window_size); if (rv != 0) { return rv; } stream->recv_window_size = 0; } return 0; } /* * Updates the local initial window size of all active streams. If * error occurs, all streams may not be updated. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_NOMEM * Out of memory. */ static int session_update_local_initial_window_size(nghttp2_session *session, int32_t new_initial_window_size, int32_t old_initial_window_size) { nghttp2_update_window_size_arg arg; arg.session = session; arg.new_window_size = new_initial_window_size; arg.old_window_size = old_initial_window_size; return nghttp2_map_each(&session->streams, update_local_initial_window_size_func, &arg); } /* * Apply SETTINGS values |iv| having |niv| elements to the local * settings. We assumes that all values in |iv| is correct, since we * validated them in nghttp2_session_add_settings() already. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_HEADER_COMP * The header table size is out of range * NGHTTP2_ERR_NOMEM * Out of memory */ int nghttp2_session_update_local_settings(nghttp2_session *session, nghttp2_settings_entry *iv, size_t niv) { int rv; size_t i; int32_t new_initial_window_size = -1; uint32_t header_table_size = 0; uint32_t min_header_table_size = UINT32_MAX; uint8_t header_table_size_seen = 0; /* For NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, use the value last seen. For NGHTTP2_SETTINGS_HEADER_TABLE_SIZE, use both minimum value and last seen value. 
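For example, if our SETTINGS carried HEADER_TABLE_SIZE 0 followed by HEADER_TABLE_SIZE 4096, RFC 7541 obliges the peer's encoder to signal a table-size update down to 0 (evicting every entry) before growing to 4096, so the inflater below is resized to the minimum first and then to the last value; applying only the last value would reject the intermediate update.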
*/ for (i = 0; i < niv; ++i) { switch (iv[i].settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: header_table_size_seen = 1; header_table_size = iv[i].value; min_header_table_size = nghttp2_min(min_header_table_size, iv[i].value); break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: new_initial_window_size = (int32_t)iv[i].value; break; } } if (header_table_size_seen) { if (min_header_table_size < header_table_size) { rv = nghttp2_hd_inflate_change_table_size(&session->hd_inflater, min_header_table_size); if (rv != 0) { return rv; } } rv = nghttp2_hd_inflate_change_table_size(&session->hd_inflater, header_table_size); if (rv != 0) { return rv; } } if (new_initial_window_size != -1) { rv = session_update_local_initial_window_size( session, new_initial_window_size, (int32_t)session->local_settings.initial_window_size); if (rv != 0) { return rv; } } for (i = 0; i < niv; ++i) { switch (iv[i].settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: session->local_settings.header_table_size = iv[i].value; break; case NGHTTP2_SETTINGS_ENABLE_PUSH: session->local_settings.enable_push = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: session->local_settings.max_concurrent_streams = iv[i].value; break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: session->local_settings.initial_window_size = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: session->local_settings.max_frame_size = iv[i].value; break; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: session->local_settings.max_header_list_size = iv[i].value; break; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: session->local_settings.enable_connect_protocol = iv[i].value; break; } } return 0; } int nghttp2_session_on_settings_received(nghttp2_session *session, nghttp2_frame *frame, int noack) { int rv; size_t i; nghttp2_mem *mem; nghttp2_inflight_settings *settings; mem = &session->mem; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: stream_id != 0"); } if (frame->hd.flags & NGHTTP2_FLAG_ACK) { if (frame->settings.niv != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FRAME_SIZE_ERROR, "SETTINGS: ACK and payload != 0"); } settings = session->inflight_settings_head; if (!settings) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: unexpected ACK"); } rv = nghttp2_session_update_local_settings(session, settings->iv, settings->niv); session->inflight_settings_head = settings->next; inflight_settings_del(settings, mem); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } return session_handle_invalid_connection(session, frame, rv, NULL); } return session_call_on_frame_received(session, frame); } if (!session->remote_settings_received) { session->remote_settings.max_concurrent_streams = NGHTTP2_DEFAULT_MAX_CONCURRENT_STREAMS; session->remote_settings_received = 1; } for (i = 0; i < frame->settings.niv; ++i) { nghttp2_settings_entry *entry = &frame->settings.iv[i]; switch (entry->settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: rv = nghttp2_hd_deflate_change_table_size(&session->hd_deflater, entry->value); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } else { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_HEADER_COMP, NULL); } } session->remote_settings.header_table_size = entry->value; break; case NGHTTP2_SETTINGS_ENABLE_PUSH: if (entry->value != 0 && entry->value != 1) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: 
invalid SETTINGS_ENABLE_PUSH"); } if (!session->server && entry->value != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: server attempted to enable push"); } session->remote_settings.enable_push = entry->value; break; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: session->remote_settings.max_concurrent_streams = entry->value; break; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: /* Update the initial window size of all the active streams */ /* Check that initial_window_size < (1u << 31) */ if (entry->value > NGHTTP2_MAX_WINDOW_SIZE) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FLOW_CONTROL, "SETTINGS: too large SETTINGS_INITIAL_WINDOW_SIZE"); } rv = session_update_remote_initial_window_size(session, (int32_t)entry->value); if (nghttp2_is_fatal(rv)) { return rv; } if (rv != 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_FLOW_CONTROL, NULL); } session->remote_settings.initial_window_size = entry->value; break; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: if (entry->value < NGHTTP2_MAX_FRAME_SIZE_MIN || entry->value > NGHTTP2_MAX_FRAME_SIZE_MAX) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: invalid SETTINGS_MAX_FRAME_SIZE"); } session->remote_settings.max_frame_size = entry->value; break; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: session->remote_settings.max_header_list_size = entry->value; break; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: if (entry->value != 0 && entry->value != 1) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: invalid SETTINGS_ENABLE_CONNECT_PROTOCOL"); } if (!session->server && session->remote_settings.enable_connect_protocol && entry->value == 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "SETTINGS: server attempted to disable " "SETTINGS_ENABLE_CONNECT_PROTOCOL"); } session->remote_settings.enable_connect_protocol = entry->value; break; } } if (!noack && !session_is_closing(session)) { rv = nghttp2_session_add_settings(session, NGHTTP2_FLAG_ACK, NULL, 0); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_INTERNAL, NULL); } } return session_call_on_frame_received(session, frame); } static int session_process_settings_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; size_t i; nghttp2_settings_entry min_header_size_entry; if (iframe->max_niv) { min_header_size_entry = iframe->iv[iframe->max_niv - 1]; if (min_header_size_entry.value < UINT32_MAX) { /* If the minimum value is smaller than the last one, SETTINGS_HEADER_TABLE_SIZE must appear at some i < iframe->niv */ for (i = 0; i < iframe->niv; ++i) { if (iframe->iv[i].settings_id == NGHTTP2_SETTINGS_HEADER_TABLE_SIZE) { break; } } assert(i < iframe->niv); if (min_header_size_entry.value != iframe->iv[i].value) { iframe->iv[iframe->niv++] = iframe->iv[i]; iframe->iv[i] = min_header_size_entry; } } } nghttp2_frame_unpack_settings_payload(&frame->settings, iframe->iv, iframe->niv); iframe->iv = NULL; iframe->niv = 0; iframe->max_niv = 0; return nghttp2_session_on_settings_received(session, frame, 0 /* ACK */); } int nghttp2_session_on_push_promise_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; nghttp2_stream *promised_stream; nghttp2_priority_spec pri_spec; if (frame->hd.stream_id == 0) { return session_inflate_handle_invalid_connection( session,
frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: stream_id == 0"); } if (session->server || session->local_settings.enable_push == 0) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: push disabled"); } if (!nghttp2_session_is_my_stream_id(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: invalid stream_id"); } if (!session_allow_incoming_new_stream(session)) { /* We just discard PUSH_PROMISE after GOAWAY was sent */ return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (!session_is_new_peer_stream_id(session, frame->push_promise.promised_stream_id)) { /* The spec says that an endpoint receiving a PUSH_PROMISE with an illegal stream ID must treat it as a connection error of type PROTOCOL_ERROR. */ return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: invalid promised_stream_id"); } if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "PUSH_PROMISE: stream in idle"); } session->last_recv_stream_id = frame->push_promise.promised_stream_id; stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING || !session->pending_enable_push || session->num_incoming_reserved_streams >= session->max_incoming_reserved_streams) { /* Currently, the client does not retain closed streams, so we don't check the NGHTTP2_SHUT_RD condition here. */ rv = nghttp2_session_add_rst_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_CANCEL); if (rv != 0) { return rv; } return NGHTTP2_ERR_IGN_HEADER_BLOCK; } if (stream->shut_flags & NGHTTP2_SHUT_RD) { return session_inflate_handle_invalid_connection( session, frame, NGHTTP2_ERR_STREAM_CLOSED, "PUSH_PROMISE: stream closed"); } nghttp2_priority_spec_init(&pri_spec, stream->stream_id, NGHTTP2_DEFAULT_WEIGHT, 0); promised_stream = nghttp2_session_open_stream( session, frame->push_promise.promised_stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_RESERVED, NULL); if (!promised_stream) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream(), since we don't keep closed streams on the client side */ session->last_proc_stream_id = session->last_recv_stream_id; rv = session_call_on_begin_headers(session, frame); if (rv != 0) { return rv; } return 0; } static int session_process_push_promise_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; rv = nghttp2_frame_unpack_push_promise_payload(&frame->push_promise, iframe->sbuf.pos); if (rv != 0) { return nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: could not unpack"); } return nghttp2_session_on_push_promise_received(session, frame); } int nghttp2_session_on_ping_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "PING: stream_id != 0"); } if ((session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_PING_ACK) == 0 && (frame->hd.flags & NGHTTP2_FLAG_ACK) == 0 && !session_is_closing(session)) { /* Peer sent ping, so ping it back */ rv = nghttp2_session_add_ping(session, NGHTTP2_FLAG_ACK, frame->ping.opaque_data); if (rv != 0) { return rv; } } return session_call_on_frame_received(session, frame); } static int
session_process_ping_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_ping_payload(&frame->ping, iframe->sbuf.pos); return nghttp2_session_on_ping_received(session, frame); } int nghttp2_session_on_goaway_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; if (frame->hd.stream_id != 0) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "GOAWAY: stream_id != 0"); } /* The spec says endpoints MUST NOT increase the value they send in the last stream identifier. */ if ((frame->goaway.last_stream_id > 0 && !nghttp2_session_is_my_stream_id(session, frame->goaway.last_stream_id)) || session->remote_last_stream_id < frame->goaway.last_stream_id) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "GOAWAY: invalid last_stream_id"); } session->goaway_flags |= NGHTTP2_GOAWAY_RECV; session->remote_last_stream_id = frame->goaway.last_stream_id; rv = session_call_on_frame_received(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } return session_close_stream_on_goaway(session, frame->goaway.last_stream_id, 0); } static int session_process_goaway_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_goaway_payload(&frame->goaway, iframe->sbuf.pos, iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf)); nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); return nghttp2_session_on_goaway_received(session, frame); } static int session_on_connection_window_update_received(nghttp2_session *session, nghttp2_frame *frame) { /* Handle connection-level flow control */ if (frame->window_update.window_size_increment == 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "WINDOW_UPDATE: window_size_increment == 0"); } if (NGHTTP2_MAX_WINDOW_SIZE - frame->window_update.window_size_increment < session->remote_window_size) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_FLOW_CONTROL, NULL); } session->remote_window_size += frame->window_update.window_size_increment; return session_call_on_frame_received(session, frame); } static int session_on_stream_window_update_received(nghttp2_session *session, nghttp2_frame *frame) { int rv; nghttp2_stream *stream; if (session_detect_idle_stream(session, frame->hd.stream_id)) { return session_handle_invalid_connection(session, frame, NGHTTP2_ERR_PROTO, "WINDOW_UPDATE to idle stream"); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } if (state_reserved_remote(session, stream)) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "WINDOW_UPDATE to reserved stream"); } if (frame->window_update.window_size_increment == 0) { return session_handle_invalid_connection( session, frame, NGHTTP2_ERR_PROTO, "WINDOW_UPDATE: window_size_increment == 0"); } if (NGHTTP2_MAX_WINDOW_SIZE - frame->window_update.window_size_increment < stream->remote_window_size) { return session_handle_invalid_stream(session, frame, NGHTTP2_ERR_FLOW_CONTROL); } stream->remote_window_size += frame->window_update.window_size_increment; if (stream->remote_window_size > 0 && nghttp2_stream_check_deferred_by_flow_control(stream)) { rv = nghttp2_stream_resume_deferred_item( stream, NGHTTP2_STREAM_FLAG_DEFERRED_FLOW_CONTROL); if (nghttp2_is_fatal(rv)) { return rv; } } return session_call_on_frame_received(session, frame); } int
nghttp2_session_on_window_update_received(nghttp2_session *session, nghttp2_frame *frame) { if (frame->hd.stream_id == 0) { return session_on_connection_window_update_received(session, frame); } else { return session_on_stream_window_update_received(session, frame); } } static int session_process_window_update_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_window_update_payload(&frame->window_update, iframe->sbuf.pos); return nghttp2_session_on_window_update_received(session, frame); } int nghttp2_session_on_altsvc_received(nghttp2_session *session, nghttp2_frame *frame) { nghttp2_ext_altsvc *altsvc; nghttp2_stream *stream; altsvc = frame->ext.payload; /* session->server case has been excluded */ if (frame->hd.stream_id == 0) { if (altsvc->origin_len == 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } } else { if (altsvc->origin_len > 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream) { return 0; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return 0; } } if (altsvc->field_value_len == 0) { return session_call_on_invalid_frame_recv_callback(session, frame, NGHTTP2_ERR_PROTO); } return session_call_on_frame_received(session, frame); } int nghttp2_session_on_origin_received(nghttp2_session *session, nghttp2_frame *frame) { return session_call_on_frame_received(session, frame); } static int session_process_altsvc_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_frame_unpack_altsvc_payload( &frame->ext, nghttp2_get_uint16(iframe->sbuf.pos), iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf)); /* nghttp2_frame_unpack_altsvc_payload steals buffer from iframe->lbuf */ nghttp2_buf_wrap_init(&iframe->lbuf, NULL, 0); return nghttp2_session_on_altsvc_received(session, frame); } static int session_process_origin_frame(nghttp2_session *session) { nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; nghttp2_mem *mem = &session->mem; int rv; rv = nghttp2_frame_unpack_origin_payload(&frame->ext, iframe->lbuf.pos, nghttp2_buf_len(&iframe->lbuf), mem); if (rv != 0) { if (nghttp2_is_fatal(rv)) { return rv; } /* Ignore an ORIGIN frame which cannot be parsed. */ return 0; } return nghttp2_session_on_origin_received(session, frame); } static int session_process_extension_frame(nghttp2_session *session) { int rv; nghttp2_inbound_frame *iframe = &session->iframe; nghttp2_frame *frame = &iframe->frame; rv = session_call_unpack_extension_callback(session); if (nghttp2_is_fatal(rv)) { return rv; } /* This handles the case where rv == NGHTTP2_ERR_CANCEL as well */ if (rv != 0) { return 0; } return session_call_on_frame_received(session, frame); } int nghttp2_session_on_data_received(nghttp2_session *session, nghttp2_frame *frame) { int rv = 0; nghttp2_stream *stream; /* We don't call on_frame_recv_callback if the stream has already been closed or is being closed. */ stream = nghttp2_session_get_stream(session, frame->hd.stream_id); if (!stream || stream->state == NGHTTP2_STREAM_CLOSING) { /* This should be treated as a stream error, but that would result in lots of RST_STREAM frames. So just ignore frames against nonexistent streams for now.
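A peer that kept sending DATA on streams we have already forgotten would otherwise make us emit one RST_STREAM per frame, turning each stray chunk into extra outbound traffic.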
*/ return 0; } if (session_enforce_http_messaging(session) && (frame->hd.flags & NGHTTP2_FLAG_END_STREAM)) { if (nghttp2_http_on_remote_end_stream(stream) != 0) { rv = nghttp2_session_add_rst_stream(session, stream->stream_id, NGHTTP2_PROTOCOL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); /* Don't call nghttp2_session_close_stream_if_shut_rdwr because RST_STREAM has been submitted. */ return 0; } } rv = session_call_on_frame_received(session, frame); if (nghttp2_is_fatal(rv)) { return rv; } if (frame->hd.flags & NGHTTP2_FLAG_END_STREAM) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); rv = nghttp2_session_close_stream_if_shut_rdwr(session, stream); if (nghttp2_is_fatal(rv)) { return rv; } } return 0; } /* For errors, this function returns only a FATAL error. */ static int session_process_data_frame(nghttp2_session *session) { int rv; nghttp2_frame *public_data_frame = &session->iframe.frame; rv = nghttp2_session_on_data_received(session, public_data_frame); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } /* * Now that we have SETTINGS synchronization, flow control errors can be * detected strictly. If a DATA frame is received with length > 0 and * the current received window size + delta length is strictly larger than * the local window size, it is subject to FLOW_CONTROL_ERROR, so return * -1. Note that local_window_size is calculated after the SETTINGS ACK is * received from the peer, so the peer must honor this limit. If the resulting * recv_window_size is strictly larger than NGHTTP2_MAX_WINDOW_SIZE, * return -1 too. */ static int adjust_recv_window_size(int32_t *recv_window_size_ptr, size_t delta, int32_t local_window_size) { if (*recv_window_size_ptr > local_window_size - (int32_t)delta || *recv_window_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - (int32_t)delta) { return -1; } *recv_window_size_ptr += (int32_t)delta; return 0; } int nghttp2_session_update_recv_stream_window_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size, int send_window_update) { int rv; rv = adjust_recv_window_size(&stream->recv_window_size, delta_size, stream->local_window_size); if (rv != 0) { return nghttp2_session_add_rst_stream(session, stream->stream_id, NGHTTP2_FLOW_CONTROL_ERROR); } /* We don't have to send WINDOW_UPDATE if the data received is the last chunk in the incoming stream. */ /* We have to use local_settings here because it is the constraint the remote endpoint should honor.
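As a concrete illustration (figures are hypothetical and assume the default 65535-byte initial window; the exact trigger lives in nghttp2_should_send_window_update()): once roughly half the window, about 32 KiB, sits unacknowledged in recv_window_size, the block below queues a WINDOW_UPDATE carrying that amount and resets the counter to 0.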
*/ if (send_window_update && !(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && stream->window_update_queued == 0 && nghttp2_should_send_window_update(stream->local_window_size, stream->recv_window_size)) { rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, stream->stream_id, stream->recv_window_size); if (rv != 0) { return rv; } stream->recv_window_size = 0; } return 0; } int nghttp2_session_update_recv_connection_window_size(nghttp2_session *session, size_t delta_size) { int rv; rv = adjust_recv_window_size(&session->recv_window_size, delta_size, session->local_window_size); if (rv != 0) { return nghttp2_session_terminate_session(session, NGHTTP2_FLOW_CONTROL_ERROR); } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) && session->window_update_queued == 0 && nghttp2_should_send_window_update(session->local_window_size, session->recv_window_size)) { /* Use stream ID 0 to update connection-level flow control window */ rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, 0, session->recv_window_size); if (rv != 0) { return rv; } session->recv_window_size = 0; } return 0; } static int session_update_consumed_size(nghttp2_session *session, int32_t *consumed_size_ptr, int32_t *recv_window_size_ptr, uint8_t window_update_queued, int32_t stream_id, size_t delta_size, int32_t local_window_size) { int32_t recv_size; int rv; if ((size_t)*consumed_size_ptr > NGHTTP2_MAX_WINDOW_SIZE - delta_size) { return nghttp2_session_terminate_session(session, NGHTTP2_FLOW_CONTROL_ERROR); } *consumed_size_ptr += (int32_t)delta_size; if (window_update_queued == 0) { /* recv_window_size may be smaller than consumed_size, because it may be decreased by a negative value with nghttp2_submit_window_update(). */ recv_size = nghttp2_min(*consumed_size_ptr, *recv_window_size_ptr); if (nghttp2_should_send_window_update(local_window_size, recv_size)) { rv = nghttp2_session_add_window_update(session, NGHTTP2_FLAG_NONE, stream_id, recv_size); if (rv != 0) { return rv; } *recv_window_size_ptr -= recv_size; *consumed_size_ptr -= recv_size; } } return 0; } static int session_update_stream_consumed_size(nghttp2_session *session, nghttp2_stream *stream, size_t delta_size) { return session_update_consumed_size( session, &stream->consumed_size, &stream->recv_window_size, stream->window_update_queued, stream->stream_id, delta_size, stream->local_window_size); } static int session_update_connection_consumed_size(nghttp2_session *session, size_t delta_size) { return session_update_consumed_size( session, &session->consumed_size, &session->recv_window_size, session->window_update_queued, 0, delta_size, session->local_window_size); } /* * Checks that we can receive the DATA frame for the stream indicated * by |session->iframe.frame.hd.stream_id|. If it is a connection * error situation, a GOAWAY frame will be issued by this function. * * If the DATA frame is allowed, returns 0. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGHTTP2_ERR_IGN_PAYLOAD * The reception of the DATA frame is a connection error, or the * frame should be ignored. * NGHTTP2_ERR_NOMEM * Out of memory.
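* * Illustrative call flow (summarizing the state machine below): nghttp2_session_mem_recv() invokes this check from NGHTTP2_IB_READ_HEAD before consuming any DATA payload; on NGHTTP2_ERR_IGN_PAYLOAD the inbound state drops into NGHTTP2_IB_IGN_DATA, where the bytes still count against connection-level flow control but are never delivered to the application.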
*/ static int session_on_data_received_fail_fast(nghttp2_session *session) { int rv; nghttp2_stream *stream; nghttp2_inbound_frame *iframe; int32_t stream_id; const char *failure_reason; uint32_t error_code = NGHTTP2_PROTOCOL_ERROR; iframe = &session->iframe; stream_id = iframe->frame.hd.stream_id; if (stream_id == 0) { /* The spec says that if a DATA frame is received whose stream ID is 0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. */ failure_reason = "DATA: stream_id == 0"; goto fail; } if (session_detect_idle_stream(session, stream_id)) { failure_reason = "DATA: stream in idle"; error_code = NGHTTP2_PROTOCOL_ERROR; goto fail; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream && (stream->shut_flags & NGHTTP2_SHUT_RD)) { failure_reason = "DATA: stream closed"; error_code = NGHTTP2_STREAM_CLOSED; goto fail; } return NGHTTP2_ERR_IGN_PAYLOAD; } if (stream->shut_flags & NGHTTP2_SHUT_RD) { failure_reason = "DATA: stream in half-closed(remote)"; error_code = NGHTTP2_STREAM_CLOSED; goto fail; } if (nghttp2_session_is_my_stream_id(session, stream_id)) { if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_IGN_PAYLOAD; } if (stream->state != NGHTTP2_STREAM_OPENED) { failure_reason = "DATA: stream not opened"; goto fail; } return 0; } if (stream->state == NGHTTP2_STREAM_RESERVED) { failure_reason = "DATA: stream in reserved"; goto fail; } if (stream->state == NGHTTP2_STREAM_CLOSING) { return NGHTTP2_ERR_IGN_PAYLOAD; } return 0; fail: rv = nghttp2_session_terminate_session_with_reason(session, error_code, failure_reason); if (nghttp2_is_fatal(rv)) { return rv; } return NGHTTP2_ERR_IGN_PAYLOAD; } static size_t inbound_frame_payload_readlen(nghttp2_inbound_frame *iframe, const uint8_t *in, const uint8_t *last) { return nghttp2_min((size_t)(last - in), iframe->payloadleft); } /* * Resets iframe->sbuf and advances its mark pointer by |left| bytes. */ static void inbound_frame_set_mark(nghttp2_inbound_frame *iframe, size_t left) { nghttp2_buf_reset(&iframe->sbuf); iframe->sbuf.mark += left; } static size_t inbound_frame_buf_read(nghttp2_inbound_frame *iframe, const uint8_t *in, const uint8_t *last) { size_t readlen; readlen = nghttp2_min((size_t)(last - in), nghttp2_buf_mark_avail(&iframe->sbuf)); iframe->sbuf.last = nghttp2_cpymem(iframe->sbuf.last, in, readlen); return readlen; } /* * Unpacks a SETTINGS entry in iframe->sbuf.
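* Duplicate settings IDs within one frame collapse to the last occurrence, while the reserved trailing slot iframe->iv[iframe->max_niv - 1] tracks the smallest SETTINGS_HEADER_TABLE_SIZE seen. Illustrative example: a frame carrying HEADER_TABLE_SIZE=0 followed by HEADER_TABLE_SIZE=4096 makes session_process_settings_frame() shrink the HPACK table to 0 (evicting every entry) before growing it back to 4096, mirroring the order in which the peer's encoder applied the updates.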
*/ static void inbound_frame_set_settings_entry(nghttp2_inbound_frame *iframe) { nghttp2_settings_entry iv; nghttp2_settings_entry *min_header_table_size_entry; size_t i; nghttp2_frame_unpack_settings_entry(&iv, iframe->sbuf.pos); switch (iv.settings_id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: case NGHTTP2_SETTINGS_ENABLE_PUSH: case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: break; default: DEBUGF("recv: unknown settings id=0x%02x\n", iv.settings_id); iframe->iv[iframe->niv++] = iv; return; } for (i = 0; i < iframe->niv; ++i) { if (iframe->iv[i].settings_id == iv.settings_id) { iframe->iv[i] = iv; break; } } if (i == iframe->niv) { iframe->iv[iframe->niv++] = iv; } if (iv.settings_id == NGHTTP2_SETTINGS_HEADER_TABLE_SIZE) { /* Keep track of the minimum value of SETTINGS_HEADER_TABLE_SIZE */ min_header_table_size_entry = &iframe->iv[iframe->max_niv - 1]; if (iv.value < min_header_table_size_entry->value) { min_header_table_size_entry->value = iv.value; } } } /* * Checks the PADDED flag and sets iframe->sbuf to read the Pad Length * field accordingly. If padding is set, this function returns 1. If no * padding is set, this function returns 0. On error, returns -1. */ static int inbound_frame_handle_pad(nghttp2_inbound_frame *iframe, nghttp2_frame_hd *hd) { if (hd->flags & NGHTTP2_FLAG_PADDED) { if (hd->length < 1) { return -1; } inbound_frame_set_mark(iframe, 1); return 1; } DEBUGF("recv: no padding in payload\n"); return 0; } /* * Computes the number of padding bytes based on flags. This function * returns the calculated length if it succeeds, or -1. */ static ssize_t inbound_frame_compute_pad(nghttp2_inbound_frame *iframe) { size_t padlen; /* 1 for Pad Length field */ padlen = (size_t)(iframe->sbuf.pos[0] + 1); DEBUGF("recv: padlen=%zu\n", padlen); /* We cannot use iframe->frame.hd.length because of CONTINUATION */ if (padlen - 1 > iframe->payloadleft) { return -1; } iframe->padlen = padlen; return (ssize_t)padlen; } /* * This function returns the effective payload length in the data of * length |readlen| when the remaining payload is |payloadleft|. The * |payloadleft| does not include |readlen|. If padding was started * strictly before this data chunk, this function returns -1. */ static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe, size_t payloadleft, size_t readlen) { size_t trail_padlen = nghttp2_frame_trail_padlen(&iframe->frame, iframe->padlen); if (trail_padlen > payloadleft) { size_t padlen; padlen = trail_padlen - payloadleft; if (readlen < padlen) { return -1; } return (ssize_t)(readlen - padlen); } return (ssize_t)(readlen); } ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, size_t inlen) { const uint8_t *first = in, *last = in + inlen; nghttp2_inbound_frame *iframe = &session->iframe; size_t readlen; ssize_t padlen; int rv; int busy = 0; nghttp2_frame_hd cont_hd; nghttp2_stream *stream; size_t pri_fieldlen; nghttp2_mem *mem; DEBUGF("recv: connection recv_window_size=%d, local_window=%d\n", session->recv_window_size, session->local_window_size); mem = &session->mem; /* We may have more idle streams than we expect (e.g., nghttp2_session_change_stream_priority() or nghttp2_session_create_idle_stream()). Adjust them here.
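nghttp2_session_adjust_idle_stream() trims the idle-stream set back under its internal cap before any new input is parsed.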
*/ rv = nghttp2_session_adjust_idle_stream(session); if (nghttp2_is_fatal(rv)) { return rv; } if (!nghttp2_session_want_read(session)) { return (ssize_t)inlen; } for (;;) { switch (iframe->state) { case NGHTTP2_IB_READ_CLIENT_MAGIC: readlen = nghttp2_min(inlen, iframe->payloadleft); if (memcmp(&NGHTTP2_CLIENT_MAGIC[NGHTTP2_CLIENT_MAGIC_LEN - iframe->payloadleft], in, readlen) != 0) { return NGHTTP2_ERR_BAD_CLIENT_MAGIC; } iframe->payloadleft -= readlen; in += readlen; if (iframe->payloadleft == 0) { session_inbound_frame_reset(session); iframe->state = NGHTTP2_IB_READ_FIRST_SETTINGS; } break; case NGHTTP2_IB_READ_FIRST_SETTINGS: DEBUGF("recv: [IB_READ_FIRST_SETTINGS]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } if (iframe->sbuf.pos[3] != NGHTTP2_SETTINGS || (iframe->sbuf.pos[4] & NGHTTP2_FLAG_ACK)) { rv = session_call_error_callback( session, NGHTTP2_ERR_SETTINGS_EXPECTED, "Remote peer returned unexpected data while we expected " "SETTINGS frame. Perhaps, peer does not support HTTP/2 " "properly."); if (nghttp2_is_fatal(rv)) { return rv; } rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "SETTINGS expected"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->state = NGHTTP2_IB_READ_HEAD; /* Fall through */ case NGHTTP2_IB_READ_HEAD: { int on_begin_frame_called = 0; DEBUGF("recv: [IB_READ_HEAD]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } nghttp2_frame_unpack_frame_hd(&iframe->frame.hd, iframe->sbuf.pos); iframe->payloadleft = iframe->frame.hd.length; DEBUGF("recv: payloadlen=%zu, type=%u, flags=0x%02x, stream_id=%d\n", iframe->frame.hd.length, iframe->frame.hd.type, iframe->frame.hd.flags, iframe->frame.hd.stream_id); if (iframe->frame.hd.length > session->local_settings.max_frame_size) { DEBUGF("recv: length is too large %zu > %u\n", iframe->frame.hd.length, session->local_settings.max_frame_size); rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_FRAME_SIZE_ERROR, "too large frame size"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } switch (iframe->frame.hd.type) { case NGHTTP2_DATA: { DEBUGF("recv: DATA\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_STREAM | NGHTTP2_FLAG_PADDED); /* Check stream is open. If it is not open or closing, ignore payload. 
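session_on_data_received_fail_fast() below performs that check and may instead terminate the whole connection for hard protocol violations such as DATA on stream 0.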
*/ busy = 1; rv = session_on_data_received_fail_fast(session); if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_IGN_PAYLOAD) { DEBUGF("recv: DATA not allowed stream_id=%d\n", iframe->frame.hd.stream_id); iframe->state = NGHTTP2_IB_IGN_DATA; break; } if (nghttp2_is_fatal(rv)) { return rv; } rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "DATA: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_PAD_DATA; break; } iframe->state = NGHTTP2_IB_READ_DATA; break; } case NGHTTP2_HEADERS: DEBUGF("recv: HEADERS\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_STREAM | NGHTTP2_FLAG_END_HEADERS | NGHTTP2_FLAG_PADDED | NGHTTP2_FLAG_PRIORITY); rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_NBYTE; break; } pri_fieldlen = nghttp2_frame_priority_len(iframe->frame.hd.flags); if (pri_fieldlen > 0) { if (iframe->payloadleft < pri_fieldlen) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, pri_fieldlen); break; } /* Call on_begin_frame_callback here because session_process_headers_frame() may call on_begin_headers_callback */ rv = session_call_on_begin_frame(session, &iframe->frame.hd); if (nghttp2_is_fatal(rv)) { return rv; } on_begin_frame_called = 1; rv = session_process_headers_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PRIORITY: DEBUGF("recv: PRIORITY\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft != NGHTTP2_PRIORITY_SPECLEN) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, NGHTTP2_PRIORITY_SPECLEN); break; case NGHTTP2_RST_STREAM: case NGHTTP2_WINDOW_UPDATE: #ifdef DEBUGBUILD switch (iframe->frame.hd.type) { case NGHTTP2_RST_STREAM: DEBUGF("recv: RST_STREAM\n"); break; case NGHTTP2_WINDOW_UPDATE: DEBUGF("recv: WINDOW_UPDATE\n"); break; } #endif /* DEBUGBUILD */ iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft != 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; case NGHTTP2_SETTINGS: DEBUGF("recv: SETTINGS\n"); iframe->frame.hd.flags &= NGHTTP2_FLAG_ACK; if ((iframe->frame.hd.length % NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH) || ((iframe->frame.hd.flags & NGHTTP2_FLAG_ACK) && iframe->payloadleft > 0)) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_SETTINGS; if (iframe->payloadleft) { nghttp2_settings_entry *min_header_table_size_entry; /* We allocate iv with 
one additional entry, to store the minimum header table size. */ iframe->max_niv = iframe->frame.hd.length / NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH + 1; if (iframe->max_niv - 1 > session->max_settings) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_ENHANCE_YOUR_CALM, "SETTINGS: too many setting entries"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->iv = nghttp2_mem_malloc(mem, sizeof(nghttp2_settings_entry) * iframe->max_niv); if (!iframe->iv) { return NGHTTP2_ERR_NOMEM; } min_header_table_size_entry = &iframe->iv[iframe->max_niv - 1]; min_header_table_size_entry->settings_id = NGHTTP2_SETTINGS_HEADER_TABLE_SIZE; min_header_table_size_entry->value = UINT32_MAX; inbound_frame_set_mark(iframe, NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH); break; } busy = 1; inbound_frame_set_mark(iframe, 0); break; case NGHTTP2_PUSH_PROMISE: DEBUGF("recv: PUSH_PROMISE\n"); iframe->frame.hd.flags &= (NGHTTP2_FLAG_END_HEADERS | NGHTTP2_FLAG_PADDED); rv = inbound_frame_handle_pad(iframe, &iframe->frame.hd); if (rv < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: insufficient padding space"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } if (rv == 1) { iframe->state = NGHTTP2_IB_READ_NBYTE; break; } if (iframe->payloadleft < 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; case NGHTTP2_PING: DEBUGF("recv: PING\n"); iframe->frame.hd.flags &= NGHTTP2_FLAG_ACK; if (iframe->payloadleft != 8) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 8); break; case NGHTTP2_GOAWAY: DEBUGF("recv: GOAWAY\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft < 8) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 8); break; case NGHTTP2_CONTINUATION: DEBUGF("recv: unexpected CONTINUATION\n"); /* Receiving CONTINUATION in this state is subject to a connection error of type PROTOCOL_ERROR */ rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "CONTINUATION: unexpected"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; default: DEBUGF("recv: extension frame\n"); if (check_ext_type_set(session->user_recv_ext_types, iframe->frame.hd.type)) { if (!session->callbacks.unpack_extension_callback) { /* Silently ignore unknown frame type.
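HTTP/2 (RFC 7540, section 4.1) requires implementations to ignore and discard frames of unknown type, so without an unpack_extension_callback the payload is simply skipped.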
*/ busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } busy = 1; iframe->state = NGHTTP2_IB_READ_EXTENSION_PAYLOAD; break; } else { switch (iframe->frame.hd.type) { case NGHTTP2_ALTSVC: if ((session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ALTSVC) == 0) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } DEBUGF("recv: ALTSVC\n"); iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; iframe->frame.ext.payload = &iframe->ext_frame_payload.altsvc; if (session->server) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } if (iframe->payloadleft < 2) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } busy = 1; iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 2); break; case NGHTTP2_ORIGIN: if (!(session->builtin_recv_ext_types & NGHTTP2_TYPEMASK_ORIGIN)) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } DEBUGF("recv: ORIGIN\n"); iframe->frame.ext.payload = &iframe->ext_frame_payload.origin; if (session->server || iframe->frame.hd.stream_id || (iframe->frame.hd.flags & 0xf0)) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } iframe->frame.hd.flags = NGHTTP2_FLAG_NONE; if (iframe->payloadleft) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, iframe->payloadleft); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, iframe->payloadleft); } else { busy = 1; } iframe->state = NGHTTP2_IB_READ_ORIGIN_PAYLOAD; break; default: busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } } } if (!on_begin_frame_called) { switch (iframe->state) { case NGHTTP2_IB_IGN_HEADER_BLOCK: case NGHTTP2_IB_IGN_PAYLOAD: case NGHTTP2_IB_FRAME_SIZE_ERROR: case NGHTTP2_IB_IGN_DATA: case NGHTTP2_IB_IGN_ALL: break; default: rv = session_call_on_begin_frame(session, &iframe->frame.hd); if (nghttp2_is_fatal(rv)) { return rv; } } } break; } case NGHTTP2_IB_READ_NBYTE: DEBUGF("recv: [IB_READ_NBYTE]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; iframe->payloadleft -= readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zd\n", readlen, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } switch (iframe->frame.hd.type) { case NGHTTP2_HEADERS: if (iframe->padlen == 0 && (iframe->frame.hd.flags & NGHTTP2_FLAG_PADDED)) { pri_fieldlen = nghttp2_frame_priority_len(iframe->frame.hd.flags); padlen = inbound_frame_compute_pad(iframe); if (padlen < 0 || (size_t)padlen + pri_fieldlen > 1 + iframe->payloadleft) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "HEADERS: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.headers.padlen = (size_t)padlen; if (pri_fieldlen > 0) { if (iframe->payloadleft < pri_fieldlen) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, pri_fieldlen); break; } else { /* Truncate buffers used for padding spec */ inbound_frame_set_mark(iframe, 0); } } rv = session_process_headers_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = 
NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PRIORITY: rv = session_process_priority_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_RST_STREAM: rv = session_process_rst_stream_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_PUSH_PROMISE: if (iframe->padlen == 0 && (iframe->frame.hd.flags & NGHTTP2_FLAG_PADDED)) { padlen = inbound_frame_compute_pad(iframe); if (padlen < 0 || (size_t)padlen + 4 /* promised stream id */ > 1 + iframe->payloadleft) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "PUSH_PROMISE: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.push_promise.padlen = (size_t)padlen; if (iframe->payloadleft < 4) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } iframe->state = NGHTTP2_IB_READ_NBYTE; inbound_frame_set_mark(iframe, 4); break; } rv = session_process_push_promise_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { rv = nghttp2_session_add_rst_stream( session, iframe->frame.push_promise.promised_stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } if (rv == NGHTTP2_ERR_IGN_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; break; case NGHTTP2_PING: rv = session_process_ping_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_GOAWAY: { size_t debuglen; /* 8 is Last-stream-ID + Error Code */ debuglen = iframe->frame.hd.length - 8; if (debuglen > 0) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, debuglen); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, debuglen); } busy = 1; iframe->state = NGHTTP2_IB_READ_GOAWAY_DEBUG; break; } case NGHTTP2_WINDOW_UPDATE: rv = session_process_window_update_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_ALTSVC: { size_t origin_len; origin_len = nghttp2_get_uint16(iframe->sbuf.pos); DEBUGF("recv: origin_len=%zu\n", origin_len); if (origin_len > iframe->payloadleft) { busy = 1; iframe->state = NGHTTP2_IB_FRAME_SIZE_ERROR; break; } if (iframe->frame.hd.length > 2) { iframe->raw_lbuf = nghttp2_mem_malloc(mem, iframe->frame.hd.length - 2); if (iframe->raw_lbuf == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_buf_wrap_init(&iframe->lbuf, iframe->raw_lbuf, iframe->frame.hd.length); } busy = 1; iframe->state = NGHTTP2_IB_READ_ALTSVC_PAYLOAD; break; } default: /* This is unknown frame */ session_inbound_frame_reset(session); break; } break; case NGHTTP2_IB_READ_HEADER_BLOCK: case NGHTTP2_IB_IGN_HEADER_BLOCK: { ssize_t data_readlen; size_t trail_padlen; int final; #ifdef DEBUGBUILD if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { DEBUGF("recv: [IB_READ_HEADER_BLOCK]\n"); } else { DEBUGF("recv: [IB_IGN_HEADER_BLOCK]\n"); } 
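/* In the ignore variant the block is still run through the HPACK inflater so the dynamic table stays in sync with the peer; only the header and frame callbacks are suppressed. */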
#endif /* DEBUGBUILD */ readlen = inbound_frame_payload_readlen(iframe, in, last); DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft - readlen); data_readlen = inbound_frame_effective_readlen( iframe, iframe->payloadleft - readlen, readlen); if (data_readlen == -1) { /* everything is padding */ data_readlen = 0; } trail_padlen = nghttp2_frame_trail_padlen(&iframe->frame, iframe->padlen); final = (iframe->frame.hd.flags & NGHTTP2_FLAG_END_HEADERS) && iframe->payloadleft - (size_t)data_readlen == trail_padlen; if (data_readlen > 0 || (data_readlen == 0 && final)) { size_t hd_proclen = 0; DEBUGF("recv: block final=%d\n", final); rv = inflate_header_block(session, &iframe->frame, &hd_proclen, (uint8_t *)in, (size_t)data_readlen, final, iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } if (rv == NGHTTP2_ERR_PAUSE) { in += hd_proclen; iframe->payloadleft -= hd_proclen; return in - first; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { /* The application says no more headers. We decompress the rest of the header block but not invoke on_header_callback and on_frame_recv_callback. */ in += hd_proclen; iframe->payloadleft -= hd_proclen; /* Use promised stream ID for PUSH_PROMISE */ rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.type == NGHTTP2_PUSH_PROMISE ? iframe->frame.push_promise.promised_stream_id : iframe->frame.hd.stream_id, NGHTTP2_INTERNAL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; break; } in += readlen; iframe->payloadleft -= readlen; if (rv == NGHTTP2_ERR_HEADER_COMP) { /* GOAWAY is already issued */ if (iframe->payloadleft == 0) { session_inbound_frame_reset(session); } else { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; } break; } } else { in += readlen; iframe->payloadleft -= readlen; } if (iframe->payloadleft) { break; } if ((iframe->frame.hd.flags & NGHTTP2_FLAG_END_HEADERS) == 0) { inbound_frame_set_mark(iframe, NGHTTP2_FRAME_HDLEN); iframe->padlen = 0; if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { iframe->state = NGHTTP2_IB_EXPECT_CONTINUATION; } else { iframe->state = NGHTTP2_IB_IGN_CONTINUATION; } } else { if (iframe->state == NGHTTP2_IB_READ_HEADER_BLOCK) { rv = session_after_header_block_received(session); if (nghttp2_is_fatal(rv)) { return rv; } } session_inbound_frame_reset(session); } break; } case NGHTTP2_IB_IGN_PAYLOAD: DEBUGF("recv: [IB_IGN_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { break; } switch (iframe->frame.hd.type) { case NGHTTP2_HEADERS: case NGHTTP2_PUSH_PROMISE: case NGHTTP2_CONTINUATION: /* Mark inflater bad so that we won't perform further decoding */ session->hd_inflater.ctx.bad = 1; break; default: break; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_FRAME_SIZE_ERROR: DEBUGF("recv: [IB_FRAME_SIZE_ERROR]\n"); rv = session_handle_frame_size_error(session); if (nghttp2_is_fatal(rv)) { return rv; } assert(iframe->state == NGHTTP2_IB_IGN_ALL); return (ssize_t)inlen; case NGHTTP2_IB_READ_SETTINGS: DEBUGF("recv: [IB_READ_SETTINGS]\n"); readlen = inbound_frame_buf_read(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if 
(nghttp2_buf_mark_avail(&iframe->sbuf)) { break; } if (readlen > 0) { inbound_frame_set_settings_entry(iframe); } if (iframe->payloadleft) { inbound_frame_set_mark(iframe, NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH); break; } rv = session_process_settings_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_GOAWAY_DEBUG: DEBUGF("recv: [IB_READ_GOAWAY_DEBUG]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_goaway_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_EXPECT_CONTINUATION: case NGHTTP2_IB_IGN_CONTINUATION: #ifdef DEBUGBUILD if (iframe->state == NGHTTP2_IB_EXPECT_CONTINUATION) { fprintf(stderr, "recv: [IB_EXPECT_CONTINUATION]\n"); } else { fprintf(stderr, "recv: [IB_IGN_CONTINUATION]\n"); } #endif /* DEBUGBUILD */ readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } nghttp2_frame_unpack_frame_hd(&cont_hd, iframe->sbuf.pos); iframe->payloadleft = cont_hd.length; DEBUGF("recv: payloadlen=%zu, type=%u, flags=0x%02x, stream_id=%d\n", cont_hd.length, cont_hd.type, cont_hd.flags, cont_hd.stream_id); if (cont_hd.type != NGHTTP2_CONTINUATION || cont_hd.stream_id != iframe->frame.hd.stream_id) { DEBUGF("recv: expected stream_id=%d, type=%d, but got stream_id=%d, " "type=%u\n", iframe->frame.hd.stream_id, NGHTTP2_CONTINUATION, cont_hd.stream_id, cont_hd.type); rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "unexpected non-CONTINUATION frame or stream_id is invalid"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } /* CONTINUATION won't bear NGHTTP2_PADDED flag */ iframe->frame.hd.flags = (uint8_t)( iframe->frame.hd.flags | (cont_hd.flags & NGHTTP2_FLAG_END_HEADERS)); iframe->frame.hd.length += cont_hd.length; busy = 1; if (iframe->state == NGHTTP2_IB_EXPECT_CONTINUATION) { iframe->state = NGHTTP2_IB_READ_HEADER_BLOCK; rv = session_call_on_begin_frame(session, &cont_hd); if (nghttp2_is_fatal(rv)) { return rv; } } else { iframe->state = NGHTTP2_IB_IGN_HEADER_BLOCK; } break; case NGHTTP2_IB_READ_PAD_DATA: DEBUGF("recv: [IB_READ_PAD_DATA]\n"); readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; iframe->payloadleft -= readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zu\n", readlen, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { return in - first; } /* Pad Length field is subject to flow control */ rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } /* Pad Length field is consumed immediately */ rv = nghttp2_session_consume(session, iframe->frame.hd.stream_id, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } stream = nghttp2_session_get_stream(session, iframe->frame.hd.stream_id); if (stream) { 
rv = nghttp2_session_update_recv_stream_window_size( session, stream, readlen, iframe->payloadleft || (iframe->frame.hd.flags & NGHTTP2_FLAG_END_STREAM) == 0); if (nghttp2_is_fatal(rv)) { return rv; } } busy = 1; padlen = inbound_frame_compute_pad(iframe); if (padlen < 0) { rv = nghttp2_session_terminate_session_with_reason( session, NGHTTP2_PROTOCOL_ERROR, "DATA: invalid padding"); if (nghttp2_is_fatal(rv)) { return rv; } return (ssize_t)inlen; } iframe->frame.data.padlen = (size_t)padlen; iframe->state = NGHTTP2_IB_READ_DATA; break; case NGHTTP2_IB_READ_DATA: stream = nghttp2_session_get_stream(session, iframe->frame.hd.stream_id); if (!stream) { busy = 1; iframe->state = NGHTTP2_IB_IGN_DATA; break; } DEBUGF("recv: [IB_READ_DATA]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { ssize_t data_readlen; rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } rv = nghttp2_session_update_recv_stream_window_size( session, stream, readlen, iframe->payloadleft || (iframe->frame.hd.flags & NGHTTP2_FLAG_END_STREAM) == 0); if (nghttp2_is_fatal(rv)) { return rv; } data_readlen = inbound_frame_effective_readlen( iframe, iframe->payloadleft, readlen); if (data_readlen == -1) { /* everything is padding */ data_readlen = 0; } padlen = (ssize_t)readlen - data_readlen; if (padlen > 0) { /* Padding is considered as "consumed" immediately */ rv = nghttp2_session_consume(session, iframe->frame.hd.stream_id, (size_t)padlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } } DEBUGF("recv: data_readlen=%zd\n", data_readlen); if (data_readlen > 0) { if (session_enforce_http_messaging(session)) { if (nghttp2_http_on_data_chunk(stream, (size_t)data_readlen) != 0) { if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { /* Consume all data for connection immediately here */ rv = session_update_connection_consumed_size( session, (size_t)data_readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_DATA) { return (ssize_t)inlen; } } rv = nghttp2_session_add_rst_stream( session, iframe->frame.hd.stream_id, NGHTTP2_PROTOCOL_ERROR); if (nghttp2_is_fatal(rv)) { return rv; } busy = 1; iframe->state = NGHTTP2_IB_IGN_DATA; break; } } if (session->callbacks.on_data_chunk_recv_callback) { rv = session->callbacks.on_data_chunk_recv_callback( session, iframe->frame.hd.flags, iframe->frame.hd.stream_id, in - readlen, (size_t)data_readlen, session->user_data); if (rv == NGHTTP2_ERR_PAUSE) { return in - first; } if (nghttp2_is_fatal(rv)) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } } } if (iframe->payloadleft) { break; } rv = session_process_data_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_IGN_DATA: DEBUGF("recv: [IB_IGN_DATA]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { /* Update connection-level flow control window for ignored DATA frame too */ rv = nghttp2_session_update_recv_connection_window_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return 
(ssize_t)inlen; } if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { /* Ignored DATA is considered as "consumed" immediately. */ rv = session_update_connection_consumed_size(session, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } } } if (iframe->payloadleft) { break; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_IGN_ALL: return (ssize_t)inlen; case NGHTTP2_IB_READ_EXTENSION_PAYLOAD: DEBUGF("recv: [IB_READ_EXTENSION_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); iframe->payloadleft -= readlen; in += readlen; DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (readlen > 0) { rv = session_call_on_extension_chunk_recv_callback( session, in - readlen, readlen); if (nghttp2_is_fatal(rv)) { return rv; } if (rv != 0) { busy = 1; iframe->state = NGHTTP2_IB_IGN_PAYLOAD; break; } } if (iframe->payloadleft > 0) { break; } rv = session_process_extension_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_ALTSVC_PAYLOAD: DEBUGF("recv: [IB_READ_ALTSVC_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_altsvc_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } session_inbound_frame_reset(session); break; case NGHTTP2_IB_READ_ORIGIN_PAYLOAD: DEBUGF("recv: [IB_READ_ORIGIN_PAYLOAD]\n"); readlen = inbound_frame_payload_readlen(iframe, in, last); if (readlen > 0) { iframe->lbuf.last = nghttp2_cpymem(iframe->lbuf.last, in, readlen); iframe->payloadleft -= readlen; in += readlen; } DEBUGF("recv: readlen=%zu, payloadleft=%zu\n", readlen, iframe->payloadleft); if (iframe->payloadleft) { assert(nghttp2_buf_avail(&iframe->lbuf) > 0); break; } rv = session_process_origin_frame(session); if (nghttp2_is_fatal(rv)) { return rv; } if (iframe->state == NGHTTP2_IB_IGN_ALL) { return (ssize_t)inlen; } session_inbound_frame_reset(session); break; } if (!busy && in == last) { break; } busy = 0; } assert(in == last); return in - first; } int nghttp2_session_recv(nghttp2_session *session) { uint8_t buf[NGHTTP2_INBOUND_BUFFER_LENGTH]; while (1) { ssize_t readlen; readlen = session_recv(session, buf, sizeof(buf)); if (readlen > 0) { ssize_t proclen = nghttp2_session_mem_recv(session, buf, (size_t)readlen); if (proclen < 0) { return (int)proclen; } assert(proclen == readlen); } else if (readlen == 0 || readlen == NGHTTP2_ERR_WOULDBLOCK) { return 0; } else if (readlen == NGHTTP2_ERR_EOF) { return NGHTTP2_ERR_EOF; } else if (readlen < 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } } } /* * Returns the number of active streams, which includes streams in * reserved state. */ static size_t session_get_num_active_streams(nghttp2_session *session) { return nghttp2_map_size(&session->streams) - session->num_closed_streams - session->num_idle_streams; } int nghttp2_session_want_read(nghttp2_session *session) { size_t num_active_streams; /* If this flag is set, we don't want to read. The application should drop the connection. 
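A typical I/O loop keys its read interest off this predicate, e.g. (illustrative sketch only): while (nghttp2_session_want_read(sess) || nghttp2_session_want_write(sess)) { feed received bytes to nghttp2_session_mem_recv() and flush output with nghttp2_session_send(); }.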
*/ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_SENT) { return 0; } num_active_streams = session_get_num_active_streams(session); /* Unless termination GOAWAY is sent or received, we always want to read incoming frames. */ if (num_active_streams > 0) { return 1; } /* If there are no active streams and GOAWAY has been sent or received, we are done with this session. */ return (session->goaway_flags & (NGHTTP2_GOAWAY_SENT | NGHTTP2_GOAWAY_RECV)) == 0; } int nghttp2_session_want_write(nghttp2_session *session) { /* If this flag is set, we don't want to write any data. The application should drop the connection. */ if (session->goaway_flags & NGHTTP2_GOAWAY_TERM_SENT) { return 0; } /* * Unless termination GOAWAY is sent or received, we want to write * frames if there are pending ones. If the pending frame is a request * or push response HEADERS and the concurrent stream limit is reached, * we don't want to write it. */ return session->aob.item || nghttp2_outbound_queue_top(&session->ob_urgent) || nghttp2_outbound_queue_top(&session->ob_reg) || (!nghttp2_pq_empty(&session->root.obq) && session->remote_window_size > 0) || (nghttp2_outbound_queue_top(&session->ob_syn) && !session_is_outgoing_concurrent_streams_max(session)); } int nghttp2_session_add_ping(nghttp2_session *session, uint8_t flags, const uint8_t *opaque_data) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; if ((flags & NGHTTP2_FLAG_ACK) && session->obq_flood_counter_ >= session->max_outbound_ack) { return NGHTTP2_ERR_FLOODED; } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_ping_init(&frame->ping, flags, opaque_data); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_ping_free(&frame->ping); nghttp2_mem_free(mem, item); return rv; } if (flags & NGHTTP2_FLAG_ACK) { ++session->obq_flood_counter_; } return 0; } int nghttp2_session_add_goaway(nghttp2_session *session, int32_t last_stream_id, uint32_t error_code, const uint8_t *opaque_data, size_t opaque_data_len, uint8_t aux_flags) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; uint8_t *opaque_data_copy = NULL; nghttp2_goaway_aux_data *aux_data; nghttp2_mem *mem; mem = &session->mem; if (nghttp2_session_is_my_stream_id(session, last_stream_id)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (opaque_data_len) { if (opaque_data_len + 8 > NGHTTP2_MAX_PAYLOADLEN) { return NGHTTP2_ERR_INVALID_ARGUMENT; } opaque_data_copy = nghttp2_mem_malloc(mem, opaque_data_len); if (opaque_data_copy == NULL) { return NGHTTP2_ERR_NOMEM; } memcpy(opaque_data_copy, opaque_data, opaque_data_len); } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { nghttp2_mem_free(mem, opaque_data_copy); return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; /* last_stream_id must not be increased from the value previously sent */ last_stream_id = nghttp2_min(last_stream_id, session->local_last_stream_id); nghttp2_frame_goaway_init(&frame->goaway, last_stream_id, error_code, opaque_data_copy, opaque_data_len); aux_data = &item->aux_data.goaway; aux_data->flags = aux_flags; rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_goaway_free(&frame->goaway, mem); nghttp2_mem_free(mem, item); return rv; } return 0; } int nghttp2_session_add_window_update(nghttp2_session *session, uint8_t flags, int32_t stream_id, int32_t
window_size_increment) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_mem *mem; mem = &session->mem; item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_window_update_init(&frame->window_update, flags, stream_id, window_size_increment); rv = nghttp2_session_add_item(session, item); if (rv != 0) { nghttp2_frame_window_update_free(&frame->window_update); nghttp2_mem_free(mem, item); return rv; } return 0; } static void session_append_inflight_settings(nghttp2_session *session, nghttp2_inflight_settings *settings) { nghttp2_inflight_settings **i; for (i = &session->inflight_settings_head; *i; i = &(*i)->next) ; *i = settings; } int nghttp2_session_add_settings(nghttp2_session *session, uint8_t flags, const nghttp2_settings_entry *iv, size_t niv) { nghttp2_outbound_item *item; nghttp2_frame *frame; nghttp2_settings_entry *iv_copy; size_t i; int rv; nghttp2_mem *mem; nghttp2_inflight_settings *inflight_settings = NULL; mem = &session->mem; if (flags & NGHTTP2_FLAG_ACK) { if (niv != 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (session->obq_flood_counter_ >= session->max_outbound_ack) { return NGHTTP2_ERR_FLOODED; } } if (!nghttp2_iv_check(iv, niv)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } item = nghttp2_mem_malloc(mem, sizeof(nghttp2_outbound_item)); if (item == NULL) { return NGHTTP2_ERR_NOMEM; } if (niv > 0) { iv_copy = nghttp2_frame_iv_copy(iv, niv, mem); if (iv_copy == NULL) { nghttp2_mem_free(mem, item); return NGHTTP2_ERR_NOMEM; } } else { iv_copy = NULL; } if ((flags & NGHTTP2_FLAG_ACK) == 0) { rv = inflight_settings_new(&inflight_settings, iv, niv, mem); if (rv != 0) { assert(nghttp2_is_fatal(rv)); nghttp2_mem_free(mem, iv_copy); nghttp2_mem_free(mem, item); return rv; } } nghttp2_outbound_item_init(item); frame = &item->frame; nghttp2_frame_settings_init(&frame->settings, flags, iv_copy, niv); rv = nghttp2_session_add_item(session, item); if (rv != 0) { /* The only expected error is fatal one */ assert(nghttp2_is_fatal(rv)); inflight_settings_del(inflight_settings, mem); nghttp2_frame_settings_free(&frame->settings, mem); nghttp2_mem_free(mem, item); return rv; } if (flags & NGHTTP2_FLAG_ACK) { ++session->obq_flood_counter_; } else { session_append_inflight_settings(session, inflight_settings); } /* Extract NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS and ENABLE_PUSH here. We use it to refuse the incoming stream and PUSH_PROMISE with RST_STREAM. 
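Illustrative example: after the application submits SETTINGS_MAX_CONCURRENT_STREAMS=0 but before the peer's SETTINGS ACK arrives, a newly received request HEADERS is already refused on the basis of this pending value rather than the not-yet-acknowledged one.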
*/ for (i = niv; i > 0; --i) { if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS) { session->pending_local_max_concurrent_stream = iv[i - 1].value; break; } } for (i = niv; i > 0; --i) { if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_ENABLE_PUSH) { session->pending_enable_push = (uint8_t)iv[i - 1].value; break; } } for (i = niv; i > 0; --i) { if (iv[i - 1].settings_id == NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL) { session->pending_enable_connect_protocol = (uint8_t)iv[i - 1].value; break; } } return 0; } int nghttp2_session_pack_data(nghttp2_session *session, nghttp2_bufs *bufs, size_t datamax, nghttp2_frame *frame, nghttp2_data_aux_data *aux_data, nghttp2_stream *stream) { int rv; uint32_t data_flags; ssize_t payloadlen; ssize_t padded_payloadlen; nghttp2_buf *buf; size_t max_payloadlen; assert(bufs->head == bufs->cur); buf = &bufs->cur->buf; if (session->callbacks.read_length_callback) { payloadlen = session->callbacks.read_length_callback( session, frame->hd.type, stream->stream_id, session->remote_window_size, stream->remote_window_size, session->remote_settings.max_frame_size, session->user_data); DEBUGF("send: read_length_callback=%zd\n", payloadlen); payloadlen = nghttp2_session_enforce_flow_control_limits(session, stream, payloadlen); DEBUGF("send: read_length_callback after flow control=%zd\n", payloadlen); if (payloadlen <= 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; } if ((size_t)payloadlen > nghttp2_buf_avail(buf)) { /* Resize the current buffer(s). The reason why we do +1 for buffer size is for a possible padding field. */ rv = nghttp2_bufs_realloc(&session->aob.framebufs, (size_t)(NGHTTP2_FRAME_HDLEN + 1 + payloadlen)); if (rv != 0) { DEBUGF("send: realloc buffer failed rv=%d", rv); /* If reallocation failed, old buffers are still intact. So use the safe limit. */ payloadlen = (ssize_t)datamax; DEBUGF("send: use safe limit payloadlen=%zd", payloadlen); } else { assert(&session->aob.framebufs == bufs); buf = &bufs->cur->buf; } } datamax = (size_t)payloadlen; } /* Current max DATA length is less than buffer chunk size */ assert(nghttp2_buf_avail(buf) >= datamax); data_flags = NGHTTP2_DATA_FLAG_NONE; payloadlen = aux_data->data_prd.read_callback( session, frame->hd.stream_id, buf->pos, datamax, &data_flags, &aux_data->data_prd.source, session->user_data); if (payloadlen == NGHTTP2_ERR_DEFERRED || payloadlen == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE || payloadlen == NGHTTP2_ERR_PAUSE) { DEBUGF("send: DATA postponed due to %s\n", nghttp2_strerror((int)payloadlen)); return (int)payloadlen; } if (payloadlen < 0 || datamax < (size_t)payloadlen) { /* This is the error code when the callback failed.
*/ return NGHTTP2_ERR_CALLBACK_FAILURE; } buf->last = buf->pos + payloadlen; buf->pos -= NGHTTP2_FRAME_HDLEN; /* Clear flags, because this may contain the flags of a previous DATA frame */ frame->hd.flags = NGHTTP2_FLAG_NONE; if (data_flags & NGHTTP2_DATA_FLAG_EOF) { aux_data->eof = 1; /* If NGHTTP2_DATA_FLAG_NO_END_STREAM is set, don't set NGHTTP2_FLAG_END_STREAM */ if ((aux_data->flags & NGHTTP2_FLAG_END_STREAM) && (data_flags & NGHTTP2_DATA_FLAG_NO_END_STREAM) == 0) { frame->hd.flags |= NGHTTP2_FLAG_END_STREAM; } } if (data_flags & NGHTTP2_DATA_FLAG_NO_COPY) { if (session->callbacks.send_data_callback == NULL) { DEBUGF("NGHTTP2_DATA_FLAG_NO_COPY requires send_data_callback set\n"); return NGHTTP2_ERR_CALLBACK_FAILURE; } aux_data->no_copy = 1; } frame->hd.length = (size_t)payloadlen; frame->data.padlen = 0; max_payloadlen = nghttp2_min(datamax, frame->hd.length + NGHTTP2_MAX_PADLEN); padded_payloadlen = session_call_select_padding(session, frame, max_payloadlen); if (nghttp2_is_fatal((int)padded_payloadlen)) { return (int)padded_payloadlen; } frame->data.padlen = (size_t)(padded_payloadlen - payloadlen); nghttp2_frame_pack_frame_hd(buf->pos, &frame->hd); rv = nghttp2_frame_add_pad(bufs, &frame->hd, frame->data.padlen, aux_data->no_copy); if (rv != 0) { return rv; } reschedule_stream(stream); if (frame->hd.length == 0 && (data_flags & NGHTTP2_DATA_FLAG_EOF) && (data_flags & NGHTTP2_DATA_FLAG_NO_END_STREAM)) { /* DATA payload length is 0, and the DATA frame does not bear END_STREAM. In this case, there is no point in sending a 0-length DATA frame. */ return NGHTTP2_ERR_CANCEL; } return 0; } void *nghttp2_session_get_stream_user_data(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream) { return stream->stream_user_data; } else { return NULL; } } int nghttp2_session_set_stream_user_data(nghttp2_session *session, int32_t stream_id, void *stream_user_data) { nghttp2_stream *stream; nghttp2_frame *frame; nghttp2_outbound_item *item; stream = nghttp2_session_get_stream(session, stream_id); if (stream) { stream->stream_user_data = stream_user_data; return 0; } if (session->server || !nghttp2_session_is_my_stream_id(session, stream_id) || !nghttp2_outbound_queue_top(&session->ob_syn)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } frame = &nghttp2_outbound_queue_top(&session->ob_syn)->frame; assert(frame->hd.type == NGHTTP2_HEADERS); if (frame->hd.stream_id > stream_id || (uint32_t)stream_id >= session->next_stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } for (item = session->ob_syn.head; item; item = item->qnext) { if (item->frame.hd.stream_id < stream_id) { continue; } if (item->frame.hd.stream_id > stream_id) { break; } item->aux_data.headers.stream_user_data = stream_user_data; return 0; } return NGHTTP2_ERR_INVALID_ARGUMENT; } int nghttp2_session_resume_data(nghttp2_session *session, int32_t stream_id) { int rv; nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL || !nghttp2_stream_check_deferred_item(stream)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } rv = nghttp2_stream_resume_deferred_item(stream, NGHTTP2_STREAM_FLAG_DEFERRED_USER); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } size_t nghttp2_session_get_outbound_queue_size(nghttp2_session *session) { return nghttp2_outbound_queue_size(&session->ob_urgent) + nghttp2_outbound_queue_size(&session->ob_reg) + nghttp2_outbound_queue_size(&session->ob_syn); /* TODO account for item attached to stream */ } int32_t
nghttp2_session_get_stream_effective_recv_data_length(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } return stream->recv_window_size < 0 ? 0 : stream->recv_window_size; } int32_t nghttp2_session_get_stream_effective_local_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } return stream->local_window_size; } int32_t nghttp2_session_get_stream_local_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; int32_t size; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } size = stream->local_window_size - stream->recv_window_size; /* size could be negative if local endpoint reduced SETTINGS_INITIAL_WINDOW_SIZE */ if (size < 0) { return 0; } return size; } int32_t nghttp2_session_get_effective_recv_data_length(nghttp2_session *session) { return session->recv_window_size < 0 ? 0 : session->recv_window_size; } int32_t nghttp2_session_get_effective_local_window_size(nghttp2_session *session) { return session->local_window_size; } int32_t nghttp2_session_get_local_window_size(nghttp2_session *session) { return session->local_window_size - session->recv_window_size; } int32_t nghttp2_session_get_stream_remote_window_size(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (stream == NULL) { return -1; } /* stream->remote_window_size can be negative when SETTINGS_INITIAL_WINDOW_SIZE is changed. */ return nghttp2_max(0, stream->remote_window_size); } int32_t nghttp2_session_get_remote_window_size(nghttp2_session *session) { return session->remote_window_size; } uint32_t nghttp2_session_get_remote_settings(nghttp2_session *session, nghttp2_settings_id id) { switch (id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: return session->remote_settings.header_table_size; case NGHTTP2_SETTINGS_ENABLE_PUSH: return session->remote_settings.enable_push; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: return session->remote_settings.max_concurrent_streams; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: return session->remote_settings.initial_window_size; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: return session->remote_settings.max_frame_size; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: return session->remote_settings.max_header_list_size; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: return session->remote_settings.enable_connect_protocol; } assert(0); abort(); /* if NDEBUG is set */ } uint32_t nghttp2_session_get_local_settings(nghttp2_session *session, nghttp2_settings_id id) { switch (id) { case NGHTTP2_SETTINGS_HEADER_TABLE_SIZE: return session->local_settings.header_table_size; case NGHTTP2_SETTINGS_ENABLE_PUSH: return session->local_settings.enable_push; case NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS: return session->local_settings.max_concurrent_streams; case NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE: return session->local_settings.initial_window_size; case NGHTTP2_SETTINGS_MAX_FRAME_SIZE: return session->local_settings.max_frame_size; case NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE: return session->local_settings.max_header_list_size; case NGHTTP2_SETTINGS_ENABLE_CONNECT_PROTOCOL: return session->local_settings.enable_connect_protocol; } assert(0); abort(); /* if NDEBUG is set */ } static int nghttp2_session_upgrade_internal(nghttp2_session *session, 
const uint8_t *settings_payload, size_t settings_payloadlen, void *stream_user_data) { nghttp2_stream *stream; nghttp2_frame frame; nghttp2_settings_entry *iv; size_t niv; int rv; nghttp2_priority_spec pri_spec; nghttp2_mem *mem; mem = &session->mem; if ((!session->server && session->next_stream_id != 1) || (session->server && session->last_recv_stream_id >= 1)) { return NGHTTP2_ERR_PROTO; } if (settings_payloadlen % NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH) { return NGHTTP2_ERR_INVALID_ARGUMENT; } /* SETTINGS frame contains too many settings */ if (settings_payloadlen / NGHTTP2_FRAME_SETTINGS_ENTRY_LENGTH > session->max_settings) { return NGHTTP2_ERR_TOO_MANY_SETTINGS; } rv = nghttp2_frame_unpack_settings_payload2(&iv, &niv, settings_payload, settings_payloadlen, mem); if (rv != 0) { return rv; } if (session->server) { nghttp2_frame_hd_init(&frame.hd, settings_payloadlen, NGHTTP2_SETTINGS, NGHTTP2_FLAG_NONE, 0); frame.settings.iv = iv; frame.settings.niv = niv; rv = nghttp2_session_on_settings_received(session, &frame, 1 /* No ACK */); } else { rv = nghttp2_submit_settings(session, NGHTTP2_FLAG_NONE, iv, niv); } nghttp2_mem_free(mem, iv); if (rv != 0) { return rv; } nghttp2_priority_spec_default_init(&pri_spec); stream = nghttp2_session_open_stream( session, 1, NGHTTP2_STREAM_FLAG_NONE, &pri_spec, NGHTTP2_STREAM_OPENING, session->server ? NULL : stream_user_data); if (stream == NULL) { return NGHTTP2_ERR_NOMEM; } /* We don't call nghttp2_session_adjust_closed_stream(), since this should be the first stream open. */ if (session->server) { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_RD); session->last_recv_stream_id = 1; session->last_proc_stream_id = 1; } else { nghttp2_stream_shutdown(stream, NGHTTP2_SHUT_WR); session->last_sent_stream_id = 1; session->next_stream_id += 2; } return 0; } int nghttp2_session_upgrade(nghttp2_session *session, const uint8_t *settings_payload, size_t settings_payloadlen, void *stream_user_data) { int rv; nghttp2_stream *stream; rv = nghttp2_session_upgrade_internal(session, settings_payload, settings_payloadlen, stream_user_data); if (rv != 0) { return rv; } stream = nghttp2_session_get_stream(session, 1); assert(stream); /* We have no information about request header fields when the Upgrade happened, so we don't know the request method here. If the request method is HEAD, we are in trouble, because the response headers may carry a nonzero content-length header field, which we are going to check against the actual DATA frames, and we may get a mismatch because a HEAD response body must be empty. For this reason, nghttp2_session_upgrade() was deprecated in favor of nghttp2_session_upgrade2(), which has the |head_request| parameter to indicate whether the request method is HEAD or not.
*/ stream->http_flags |= NGHTTP2_HTTP_FLAG_METH_UPGRADE_WORKAROUND; return 0; } int nghttp2_session_upgrade2(nghttp2_session *session, const uint8_t *settings_payload, size_t settings_payloadlen, int head_request, void *stream_user_data) { int rv; nghttp2_stream *stream; rv = nghttp2_session_upgrade_internal(session, settings_payload, settings_payloadlen, stream_user_data); if (rv != 0) { return rv; } stream = nghttp2_session_get_stream(session, 1); assert(stream); if (head_request) { stream->http_flags |= NGHTTP2_HTTP_FLAG_METH_HEAD; } return 0; } int nghttp2_session_get_stream_local_close(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return -1; } return (stream->shut_flags & NGHTTP2_SHUT_WR) != 0; } int nghttp2_session_get_stream_remote_close(nghttp2_session *session, int32_t stream_id) { nghttp2_stream *stream; stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return -1; } return (stream->shut_flags & NGHTTP2_SHUT_RD) != 0; } int nghttp2_session_consume(nghttp2_session *session, int32_t stream_id, size_t size) { int rv; nghttp2_stream *stream; if (stream_id == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } rv = session_update_connection_consumed_size(session, size); if (nghttp2_is_fatal(rv)) { return rv; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return 0; } rv = session_update_stream_consumed_size(session, stream, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_consume_connection(nghttp2_session *session, size_t size) { int rv; if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } rv = session_update_connection_consumed_size(session, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_consume_stream(nghttp2_session *session, int32_t stream_id, size_t size) { int rv; nghttp2_stream *stream; if (stream_id == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (!(session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE)) { return NGHTTP2_ERR_INVALID_STATE; } stream = nghttp2_session_get_stream(session, stream_id); if (!stream) { return 0; } rv = session_update_stream_consumed_size(session, stream, size); if (nghttp2_is_fatal(rv)) { return rv; } return 0; } int nghttp2_session_set_next_stream_id(nghttp2_session *session, int32_t next_stream_id) { if (next_stream_id <= 0 || session->next_stream_id > (uint32_t)next_stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } if (session->server) { if (next_stream_id % 2) { return NGHTTP2_ERR_INVALID_ARGUMENT; } } else if (next_stream_id % 2 == 0) { return NGHTTP2_ERR_INVALID_ARGUMENT; } session->next_stream_id = (uint32_t)next_stream_id; return 0; } uint32_t nghttp2_session_get_next_stream_id(nghttp2_session *session) { return session->next_stream_id; } int32_t nghttp2_session_get_last_proc_stream_id(nghttp2_session *session) { return session->last_proc_stream_id; } nghttp2_stream *nghttp2_session_find_stream(nghttp2_session *session, int32_t stream_id) { if (stream_id == 0) { return &session->root; } return nghttp2_session_get_stream_raw(session, stream_id); } nghttp2_stream *nghttp2_session_get_root_stream(nghttp2_session *session) { return &session->root; } int nghttp2_session_check_server_session(nghttp2_session *session) { return session->server; } int nghttp2_session_change_stream_priority( 
nghttp2_session *session, int32_t stream_id, const nghttp2_priority_spec *pri_spec) { int rv; nghttp2_stream *stream; nghttp2_priority_spec pri_spec_copy; if (stream_id == 0 || stream_id == pri_spec->stream_id) { return NGHTTP2_ERR_INVALID_ARGUMENT; } stream = nghttp2_session_get_stream_raw(session, stream_id); if (!stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } pri_spec_copy = *pri_spec; nghttp2_priority_spec_normalize_weight(&pri_spec_copy); rv = nghttp2_session_reprioritize_stream(session, stream, &pri_spec_copy); if (nghttp2_is_fatal(rv)) { return rv; } /* We intentionally don't call nghttp2_session_adjust_idle_stream(), so that the idle stream created by this function and existing ones are kept for the application. We will adjust the number of idle streams when nghttp2_session_mem_send() or nghttp2_session_mem_recv() is called. */ return 0; } int nghttp2_session_create_idle_stream(nghttp2_session *session, int32_t stream_id, const nghttp2_priority_spec *pri_spec) { nghttp2_stream *stream; nghttp2_priority_spec pri_spec_copy; if (stream_id == 0 || stream_id == pri_spec->stream_id || !session_detect_idle_stream(session, stream_id)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } stream = nghttp2_session_get_stream_raw(session, stream_id); if (stream) { return NGHTTP2_ERR_INVALID_ARGUMENT; } pri_spec_copy = *pri_spec; nghttp2_priority_spec_normalize_weight(&pri_spec_copy); stream = nghttp2_session_open_stream(session, stream_id, NGHTTP2_STREAM_FLAG_NONE, &pri_spec_copy, NGHTTP2_STREAM_IDLE, NULL); if (!stream) { return NGHTTP2_ERR_NOMEM; } /* We intentionally don't call nghttp2_session_adjust_idle_stream(), so that the idle stream created by this function and existing ones are kept for the application. We will adjust the number of idle streams when nghttp2_session_mem_send() or nghttp2_session_mem_recv() is called. */ return 0; } size_t nghttp2_session_get_hd_inflate_dynamic_table_size(nghttp2_session *session) { return nghttp2_hd_inflate_get_dynamic_table_size(&session->hd_inflater); } size_t nghttp2_session_get_hd_deflate_dynamic_table_size(nghttp2_session *session) { return nghttp2_hd_deflate_get_dynamic_table_size(&session->hd_deflater); } void nghttp2_session_set_user_data(nghttp2_session *session, void *user_data) { session->user_data = user_data; }
./CrossVul/dataset_final_sorted/CWE-707/c/good_3936_6
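/*
 * Editor's note: a minimal client-side sketch of the HTTP/1.1 Upgrade flow
 * that nghttp2_session_upgrade2() in the record above is designed for. This
 * is an illustration, not part of the original file: upgrade_client() is a
 * hypothetical helper, and it assumes the public API in <nghttp2/nghttp2.h>.
 * nghttp2_pack_settings_payload() produces the same payload that the client
 * base64url-encodes into the HTTP2-Settings header field of the Upgrade
 * request.
 */
#include <nghttp2/nghttp2.h>

static int upgrade_client(nghttp2_session *session, int head_request) {
  nghttp2_settings_entry iv[] = {
      {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 100}};
  uint8_t payload[64];
  ssize_t payloadlen;

  /* Serialize the SETTINGS payload that was sent in HTTP2-Settings. */
  payloadlen = nghttp2_pack_settings_payload(payload, sizeof(payload), iv,
                                             sizeof(iv) / sizeof(iv[0]));
  if (payloadlen < 0) {
    return -1;
  }
  /* Call this after receiving the 101 response: stream 1 becomes
     half-closed (local) and carries the response; head_request != 0 tells
     the library that the response body must be empty even if a
     content-length header field is present. */
  return nghttp2_session_upgrade2(session, payload, (size_t)payloadlen,
                                  head_request, NULL);
}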
crossvul-cpp_data_bad_3936_8
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2012 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifdef HAVE_CONFIG_H # include <config.h> #endif /* HAVE_CONFIG_H */ #include <stdio.h> #include <string.h> #include <CUnit/Basic.h> /* include test cases' include files here */ #include "nghttp2_pq_test.h" #include "nghttp2_map_test.h" #include "nghttp2_queue_test.h" #include "nghttp2_session_test.h" #include "nghttp2_frame_test.h" #include "nghttp2_stream_test.h" #include "nghttp2_hd_test.h" #include "nghttp2_npn_test.h" #include "nghttp2_helper_test.h" #include "nghttp2_buf_test.h" extern int nghttp2_enable_strict_preface; static int init_suite1(void) { return 0; } static int clean_suite1(void) { return 0; } int main() { CU_pSuite pSuite = NULL; unsigned int num_tests_failed; nghttp2_enable_strict_preface = 0; /* initialize the CUnit test registry */ if (CUE_SUCCESS != CU_initialize_registry()) return (int)CU_get_error(); /* add a suite to the registry */ pSuite = CU_add_suite("libnghttp2_TestSuite", init_suite1, clean_suite1); if (NULL == pSuite) { CU_cleanup_registry(); return (int)CU_get_error(); } /* add the tests to the suite */ if (!CU_add_test(pSuite, "pq", test_nghttp2_pq) || !CU_add_test(pSuite, "pq_update", test_nghttp2_pq_update) || !CU_add_test(pSuite, "pq_remove", test_nghttp2_pq_remove) || !CU_add_test(pSuite, "map", test_nghttp2_map) || !CU_add_test(pSuite, "map_functional", test_nghttp2_map_functional) || !CU_add_test(pSuite, "map_each_free", test_nghttp2_map_each_free) || !CU_add_test(pSuite, "queue", test_nghttp2_queue) || !CU_add_test(pSuite, "npn", test_nghttp2_npn) || !CU_add_test(pSuite, "session_recv", test_nghttp2_session_recv) || !CU_add_test(pSuite, "session_recv_invalid_stream_id", test_nghttp2_session_recv_invalid_stream_id) || !CU_add_test(pSuite, "session_recv_invalid_frame", test_nghttp2_session_recv_invalid_frame) || !CU_add_test(pSuite, "session_recv_eof", test_nghttp2_session_recv_eof) || !CU_add_test(pSuite, "session_recv_data", test_nghttp2_session_recv_data) || !CU_add_test(pSuite, "session_recv_data_no_auto_flow_control", test_nghttp2_session_recv_data_no_auto_flow_control) || !CU_add_test(pSuite, "session_recv_continuation", test_nghttp2_session_recv_continuation) || !CU_add_test(pSuite, "session_recv_headers_with_priority", test_nghttp2_session_recv_headers_with_priority) || !CU_add_test(pSuite, "session_recv_headers_with_padding", test_nghttp2_session_recv_headers_with_padding) || !CU_add_test(pSuite, 
"session_recv_headers_early_response", test_nghttp2_session_recv_headers_early_response) || !CU_add_test(pSuite, "session_recv_headers_for_closed_stream", test_nghttp2_session_recv_headers_for_closed_stream) || !CU_add_test(pSuite, "session_server_recv_push_response", test_nghttp2_session_server_recv_push_response) || !CU_add_test(pSuite, "session_recv_premature_headers", test_nghttp2_session_recv_premature_headers) || !CU_add_test(pSuite, "session_recv_unknown_frame", test_nghttp2_session_recv_unknown_frame) || !CU_add_test(pSuite, "session_recv_unexpected_continuation", test_nghttp2_session_recv_unexpected_continuation) || !CU_add_test(pSuite, "session_recv_settings_header_table_size", test_nghttp2_session_recv_settings_header_table_size) || !CU_add_test(pSuite, "session_recv_too_large_frame_length", test_nghttp2_session_recv_too_large_frame_length) || !CU_add_test(pSuite, "session_recv_extension", test_nghttp2_session_recv_extension) || !CU_add_test(pSuite, "session_recv_altsvc", test_nghttp2_session_recv_altsvc) || !CU_add_test(pSuite, "session_recv_origin", test_nghttp2_session_recv_origin) || !CU_add_test(pSuite, "session_continue", test_nghttp2_session_continue) || !CU_add_test(pSuite, "session_add_frame", test_nghttp2_session_add_frame) || !CU_add_test(pSuite, "session_on_request_headers_received", test_nghttp2_session_on_request_headers_received) || !CU_add_test(pSuite, "session_on_response_headers_received", test_nghttp2_session_on_response_headers_received) || !CU_add_test(pSuite, "session_on_headers_received", test_nghttp2_session_on_headers_received) || !CU_add_test(pSuite, "session_on_push_response_headers_received", test_nghttp2_session_on_push_response_headers_received) || !CU_add_test(pSuite, "session_on_priority_received", test_nghttp2_session_on_priority_received) || !CU_add_test(pSuite, "session_on_rst_stream_received", test_nghttp2_session_on_rst_stream_received) || !CU_add_test(pSuite, "session_on_settings_received", test_nghttp2_session_on_settings_received) || !CU_add_test(pSuite, "session_on_push_promise_received", test_nghttp2_session_on_push_promise_received) || !CU_add_test(pSuite, "session_on_ping_received", test_nghttp2_session_on_ping_received) || !CU_add_test(pSuite, "session_on_goaway_received", test_nghttp2_session_on_goaway_received) || !CU_add_test(pSuite, "session_on_window_update_received", test_nghttp2_session_on_window_update_received) || !CU_add_test(pSuite, "session_on_data_received", test_nghttp2_session_on_data_received) || !CU_add_test(pSuite, "session_on_data_received_fail_fast", test_nghttp2_session_on_data_received_fail_fast) || !CU_add_test(pSuite, "session_on_altsvc_received", test_nghttp2_session_on_altsvc_received) || !CU_add_test(pSuite, "session_send_headers_start_stream", test_nghttp2_session_send_headers_start_stream) || !CU_add_test(pSuite, "session_send_headers_reply", test_nghttp2_session_send_headers_reply) || !CU_add_test(pSuite, "session_send_headers_frame_size_error", test_nghttp2_session_send_headers_frame_size_error) || !CU_add_test(pSuite, "session_send_headers_push_reply", test_nghttp2_session_send_headers_push_reply) || !CU_add_test(pSuite, "session_send_rst_stream", test_nghttp2_session_send_rst_stream) || !CU_add_test(pSuite, "session_send_push_promise", test_nghttp2_session_send_push_promise) || !CU_add_test(pSuite, "session_is_my_stream_id", test_nghttp2_session_is_my_stream_id) || !CU_add_test(pSuite, "session_upgrade2", test_nghttp2_session_upgrade2) || !CU_add_test(pSuite, "session_reprioritize_stream", 
test_nghttp2_session_reprioritize_stream) || !CU_add_test( pSuite, "session_reprioritize_stream_with_idle_stream_dep", test_nghttp2_session_reprioritize_stream_with_idle_stream_dep) || !CU_add_test(pSuite, "submit_data", test_nghttp2_submit_data) || !CU_add_test(pSuite, "submit_data_read_length_too_large", test_nghttp2_submit_data_read_length_too_large) || !CU_add_test(pSuite, "submit_data_read_length_smallest", test_nghttp2_submit_data_read_length_smallest) || !CU_add_test(pSuite, "submit_data_twice", test_nghttp2_submit_data_twice) || !CU_add_test(pSuite, "submit_request_with_data", test_nghttp2_submit_request_with_data) || !CU_add_test(pSuite, "submit_request_without_data", test_nghttp2_submit_request_without_data) || !CU_add_test(pSuite, "submit_response_with_data", test_nghttp2_submit_response_with_data) || !CU_add_test(pSuite, "submit_response_without_data", test_nghttp2_submit_response_without_data) || !CU_add_test(pSuite, "Submit_response_push_response", test_nghttp2_submit_response_push_response) || !CU_add_test(pSuite, "submit_trailer", test_nghttp2_submit_trailer) || !CU_add_test(pSuite, "submit_headers_start_stream", test_nghttp2_submit_headers_start_stream) || !CU_add_test(pSuite, "submit_headers_reply", test_nghttp2_submit_headers_reply) || !CU_add_test(pSuite, "submit_headers_push_reply", test_nghttp2_submit_headers_push_reply) || !CU_add_test(pSuite, "submit_headers", test_nghttp2_submit_headers) || !CU_add_test(pSuite, "submit_headers_continuation", test_nghttp2_submit_headers_continuation) || !CU_add_test(pSuite, "submit_headers_continuation_extra_large", test_nghttp2_submit_headers_continuation_extra_large) || !CU_add_test(pSuite, "submit_priority", test_nghttp2_submit_priority) || !CU_add_test(pSuite, "session_submit_settings", test_nghttp2_submit_settings) || !CU_add_test(pSuite, "session_submit_settings_update_local_window_size", test_nghttp2_submit_settings_update_local_window_size) || !CU_add_test(pSuite, "session_submit_settings_multiple_times", test_nghttp2_submit_settings_multiple_times) || !CU_add_test(pSuite, "session_submit_push_promise", test_nghttp2_submit_push_promise) || !CU_add_test(pSuite, "submit_window_update", test_nghttp2_submit_window_update) || !CU_add_test(pSuite, "submit_window_update_local_window_size", test_nghttp2_submit_window_update_local_window_size) || !CU_add_test(pSuite, "submit_shutdown_notice", test_nghttp2_submit_shutdown_notice) || !CU_add_test(pSuite, "submit_invalid_nv", test_nghttp2_submit_invalid_nv) || !CU_add_test(pSuite, "submit_extension", test_nghttp2_submit_extension) || !CU_add_test(pSuite, "submit_altsvc", test_nghttp2_submit_altsvc) || !CU_add_test(pSuite, "submit_origin", test_nghttp2_submit_origin) || !CU_add_test(pSuite, "session_open_stream", test_nghttp2_session_open_stream) || !CU_add_test(pSuite, "session_open_stream_with_idle_stream_dep", test_nghttp2_session_open_stream_with_idle_stream_dep) || !CU_add_test(pSuite, "session_get_next_ob_item", test_nghttp2_session_get_next_ob_item) || !CU_add_test(pSuite, "session_pop_next_ob_item", test_nghttp2_session_pop_next_ob_item) || !CU_add_test(pSuite, "session_reply_fail", test_nghttp2_session_reply_fail) || !CU_add_test(pSuite, "session_max_concurrent_streams", test_nghttp2_session_max_concurrent_streams) || !CU_add_test(pSuite, "session_stop_data_with_rst_stream", test_nghttp2_session_stop_data_with_rst_stream) || !CU_add_test(pSuite, "session_defer_data", test_nghttp2_session_defer_data) || !CU_add_test(pSuite, "session_flow_control", 
test_nghttp2_session_flow_control) || !CU_add_test(pSuite, "session_flow_control_data_recv", test_nghttp2_session_flow_control_data_recv) || !CU_add_test(pSuite, "session_flow_control_data_with_padding_recv", test_nghttp2_session_flow_control_data_with_padding_recv) || !CU_add_test(pSuite, "session_data_read_temporal_failure", test_nghttp2_session_data_read_temporal_failure) || !CU_add_test(pSuite, "session_on_stream_close", test_nghttp2_session_on_stream_close) || !CU_add_test(pSuite, "session_on_ctrl_not_send", test_nghttp2_session_on_ctrl_not_send) || !CU_add_test(pSuite, "session_get_outbound_queue_size", test_nghttp2_session_get_outbound_queue_size) || !CU_add_test(pSuite, "session_get_effective_local_window_size", test_nghttp2_session_get_effective_local_window_size) || !CU_add_test(pSuite, "session_set_option", test_nghttp2_session_set_option) || !CU_add_test(pSuite, "session_data_backoff_by_high_pri_frame", test_nghttp2_session_data_backoff_by_high_pri_frame) || !CU_add_test(pSuite, "session_pack_data_with_padding", test_nghttp2_session_pack_data_with_padding) || !CU_add_test(pSuite, "session_pack_headers_with_padding", test_nghttp2_session_pack_headers_with_padding) || !CU_add_test(pSuite, "pack_settings_payload", test_nghttp2_pack_settings_payload) || !CU_add_test(pSuite, "session_stream_dep_add", test_nghttp2_session_stream_dep_add) || !CU_add_test(pSuite, "session_stream_dep_remove", test_nghttp2_session_stream_dep_remove) || !CU_add_test(pSuite, "session_stream_dep_add_subtree", test_nghttp2_session_stream_dep_add_subtree) || !CU_add_test(pSuite, "session_stream_dep_remove_subtree", test_nghttp2_session_stream_dep_remove_subtree) || !CU_add_test( pSuite, "session_stream_dep_all_your_stream_are_belong_to_us", test_nghttp2_session_stream_dep_all_your_stream_are_belong_to_us) || !CU_add_test(pSuite, "session_stream_attach_item", test_nghttp2_session_stream_attach_item) || !CU_add_test(pSuite, "session_stream_attach_item_subtree", test_nghttp2_session_stream_attach_item_subtree) || !CU_add_test(pSuite, "session_stream_get_state", test_nghttp2_session_stream_get_state) || !CU_add_test(pSuite, "session_stream_get_something", test_nghttp2_session_stream_get_something) || !CU_add_test(pSuite, "session_find_stream", test_nghttp2_session_find_stream) || !CU_add_test(pSuite, "session_keep_closed_stream", test_nghttp2_session_keep_closed_stream) || !CU_add_test(pSuite, "session_keep_idle_stream", test_nghttp2_session_keep_idle_stream) || !CU_add_test(pSuite, "session_detach_idle_stream", test_nghttp2_session_detach_idle_stream) || !CU_add_test(pSuite, "session_large_dep_tree", test_nghttp2_session_large_dep_tree) || !CU_add_test(pSuite, "session_graceful_shutdown", test_nghttp2_session_graceful_shutdown) || !CU_add_test(pSuite, "session_on_header_temporal_failure", test_nghttp2_session_on_header_temporal_failure) || !CU_add_test(pSuite, "session_recv_client_magic", test_nghttp2_session_recv_client_magic) || !CU_add_test(pSuite, "session_delete_data_item", test_nghttp2_session_delete_data_item) || !CU_add_test(pSuite, "session_open_idle_stream", test_nghttp2_session_open_idle_stream) || !CU_add_test(pSuite, "session_cancel_reserved_remote", test_nghttp2_session_cancel_reserved_remote) || !CU_add_test(pSuite, "session_reset_pending_headers", test_nghttp2_session_reset_pending_headers) || !CU_add_test(pSuite, "session_send_data_callback", test_nghttp2_session_send_data_callback) || !CU_add_test(pSuite, "session_on_begin_headers_temporal_failure", 
test_nghttp2_session_on_begin_headers_temporal_failure) || !CU_add_test(pSuite, "session_defer_then_close", test_nghttp2_session_defer_then_close) || !CU_add_test(pSuite, "session_detach_item_from_closed_stream", test_nghttp2_session_detach_item_from_closed_stream) || !CU_add_test(pSuite, "session_flooding", test_nghttp2_session_flooding) || !CU_add_test(pSuite, "session_change_stream_priority", test_nghttp2_session_change_stream_priority) || !CU_add_test(pSuite, "session_create_idle_stream", test_nghttp2_session_create_idle_stream) || !CU_add_test(pSuite, "session_repeated_priority_change", test_nghttp2_session_repeated_priority_change) || !CU_add_test(pSuite, "session_repeated_priority_submission", test_nghttp2_session_repeated_priority_submission) || !CU_add_test(pSuite, "session_set_local_window_size", test_nghttp2_session_set_local_window_size) || !CU_add_test(pSuite, "session_cancel_from_before_frame_send", test_nghttp2_session_cancel_from_before_frame_send) || !CU_add_test(pSuite, "session_removed_closed_stream", test_nghttp2_session_removed_closed_stream) || !CU_add_test(pSuite, "session_pause_data", test_nghttp2_session_pause_data) || !CU_add_test(pSuite, "session_no_closed_streams", test_nghttp2_session_no_closed_streams) || !CU_add_test(pSuite, "session_set_stream_user_data", test_nghttp2_session_set_stream_user_data) || !CU_add_test(pSuite, "http_mandatory_headers", test_nghttp2_http_mandatory_headers) || !CU_add_test(pSuite, "http_content_length", test_nghttp2_http_content_length) || !CU_add_test(pSuite, "http_content_length_mismatch", test_nghttp2_http_content_length_mismatch) || !CU_add_test(pSuite, "http_non_final_response", test_nghttp2_http_non_final_response) || !CU_add_test(pSuite, "http_trailer_headers", test_nghttp2_http_trailer_headers) || !CU_add_test(pSuite, "http_ignore_regular_header", test_nghttp2_http_ignore_regular_header) || !CU_add_test(pSuite, "http_ignore_content_length", test_nghttp2_http_ignore_content_length) || !CU_add_test(pSuite, "http_record_request_method", test_nghttp2_http_record_request_method) || !CU_add_test(pSuite, "http_push_promise", test_nghttp2_http_push_promise) || !CU_add_test(pSuite, "http_head_method_upgrade_workaround", test_nghttp2_http_head_method_upgrade_workaround) || !CU_add_test(pSuite, "frame_pack_headers", test_nghttp2_frame_pack_headers) || !CU_add_test(pSuite, "frame_pack_headers_frame_too_large", test_nghttp2_frame_pack_headers_frame_too_large) || !CU_add_test(pSuite, "frame_pack_priority", test_nghttp2_frame_pack_priority) || !CU_add_test(pSuite, "frame_pack_rst_stream", test_nghttp2_frame_pack_rst_stream) || !CU_add_test(pSuite, "frame_pack_settings", test_nghttp2_frame_pack_settings) || !CU_add_test(pSuite, "frame_pack_push_promise", test_nghttp2_frame_pack_push_promise) || !CU_add_test(pSuite, "frame_pack_ping", test_nghttp2_frame_pack_ping) || !CU_add_test(pSuite, "frame_pack_goaway", test_nghttp2_frame_pack_goaway) || !CU_add_test(pSuite, "frame_pack_window_update", test_nghttp2_frame_pack_window_update) || !CU_add_test(pSuite, "frame_pack_altsvc", test_nghttp2_frame_pack_altsvc) || !CU_add_test(pSuite, "frame_pack_origin", test_nghttp2_frame_pack_origin) || !CU_add_test(pSuite, "nv_array_copy", test_nghttp2_nv_array_copy) || !CU_add_test(pSuite, "iv_check", test_nghttp2_iv_check) || !CU_add_test(pSuite, "hd_deflate", test_nghttp2_hd_deflate) || !CU_add_test(pSuite, "hd_deflate_same_indexed_repr", test_nghttp2_hd_deflate_same_indexed_repr) || !CU_add_test(pSuite, "hd_inflate_indexed", test_nghttp2_hd_inflate_indexed) 
|| !CU_add_test(pSuite, "hd_inflate_indname_noinc", test_nghttp2_hd_inflate_indname_noinc) || !CU_add_test(pSuite, "hd_inflate_indname_inc", test_nghttp2_hd_inflate_indname_inc) || !CU_add_test(pSuite, "hd_inflate_indname_inc_eviction", test_nghttp2_hd_inflate_indname_inc_eviction) || !CU_add_test(pSuite, "hd_inflate_newname_noinc", test_nghttp2_hd_inflate_newname_noinc) || !CU_add_test(pSuite, "hd_inflate_newname_inc", test_nghttp2_hd_inflate_newname_inc) || !CU_add_test(pSuite, "hd_inflate_clearall_inc", test_nghttp2_hd_inflate_clearall_inc) || !CU_add_test(pSuite, "hd_inflate_zero_length_huffman", test_nghttp2_hd_inflate_zero_length_huffman) || !CU_add_test(pSuite, "hd_inflate_expect_table_size_update", test_nghttp2_hd_inflate_expect_table_size_update) || !CU_add_test(pSuite, "hd_inflate_unexpected_table_size_update", test_nghttp2_hd_inflate_unexpected_table_size_update) || !CU_add_test(pSuite, "hd_ringbuf_reserve", test_nghttp2_hd_ringbuf_reserve) || !CU_add_test(pSuite, "hd_change_table_size", test_nghttp2_hd_change_table_size) || !CU_add_test(pSuite, "hd_deflate_inflate", test_nghttp2_hd_deflate_inflate) || !CU_add_test(pSuite, "hd_no_index", test_nghttp2_hd_no_index) || !CU_add_test(pSuite, "hd_deflate_bound", test_nghttp2_hd_deflate_bound) || !CU_add_test(pSuite, "hd_public_api", test_nghttp2_hd_public_api) || !CU_add_test(pSuite, "hd_deflate_hd_vec", test_nghttp2_hd_deflate_hd_vec) || !CU_add_test(pSuite, "hd_decode_length", test_nghttp2_hd_decode_length) || !CU_add_test(pSuite, "hd_huff_encode", test_nghttp2_hd_huff_encode) || !CU_add_test(pSuite, "hd_huff_decode", test_nghttp2_hd_huff_decode) || !CU_add_test(pSuite, "adjust_local_window_size", test_nghttp2_adjust_local_window_size) || !CU_add_test(pSuite, "check_header_name", test_nghttp2_check_header_name) || !CU_add_test(pSuite, "check_header_value", test_nghttp2_check_header_value) || !CU_add_test(pSuite, "bufs_add", test_nghttp2_bufs_add) || !CU_add_test(pSuite, "bufs_add_stack_buffer_overflow_bug", test_nghttp2_bufs_add_stack_buffer_overflow_bug) || !CU_add_test(pSuite, "bufs_addb", test_nghttp2_bufs_addb) || !CU_add_test(pSuite, "bufs_orb", test_nghttp2_bufs_orb) || !CU_add_test(pSuite, "bufs_remove", test_nghttp2_bufs_remove) || !CU_add_test(pSuite, "bufs_reset", test_nghttp2_bufs_reset) || !CU_add_test(pSuite, "bufs_advance", test_nghttp2_bufs_advance) || !CU_add_test(pSuite, "bufs_next_present", test_nghttp2_bufs_next_present) || !CU_add_test(pSuite, "bufs_realloc", test_nghttp2_bufs_realloc)) { CU_cleanup_registry(); return (int)CU_get_error(); } /* Run all tests using the CUnit Basic interface */ CU_basic_set_mode(CU_BRM_VERBOSE); CU_basic_run_tests(); num_tests_failed = CU_get_number_of_tests_failed(); CU_cleanup_registry(); if (CU_get_error() == CUE_SUCCESS) { return (int)num_tests_failed; } else { printf("CUnit Error: %s\n", CU_get_error_msg()); return (int)CU_get_error(); } }
./CrossVul/dataset_final_sorted/CWE-707/c/bad_3936_8
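/*
 * Editor's note: the "bad" test driver above differs from the "good" variant
 * in the next record only in that it does not register
 * session_too_many_settings, the test covering the session->max_settings cap
 * (NGHTTP2_ERR_TOO_MANY_SETTINGS) enforced in the session code earlier. A
 * minimal sketch of configuring that cap for a session, assuming nghttp2 >=
 * 1.41.0, where nghttp2_option_set_max_settings() is available:
 */
#include <nghttp2/nghttp2.h>

/* Create a client session that rejects any SETTINGS frame carrying more
   than 32 entries instead of processing it. */
static int new_capped_session(nghttp2_session **session_ptr,
                              const nghttp2_session_callbacks *callbacks,
                              void *user_data) {
  nghttp2_option *option;
  int rv;

  rv = nghttp2_option_new(&option);
  if (rv != 0) {
    return rv;
  }
  nghttp2_option_set_max_settings(option, 32);
  rv = nghttp2_session_client_new2(session_ptr, callbacks, user_data, option);
  nghttp2_option_del(option);
  return rv;
}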
crossvul-cpp_data_good_3936_8
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2012 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifdef HAVE_CONFIG_H # include <config.h> #endif /* HAVE_CONFIG_H */ #include <stdio.h> #include <string.h> #include <CUnit/Basic.h> /* include test cases' include files here */ #include "nghttp2_pq_test.h" #include "nghttp2_map_test.h" #include "nghttp2_queue_test.h" #include "nghttp2_session_test.h" #include "nghttp2_frame_test.h" #include "nghttp2_stream_test.h" #include "nghttp2_hd_test.h" #include "nghttp2_npn_test.h" #include "nghttp2_helper_test.h" #include "nghttp2_buf_test.h" extern int nghttp2_enable_strict_preface; static int init_suite1(void) { return 0; } static int clean_suite1(void) { return 0; } int main() { CU_pSuite pSuite = NULL; unsigned int num_tests_failed; nghttp2_enable_strict_preface = 0; /* initialize the CUnit test registry */ if (CUE_SUCCESS != CU_initialize_registry()) return (int)CU_get_error(); /* add a suite to the registry */ pSuite = CU_add_suite("libnghttp2_TestSuite", init_suite1, clean_suite1); if (NULL == pSuite) { CU_cleanup_registry(); return (int)CU_get_error(); } /* add the tests to the suite */ if (!CU_add_test(pSuite, "pq", test_nghttp2_pq) || !CU_add_test(pSuite, "pq_update", test_nghttp2_pq_update) || !CU_add_test(pSuite, "pq_remove", test_nghttp2_pq_remove) || !CU_add_test(pSuite, "map", test_nghttp2_map) || !CU_add_test(pSuite, "map_functional", test_nghttp2_map_functional) || !CU_add_test(pSuite, "map_each_free", test_nghttp2_map_each_free) || !CU_add_test(pSuite, "queue", test_nghttp2_queue) || !CU_add_test(pSuite, "npn", test_nghttp2_npn) || !CU_add_test(pSuite, "session_recv", test_nghttp2_session_recv) || !CU_add_test(pSuite, "session_recv_invalid_stream_id", test_nghttp2_session_recv_invalid_stream_id) || !CU_add_test(pSuite, "session_recv_invalid_frame", test_nghttp2_session_recv_invalid_frame) || !CU_add_test(pSuite, "session_recv_eof", test_nghttp2_session_recv_eof) || !CU_add_test(pSuite, "session_recv_data", test_nghttp2_session_recv_data) || !CU_add_test(pSuite, "session_recv_data_no_auto_flow_control", test_nghttp2_session_recv_data_no_auto_flow_control) || !CU_add_test(pSuite, "session_recv_continuation", test_nghttp2_session_recv_continuation) || !CU_add_test(pSuite, "session_recv_headers_with_priority", test_nghttp2_session_recv_headers_with_priority) || !CU_add_test(pSuite, "session_recv_headers_with_padding", test_nghttp2_session_recv_headers_with_padding) || !CU_add_test(pSuite, 
"session_recv_headers_early_response", test_nghttp2_session_recv_headers_early_response) || !CU_add_test(pSuite, "session_recv_headers_for_closed_stream", test_nghttp2_session_recv_headers_for_closed_stream) || !CU_add_test(pSuite, "session_server_recv_push_response", test_nghttp2_session_server_recv_push_response) || !CU_add_test(pSuite, "session_recv_premature_headers", test_nghttp2_session_recv_premature_headers) || !CU_add_test(pSuite, "session_recv_unknown_frame", test_nghttp2_session_recv_unknown_frame) || !CU_add_test(pSuite, "session_recv_unexpected_continuation", test_nghttp2_session_recv_unexpected_continuation) || !CU_add_test(pSuite, "session_recv_settings_header_table_size", test_nghttp2_session_recv_settings_header_table_size) || !CU_add_test(pSuite, "session_recv_too_large_frame_length", test_nghttp2_session_recv_too_large_frame_length) || !CU_add_test(pSuite, "session_recv_extension", test_nghttp2_session_recv_extension) || !CU_add_test(pSuite, "session_recv_altsvc", test_nghttp2_session_recv_altsvc) || !CU_add_test(pSuite, "session_recv_origin", test_nghttp2_session_recv_origin) || !CU_add_test(pSuite, "session_continue", test_nghttp2_session_continue) || !CU_add_test(pSuite, "session_add_frame", test_nghttp2_session_add_frame) || !CU_add_test(pSuite, "session_on_request_headers_received", test_nghttp2_session_on_request_headers_received) || !CU_add_test(pSuite, "session_on_response_headers_received", test_nghttp2_session_on_response_headers_received) || !CU_add_test(pSuite, "session_on_headers_received", test_nghttp2_session_on_headers_received) || !CU_add_test(pSuite, "session_on_push_response_headers_received", test_nghttp2_session_on_push_response_headers_received) || !CU_add_test(pSuite, "session_on_priority_received", test_nghttp2_session_on_priority_received) || !CU_add_test(pSuite, "session_on_rst_stream_received", test_nghttp2_session_on_rst_stream_received) || !CU_add_test(pSuite, "session_on_settings_received", test_nghttp2_session_on_settings_received) || !CU_add_test(pSuite, "session_on_push_promise_received", test_nghttp2_session_on_push_promise_received) || !CU_add_test(pSuite, "session_on_ping_received", test_nghttp2_session_on_ping_received) || !CU_add_test(pSuite, "session_on_goaway_received", test_nghttp2_session_on_goaway_received) || !CU_add_test(pSuite, "session_on_window_update_received", test_nghttp2_session_on_window_update_received) || !CU_add_test(pSuite, "session_on_data_received", test_nghttp2_session_on_data_received) || !CU_add_test(pSuite, "session_on_data_received_fail_fast", test_nghttp2_session_on_data_received_fail_fast) || !CU_add_test(pSuite, "session_on_altsvc_received", test_nghttp2_session_on_altsvc_received) || !CU_add_test(pSuite, "session_send_headers_start_stream", test_nghttp2_session_send_headers_start_stream) || !CU_add_test(pSuite, "session_send_headers_reply", test_nghttp2_session_send_headers_reply) || !CU_add_test(pSuite, "session_send_headers_frame_size_error", test_nghttp2_session_send_headers_frame_size_error) || !CU_add_test(pSuite, "session_send_headers_push_reply", test_nghttp2_session_send_headers_push_reply) || !CU_add_test(pSuite, "session_send_rst_stream", test_nghttp2_session_send_rst_stream) || !CU_add_test(pSuite, "session_send_push_promise", test_nghttp2_session_send_push_promise) || !CU_add_test(pSuite, "session_is_my_stream_id", test_nghttp2_session_is_my_stream_id) || !CU_add_test(pSuite, "session_upgrade2", test_nghttp2_session_upgrade2) || !CU_add_test(pSuite, "session_reprioritize_stream", 
test_nghttp2_session_reprioritize_stream) || !CU_add_test( pSuite, "session_reprioritize_stream_with_idle_stream_dep", test_nghttp2_session_reprioritize_stream_with_idle_stream_dep) || !CU_add_test(pSuite, "submit_data", test_nghttp2_submit_data) || !CU_add_test(pSuite, "submit_data_read_length_too_large", test_nghttp2_submit_data_read_length_too_large) || !CU_add_test(pSuite, "submit_data_read_length_smallest", test_nghttp2_submit_data_read_length_smallest) || !CU_add_test(pSuite, "submit_data_twice", test_nghttp2_submit_data_twice) || !CU_add_test(pSuite, "submit_request_with_data", test_nghttp2_submit_request_with_data) || !CU_add_test(pSuite, "submit_request_without_data", test_nghttp2_submit_request_without_data) || !CU_add_test(pSuite, "submit_response_with_data", test_nghttp2_submit_response_with_data) || !CU_add_test(pSuite, "submit_response_without_data", test_nghttp2_submit_response_without_data) || !CU_add_test(pSuite, "Submit_response_push_response", test_nghttp2_submit_response_push_response) || !CU_add_test(pSuite, "submit_trailer", test_nghttp2_submit_trailer) || !CU_add_test(pSuite, "submit_headers_start_stream", test_nghttp2_submit_headers_start_stream) || !CU_add_test(pSuite, "submit_headers_reply", test_nghttp2_submit_headers_reply) || !CU_add_test(pSuite, "submit_headers_push_reply", test_nghttp2_submit_headers_push_reply) || !CU_add_test(pSuite, "submit_headers", test_nghttp2_submit_headers) || !CU_add_test(pSuite, "submit_headers_continuation", test_nghttp2_submit_headers_continuation) || !CU_add_test(pSuite, "submit_headers_continuation_extra_large", test_nghttp2_submit_headers_continuation_extra_large) || !CU_add_test(pSuite, "submit_priority", test_nghttp2_submit_priority) || !CU_add_test(pSuite, "session_submit_settings", test_nghttp2_submit_settings) || !CU_add_test(pSuite, "session_submit_settings_update_local_window_size", test_nghttp2_submit_settings_update_local_window_size) || !CU_add_test(pSuite, "session_submit_settings_multiple_times", test_nghttp2_submit_settings_multiple_times) || !CU_add_test(pSuite, "session_submit_push_promise", test_nghttp2_submit_push_promise) || !CU_add_test(pSuite, "submit_window_update", test_nghttp2_submit_window_update) || !CU_add_test(pSuite, "submit_window_update_local_window_size", test_nghttp2_submit_window_update_local_window_size) || !CU_add_test(pSuite, "submit_shutdown_notice", test_nghttp2_submit_shutdown_notice) || !CU_add_test(pSuite, "submit_invalid_nv", test_nghttp2_submit_invalid_nv) || !CU_add_test(pSuite, "submit_extension", test_nghttp2_submit_extension) || !CU_add_test(pSuite, "submit_altsvc", test_nghttp2_submit_altsvc) || !CU_add_test(pSuite, "submit_origin", test_nghttp2_submit_origin) || !CU_add_test(pSuite, "session_open_stream", test_nghttp2_session_open_stream) || !CU_add_test(pSuite, "session_open_stream_with_idle_stream_dep", test_nghttp2_session_open_stream_with_idle_stream_dep) || !CU_add_test(pSuite, "session_get_next_ob_item", test_nghttp2_session_get_next_ob_item) || !CU_add_test(pSuite, "session_pop_next_ob_item", test_nghttp2_session_pop_next_ob_item) || !CU_add_test(pSuite, "session_reply_fail", test_nghttp2_session_reply_fail) || !CU_add_test(pSuite, "session_max_concurrent_streams", test_nghttp2_session_max_concurrent_streams) || !CU_add_test(pSuite, "session_stop_data_with_rst_stream", test_nghttp2_session_stop_data_with_rst_stream) || !CU_add_test(pSuite, "session_defer_data", test_nghttp2_session_defer_data) || !CU_add_test(pSuite, "session_flow_control", 
test_nghttp2_session_flow_control) || !CU_add_test(pSuite, "session_flow_control_data_recv", test_nghttp2_session_flow_control_data_recv) || !CU_add_test(pSuite, "session_flow_control_data_with_padding_recv", test_nghttp2_session_flow_control_data_with_padding_recv) || !CU_add_test(pSuite, "session_data_read_temporal_failure", test_nghttp2_session_data_read_temporal_failure) || !CU_add_test(pSuite, "session_on_stream_close", test_nghttp2_session_on_stream_close) || !CU_add_test(pSuite, "session_on_ctrl_not_send", test_nghttp2_session_on_ctrl_not_send) || !CU_add_test(pSuite, "session_get_outbound_queue_size", test_nghttp2_session_get_outbound_queue_size) || !CU_add_test(pSuite, "session_get_effective_local_window_size", test_nghttp2_session_get_effective_local_window_size) || !CU_add_test(pSuite, "session_set_option", test_nghttp2_session_set_option) || !CU_add_test(pSuite, "session_data_backoff_by_high_pri_frame", test_nghttp2_session_data_backoff_by_high_pri_frame) || !CU_add_test(pSuite, "session_pack_data_with_padding", test_nghttp2_session_pack_data_with_padding) || !CU_add_test(pSuite, "session_pack_headers_with_padding", test_nghttp2_session_pack_headers_with_padding) || !CU_add_test(pSuite, "pack_settings_payload", test_nghttp2_pack_settings_payload) || !CU_add_test(pSuite, "session_stream_dep_add", test_nghttp2_session_stream_dep_add) || !CU_add_test(pSuite, "session_stream_dep_remove", test_nghttp2_session_stream_dep_remove) || !CU_add_test(pSuite, "session_stream_dep_add_subtree", test_nghttp2_session_stream_dep_add_subtree) || !CU_add_test(pSuite, "session_stream_dep_remove_subtree", test_nghttp2_session_stream_dep_remove_subtree) || !CU_add_test( pSuite, "session_stream_dep_all_your_stream_are_belong_to_us", test_nghttp2_session_stream_dep_all_your_stream_are_belong_to_us) || !CU_add_test(pSuite, "session_stream_attach_item", test_nghttp2_session_stream_attach_item) || !CU_add_test(pSuite, "session_stream_attach_item_subtree", test_nghttp2_session_stream_attach_item_subtree) || !CU_add_test(pSuite, "session_stream_get_state", test_nghttp2_session_stream_get_state) || !CU_add_test(pSuite, "session_stream_get_something", test_nghttp2_session_stream_get_something) || !CU_add_test(pSuite, "session_find_stream", test_nghttp2_session_find_stream) || !CU_add_test(pSuite, "session_keep_closed_stream", test_nghttp2_session_keep_closed_stream) || !CU_add_test(pSuite, "session_keep_idle_stream", test_nghttp2_session_keep_idle_stream) || !CU_add_test(pSuite, "session_detach_idle_stream", test_nghttp2_session_detach_idle_stream) || !CU_add_test(pSuite, "session_large_dep_tree", test_nghttp2_session_large_dep_tree) || !CU_add_test(pSuite, "session_graceful_shutdown", test_nghttp2_session_graceful_shutdown) || !CU_add_test(pSuite, "session_on_header_temporal_failure", test_nghttp2_session_on_header_temporal_failure) || !CU_add_test(pSuite, "session_recv_client_magic", test_nghttp2_session_recv_client_magic) || !CU_add_test(pSuite, "session_delete_data_item", test_nghttp2_session_delete_data_item) || !CU_add_test(pSuite, "session_open_idle_stream", test_nghttp2_session_open_idle_stream) || !CU_add_test(pSuite, "session_cancel_reserved_remote", test_nghttp2_session_cancel_reserved_remote) || !CU_add_test(pSuite, "session_reset_pending_headers", test_nghttp2_session_reset_pending_headers) || !CU_add_test(pSuite, "session_send_data_callback", test_nghttp2_session_send_data_callback) || !CU_add_test(pSuite, "session_on_begin_headers_temporal_failure", 
test_nghttp2_session_on_begin_headers_temporal_failure) || !CU_add_test(pSuite, "session_defer_then_close", test_nghttp2_session_defer_then_close) || !CU_add_test(pSuite, "session_detach_item_from_closed_stream", test_nghttp2_session_detach_item_from_closed_stream) || !CU_add_test(pSuite, "session_flooding", test_nghttp2_session_flooding) || !CU_add_test(pSuite, "session_change_stream_priority", test_nghttp2_session_change_stream_priority) || !CU_add_test(pSuite, "session_create_idle_stream", test_nghttp2_session_create_idle_stream) || !CU_add_test(pSuite, "session_repeated_priority_change", test_nghttp2_session_repeated_priority_change) || !CU_add_test(pSuite, "session_repeated_priority_submission", test_nghttp2_session_repeated_priority_submission) || !CU_add_test(pSuite, "session_set_local_window_size", test_nghttp2_session_set_local_window_size) || !CU_add_test(pSuite, "session_cancel_from_before_frame_send", test_nghttp2_session_cancel_from_before_frame_send) || !CU_add_test(pSuite, "session_too_many_settings", test_nghttp2_session_too_many_settings) || !CU_add_test(pSuite, "session_removed_closed_stream", test_nghttp2_session_removed_closed_stream) || !CU_add_test(pSuite, "session_pause_data", test_nghttp2_session_pause_data) || !CU_add_test(pSuite, "session_no_closed_streams", test_nghttp2_session_no_closed_streams) || !CU_add_test(pSuite, "session_set_stream_user_data", test_nghttp2_session_set_stream_user_data) || !CU_add_test(pSuite, "http_mandatory_headers", test_nghttp2_http_mandatory_headers) || !CU_add_test(pSuite, "http_content_length", test_nghttp2_http_content_length) || !CU_add_test(pSuite, "http_content_length_mismatch", test_nghttp2_http_content_length_mismatch) || !CU_add_test(pSuite, "http_non_final_response", test_nghttp2_http_non_final_response) || !CU_add_test(pSuite, "http_trailer_headers", test_nghttp2_http_trailer_headers) || !CU_add_test(pSuite, "http_ignore_regular_header", test_nghttp2_http_ignore_regular_header) || !CU_add_test(pSuite, "http_ignore_content_length", test_nghttp2_http_ignore_content_length) || !CU_add_test(pSuite, "http_record_request_method", test_nghttp2_http_record_request_method) || !CU_add_test(pSuite, "http_push_promise", test_nghttp2_http_push_promise) || !CU_add_test(pSuite, "http_head_method_upgrade_workaround", test_nghttp2_http_head_method_upgrade_workaround) || !CU_add_test(pSuite, "frame_pack_headers", test_nghttp2_frame_pack_headers) || !CU_add_test(pSuite, "frame_pack_headers_frame_too_large", test_nghttp2_frame_pack_headers_frame_too_large) || !CU_add_test(pSuite, "frame_pack_priority", test_nghttp2_frame_pack_priority) || !CU_add_test(pSuite, "frame_pack_rst_stream", test_nghttp2_frame_pack_rst_stream) || !CU_add_test(pSuite, "frame_pack_settings", test_nghttp2_frame_pack_settings) || !CU_add_test(pSuite, "frame_pack_push_promise", test_nghttp2_frame_pack_push_promise) || !CU_add_test(pSuite, "frame_pack_ping", test_nghttp2_frame_pack_ping) || !CU_add_test(pSuite, "frame_pack_goaway", test_nghttp2_frame_pack_goaway) || !CU_add_test(pSuite, "frame_pack_window_update", test_nghttp2_frame_pack_window_update) || !CU_add_test(pSuite, "frame_pack_altsvc", test_nghttp2_frame_pack_altsvc) || !CU_add_test(pSuite, "frame_pack_origin", test_nghttp2_frame_pack_origin) || !CU_add_test(pSuite, "nv_array_copy", test_nghttp2_nv_array_copy) || !CU_add_test(pSuite, "iv_check", test_nghttp2_iv_check) || !CU_add_test(pSuite, "hd_deflate", test_nghttp2_hd_deflate) || !CU_add_test(pSuite, "hd_deflate_same_indexed_repr", 
test_nghttp2_hd_deflate_same_indexed_repr) || !CU_add_test(pSuite, "hd_inflate_indexed", test_nghttp2_hd_inflate_indexed) || !CU_add_test(pSuite, "hd_inflate_indname_noinc", test_nghttp2_hd_inflate_indname_noinc) || !CU_add_test(pSuite, "hd_inflate_indname_inc", test_nghttp2_hd_inflate_indname_inc) || !CU_add_test(pSuite, "hd_inflate_indname_inc_eviction", test_nghttp2_hd_inflate_indname_inc_eviction) || !CU_add_test(pSuite, "hd_inflate_newname_noinc", test_nghttp2_hd_inflate_newname_noinc) || !CU_add_test(pSuite, "hd_inflate_newname_inc", test_nghttp2_hd_inflate_newname_inc) || !CU_add_test(pSuite, "hd_inflate_clearall_inc", test_nghttp2_hd_inflate_clearall_inc) || !CU_add_test(pSuite, "hd_inflate_zero_length_huffman", test_nghttp2_hd_inflate_zero_length_huffman) || !CU_add_test(pSuite, "hd_inflate_expect_table_size_update", test_nghttp2_hd_inflate_expect_table_size_update) || !CU_add_test(pSuite, "hd_inflate_unexpected_table_size_update", test_nghttp2_hd_inflate_unexpected_table_size_update) || !CU_add_test(pSuite, "hd_ringbuf_reserve", test_nghttp2_hd_ringbuf_reserve) || !CU_add_test(pSuite, "hd_change_table_size", test_nghttp2_hd_change_table_size) || !CU_add_test(pSuite, "hd_deflate_inflate", test_nghttp2_hd_deflate_inflate) || !CU_add_test(pSuite, "hd_no_index", test_nghttp2_hd_no_index) || !CU_add_test(pSuite, "hd_deflate_bound", test_nghttp2_hd_deflate_bound) || !CU_add_test(pSuite, "hd_public_api", test_nghttp2_hd_public_api) || !CU_add_test(pSuite, "hd_deflate_hd_vec", test_nghttp2_hd_deflate_hd_vec) || !CU_add_test(pSuite, "hd_decode_length", test_nghttp2_hd_decode_length) || !CU_add_test(pSuite, "hd_huff_encode", test_nghttp2_hd_huff_encode) || !CU_add_test(pSuite, "hd_huff_decode", test_nghttp2_hd_huff_decode) || !CU_add_test(pSuite, "adjust_local_window_size", test_nghttp2_adjust_local_window_size) || !CU_add_test(pSuite, "check_header_name", test_nghttp2_check_header_name) || !CU_add_test(pSuite, "check_header_value", test_nghttp2_check_header_value) || !CU_add_test(pSuite, "bufs_add", test_nghttp2_bufs_add) || !CU_add_test(pSuite, "bufs_add_stack_buffer_overflow_bug", test_nghttp2_bufs_add_stack_buffer_overflow_bug) || !CU_add_test(pSuite, "bufs_addb", test_nghttp2_bufs_addb) || !CU_add_test(pSuite, "bufs_orb", test_nghttp2_bufs_orb) || !CU_add_test(pSuite, "bufs_remove", test_nghttp2_bufs_remove) || !CU_add_test(pSuite, "bufs_reset", test_nghttp2_bufs_reset) || !CU_add_test(pSuite, "bufs_advance", test_nghttp2_bufs_advance) || !CU_add_test(pSuite, "bufs_next_present", test_nghttp2_bufs_next_present) || !CU_add_test(pSuite, "bufs_realloc", test_nghttp2_bufs_realloc)) { CU_cleanup_registry(); return (int)CU_get_error(); } /* Run all tests using the CUnit Basic interface */ CU_basic_set_mode(CU_BRM_VERBOSE); CU_basic_run_tests(); num_tests_failed = CU_get_number_of_tests_failed(); CU_cleanup_registry(); if (CU_get_error() == CUE_SUCCESS) { return (int)num_tests_failed; } else { printf("CUnit Error: %s\n", CU_get_error_msg()); return (int)CU_get_error(); } }
./CrossVul/dataset_final_sorted/CWE-707/c/good_3936_8
crossvul-cpp_data_good_3936_4
/* * nghttp2 - HTTP/2 C Library * * Copyright (c) 2014 Tatsuhiro Tsujikawa * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "nghttp2_option.h" #include "nghttp2_session.h" int nghttp2_option_new(nghttp2_option **option_ptr) { *option_ptr = calloc(1, sizeof(nghttp2_option)); if (*option_ptr == NULL) { return NGHTTP2_ERR_NOMEM; } return 0; } void nghttp2_option_del(nghttp2_option *option) { free(option); } void nghttp2_option_set_no_auto_window_update(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_AUTO_WINDOW_UPDATE; option->no_auto_window_update = val; } void nghttp2_option_set_peer_max_concurrent_streams(nghttp2_option *option, uint32_t val) { option->opt_set_mask |= NGHTTP2_OPT_PEER_MAX_CONCURRENT_STREAMS; option->peer_max_concurrent_streams = val; } void nghttp2_option_set_no_recv_client_magic(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_RECV_CLIENT_MAGIC; option->no_recv_client_magic = val; } void nghttp2_option_set_no_http_messaging(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_HTTP_MESSAGING; option->no_http_messaging = val; } void nghttp2_option_set_max_reserved_remote_streams(nghttp2_option *option, uint32_t val) { option->opt_set_mask |= NGHTTP2_OPT_MAX_RESERVED_REMOTE_STREAMS; option->max_reserved_remote_streams = val; } static void set_ext_type(uint8_t *ext_types, uint8_t type) { ext_types[type / 8] = (uint8_t)(ext_types[type / 8] | (1 << (type & 0x7))); } void nghttp2_option_set_user_recv_extension_type(nghttp2_option *option, uint8_t type) { if (type < 10) { return; } option->opt_set_mask |= NGHTTP2_OPT_USER_RECV_EXT_TYPES; set_ext_type(option->user_recv_ext_types, type); } void nghttp2_option_set_builtin_recv_extension_type(nghttp2_option *option, uint8_t type) { switch (type) { case NGHTTP2_ALTSVC: option->opt_set_mask |= NGHTTP2_OPT_BUILTIN_RECV_EXT_TYPES; option->builtin_recv_ext_types |= NGHTTP2_TYPEMASK_ALTSVC; return; case NGHTTP2_ORIGIN: option->opt_set_mask |= NGHTTP2_OPT_BUILTIN_RECV_EXT_TYPES; option->builtin_recv_ext_types |= NGHTTP2_TYPEMASK_ORIGIN; return; default: return; } } void nghttp2_option_set_no_auto_ping_ack(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_AUTO_PING_ACK; option->no_auto_ping_ack = val; } void nghttp2_option_set_max_send_header_block_length(nghttp2_option *option, size_t val) { option->opt_set_mask |= NGHTTP2_OPT_MAX_SEND_HEADER_BLOCK_LENGTH; option->max_send_header_block_length = val; } void 
nghttp2_option_set_max_deflate_dynamic_table_size(nghttp2_option *option, size_t val) { option->opt_set_mask |= NGHTTP2_OPT_MAX_DEFLATE_DYNAMIC_TABLE_SIZE; option->max_deflate_dynamic_table_size = val; } void nghttp2_option_set_no_closed_streams(nghttp2_option *option, int val) { option->opt_set_mask |= NGHTTP2_OPT_NO_CLOSED_STREAMS; option->no_closed_streams = val; } void nghttp2_option_set_max_outbound_ack(nghttp2_option *option, size_t val) { option->opt_set_mask |= NGHTTP2_OPT_MAX_OUTBOUND_ACK; option->max_outbound_ack = val; } void nghttp2_option_set_max_settings(nghttp2_option *option, size_t val) { option->opt_set_mask |= NGHTTP2_OPT_MAX_SETTINGS; option->max_settings = val; }
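/* Editor's note: a hedged usage sketch for the setters above. It creates an
 * option object, flips two of the knobs defined in this file, and hands the
 * result to nghttp2_session_client_new2(); the callbacks object and the
 * chosen values are placeholders, and the #if 0 guard keeps this out of the
 * build. Per the nghttp2 API, the session copies what it needs from the
 * option, so the option object can be freed right after session creation. */
#if 0
#include <nghttp2/nghttp2.h>
#include <stddef.h>

static int create_session_with_options(nghttp2_session **session_out,
                                       const nghttp2_session_callbacks *cbs) {
  nghttp2_option *opt;
  int rv = nghttp2_option_new(&opt);
  if (rv != 0)
    return rv; /* NGHTTP2_ERR_NOMEM */
  /* Disable automatic WINDOW_UPDATE; the application submits its own. */
  nghttp2_option_set_no_auto_window_update(opt, 1);
  /* Cap the number of SETTINGS entries accepted per frame (illustrative). */
  nghttp2_option_set_max_settings(opt, 32);
  rv = nghttp2_session_client_new2(session_out, cbs, NULL, opt);
  nghttp2_option_del(opt); /* safe: the session keeps its own copy */
  return rv;
}
#endif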
./CrossVul/dataset_final_sorted/CWE-707/c/good_3936_4
crossvul-cpp_data_good_2491_1
/* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. * Copyright (c) 2007-2016, The Tor Project, Inc. */ /* See LICENSE for licensing information */ /** * \file rendservice.c * \brief The hidden-service side of rendezvous functionality. **/ #define RENDSERVICE_PRIVATE #include "or.h" #include "circpathbias.h" #include "circuitbuild.h" #include "circuitlist.h" #include "circuituse.h" #include "config.h" #include "control.h" #include "directory.h" #include "hs_common.h" #include "main.h" #include "networkstatus.h" #include "nodelist.h" #include "policies.h" #include "rendclient.h" #include "rendcommon.h" #include "rendservice.h" #include "router.h" #include "relay.h" #include "rephist.h" #include "replaycache.h" #include "routerlist.h" #include "routerparse.h" #include "routerset.h" struct rend_service_t; static origin_circuit_t *find_intro_circuit(rend_intro_point_t *intro, const char *pk_digest); static rend_intro_point_t *find_intro_point(origin_circuit_t *circ); static rend_intro_point_t *find_expiring_intro_point( struct rend_service_t *service, origin_circuit_t *circ); static extend_info_t *find_rp_for_intro( const rend_intro_cell_t *intro, char **err_msg_out); static int intro_point_accepted_intro_count(rend_intro_point_t *intro); static int intro_point_should_expire_now(rend_intro_point_t *intro, time_t now); static int rend_service_derive_key_digests(struct rend_service_t *s); static int rend_service_load_keys(struct rend_service_t *s); static int rend_service_load_auth_keys(struct rend_service_t *s, const char *hfname); static struct rend_service_t *rend_service_get_by_pk_digest( const char* digest); static struct rend_service_t *rend_service_get_by_service_id(const char *id); static const char *rend_service_escaped_dir( const struct rend_service_t *s); static ssize_t rend_service_parse_intro_for_v0_or_v1( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out); static ssize_t rend_service_parse_intro_for_v2( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out); static ssize_t rend_service_parse_intro_for_v3( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out); static int rend_service_check_private_dir(const or_options_t *options, const rend_service_t *s, int create); static int rend_service_check_private_dir_impl(const or_options_t *options, const rend_service_t *s, int create); static const smartlist_t* rend_get_service_list( const smartlist_t* substitute_service_list); static smartlist_t* rend_get_service_list_mutable( smartlist_t* substitute_service_list); /** Represents the mapping from a virtual port of a rendezvous service to * a real port on some IP. */ struct rend_service_port_config_s { /* The incoming HS virtual port we're mapping */ uint16_t virtual_port; /* Is this an AF_UNIX port? */ unsigned int is_unix_addr:1; /* The outgoing TCP port to use, if !is_unix_addr */ uint16_t real_port; /* The outgoing IPv4 or IPv6 address to use, if !is_unix_addr */ tor_addr_t real_addr; /* The socket path to connect to, if is_unix_addr */ char unix_addr[FLEXIBLE_ARRAY_MEMBER]; }; /** Try to maintain this many intro points per service by default. */ #define NUM_INTRO_POINTS_DEFAULT 3 /** Maximum number of intro points per service. */ #define NUM_INTRO_POINTS_MAX 10 /** Number of extra intro points we launch if our set of intro nodes is * empty. See proposal 155, section 4. */ #define NUM_INTRO_POINTS_EXTRA 2 /** If we can't build our intro circuits, don't retry for this long. 
*/ #define INTRO_CIRC_RETRY_PERIOD (60*5) /** How many times will a hidden service operator attempt to connect to * a requested rendezvous point before giving up? */ #define MAX_REND_FAILURES 1 /** How many seconds should we spend trying to connect to a requested * rendezvous point before giving up? */ #define MAX_REND_TIMEOUT 30 /* Hidden service directory file names: * new file names should be added to rend_service_add_filenames_to_list() * for sandboxing purposes. */ static const char *private_key_fname = "private_key"; static const char *hostname_fname = "hostname"; static const char *client_keys_fname = "client_keys"; static const char *sos_poison_fname = "onion_service_non_anonymous"; /** A list of rend_service_t's for services run on this OP. */ static smartlist_t *rend_service_list = NULL; /* Like rend_get_service_list_mutable, but returns a read-only list. */ static const smartlist_t* rend_get_service_list(const smartlist_t* substitute_service_list) { /* It is safe to cast away the const here, because * rend_get_service_list_mutable does not actually modify the list */ return rend_get_service_list_mutable((smartlist_t*)substitute_service_list); } /* Return a mutable list of hidden services. * If substitute_service_list is not NULL, return it. * Otherwise, check if the global rend_service_list is non-NULL, and if so, * return it. * Otherwise, log a BUG message and return NULL. * */ static smartlist_t* rend_get_service_list_mutable(smartlist_t* substitute_service_list) { if (substitute_service_list) { return substitute_service_list; } /* If no special service list is provided, then just use the global one. */ if (BUG(!rend_service_list)) { /* No global HS list, which is a programmer error. */ return NULL; } return rend_service_list; } /** Tells if onion service <b>s</b> is ephemeral. */ static unsigned int rend_service_is_ephemeral(const struct rend_service_t *s) { return (s->directory == NULL); } /** Returns an escaped string representation of the service, <b>s</b>. */ static const char * rend_service_escaped_dir(const struct rend_service_t *s) { return rend_service_is_ephemeral(s) ? "[EPHEMERAL]" : escaped(s->directory); } /** Return the number of rendezvous services we have configured. */ int num_rend_services(void) { if (!rend_service_list) return 0; return smartlist_len(rend_service_list); } /** Helper: free storage held by a single service authorized client entry. */ void rend_authorized_client_free(rend_authorized_client_t *client) { if (!client) return; if (client->client_key) crypto_pk_free(client->client_key); if (client->client_name) memwipe(client->client_name, 0, strlen(client->client_name)); tor_free(client->client_name); memwipe(client->descriptor_cookie, 0, sizeof(client->descriptor_cookie)); tor_free(client); } /** Helper for strmap_free. */ static void rend_authorized_client_strmap_item_free(void *authorized_client) { rend_authorized_client_free(authorized_client); } /** Release the storage held by <b>service</b>.
*/ STATIC void rend_service_free(rend_service_t *service) { if (!service) return; tor_free(service->directory); if (service->ports) { SMARTLIST_FOREACH(service->ports, rend_service_port_config_t*, p, rend_service_port_config_free(p)); smartlist_free(service->ports); } if (service->private_key) crypto_pk_free(service->private_key); if (service->intro_nodes) { SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro, rend_intro_point_free(intro);); smartlist_free(service->intro_nodes); } if (service->expiring_nodes) { SMARTLIST_FOREACH(service->expiring_nodes, rend_intro_point_t *, intro, rend_intro_point_free(intro);); smartlist_free(service->expiring_nodes); } rend_service_descriptor_free(service->desc); if (service->clients) { SMARTLIST_FOREACH(service->clients, rend_authorized_client_t *, c, rend_authorized_client_free(c);); smartlist_free(service->clients); } if (service->accepted_intro_dh_parts) { replaycache_free(service->accepted_intro_dh_parts); } tor_free(service); } /** Release all the storage held in rend_service_list. */ void rend_service_free_all(void) { if (!rend_service_list) return; SMARTLIST_FOREACH(rend_service_list, rend_service_t*, ptr, rend_service_free(ptr)); smartlist_free(rend_service_list); rend_service_list = NULL; } /** Validate <b>service</b> and add it to <b>service_list</b>, or to * the global rend_service_list if <b>service_list</b> is NULL. * Return 0 on success. On failure, free <b>service</b> and return -1. * Takes ownership of <b>service</b>. */ static int rend_add_service(smartlist_t *service_list, rend_service_t *service) { int i; rend_service_port_config_t *p; tor_assert(service); smartlist_t *s_list = rend_get_service_list_mutable(service_list); /* We must have a service list, even if it's a temporary one, so we can * check for duplicate services */ if (BUG(!s_list)) { return -1; } service->intro_nodes = smartlist_new(); service->expiring_nodes = smartlist_new(); if (service->max_streams_per_circuit < 0) { log_warn(LD_CONFIG, "Hidden service (%s) configured with negative max " "streams per circuit.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } if (service->max_streams_close_circuit < 0 || service->max_streams_close_circuit > 1) { log_warn(LD_CONFIG, "Hidden service (%s) configured with invalid " "max streams handling.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } if (service->auth_type != REND_NO_AUTH && (!service->clients || smartlist_len(service->clients) == 0)) { log_warn(LD_CONFIG, "Hidden service (%s) with client authorization but no " "clients.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } if (!service->ports || !smartlist_len(service->ports)) { log_warn(LD_CONFIG, "Hidden service (%s) with no ports configured.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } else { int dupe = 0; /* XXX This duplicate check has two problems: * * a) It's O(n^2), but the same comment from the bottom of * rend_config_services() should apply. * * b) We only compare directory paths as strings, so we can't * detect two distinct paths that specify the same directory * (which can arise from symlinks, case-insensitivity, bind * mounts, etc.). * * It also can't detect that two separate Tor instances are trying * to use the same HiddenServiceDir; for that, we would need a * lock file. But this is enough to detect a simple mistake that * at least one person has actually made. 
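 * (Illustrative example of that limitation: "/srv/hs" and "/srv/hs/", or a
 * symlink "/srv/link" resolving to "/srv/hs", all name the same directory
 * but compare as different strings, so they would not be caught here; the
 * paths are hypothetical.)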
*/ tor_assert(s_list); if (!rend_service_is_ephemeral(service)) { /* Skip dupe for ephemeral services. */ SMARTLIST_FOREACH(s_list, rend_service_t*, ptr, dupe = dupe || !strcmp(ptr->directory, service->directory)); if (dupe) { log_warn(LD_REND, "Another hidden service is already configured for " "directory %s.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } } log_debug(LD_REND,"Configuring service with directory %s", rend_service_escaped_dir(service)); for (i = 0; i < smartlist_len(service->ports); ++i) { p = smartlist_get(service->ports, i); if (!(p->is_unix_addr)) { log_debug(LD_REND, "Service maps port %d to %s", p->virtual_port, fmt_addrport(&p->real_addr, p->real_port)); } else { #ifdef HAVE_SYS_UN_H log_debug(LD_REND, "Service maps port %d to socket at \"%s\"", p->virtual_port, p->unix_addr); #else log_warn(LD_BUG, "Service maps port %d to an AF_UNIX socket, but we " "have no AF_UNIX support on this platform. This is " "probably a bug.", p->virtual_port); rend_service_free(service); return -1; #endif /* defined(HAVE_SYS_UN_H) */ } } /* The service passed all the checks */ tor_assert(s_list); smartlist_add(s_list, service); return 0; } /* NOTREACHED */ } /** Return a new rend_service_port_config_t with its path set to * <b>socket_path</b> or empty if <b>socket_path</b> is NULL */ static rend_service_port_config_t * rend_service_port_config_new(const char *socket_path) { if (!socket_path) return tor_malloc_zero(sizeof(rend_service_port_config_t) + 1); const size_t pathlen = strlen(socket_path) + 1; rend_service_port_config_t *conf = tor_malloc_zero(sizeof(rend_service_port_config_t) + pathlen); memcpy(conf->unix_addr, socket_path, pathlen); conf->is_unix_addr = 1; return conf; } /** Parses a virtual-port to real-port/socket mapping separated by * the provided separator and returns a new rend_service_port_config_t, * or NULL and an optional error string on failure. * * The format is: VirtualPort SEP (IP|RealPort|IP:RealPort|'socket':path)? * * IP defaults to 127.0.0.1; RealPort defaults to VirtualPort. */ rend_service_port_config_t * rend_service_parse_port_config(const char *string, const char *sep, char **err_msg_out) { smartlist_t *sl; int virtport; int realport = 0; uint16_t p; tor_addr_t addr; rend_service_port_config_t *result = NULL; unsigned int is_unix_addr = 0; const char *socket_path = NULL; char *err_msg = NULL; char *addrport = NULL; sl = smartlist_new(); smartlist_split_string(sl, string, sep, SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 2); if (smartlist_len(sl) < 1 || BUG(smartlist_len(sl) > 2)) { err_msg = tor_strdup("Bad syntax in hidden service port configuration."); goto err; } virtport = (int)tor_parse_long(smartlist_get(sl,0), 10, 1, 65535, NULL,NULL); if (!virtport) { tor_asprintf(&err_msg, "Missing or invalid port %s in hidden service " "port configuration", escaped(smartlist_get(sl,0))); goto err; } if (smartlist_len(sl) == 1) { /* No addr:port part; use default. */ realport = virtport; tor_addr_from_ipv4h(&addr, 0x7F000001u); /* 127.0.0.1 */ } else { int ret; const char *addrport_element = smartlist_get(sl,1); const char *rest = NULL; int is_unix; ret = port_cfg_line_extract_addrport(addrport_element, &addrport, &is_unix, &rest); if (ret < 0) { tor_asprintf(&err_msg, "Couldn't process address <%s> from hidden " "service configuration", addrport_element); goto err; } if (is_unix) { socket_path = addrport; is_unix_addr = 1; } else if (strchr(addrport, ':') || strchr(addrport, '.')) { /* else try it as an IP:port pair if it has a : or . 
in it */ if (tor_addr_port_lookup(addrport, &addr, &p)<0) { err_msg = tor_strdup("Unparseable address in hidden service port " "configuration."); goto err; } realport = p?p:virtport; } else { /* No addr:port, no addr -- must be port. */ realport = (int)tor_parse_long(addrport, 10, 1, 65535, NULL, NULL); if (!realport) { tor_asprintf(&err_msg, "Unparseable or out-of-range port %s in " "hidden service port configuration.", escaped(addrport)); goto err; } tor_addr_from_ipv4h(&addr, 0x7F000001u); /* Default to 127.0.0.1 */ } } /* Allow room for unix_addr */ result = rend_service_port_config_new(socket_path); result->virtual_port = virtport; result->is_unix_addr = is_unix_addr; if (!is_unix_addr) { result->real_port = realport; tor_addr_copy(&result->real_addr, &addr); result->unix_addr[0] = '\0'; } err: tor_free(addrport); if (err_msg_out != NULL) { *err_msg_out = err_msg; } else { tor_free(err_msg); } SMARTLIST_FOREACH(sl, char *, c, tor_free(c)); smartlist_free(sl); return result; } /** Release all storage held in a rend_service_port_config_t. */ void rend_service_port_config_free(rend_service_port_config_t *p) { tor_free(p); } /* Check the directory for <b>service</b>, and add the service to * <b>service_list</b>, or to the global list if <b>service_list</b> is NULL. * Only add the service to the list if <b>validate_only</b> is false. * If <b>validate_only</b> is true, free the service. * If <b>service</b> is NULL, ignore it, and return 0. * Returns 0 on success, and -1 on failure. * Takes ownership of <b>service</b>, either freeing it, or adding it to the * global service list. */ STATIC int rend_service_check_dir_and_add(smartlist_t *service_list, const or_options_t *options, rend_service_t *service, int validate_only) { if (!service) { /* It is ok for a service to be NULL, this means there are no services */ return 0; } if (rend_service_check_private_dir(options, service, !validate_only) < 0) { rend_service_free(service); return -1; } smartlist_t *s_list = rend_get_service_list_mutable(service_list); /* We must have a service list, even if it's a temporary one, so we can * check for duplicate services */ if (BUG(!s_list)) { return -1; } return rend_add_service(s_list, service); } /* If this is a reload and there were hidden services configured before, * keep the introduction points that are still needed and close the * other ones. */ STATIC void prune_services_on_reload(smartlist_t *old_service_list, smartlist_t *new_service_list) { origin_circuit_t *ocirc = NULL; smartlist_t *surviving_services = NULL; tor_assert(old_service_list); tor_assert(new_service_list); /* This contains all _existing_ services that survive the reload, that is, * that haven't been removed from the configuration. The difference between * this list and the new service list is that the new list can possibly * contain newly configured services that have no introduction points opened * yet nor key material loaded or generated. */ surviving_services = smartlist_new(); /* Preserve the existing ephemeral services. * * This is the ephemeral service equivalent of the "Copy introduction * points to new services" block, except there's no copy required since * the service structure isn't regenerated. * * After this is done, all ephemeral services will be: * * Removed from old_service_list, so the equivalent non-ephemeral code * will not attempt to preserve them. * * Added to the new_service_list (that previously only had the * services listed in the configuration).
* * Added to surviving_services, which is the list of services that * will NOT have their intro point closed. */ SMARTLIST_FOREACH_BEGIN(old_service_list, rend_service_t *, old) { if (rend_service_is_ephemeral(old)) { SMARTLIST_DEL_CURRENT(old_service_list, old); smartlist_add(surviving_services, old); smartlist_add(new_service_list, old); } } SMARTLIST_FOREACH_END(old); /* Copy introduction points to new services. This is O(n^2), but it's only * called on reconfigure, so it's ok performance wise. */ SMARTLIST_FOREACH_BEGIN(new_service_list, rend_service_t *, new) { SMARTLIST_FOREACH_BEGIN(old_service_list, rend_service_t *, old) { /* Skip ephemeral services as we only want to copy introduction points * from current services to a newly configured one that already exists. * The same directory means it's the same service. */ if (rend_service_is_ephemeral(new) || rend_service_is_ephemeral(old) || strcmp(old->directory, new->directory)) { continue; } smartlist_add_all(new->intro_nodes, old->intro_nodes); smartlist_clear(old->intro_nodes); smartlist_add_all(new->expiring_nodes, old->expiring_nodes); smartlist_clear(old->expiring_nodes); /* This regular service will survive the intro-point-closing step below. */ smartlist_add(surviving_services, old); break; } SMARTLIST_FOREACH_END(old); } SMARTLIST_FOREACH_END(new); /* For every service introduction circuit we can find, see if we have a * matching surviving configured service. If not, close the circuit. */ while ((ocirc = circuit_get_next_service_intro_circ(ocirc))) { int keep_it = 0; tor_assert(ocirc->rend_data); SMARTLIST_FOREACH_BEGIN(surviving_services, const rend_service_t *, s) { if (rend_circuit_pk_digest_eq(ocirc, (uint8_t *) s->pk_digest)) { /* Keep this circuit as we have a matching configured service. */ keep_it = 1; break; } } SMARTLIST_FOREACH_END(s); if (keep_it) { continue; } log_info(LD_REND, "Closing intro point %s for service %s.", safe_str_client(extend_info_describe( ocirc->build_state->chosen_exit)), safe_str_client(rend_data_get_address(ocirc->rend_data))); /* Reason is FINISHED because the service has been removed and thus the * circuit is considered old/unneeded. */ circuit_mark_for_close(TO_CIRCUIT(ocirc), END_CIRC_REASON_FINISHED); } smartlist_free(surviving_services); } /** Set up rend_service_list, based on the values of HiddenServiceDir and * HiddenServicePort in <b>options</b>. Return 0 on success and -1 on * failure. (If <b>validate_only</b> is set, parse, warn and return as * normal, but don't actually change the configured services.)
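 * For example, a typical torrc fragment (illustrative; the directory and
 * target address are hypothetical):
 *   HiddenServiceDir /var/lib/tor/hidden_service/
 *   HiddenServicePort 80 127.0.0.1:8080
 * maps virtual port 80 of the resulting onion address onto a local web
 * server, using the HiddenServicePort parsing implemented above.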
*/ int rend_config_services(const or_options_t *options, int validate_only) { config_line_t *line; rend_service_t *service = NULL; rend_service_port_config_t *portcfg; smartlist_t *old_service_list = NULL; smartlist_t *temp_service_list = NULL; int ok = 0; int rv = -1; /* Use a temporary service list, so that we can check the new services' * consistency with each other */ temp_service_list = smartlist_new(); for (line = options->RendConfigLines; line; line = line->next) { if (!strcasecmp(line->key, "HiddenServiceDir")) { /* register the service we just finished parsing * this code registers every service except the last one parsed, * which is registered below the loop */ if (rend_service_check_dir_and_add(temp_service_list, options, service, validate_only) < 0) { service = NULL; goto free_and_return; } service = tor_malloc_zero(sizeof(rend_service_t)); service->directory = tor_strdup(line->value); service->ports = smartlist_new(); service->intro_period_started = time(NULL); service->n_intro_points_wanted = NUM_INTRO_POINTS_DEFAULT; continue; } if (!service) { log_warn(LD_CONFIG, "%s with no preceding HiddenServiceDir directive", line->key); goto free_and_return; } if (!strcasecmp(line->key, "HiddenServicePort")) { char *err_msg = NULL; portcfg = rend_service_parse_port_config(line->value, " ", &err_msg); if (!portcfg) { if (err_msg) log_warn(LD_CONFIG, "%s", err_msg); tor_free(err_msg); goto free_and_return; } tor_assert(!err_msg); smartlist_add(service->ports, portcfg); } else if (!strcasecmp(line->key, "HiddenServiceAllowUnknownPorts")) { service->allow_unknown_ports = (int)tor_parse_long(line->value, 10, 0, 1, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceAllowUnknownPorts should be 0 or 1, not %s", line->value); goto free_and_return; } log_info(LD_CONFIG, "HiddenServiceAllowUnknownPorts=%d for %s", (int)service->allow_unknown_ports, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceDirGroupReadable")) { service->dir_group_readable = (int)tor_parse_long(line->value, 10, 0, 1, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceDirGroupReadable should be 0 or 1, not %s", line->value); goto free_and_return; } log_info(LD_CONFIG, "HiddenServiceDirGroupReadable=%d for %s", service->dir_group_readable, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceMaxStreams")) { service->max_streams_per_circuit = (int)tor_parse_long(line->value, 10, 0, 65535, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceMaxStreams should be between 0 and %d, not %s", 65535, line->value); goto free_and_return; } log_info(LD_CONFIG, "HiddenServiceMaxStreams=%d for %s", service->max_streams_per_circuit, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceMaxStreamsCloseCircuit")) { service->max_streams_close_circuit = (int)tor_parse_long(line->value, 10, 0, 1, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceMaxStreamsCloseCircuit should be 0 or 1, " "not %s", line->value); goto free_and_return; } log_info(LD_CONFIG, "HiddenServiceMaxStreamsCloseCircuit=%d for %s", (int)service->max_streams_close_circuit, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceNumIntroductionPoints")) { service->n_intro_points_wanted = (unsigned int) tor_parse_long(line->value, 10, 0, NUM_INTRO_POINTS_MAX, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceNumIntroductionPoints " "should be between %d and %d, not %s", 0, NUM_INTRO_POINTS_MAX, line->value); goto 
free_and_return; } log_info(LD_CONFIG, "HiddenServiceNumIntroductionPoints=%d for %s", service->n_intro_points_wanted, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceAuthorizeClient")) { /* Parse auth type and comma-separated list of client names and add a * rend_authorized_client_t for each client to the service's list * of authorized clients. */ smartlist_t *type_names_split, *clients; const char *authname; int num_clients; if (service->auth_type != REND_NO_AUTH) { log_warn(LD_CONFIG, "Got multiple HiddenServiceAuthorizeClient " "lines for a single service."); goto free_and_return; } type_names_split = smartlist_new(); smartlist_split_string(type_names_split, line->value, " ", 0, 2); if (smartlist_len(type_names_split) < 1) { log_warn(LD_BUG, "HiddenServiceAuthorizeClient has no value. This " "should have been prevented when parsing the " "configuration."); goto free_and_return; } authname = smartlist_get(type_names_split, 0); if (!strcasecmp(authname, "basic")) { service->auth_type = REND_BASIC_AUTH; } else if (!strcasecmp(authname, "stealth")) { service->auth_type = REND_STEALTH_AUTH; } else { log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains " "unrecognized auth-type '%s'. Only 'basic' or 'stealth' " "are recognized.", (char *) smartlist_get(type_names_split, 0)); SMARTLIST_FOREACH(type_names_split, char *, cp, tor_free(cp)); smartlist_free(type_names_split); goto free_and_return; } service->clients = smartlist_new(); if (smartlist_len(type_names_split) < 2) { log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains " "auth-type '%s', but no client names.", service->auth_type == REND_BASIC_AUTH ? "basic" : "stealth"); SMARTLIST_FOREACH(type_names_split, char *, cp, tor_free(cp)); smartlist_free(type_names_split); continue; } clients = smartlist_new(); smartlist_split_string(clients, smartlist_get(type_names_split, 1), ",", SPLIT_SKIP_SPACE, 0); SMARTLIST_FOREACH(type_names_split, char *, cp, tor_free(cp)); smartlist_free(type_names_split); /* Remove duplicate client names. */ num_clients = smartlist_len(clients); smartlist_sort_strings(clients); smartlist_uniq_strings(clients); if (smartlist_len(clients) < num_clients) { log_info(LD_CONFIG, "HiddenServiceAuthorizeClient contains %d " "duplicate client name(s); removing.", num_clients - smartlist_len(clients)); num_clients = smartlist_len(clients); } SMARTLIST_FOREACH_BEGIN(clients, const char *, client_name) { rend_authorized_client_t *client; if (!rend_valid_client_name(client_name)) { log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains an " "illegal client name: '%s'. Names must be " "between 1 and %d characters and contain " "only [A-Za-z0-9+_-].", client_name, REND_CLIENTNAME_MAX_LEN); SMARTLIST_FOREACH(clients, char *, cp, tor_free(cp)); smartlist_free(clients); goto free_and_return; } client = tor_malloc_zero(sizeof(rend_authorized_client_t)); client->client_name = tor_strdup(client_name); smartlist_add(service->clients, client); log_debug(LD_REND, "Adding client name '%s'", client_name); } SMARTLIST_FOREACH_END(client_name); SMARTLIST_FOREACH(clients, char *, cp, tor_free(cp)); smartlist_free(clients); /* Ensure maximum number of clients. 
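 * (Per the checks just below: at most 512 clients for "basic" auth and
 * 16 for "stealth" auth.)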
*/ if ((service->auth_type == REND_BASIC_AUTH && smartlist_len(service->clients) > 512) || (service->auth_type == REND_STEALTH_AUTH && smartlist_len(service->clients) > 16)) { log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains %d " "client authorization entries, but only a " "maximum of %d entries is allowed for " "authorization type '%s'.", smartlist_len(service->clients), service->auth_type == REND_BASIC_AUTH ? 512 : 16, service->auth_type == REND_BASIC_AUTH ? "basic" : "stealth"); goto free_and_return; } } else { tor_assert(!strcasecmp(line->key, "HiddenServiceVersion")); if (strcmp(line->value, "2")) { log_warn(LD_CONFIG, "The only supported HiddenServiceVersion is 2."); goto free_and_return; } } } /* register the final service after we have finished parsing all services * this code only registers the last service, other services are registered * within the loop. It is ok for this service to be NULL, it is ignored. */ if (rend_service_check_dir_and_add(temp_service_list, options, service, validate_only) < 0) { service = NULL; goto free_and_return; } service = NULL; /* Free the newly added services if validating */ if (validate_only) { rv = 0; goto free_and_return; } /* Otherwise, use the newly added services as the new service list. * Since we have now replaced the global service list, from this point on we * must succeed, or die trying. */ old_service_list = rend_service_list; rend_service_list = temp_service_list; temp_service_list = NULL; /* If this is a reload and there were hidden services configured before, * keep the introduction points that are still needed and close the * other ones. */ if (old_service_list && !validate_only) { prune_services_on_reload(old_service_list, rend_service_list); /* Every remaining service in the old list has been removed from the * configuration, so clean them up safely. */ SMARTLIST_FOREACH(old_service_list, rend_service_t *, s, rend_service_free(s)); smartlist_free(old_service_list); } return 0; free_and_return: rend_service_free(service); SMARTLIST_FOREACH(temp_service_list, rend_service_t *, ptr, rend_service_free(ptr)); smartlist_free(temp_service_list); return rv; } /** Add the ephemeral service <b>pk</b>/<b>ports</b> if possible, using * client authorization <b>auth_type</b> and an optional list of * rend_authorized_client_t in <b>auth_clients</b>, with * <b>max_streams_per_circuit</b> streams allowed per rendezvous circuit, * and circuit closure on max streams being exceeded set by * <b>max_streams_close_circuit</b>. * * Ownership of pk, ports, and auth_clients is passed to this routine. * Regardless of success/failure, callers should not touch these values * after calling this routine, and may assume that correct cleanup has * been done on failure. * * Return an appropriate rend_service_add_ephemeral_status_t. */ rend_service_add_ephemeral_status_t rend_service_add_ephemeral(crypto_pk_t *pk, smartlist_t *ports, int max_streams_per_circuit, int max_streams_close_circuit, rend_auth_type_t auth_type, smartlist_t *auth_clients, char **service_id_out) { *service_id_out = NULL; /* Allocate the service structure, and initialize the key and key-derived * parameters. */ rend_service_t *s = tor_malloc_zero(sizeof(rend_service_t)); s->directory = NULL; /* This indicates the service is ephemeral.
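 * Ephemeral services are created at runtime (e.g. via the control port's
 * ADD_ONION command) and keep no on-disk state, so rend_service_is_ephemeral()
 * keys off this NULL directory.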
*/ s->private_key = pk; s->auth_type = auth_type; s->clients = auth_clients; s->ports = ports; s->intro_period_started = time(NULL); s->n_intro_points_wanted = NUM_INTRO_POINTS_DEFAULT; s->max_streams_per_circuit = max_streams_per_circuit; s->max_streams_close_circuit = max_streams_close_circuit; if (rend_service_derive_key_digests(s) < 0) { rend_service_free(s); return RSAE_BADPRIVKEY; } if (!s->ports || smartlist_len(s->ports) == 0) { log_warn(LD_CONFIG, "At least one VIRTPORT/TARGET must be specified."); rend_service_free(s); return RSAE_BADVIRTPORT; } if (s->auth_type != REND_NO_AUTH && (!s->clients || smartlist_len(s->clients) == 0)) { log_warn(LD_CONFIG, "At least one authorized client must be specified."); rend_service_free(s); return RSAE_BADAUTH; } /* Enforcing pk/id uniqueness should be done by rend_service_load_keys(), but * it's not, see #14828. */ if (rend_service_get_by_pk_digest(s->pk_digest)) { log_warn(LD_CONFIG, "Onion Service private key collides with an " "existing service."); rend_service_free(s); return RSAE_ADDREXISTS; } if (rend_service_get_by_service_id(s->service_id)) { log_warn(LD_CONFIG, "Onion Service id collides with an existing service."); rend_service_free(s); return RSAE_ADDREXISTS; } /* Initialize the service. */ if (rend_add_service(NULL, s)) { return RSAE_INTERNAL; } *service_id_out = tor_strdup(s->service_id); log_debug(LD_CONFIG, "Added ephemeral Onion Service: %s", s->service_id); return RSAE_OKAY; } /** Remove the ephemeral service <b>service_id</b> if possible. Returns 0 on * success, and -1 on failure. */ int rend_service_del_ephemeral(const char *service_id) { rend_service_t *s; if (!rend_valid_service_id(service_id)) { log_warn(LD_CONFIG, "Requested malformed Onion Service id for removal."); return -1; } if ((s = rend_service_get_by_service_id(service_id)) == NULL) { log_warn(LD_CONFIG, "Requested non-existent Onion Service id for " "removal."); return -1; } if (!rend_service_is_ephemeral(s)) { log_warn(LD_CONFIG, "Requested non-ephemeral Onion Service for removal."); return -1; } /* Kill the intro point circuit for the Onion Service, and remove it from * the list. Closing existing connections is the application's problem. * * XXX: As with the comment in rend_config_services(), a nice abstraction * would be ideal here, but for now just duplicate the code. */ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) { if (!circ->marked_for_close && (circ->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO || circ->purpose == CIRCUIT_PURPOSE_S_INTRO)) { origin_circuit_t *oc = TO_ORIGIN_CIRCUIT(circ); tor_assert(oc->rend_data); if (!rend_circuit_pk_digest_eq(oc, (uint8_t *) s->pk_digest)) { continue; } log_debug(LD_REND, "Closing intro point %s for service %s.", safe_str_client(extend_info_describe( oc->build_state->chosen_exit)), rend_data_get_address(oc->rend_data)); circuit_mark_for_close(circ, END_CIRC_REASON_FINISHED); } } SMARTLIST_FOREACH_END(circ); smartlist_remove(rend_service_list, s); rend_service_free(s); log_debug(LD_CONFIG, "Removed ephemeral Onion Service: %s", service_id); return 0; } /** Replace the old value of <b>service</b>-\>desc with one that reflects * the other fields in service. 
*/ static void rend_service_update_descriptor(rend_service_t *service) { rend_service_descriptor_t *d; origin_circuit_t *circ; int i; rend_service_descriptor_free(service->desc); service->desc = NULL; d = service->desc = tor_malloc_zero(sizeof(rend_service_descriptor_t)); d->pk = crypto_pk_dup_key(service->private_key); d->timestamp = time(NULL); d->timestamp -= d->timestamp % 3600; /* Round down to nearest hour */ d->intro_nodes = smartlist_new(); /* Support intro protocols 2 and 3. */ d->protocols = (1 << 2) + (1 << 3); for (i = 0; i < smartlist_len(service->intro_nodes); ++i) { rend_intro_point_t *intro_svc = smartlist_get(service->intro_nodes, i); rend_intro_point_t *intro_desc; /* This intro point won't be listed in the descriptor... */ intro_svc->listed_in_last_desc = 0; circ = find_intro_circuit(intro_svc, service->pk_digest); if (!circ || circ->base_.purpose != CIRCUIT_PURPOSE_S_INTRO) { /* This intro point's circuit isn't finished yet. Don't list it. */ continue; } /* ...unless this intro point is listed in the descriptor. */ intro_svc->listed_in_last_desc = 1; /* We have an entirely established intro circuit. Publish it in * our descriptor. */ intro_desc = tor_malloc_zero(sizeof(rend_intro_point_t)); intro_desc->extend_info = extend_info_dup(intro_svc->extend_info); if (intro_svc->intro_key) intro_desc->intro_key = crypto_pk_dup_key(intro_svc->intro_key); smartlist_add(d->intro_nodes, intro_desc); if (intro_svc->time_published == -1) { /* We are publishing this intro point in a descriptor for the * first time -- note the current time in the service's copy of * the intro point. */ intro_svc->time_published = time(NULL); } } } /* Allocate and return a string containing the path to file_name in * service->directory. Asserts that service has a directory. * This function will never return NULL. * The caller must free this path. */ static char * rend_service_path(const rend_service_t *service, const char *file_name) { char *file_path = NULL; tor_assert(service->directory); /* Can never fail: asserts rather than leaving file_path NULL. */ tor_asprintf(&file_path, "%s%s%s", service->directory, PATH_SEPARATOR, file_name); return file_path; } /* Allocate and return a string containing the path to the single onion * service poison file in service->directory. Asserts that service has a * directory. * The caller must free this path. */ STATIC char * rend_service_sos_poison_path(const rend_service_t *service) { return rend_service_path(service, sos_poison_fname); } /** Return True if hidden service <b>service</b> has been poisoned by single * onion mode. */ static int service_is_single_onion_poisoned(const rend_service_t *service) { char *poison_fname = NULL; file_status_t fstatus; /* Passing a NULL service is a bug */ if (BUG(!service)) { return 0; } if (rend_service_is_ephemeral(service)) { return 0; } poison_fname = rend_service_sos_poison_path(service); fstatus = file_status(poison_fname); tor_free(poison_fname); /* If this fname is occupied, the hidden service has been poisoned. * fstatus can be FN_ERROR if the service directory does not exist; in that * case, there is obviously no poison file. */ if (fstatus == FN_FILE || fstatus == FN_EMPTY) { return 1; } return 0; } /* Return 1 if the private key file for service exists and has a non-zero size, * and 0 otherwise.
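 * (In file_status() terms, per the code below: only FN_FILE counts as a
 * usable key; FN_EMPTY and FN_NOENT mean no key has ever been written.)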
*/ static int rend_service_private_key_exists(const rend_service_t *service) { char *private_key_path = rend_service_path(service, private_key_fname); const file_status_t private_key_status = file_status(private_key_path); tor_free(private_key_path); /* Only non-empty regular private key files could have been used before. * fstatus can be FN_ERROR if the service directory does not exist, in that * case, there is obviously no private key. */ return private_key_status == FN_FILE; } /** Check the single onion service poison state of the directory for s: * - If the service is poisoned, and we are in Single Onion Mode, * return 0, * - If the service is not poisoned, and we are not in Single Onion Mode, * return 0, * - Otherwise, the poison state is invalid: the service was created in one * mode, and is being used in the other, return -1. * Hidden service directories without keys are always considered consistent. * They will be poisoned after their directory is created (if needed). */ STATIC int rend_service_verify_single_onion_poison(const rend_service_t* s, const or_options_t* options) { /* Passing a NULL service is a bug */ if (BUG(!s)) { return -1; } /* Ephemeral services are checked at ADD_ONION time */ if (BUG(rend_service_is_ephemeral(s))) { return -1; } /* Service is expected to have a directory */ if (BUG(!s->directory)) { return -1; } /* Services without keys are always ok - their keys will only ever be used * in the current mode */ if (!rend_service_private_key_exists(s)) { return 0; } /* The key has been used before in a different mode */ if (service_is_single_onion_poisoned(s) != rend_service_non_anonymous_mode_enabled(options)) { return -1; } /* The key exists and is consistent with the current mode */ return 0; } /*** Helper for rend_service_poison_new_single_onion_dir(). Add a file to * the hidden service directory for s that marks it as a single onion service. * Tor must be in single onion mode before calling this function, and the * service directory must already have been created. * Returns 0 when a directory is successfully poisoned, or if it is already * poisoned. Returns -1 on a failure to read the directory or write the poison * file, or if there is an existing private key file in the directory. (The * service should have been poisoned when the key was created.) */ static int poison_new_single_onion_hidden_service_dir_impl(const rend_service_t *service, const or_options_t* options) { /* Passing a NULL service is a bug */ if (BUG(!service)) { return -1; } /* We must only poison directories if we're in Single Onion mode */ tor_assert(rend_service_non_anonymous_mode_enabled(options)); int fd; int retval = -1; char *poison_fname = NULL; if (rend_service_is_ephemeral(service)) { log_info(LD_REND, "Ephemeral HS started in non-anonymous mode."); return 0; } /* Make sure we're only poisoning new hidden service directories */ if (rend_service_private_key_exists(service)) { log_warn(LD_BUG, "Tried to single onion poison a service directory after " "the private key was created."); return -1; } /* Make sure the directory was created before calling this function. */ if (BUG(rend_service_check_private_dir_impl(options, service, 0) < 0)) return -1; poison_fname = rend_service_sos_poison_path(service); switch (file_status(poison_fname)) { case FN_DIR: case FN_ERROR: log_warn(LD_FS, "Can't read single onion poison file \"%s\"", poison_fname); goto done; case FN_FILE: /* single onion poison file already exists. NOP. */ case FN_EMPTY: /* single onion poison file already exists. NOP. 
*/ log_debug(LD_FS, "Tried to re-poison a single onion poisoned file \"%s\"", poison_fname); break; case FN_NOENT: fd = tor_open_cloexec(poison_fname, O_RDWR|O_CREAT|O_TRUNC, 0600); if (fd < 0) { log_warn(LD_FS, "Could not create single onion poison file %s", poison_fname); goto done; } close(fd); break; default: tor_assert(0); } retval = 0; done: tor_free(poison_fname); return retval; } /** We just got launched in Single Onion Mode. That's a non-anonymous mode for * hidden services. If s is new, we should mark its hidden service * directory appropriately so that it is never launched as a location-private * hidden service. (New directories don't have private key files.) * Return 0 on success, -1 on failure. */ STATIC int rend_service_poison_new_single_onion_dir(const rend_service_t *s, const or_options_t* options) { /* Passing a NULL service is a bug */ if (BUG(!s)) { return -1; } /* We must only poison directories if we're in Single Onion mode */ tor_assert(rend_service_non_anonymous_mode_enabled(options)); /* Ephemeral services aren't allowed in non-anonymous mode */ if (BUG(rend_service_is_ephemeral(s))) { return -1; } /* Service is expected to have a directory */ if (BUG(!s->directory)) { return -1; } if (!rend_service_private_key_exists(s)) { if (poison_new_single_onion_hidden_service_dir_impl(s, options) < 0) { return -1; } } return 0; } /** Load and/or generate private keys for all hidden services, possibly * including keys for client authorization. * If a <b>service_list</b> is provided, treat it as the list of hidden * services (used in unittests). Otherwise, require that rend_service_list is * not NULL. * Return 0 on success, -1 on failure. */ int rend_service_load_all_keys(const smartlist_t *service_list) { /* Use service_list for unit tests */ const smartlist_t *s_list = rend_get_service_list(service_list); if (BUG(!s_list)) { return -1; } SMARTLIST_FOREACH_BEGIN(s_list, rend_service_t *, s) { if (s->private_key) continue; log_info(LD_REND, "Loading hidden-service keys from %s", rend_service_escaped_dir(s)); if (rend_service_load_keys(s) < 0) return -1; } SMARTLIST_FOREACH_END(s); return 0; } /** Add to <b>lst</b> every filename used by <b>s</b>. */ static void rend_service_add_filenames_to_list(smartlist_t *lst, const rend_service_t *s) { tor_assert(lst); tor_assert(s); tor_assert(s->directory); smartlist_add(lst, rend_service_path(s, private_key_fname)); smartlist_add(lst, rend_service_path(s, hostname_fname)); smartlist_add(lst, rend_service_path(s, client_keys_fname)); smartlist_add(lst, rend_service_sos_poison_path(s)); } /** Add to <b>open_lst</b> every filename used by a configured hidden service, * and to <b>stat_lst</b> every directory used by a configured hidden * service. */ void rend_services_add_filenames_to_lists(smartlist_t *open_lst, smartlist_t *stat_lst) { if (!rend_service_list) return; SMARTLIST_FOREACH_BEGIN(rend_service_list, rend_service_t *, s) { if (!rend_service_is_ephemeral(s)) { rend_service_add_filenames_to_list(open_lst, s); smartlist_add_strdup(stat_lst, s->directory); } } SMARTLIST_FOREACH_END(s); } /** Derive all rend_service_t internal material based on the service's key. * Returns 0 on success, -1 on failure.
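 * (Concretely: rend_get_service_id() yields the 16-character base32 service
 * ID, derived from the leading 80 bits of the SHA-1 digest of the public
 * key, and crypto_pk_get_digest() fills in the full pk_digest; an editorial
 * summary of the two calls below.)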
*/ static int rend_service_derive_key_digests(struct rend_service_t *s) { if (rend_get_service_id(s->private_key, s->service_id)<0) { log_warn(LD_BUG, "Internal error: couldn't encode service ID."); return -1; } if (crypto_pk_get_digest(s->private_key, s->pk_digest)<0) { log_warn(LD_BUG, "Couldn't compute hash of public key."); return -1; } return 0; } /* Implements the directory check from rend_service_check_private_dir, * without doing the single onion poison checks. */ static int rend_service_check_private_dir_impl(const or_options_t *options, const rend_service_t *s, int create) { cpd_check_t check_opts = CPD_NONE; if (create) { check_opts |= CPD_CREATE; } else { check_opts |= CPD_CHECK_MODE_ONLY; check_opts |= CPD_CHECK; } if (s->dir_group_readable) { check_opts |= CPD_GROUP_READ; } /* Check/create directory */ if (check_private_dir(s->directory, check_opts, options->User) < 0) { log_warn(LD_REND, "Checking service directory %s failed.", s->directory); return -1; } return 0; } /** Make sure that the directory for <b>s</b> is private, using the config in * <b>options</b>. * If <b>create</b> is true: * - if the directory exists, change permissions if needed, * - if the directory does not exist, create it with the correct permissions. * If <b>create</b> is false: * - if the directory exists, check permissions, * - if the directory does not exist, check if we think we can create it. * Return 0 on success, -1 on failure. */ static int rend_service_check_private_dir(const or_options_t *options, const rend_service_t *s, int create) { /* Passing a NULL service is a bug */ if (BUG(!s)) { return -1; } /* Check/create directory */ if (rend_service_check_private_dir_impl(options, s, create) < 0) { return -1; } /* Check if the hidden service key exists, and was created in a different * single onion service mode, and refuse to launch if it has. * This is safe to call even when create is false, as it ignores missing * keys and directories: they are always valid. */ if (rend_service_verify_single_onion_poison(s, options) < 0) { /* We can't use s->service_id here, as the key may not have been loaded */ log_warn(LD_GENERAL, "We are configured with " "HiddenServiceNonAnonymousMode %d, but the hidden " "service key in directory %s was created in %s mode. " "This is not allowed.", rend_service_non_anonymous_mode_enabled(options) ? 1 : 0, rend_service_escaped_dir(s), rend_service_non_anonymous_mode_enabled(options) ? "an anonymous" : "a non-anonymous" ); return -1; } /* Poison new single onion directories immediately after they are created, * so that we never accidentally launch non-anonymous hidden services * thinking they are anonymous. Any keys created later will end up with the * correct poisoning state. */ if (create && rend_service_non_anonymous_mode_enabled(options)) { static int logged_warning = 0; if (rend_service_poison_new_single_onion_dir(s, options) < 0) { log_warn(LD_GENERAL,"Failed to mark new hidden services as non-anonymous" "."); return -1; } if (!logged_warning) { /* The keys for these services are linked to the server IP address */ log_notice(LD_REND, "The configured onion service directories have been " "used in single onion mode. They can not be used for " "anonymous hidden services."); logged_warning = 1; } } return 0; } /** Load and/or generate private keys for the hidden service <b>s</b>, * possibly including keys for client authorization. Return 0 on success, -1 * on failure. 
*/ static int rend_service_load_keys(rend_service_t *s) { char *fname = NULL; char buf[128]; /* Make sure the directory was created and single onion poisoning was * checked before calling this function */ if (BUG(rend_service_check_private_dir(get_options(), s, 0) < 0)) goto err; /* Load key */ fname = rend_service_path(s, private_key_fname); s->private_key = init_key_from_file(fname, 1, LOG_ERR, 0); if (!s->private_key) goto err; if (rend_service_derive_key_digests(s) < 0) goto err; tor_free(fname); /* Create service file */ fname = rend_service_path(s, hostname_fname); tor_snprintf(buf, sizeof(buf),"%s.onion\n", s->service_id); if (write_str_to_file(fname,buf,0)<0) { log_warn(LD_CONFIG, "Could not write onion address to hostname file."); goto err; } #ifndef _WIN32 if (s->dir_group_readable) { /* Also verify hostname file created with group read. */ if (chmod(fname, 0640)) log_warn(LD_FS,"Unable to make hidden hostname file %s group-readable.", fname); } #endif /* If client authorization is configured, load or generate keys. */ if (s->auth_type != REND_NO_AUTH) { if (rend_service_load_auth_keys(s, fname) < 0) { goto err; } } int r = 0; goto done; err: r = -1; done: memwipe(buf, 0, sizeof(buf)); tor_free(fname); return r; } /** Load and/or generate client authorization keys for the hidden service * <b>s</b>, which stores its hostname in <b>hfname</b>. Return 0 on success, * -1 on failure. */ static int rend_service_load_auth_keys(rend_service_t *s, const char *hfname) { int r = 0; char *cfname = NULL; char *client_keys_str = NULL; strmap_t *parsed_clients = strmap_new(); FILE *cfile, *hfile; open_file_t *open_cfile = NULL, *open_hfile = NULL; char desc_cook_out[3*REND_DESC_COOKIE_LEN_BASE64+1]; char service_id[16+1]; char buf[1500]; /* Load client keys and descriptor cookies, if available. */ cfname = rend_service_path(s, client_keys_fname); client_keys_str = read_file_to_str(cfname, RFTS_IGNORE_MISSING, NULL); if (client_keys_str) { if (rend_parse_client_keys(parsed_clients, client_keys_str) < 0) { log_warn(LD_CONFIG, "Previously stored client_keys file could not " "be parsed."); goto err; } else { log_info(LD_CONFIG, "Parsed %d previously stored client entries.", strmap_size(parsed_clients)); } } /* Prepare client_keys and hostname files. */ if (!(cfile = start_writing_to_stdio_file(cfname, OPEN_FLAGS_REPLACE | O_TEXT, 0600, &open_cfile))) { log_warn(LD_CONFIG, "Could not open client_keys file %s", escaped(cfname)); goto err; } if (!(hfile = start_writing_to_stdio_file(hfname, OPEN_FLAGS_REPLACE | O_TEXT, 0600, &open_hfile))) { log_warn(LD_CONFIG, "Could not open hostname file %s", escaped(hfname)); goto err; } /* Either use loaded keys for configured clients or generate new * ones if a client is new. */ SMARTLIST_FOREACH_BEGIN(s->clients, rend_authorized_client_t *, client) { rend_authorized_client_t *parsed = strmap_get(parsed_clients, client->client_name); int written; size_t len; /* Copy descriptor cookie from parsed entry or create new one. */ if (parsed) { memcpy(client->descriptor_cookie, parsed->descriptor_cookie, REND_DESC_COOKIE_LEN); } else { crypto_rand((char *) client->descriptor_cookie, REND_DESC_COOKIE_LEN); } /* For compatibility with older tor clients, this does not * truncate the padding characters, unlike rend_auth_encode_cookie. 
*/ if (base64_encode(desc_cook_out, 3*REND_DESC_COOKIE_LEN_BASE64+1, (char *) client->descriptor_cookie, REND_DESC_COOKIE_LEN, 0) < 0) { log_warn(LD_BUG, "Could not base64-encode descriptor cookie."); goto err; } /* Copy client key from parsed entry or create new one if required. */ if (parsed && parsed->client_key) { client->client_key = crypto_pk_dup_key(parsed->client_key); } else if (s->auth_type == REND_STEALTH_AUTH) { /* Create private key for client. */ crypto_pk_t *prkey = NULL; if (!(prkey = crypto_pk_new())) { log_warn(LD_BUG,"Error constructing client key"); goto err; } if (crypto_pk_generate_key(prkey)) { log_warn(LD_BUG,"Error generating client key"); crypto_pk_free(prkey); goto err; } if (crypto_pk_check_key(prkey) <= 0) { log_warn(LD_BUG,"Generated client key seems invalid"); crypto_pk_free(prkey); goto err; } client->client_key = prkey; } /* Add entry to client_keys file. */ written = tor_snprintf(buf, sizeof(buf), "client-name %s\ndescriptor-cookie %s\n", client->client_name, desc_cook_out); if (written < 0) { log_warn(LD_BUG, "Could not write client entry."); goto err; } if (client->client_key) { char *client_key_out = NULL; if (crypto_pk_write_private_key_to_string(client->client_key, &client_key_out, &len) != 0) { log_warn(LD_BUG, "Internal error: " "crypto_pk_write_private_key_to_string() failed."); goto err; } if (rend_get_service_id(client->client_key, service_id)<0) { log_warn(LD_BUG, "Internal error: couldn't encode service ID."); /* * len is string length, not buffer length, but last byte is NUL * anyway. */ memwipe(client_key_out, 0, len); tor_free(client_key_out); goto err; } written = tor_snprintf(buf + written, sizeof(buf) - written, "client-key\n%s", client_key_out); memwipe(client_key_out, 0, len); tor_free(client_key_out); if (written < 0) { log_warn(LD_BUG, "Could not write client entry."); goto err; } } else { strlcpy(service_id, s->service_id, sizeof(service_id)); } if (fputs(buf, cfile) < 0) { log_warn(LD_FS, "Could not append client entry to file: %s", strerror(errno)); goto err; } /* Add line to hostname file. This is not the same encoding as in * client_keys. */ char *encoded_cookie = rend_auth_encode_cookie(client->descriptor_cookie, s->auth_type); if (!encoded_cookie) { log_warn(LD_BUG, "Could not base64-encode descriptor cookie."); goto err; } tor_snprintf(buf, sizeof(buf), "%s.onion %s # client: %s\n", service_id, encoded_cookie, client->client_name); memwipe(encoded_cookie, 0, strlen(encoded_cookie)); tor_free(encoded_cookie); if (fputs(buf, hfile)<0) { log_warn(LD_FS, "Could not append host entry to file: %s", strerror(errno)); goto err; } } SMARTLIST_FOREACH_END(client); finish_writing_to_file(open_cfile); finish_writing_to_file(open_hfile); goto done; err: r = -1; if (open_cfile) abort_writing_to_file(open_cfile); if (open_hfile) abort_writing_to_file(open_hfile); done: if (client_keys_str) { memwipe(client_keys_str, 0, strlen(client_keys_str)); tor_free(client_keys_str); } strmap_free(parsed_clients, rend_authorized_client_strmap_item_free); if (cfname) { memwipe(cfname, 0, strlen(cfname)); tor_free(cfname); } /* Clear stack buffers that held key-derived material. */ memwipe(buf, 0, sizeof(buf)); memwipe(desc_cook_out, 0, sizeof(desc_cook_out)); memwipe(service_id, 0, sizeof(service_id)); return r; } /** Return the service whose public key has a digest of <b>digest</b>, or * NULL if no such service exists. 
*/ static rend_service_t * rend_service_get_by_pk_digest(const char* digest) { SMARTLIST_FOREACH(rend_service_list, rend_service_t*, s, if (tor_memeq(s->pk_digest,digest,DIGEST_LEN)) return s); return NULL; } /** Return the service whose service id is <b>id</b>, or NULL if no such * service exists. */ static struct rend_service_t * rend_service_get_by_service_id(const char *id) { tor_assert(strlen(id) == REND_SERVICE_ID_LEN_BASE32); SMARTLIST_FOREACH(rend_service_list, rend_service_t*, s, { if (tor_memeq(s->service_id, id, REND_SERVICE_ID_LEN_BASE32)) return s; }); return NULL; } /** Return 1 if any virtual port in <b>service</b> wants a circuit * to have good uptime. Else return 0. */ static int rend_service_requires_uptime(rend_service_t *service) { int i; rend_service_port_config_t *p; for (i=0; i < smartlist_len(service->ports); ++i) { p = smartlist_get(service->ports, i); if (smartlist_contains_int_as_string(get_options()->LongLivedPorts, p->virtual_port)) return 1; } return 0; } /** Check client authorization of a given <b>descriptor_cookie</b> of * length <b>cookie_len</b> for <b>service</b>. Return 1 for success * and 0 for failure. */ static int rend_check_authorization(rend_service_t *service, const char *descriptor_cookie, size_t cookie_len) { rend_authorized_client_t *auth_client = NULL; tor_assert(service); tor_assert(descriptor_cookie); if (!service->clients) { log_warn(LD_BUG, "Can't check authorization for a service that has no " "authorized clients configured."); return 0; } if (cookie_len != REND_DESC_COOKIE_LEN) { log_info(LD_REND, "Descriptor cookie is %lu bytes, but we expected " "%lu bytes. Dropping cell.", (unsigned long)cookie_len, (unsigned long)REND_DESC_COOKIE_LEN); return 0; } /* Look up client authorization by descriptor cookie. */ SMARTLIST_FOREACH(service->clients, rend_authorized_client_t *, client, { if (tor_memeq(client->descriptor_cookie, descriptor_cookie, REND_DESC_COOKIE_LEN)) { auth_client = client; break; } }); if (!auth_client) { char descriptor_cookie_base64[3*REND_DESC_COOKIE_LEN_BASE64]; base64_encode(descriptor_cookie_base64, sizeof(descriptor_cookie_base64), descriptor_cookie, REND_DESC_COOKIE_LEN, 0); log_info(LD_REND, "No authorization found for descriptor cookie '%s'! " "Dropping cell!", descriptor_cookie_base64); return 0; } /* Allow the request. */ log_info(LD_REND, "Client %s authorized for service %s.", auth_client->client_name, service->service_id); return 1; } /* Can this service make a direct connection to ei? * It must be a single onion service, and the firewall rules must allow ei. */ static int rend_service_use_direct_connection(const or_options_t* options, const extend_info_t* ei) { /* We'll connect directly all reachable addresses, whether preferred or not. * The prefer_ipv6 argument to fascist_firewall_allows_address_addr is * ignored, because pref_only is 0. */ return (rend_service_allow_non_anonymous_connection(options) && fascist_firewall_allows_address_addr(&ei->addr, ei->port, FIREWALL_OR_CONNECTION, 0, 0)); } /* Like rend_service_use_direct_connection, but to a node. */ static int rend_service_use_direct_connection_node(const or_options_t* options, const node_t* node) { /* We'll connect directly all reachable addresses, whether preferred or not. */ return (rend_service_allow_non_anonymous_connection(options) && fascist_firewall_allows_node(node, FIREWALL_OR_CONNECTION, 0)); } /****** * Handle cells ******/ /** Respond to an INTRODUCE2 cell by launching a circuit to the chosen * rendezvous point. 
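 *
 * A summary of the stages below (descriptive, not normative):
 *
 *   1. Sanity-check the circuit purpose; look up the service and the
 *      intro point that this circuit belongs to.
 *   2. rend_service_begin_parse_intro(): split the cell into PK digest
 *      and ciphertext; reject replays of the PK-encrypted part.
 *   3. rend_service_decrypt_intro(): hybrid-decrypt with the intro key.
 *   4. rend_service_parse_intro_plaintext() and
 *      rend_service_validate_intro_late(): parse and validate the
 *      plaintext; reject replays of the DH part; check client auth.
 *   5. Complete the DH handshake and launch a circuit to the rendezvous
 *      point named in the cell.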
*/ int rend_service_receive_introduction(origin_circuit_t *circuit, const uint8_t *request, size_t request_len) { /* Global status stuff */ int status = 0, result; const or_options_t *options = get_options(); char *err_msg = NULL; int err_msg_severity = LOG_WARN; const char *stage_descr = NULL, *rend_pk_digest; int reason = END_CIRC_REASON_TORPROTOCOL; /* Service/circuit/key stuff we can learn before parsing */ char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; rend_service_t *service = NULL; rend_intro_point_t *intro_point = NULL; crypto_pk_t *intro_key = NULL; /* Parsed cell */ rend_intro_cell_t *parsed_req = NULL; /* Rendezvous point */ extend_info_t *rp = NULL; /* XXX not handled yet */ char buf[RELAY_PAYLOAD_SIZE]; char keys[DIGEST_LEN+CPATH_KEY_MATERIAL_LEN]; /* Holds KH, Df, Db, Kf, Kb */ int i; crypto_dh_t *dh = NULL; origin_circuit_t *launched = NULL; crypt_path_t *cpath = NULL; char hexcookie[9]; int circ_needs_uptime; time_t now = time(NULL); time_t elapsed; int replay; /* Do some initial validation and logging before we parse the cell */ if (circuit->base_.purpose != CIRCUIT_PURPOSE_S_INTRO) { log_warn(LD_PROTOCOL, "Got an INTRODUCE2 over a non-introduction circuit %u.", (unsigned) circuit->base_.n_circ_id); goto err; } assert_circ_anonymity_ok(circuit, options); tor_assert(circuit->rend_data); /* XXX: This is version 2 specific (only one supported). */ rend_pk_digest = (char *) rend_data_get_pk_digest(circuit->rend_data, NULL); /* We'll use this in a bazillion log messages */ base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1, rend_pk_digest, REND_SERVICE_ID_LEN); /* look up service depending on circuit. */ service = rend_service_get_by_pk_digest(rend_pk_digest); if (!service) { log_warn(LD_BUG, "Internal error: Got an INTRODUCE2 cell on an intro " "circ for an unrecognized service %s.", escaped(serviceid)); goto err; } intro_point = find_intro_point(circuit); if (intro_point == NULL) { intro_point = find_expiring_intro_point(service, circuit); if (intro_point == NULL) { log_warn(LD_BUG, "Internal error: Got an INTRODUCE2 cell on an " "intro circ (for service %s) with no corresponding " "rend_intro_point_t.", escaped(serviceid)); goto err; } } log_info(LD_REND, "Received INTRODUCE2 cell for service %s on circ %u.", escaped(serviceid), (unsigned)circuit->base_.n_circ_id); /* use intro key instead of service key. */ intro_key = circuit->intro_key; tor_free(err_msg); stage_descr = NULL; stage_descr = "early parsing"; /* Early parsing pass (get pk, ciphertext); type 2 is INTRODUCE2 */ parsed_req = rend_service_begin_parse_intro(request, request_len, 2, &err_msg); if (!parsed_req) { goto log_error; } else if (err_msg) { log_info(LD_REND, "%s on circ %u.", err_msg, (unsigned)circuit->base_.n_circ_id); tor_free(err_msg); } /* make sure service replay caches are present */ if (!service->accepted_intro_dh_parts) { service->accepted_intro_dh_parts = replaycache_new(REND_REPLAY_TIME_INTERVAL, REND_REPLAY_TIME_INTERVAL); } if (!intro_point->accepted_intro_rsa_parts) { intro_point->accepted_intro_rsa_parts = replaycache_new(0, 0); } /* check for replay of PK-encrypted portion. */ replay = replaycache_add_test_and_elapsed( intro_point->accepted_intro_rsa_parts, parsed_req->ciphertext, parsed_req->ciphertext_len, &elapsed); if (replay) { log_warn(LD_REND, "Possible replay detected! We received an " "INTRODUCE2 cell with same PK-encrypted part %d " "seconds ago. 
Dropping cell.", (int)elapsed); goto err; } stage_descr = "decryption"; /* Now try to decrypt it */ result = rend_service_decrypt_intro(parsed_req, intro_key, &err_msg); if (result < 0) { goto log_error; } else if (err_msg) { log_info(LD_REND, "%s on circ %u.", err_msg, (unsigned)circuit->base_.n_circ_id); tor_free(err_msg); } stage_descr = "late parsing"; /* Parse the plaintext */ result = rend_service_parse_intro_plaintext(parsed_req, &err_msg); if (result < 0) { goto log_error; } else if (err_msg) { log_info(LD_REND, "%s on circ %u.", err_msg, (unsigned)circuit->base_.n_circ_id); tor_free(err_msg); } stage_descr = "late validation"; /* Validate the parsed plaintext parts */ result = rend_service_validate_intro_late(parsed_req, &err_msg); if (result < 0) { goto log_error; } else if (err_msg) { log_info(LD_REND, "%s on circ %u.", err_msg, (unsigned)circuit->base_.n_circ_id); tor_free(err_msg); } stage_descr = NULL; /* Increment INTRODUCE2 counter */ ++(intro_point->accepted_introduce2_count); /* Find the rendezvous point */ rp = find_rp_for_intro(parsed_req, &err_msg); if (!rp) { err_msg_severity = LOG_PROTOCOL_WARN; goto log_error; } /* Check if we'd refuse to talk to this router */ if (options->StrictNodes && routerset_contains_extendinfo(options->ExcludeNodes, rp)) { log_warn(LD_REND, "Client asked to rendezvous at a relay that we " "exclude, and StrictNodes is set. Refusing service."); reason = END_CIRC_REASON_INTERNAL; /* XXX might leak why we refused */ goto err; } base16_encode(hexcookie, 9, (const char *)(parsed_req->rc), 4); /* Check whether there is a past request with the same Diffie-Hellman, * part 1. */ replay = replaycache_add_test_and_elapsed( service->accepted_intro_dh_parts, parsed_req->dh, DH_KEY_LEN, &elapsed); if (replay) { /* A Tor client will send a new INTRODUCE1 cell with the same rend * cookie and DH public key as its previous one if its intro circ * times out while in state CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT . * If we received the first INTRODUCE1 cell (the intro-point relay * converts it into an INTRODUCE2 cell), we are already trying to * connect to that rend point (and may have already succeeded); * drop this cell. */ log_info(LD_REND, "We received an " "INTRODUCE2 cell with same first part of " "Diffie-Hellman handshake %d seconds ago. Dropping " "cell.", (int) elapsed); goto err; } /* If the service performs client authorization, check included auth data. */ if (service->clients) { if (parsed_req->version == 3 && parsed_req->u.v3.auth_len > 0) { if (rend_check_authorization(service, (const char*)parsed_req->u.v3.auth_data, parsed_req->u.v3.auth_len)) { log_info(LD_REND, "Authorization data in INTRODUCE2 cell are valid."); } else { log_info(LD_REND, "The authorization data that are contained in " "the INTRODUCE2 cell are invalid. Dropping cell."); reason = END_CIRC_REASON_CONNECTFAILED; goto err; } } else { log_info(LD_REND, "INTRODUCE2 cell does not contain authentication " "data, but we require client authorization. Dropping cell."); reason = END_CIRC_REASON_CONNECTFAILED; goto err; } } /* Try DH handshake... 
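 *
 * crypto_dh_compute_secret() below fills <b>keys</b> with
 * DIGEST_LEN + CPATH_KEY_MATERIAL_LEN bytes derived from g^xy: per the
 * declaration of <b>keys</b> above, that is KH (the handshake digest)
 * followed by the circuit key material Df, Db, Kf, Kb.  KH ends up in
 * cpath->rend_circ_nonce and is echoed back to the client in the
 * RENDEZVOUS1 cell.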
*/ dh = crypto_dh_new(DH_TYPE_REND); if (!dh || crypto_dh_generate_public(dh)<0) { log_warn(LD_BUG,"Internal error: couldn't build DH state " "or generate public key."); reason = END_CIRC_REASON_INTERNAL; goto err; } if (crypto_dh_compute_secret(LOG_PROTOCOL_WARN, dh, (char *)(parsed_req->dh), DH_KEY_LEN, keys, DIGEST_LEN+CPATH_KEY_MATERIAL_LEN)<0) { log_warn(LD_BUG, "Internal error: couldn't complete DH handshake"); reason = END_CIRC_REASON_INTERNAL; goto err; } circ_needs_uptime = rend_service_requires_uptime(service); /* help predict this next time */ rep_hist_note_used_internal(now, circ_needs_uptime, 1); /* Launch a circuit to the client's chosen rendezvous point. */ for (i=0;i<MAX_REND_FAILURES;i++) { int flags = CIRCLAUNCH_NEED_CAPACITY | CIRCLAUNCH_IS_INTERNAL; if (circ_needs_uptime) flags |= CIRCLAUNCH_NEED_UPTIME; /* A Single Onion Service only uses a direct connection if its * firewall rules permit direct connections to the address. */ if (rend_service_use_direct_connection(options, rp)) { flags = flags | CIRCLAUNCH_ONEHOP_TUNNEL; } launched = circuit_launch_by_extend_info( CIRCUIT_PURPOSE_S_CONNECT_REND, rp, flags); if (launched) break; } if (!launched) { /* give up */ log_warn(LD_REND, "Giving up launching first hop of circuit to rendezvous " "point %s for service %s.", safe_str_client(extend_info_describe(rp)), serviceid); reason = END_CIRC_REASON_CONNECTFAILED; goto err; } log_info(LD_REND, "Accepted intro; launching circuit to %s " "(cookie %s) for service %s.", safe_str_client(extend_info_describe(rp)), hexcookie, serviceid); tor_assert(launched->build_state); /* Fill in the circuit's state. */ launched->rend_data = rend_data_service_create(service->service_id, rend_pk_digest, parsed_req->rc, service->auth_type); launched->build_state->service_pending_final_cpath_ref = tor_malloc_zero(sizeof(crypt_path_reference_t)); launched->build_state->service_pending_final_cpath_ref->refcount = 1; launched->build_state->service_pending_final_cpath_ref->cpath = cpath = tor_malloc_zero(sizeof(crypt_path_t)); cpath->magic = CRYPT_PATH_MAGIC; launched->build_state->expiry_time = now + MAX_REND_TIMEOUT; cpath->rend_dh_handshake_state = dh; dh = NULL; if (circuit_init_cpath_crypto(cpath,keys+DIGEST_LEN,1)<0) goto err; memcpy(cpath->rend_circ_nonce, keys, DIGEST_LEN); goto done; log_error: if (!err_msg) { if (stage_descr) { tor_asprintf(&err_msg, "unknown %s error for INTRODUCE2", stage_descr); } else { err_msg = tor_strdup("unknown error for INTRODUCE2"); } } log_fn(err_msg_severity, LD_REND, "%s on circ %u", err_msg, (unsigned)circuit->base_.n_circ_id); err: status = -1; if (dh) crypto_dh_free(dh); if (launched) { circuit_mark_for_close(TO_CIRCUIT(launched), reason); } tor_free(err_msg); done: memwipe(keys, 0, sizeof(keys)); memwipe(buf, 0, sizeof(buf)); memwipe(serviceid, 0, sizeof(serviceid)); memwipe(hexcookie, 0, sizeof(hexcookie)); /* Free the parsed cell */ rend_service_free_intro(parsed_req); /* Free rp */ extend_info_free(rp); return status; } /** Given a parsed and decrypted INTRODUCE2, find the rendezvous point or * return NULL and an error string if we can't. Return a newly allocated * extend_info_t* for the rendezvous point. 
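 *
 * How the rendezvous point is named depends on the cell version (this
 * mirrors the dispatch below): v0/v1 cells carry a nickname that must be
 * resolved through node_get_by_nickname(), while v2/v3 cells carry a
 * ready-made extend_info that is simply duplicated.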
*/ static extend_info_t * find_rp_for_intro(const rend_intro_cell_t *intro, char **err_msg_out) { extend_info_t *rp = NULL; char *err_msg = NULL; const char *rp_nickname = NULL; const node_t *node = NULL; if (!intro) { if (err_msg_out) err_msg = tor_strdup("Bad parameters to find_rp_for_intro()"); goto err; } if (intro->version == 0 || intro->version == 1) { rp_nickname = (const char *)(intro->u.v0_v1.rp); node = node_get_by_nickname(rp_nickname, 0); if (!node) { if (err_msg_out) { tor_asprintf(&err_msg, "Couldn't find router %s named in INTRODUCE2 cell", escaped_safe_str_client(rp_nickname)); } goto err; } /* Are we in single onion mode? */ const int allow_direct = rend_service_allow_non_anonymous_connection( get_options()); rp = extend_info_from_node(node, allow_direct); if (!rp) { if (err_msg_out) { tor_asprintf(&err_msg, "Couldn't build extend_info_t for router %s named " "in INTRODUCE2 cell", escaped_safe_str_client(rp_nickname)); } goto err; } } else if (intro->version == 2) { rp = extend_info_dup(intro->u.v2.extend_info); } else if (intro->version == 3) { rp = extend_info_dup(intro->u.v3.extend_info); } else { if (err_msg_out) { tor_asprintf(&err_msg, "Unknown version %d in INTRODUCE2 cell", (int)(intro->version)); } goto err; } /* rp is always set here: extend_info_dup guarantees a non-NULL result, and * the other cases goto err. */ tor_assert(rp); /* Make sure the RP we are being asked to connect to is _not_ a private * address unless it's allowed. Let's avoid to build a circuit to our * second middle node and fail right after when extending to the RP. */ if (!extend_info_addr_is_allowed(&rp->addr)) { if (err_msg_out) { tor_asprintf(&err_msg, "Relay IP in INTRODUCE2 cell is private address."); } extend_info_free(rp); rp = NULL; goto err; } goto done; err: if (err_msg_out) *err_msg_out = err_msg; else tor_free(err_msg); done: return rp; } /** Free a parsed INTRODUCE1 or INTRODUCE2 cell that was allocated by * rend_service_parse_intro(). */ void rend_service_free_intro(rend_intro_cell_t *request) { if (!request) { return; } /* Free ciphertext */ tor_free(request->ciphertext); request->ciphertext_len = 0; /* Have plaintext? */ if (request->plaintext) { /* Zero it out just to be safe */ memwipe(request->plaintext, 0, request->plaintext_len); tor_free(request->plaintext); request->plaintext_len = 0; } /* Have parsed plaintext? */ if (request->parsed) { switch (request->version) { case 0: case 1: /* * Nothing more to do; these formats have no further pointers * in them. */ break; case 2: extend_info_free(request->u.v2.extend_info); request->u.v2.extend_info = NULL; break; case 3: if (request->u.v3.auth_data) { memwipe(request->u.v3.auth_data, 0, request->u.v3.auth_len); tor_free(request->u.v3.auth_data); } extend_info_free(request->u.v3.extend_info); request->u.v3.extend_info = NULL; break; default: log_info(LD_BUG, "rend_service_free_intro() saw unknown protocol " "version %d.", request->version); } } /* Zero it out to make sure sensitive stuff doesn't hang around in memory */ memwipe(request, 0, sizeof(*request)); tor_free(request); } /** Parse an INTRODUCE1 or INTRODUCE2 cell into a newly allocated * rend_intro_cell_t structure. Free it with rend_service_free_intro() * when finished. The type parameter should be 1 or 2 to indicate whether * this is INTRODUCE1 or INTRODUCE2. This parses only the non-encrypted * parts; after this, call rend_service_decrypt_intro() with a key, then * rend_service_parse_intro_plaintext() to finish parsing. 
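 *
 * A hedged usage sketch (error handling elided; "req", "len", and
 * "intro_key" are hypothetical placeholders):
 *
 *   char *err = NULL;
 *   rend_intro_cell_t *cell =
 *     rend_service_begin_parse_intro(req, len, 2, &err);
 *   if (cell &&
 *       rend_service_decrypt_intro(cell, intro_key, &err) == 0 &&
 *       rend_service_parse_intro_plaintext(cell, &err) == 0) {
 *     ... cell->rc and cell->dh are now usable ...
 *   }
 *   rend_service_free_intro(cell);
 *   tor_free(err);
 *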
The optional * err_msg_out parameter is set to a string suitable for log output * if parsing fails. This function does some validation, but only * that which depends solely on the contents of the cell and the * key; it can be unit-tested. Further validation is done in * rend_service_validate_intro(). */ rend_intro_cell_t * rend_service_begin_parse_intro(const uint8_t *request, size_t request_len, uint8_t type, char **err_msg_out) { rend_intro_cell_t *rv = NULL; char *err_msg = NULL; if (!request || request_len <= 0) goto err; if (!(type == 1 || type == 2)) goto err; /* First, check that the cell is long enough to be a sensible INTRODUCE */ /* min key length plus digest length plus nickname length */ if (request_len < (DIGEST_LEN + REND_COOKIE_LEN + (MAX_NICKNAME_LEN + 1) + DH_KEY_LEN + 42)) { if (err_msg_out) { tor_asprintf(&err_msg, "got a truncated INTRODUCE%d cell", (int)type); } goto err; } /* Allocate a new parsed cell structure */ rv = tor_malloc_zero(sizeof(*rv)); /* Set the type */ rv->type = type; /* Copy in the ID */ memcpy(rv->pk, request, DIGEST_LEN); /* Copy in the ciphertext */ rv->ciphertext = tor_malloc(request_len - DIGEST_LEN); memcpy(rv->ciphertext, request + DIGEST_LEN, request_len - DIGEST_LEN); rv->ciphertext_len = request_len - DIGEST_LEN; goto done; err: rend_service_free_intro(rv); rv = NULL; if (err_msg_out && !err_msg) { tor_asprintf(&err_msg, "unknown INTRODUCE%d error", (int)type); } done: if (err_msg_out) *err_msg_out = err_msg; else tor_free(err_msg); return rv; } /** Parse the version-specific parts of a v0 or v1 INTRODUCE1 or INTRODUCE2 * cell */ static ssize_t rend_service_parse_intro_for_v0_or_v1( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out) { const char *rp_nickname, *endptr; size_t nickname_field_len, ver_specific_len; if (intro->version == 1) { ver_specific_len = MAX_HEX_NICKNAME_LEN + 2; rp_nickname = ((const char *)buf) + 1; nickname_field_len = MAX_HEX_NICKNAME_LEN + 1; } else if (intro->version == 0) { ver_specific_len = MAX_NICKNAME_LEN + 1; rp_nickname = (const char *)buf; nickname_field_len = MAX_NICKNAME_LEN + 1; } else { if (err_msg_out) tor_asprintf(err_msg_out, "rend_service_parse_intro_for_v0_or_v1() called with " "bad version %d on INTRODUCE%d cell (this is a bug)", intro->version, (int)(intro->type)); goto err; } if (plaintext_len < ver_specific_len) { if (err_msg_out) tor_asprintf(err_msg_out, "short plaintext of encrypted part in v1 INTRODUCE%d " "cell (%lu bytes, needed %lu)", (int)(intro->type), (unsigned long)plaintext_len, (unsigned long)ver_specific_len); goto err; } endptr = memchr(rp_nickname, 0, nickname_field_len); if (!endptr || endptr == rp_nickname) { if (err_msg_out) { tor_asprintf(err_msg_out, "couldn't find a nul-padded nickname in " "INTRODUCE%d cell", (int)(intro->type)); } goto err; } if ((intro->version == 0 && !is_legal_nickname(rp_nickname)) || (intro->version == 1 && !is_legal_nickname_or_hexdigest(rp_nickname))) { if (err_msg_out) { tor_asprintf(err_msg_out, "bad nickname in INTRODUCE%d cell", (int)(intro->type)); } goto err; } memcpy(intro->u.v0_v1.rp, rp_nickname, endptr - rp_nickname + 1); return ver_specific_len; err: return -1; } /** Parse the version-specific parts of a v2 INTRODUCE1 or INTRODUCE2 cell */ static ssize_t rend_service_parse_intro_for_v2( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out) { unsigned int klen; extend_info_t *extend_info = NULL; ssize_t ver_specific_len; /* * We accept version 3 too so that the v3 
parser can call this with * an adjusted buffer for the latter part of a v3 cell, which is * identical to a v2 cell. */ if (!(intro->version == 2 || intro->version == 3)) { if (err_msg_out) tor_asprintf(err_msg_out, "rend_service_parse_intro_for_v2() called with " "bad version %d on INTRODUCE%d cell (this is a bug)", intro->version, (int)(intro->type)); goto err; } /* 7 == version, IP and port, DIGEST_LEN == id, 2 == key length */ if (plaintext_len < 7 + DIGEST_LEN + 2) { if (err_msg_out) { tor_asprintf(err_msg_out, "truncated plaintext of encrypted parted of " "version %d INTRODUCE%d cell", intro->version, (int)(intro->type)); } goto err; } extend_info = tor_malloc_zero(sizeof(extend_info_t)); tor_addr_from_ipv4n(&extend_info->addr, get_uint32(buf + 1)); extend_info->port = ntohs(get_uint16(buf + 5)); memcpy(extend_info->identity_digest, buf + 7, DIGEST_LEN); extend_info->nickname[0] = '$'; base16_encode(extend_info->nickname + 1, sizeof(extend_info->nickname) - 1, extend_info->identity_digest, DIGEST_LEN); klen = ntohs(get_uint16(buf + 7 + DIGEST_LEN)); /* 7 == version, IP and port, DIGEST_LEN == id, 2 == key length */ if (plaintext_len < 7 + DIGEST_LEN + 2 + klen) { if (err_msg_out) { tor_asprintf(err_msg_out, "truncated plaintext of encrypted parted of " "version %d INTRODUCE%d cell", intro->version, (int)(intro->type)); } goto err; } extend_info->onion_key = crypto_pk_asn1_decode((const char *)(buf + 7 + DIGEST_LEN + 2), klen); if (!extend_info->onion_key) { if (err_msg_out) { tor_asprintf(err_msg_out, "error decoding onion key in version %d " "INTRODUCE%d cell", intro->version, (intro->type)); } goto err; } if (128 != crypto_pk_keysize(extend_info->onion_key)) { if (err_msg_out) { tor_asprintf(err_msg_out, "invalid onion key size in version %d INTRODUCE%d cell", intro->version, (intro->type)); } goto err; } ver_specific_len = 7+DIGEST_LEN+2+klen; if (intro->version == 2) intro->u.v2.extend_info = extend_info; else intro->u.v3.extend_info = extend_info; return ver_specific_len; err: extend_info_free(extend_info); return -1; } /** Parse the version-specific parts of a v3 INTRODUCE1 or INTRODUCE2 cell */ static ssize_t rend_service_parse_intro_for_v3( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out) { ssize_t adjust, v2_ver_specific_len, ts_offset; /* This should only be called on v3 cells */ if (intro->version != 3) { if (err_msg_out) tor_asprintf(err_msg_out, "rend_service_parse_intro_for_v3() called with " "bad version %d on INTRODUCE%d cell (this is a bug)", intro->version, (int)(intro->type)); goto err; } /* * Check that we have at least enough to get auth_len: * * 1 octet for version, 1 for auth_type, 2 for auth_len */ if (plaintext_len < 4) { if (err_msg_out) { tor_asprintf(err_msg_out, "truncated plaintext of encrypted parted of " "version %d INTRODUCE%d cell", intro->version, (int)(intro->type)); } goto err; } /* * The rend_client_send_introduction() function over in rendclient.c is * broken (i.e., fails to match the spec) in such a way that we can't * change it without breaking the protocol. Specifically, it doesn't * emit auth_len when auth-type is REND_NO_AUTH, so everything is off * by two bytes after that. Calculate ts_offset and do everything from * the timestamp on relative to that to handle this dain bramage. 
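 *
 * Illustrative byte layout, derived from the parsing below rather than
 * from the spec:
 *
 *   auth_type != REND_NO_AUTH:
 *     [0] version (3), [1] auth_type, [2..3] auth_len (network order),
 *     [4 .. 4+auth_len-1] auth_data, then a 4-byte timestamp, then a
 *     v2-style body (RP address, port, identity, onion key, RC, g^x).
 *   auth_type == REND_NO_AUTH:
 *     [0] version (3), [1] auth_type, then the timestamp immediately at
 *     offset 2 -- no auth_len field at all.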
*/ intro->u.v3.auth_type = buf[1]; if (intro->u.v3.auth_type != REND_NO_AUTH) { intro->u.v3.auth_len = ntohs(get_uint16(buf + 2)); ts_offset = 4 + intro->u.v3.auth_len; } else { intro->u.v3.auth_len = 0; ts_offset = 2; } /* Check that auth len makes sense for this auth type */ if (intro->u.v3.auth_type == REND_BASIC_AUTH || intro->u.v3.auth_type == REND_STEALTH_AUTH) { if (intro->u.v3.auth_len != REND_DESC_COOKIE_LEN) { if (err_msg_out) { tor_asprintf(err_msg_out, "wrong auth data size %d for INTRODUCE%d cell, " "should be %d", (int)(intro->u.v3.auth_len), (int)(intro->type), REND_DESC_COOKIE_LEN); } goto err; } } /* Check that we actually have everything up through the timestamp */ if (plaintext_len < (size_t)(ts_offset)+4) { if (err_msg_out) { tor_asprintf(err_msg_out, "truncated plaintext of encrypted parted of " "version %d INTRODUCE%d cell", intro->version, (int)(intro->type)); } goto err; } if (intro->u.v3.auth_type != REND_NO_AUTH && intro->u.v3.auth_len > 0) { /* Okay, we can go ahead and copy auth_data */ intro->u.v3.auth_data = tor_malloc(intro->u.v3.auth_len); /* * We know we had an auth_len field in this case, so 4 is * always right. */ memcpy(intro->u.v3.auth_data, buf + 4, intro->u.v3.auth_len); } /* * From here on, the format is as in v2, so we call the v2 parser with * adjusted buffer and length. We are 4 + ts_offset octets in, but the * v2 parser expects to skip over a version byte at the start, so we * adjust by 3 + ts_offset. */ adjust = 3 + ts_offset; v2_ver_specific_len = rend_service_parse_intro_for_v2(intro, buf + adjust, plaintext_len - adjust, err_msg_out); /* Success in v2 parser */ if (v2_ver_specific_len >= 0) return v2_ver_specific_len + adjust; /* Failure in v2 parser; it will have provided an err_msg */ else return v2_ver_specific_len; err: return -1; } /** Table of parser functions for version-specific parts of an INTRODUCE2 * cell. */ static ssize_t (*intro_version_handlers[])( rend_intro_cell_t *, const uint8_t *, size_t, char **) = { rend_service_parse_intro_for_v0_or_v1, rend_service_parse_intro_for_v0_or_v1, rend_service_parse_intro_for_v2, rend_service_parse_intro_for_v3 }; /** Decrypt the encrypted part of an INTRODUCE1 or INTRODUCE2 cell, * return 0 if successful, or < 0 and write an error message to * *err_msg_out if provided. 
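 *
 * The distinct negative return values below (-2 .. -6) identify the
 * failing stage; callers that only care about success can test "< 0".
 * A hedged calling sketch:
 *
 *   char *err = NULL;
 *   if (rend_service_decrypt_intro(cell, intro_key, &err) < 0) {
 *     log_info(LD_REND, "%s", err ? err : "decryption failed");
 *   }
 *   tor_free(err);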
*/ int rend_service_decrypt_intro( rend_intro_cell_t *intro, crypto_pk_t *key, char **err_msg_out) { char *err_msg = NULL; uint8_t key_digest[DIGEST_LEN]; char service_id[REND_SERVICE_ID_LEN_BASE32+1]; ssize_t key_len; uint8_t buf[RELAY_PAYLOAD_SIZE]; int result, status = -1; if (!intro || !key) { if (err_msg_out) { err_msg = tor_strdup("rend_service_decrypt_intro() called with bad " "parameters"); } status = -2; goto err; } /* Make sure we have ciphertext */ if (!(intro->ciphertext) || intro->ciphertext_len <= 0) { if (err_msg_out) { tor_asprintf(&err_msg, "rend_intro_cell_t was missing ciphertext for " "INTRODUCE%d cell", (int)(intro->type)); } status = -3; goto err; } /* Check that this cell actually matches this service key */ /* first DIGEST_LEN bytes of request is intro or service pk digest */ crypto_pk_get_digest(key, (char *)key_digest); if (tor_memneq(key_digest, intro->pk, DIGEST_LEN)) { if (err_msg_out) { base32_encode(service_id, REND_SERVICE_ID_LEN_BASE32 + 1, (char*)(intro->pk), REND_SERVICE_ID_LEN); tor_asprintf(&err_msg, "got an INTRODUCE%d cell for the wrong service (%s)", (int)(intro->type), escaped(service_id)); } status = -4; goto err; } /* Make sure the encrypted part is long enough to decrypt */ key_len = crypto_pk_keysize(key); if (intro->ciphertext_len < key_len) { if (err_msg_out) { tor_asprintf(&err_msg, "got an INTRODUCE%d cell with a truncated PK-encrypted " "part", (int)(intro->type)); } status = -5; goto err; } /* Decrypt the encrypted part */ note_crypto_pk_op(REND_SERVER); result = crypto_pk_private_hybrid_decrypt( key, (char *)buf, sizeof(buf), (const char *)(intro->ciphertext), intro->ciphertext_len, PK_PKCS1_OAEP_PADDING, 1); if (result < 0) { if (err_msg_out) { tor_asprintf(&err_msg, "couldn't decrypt INTRODUCE%d cell", (int)(intro->type)); } status = -6; goto err; } intro->plaintext_len = result; intro->plaintext = tor_malloc(intro->plaintext_len); memcpy(intro->plaintext, buf, intro->plaintext_len); status = 0; goto done; err: if (err_msg_out && !err_msg) { tor_asprintf(&err_msg, "unknown INTRODUCE%d error decrypting encrypted part", intro ? (int)(intro->type) : -1); } done: if (err_msg_out) *err_msg_out = err_msg; else tor_free(err_msg); /* clean up potentially sensitive material */ memwipe(buf, 0, sizeof(buf)); memwipe(key_digest, 0, sizeof(key_digest)); memwipe(service_id, 0, sizeof(service_id)); return status; } /** Parse the plaintext of the encrypted part of an INTRODUCE1 or * INTRODUCE2 cell, return 0 if successful, or < 0 and write an error * message to *err_msg_out if provided. 
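 *
 * Whatever the version, the plaintext ends with the version-invariant
 * tail that this function copies out:
 *
 *   [ version-specific part ][ RC: REND_COOKIE_LEN ][ g^x: DH_KEY_LEN ]
 *
 * (v0 is the oddball with no leading version byte; any first byte
 * greater than 3 is treated as the start of a v0 nickname.)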
*/ int rend_service_parse_intro_plaintext( rend_intro_cell_t *intro, char **err_msg_out) { char *err_msg = NULL; ssize_t ver_specific_len, ver_invariant_len; uint8_t version; int status = -1; if (!intro) { if (err_msg_out) { err_msg = tor_strdup("rend_service_parse_intro_plaintext() called with NULL " "rend_intro_cell_t"); } status = -2; goto err; } /* Check that we have plaintext */ if (!(intro->plaintext) || intro->plaintext_len <= 0) { if (err_msg_out) { err_msg = tor_strdup("rend_intro_cell_t was missing plaintext"); } status = -3; goto err; } /* In all formats except v0, the first byte is a version number */ version = intro->plaintext[0]; /* v0 has no version byte (stupid...), so handle it as a fallback */ if (version > 3) version = 0; /* Copy the version into the parsed cell structure */ intro->version = version; /* Call the version-specific parser from the table */ ver_specific_len = intro_version_handlers[version](intro, intro->plaintext, intro->plaintext_len, &err_msg); if (ver_specific_len < 0) { status = -4; goto err; } /** The rendezvous cookie and Diffie-Hellman stuff are version-invariant * and at the end of the plaintext of the encrypted part of the cell. */ ver_invariant_len = intro->plaintext_len - ver_specific_len; if (ver_invariant_len < REND_COOKIE_LEN + DH_KEY_LEN) { tor_asprintf(&err_msg, "decrypted plaintext of INTRODUCE%d cell was truncated (%ld bytes)", (int)(intro->type), (long)(intro->plaintext_len)); status = -5; goto err; } else if (ver_invariant_len > REND_COOKIE_LEN + DH_KEY_LEN) { tor_asprintf(&err_msg, "decrypted plaintext of INTRODUCE%d cell was too long (%ld bytes)", (int)(intro->type), (long)(intro->plaintext_len)); status = -6; goto err; } else { memcpy(intro->rc, intro->plaintext + ver_specific_len, REND_COOKIE_LEN); memcpy(intro->dh, intro->plaintext + ver_specific_len + REND_COOKIE_LEN, DH_KEY_LEN); } /* Flag it as being fully parsed */ intro->parsed = 1; status = 0; goto done; err: if (err_msg_out && !err_msg) { tor_asprintf(&err_msg, "unknown INTRODUCE%d error parsing encrypted part", intro ? (int)(intro->type) : -1); } done: if (err_msg_out) *err_msg_out = err_msg; else tor_free(err_msg); return status; } /** Do validity checks on a parsed intro cell after decryption; some of * these are not done in rend_service_parse_intro_plaintext() itself because * they depend on a lot of other state and would make it hard to unit test. * Returns >= 0 if successful or < 0 if the intro cell is invalid, and * optionally writes out an error message for logging. If an err_msg * pointer is provided, it is the caller's responsibility to free any * provided message. */ int rend_service_validate_intro_late(const rend_intro_cell_t *intro, char **err_msg_out) { int status = 0; if (!intro) { if (err_msg_out) *err_msg_out = tor_strdup("NULL intro cell passed to " "rend_service_validate_intro_late()"); status = -1; goto err; } if (intro->version == 3 && intro->parsed) { if (!(intro->u.v3.auth_type == REND_NO_AUTH || intro->u.v3.auth_type == REND_BASIC_AUTH || intro->u.v3.auth_type == REND_STEALTH_AUTH)) { /* This is an informative message, not an error, as in the old code */ if (err_msg_out) tor_asprintf(err_msg_out, "unknown authorization type %d", intro->u.v3.auth_type); } } err: return status; } /** Called when we fail building a rendezvous circuit at some point other * than the last hop: launches a new circuit to the same rendezvous point. 
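 *
 * The retry inherits the original attempt's bookkeeping: failure_count
 * is carried forward (incremented by one), expiry_time carries over
 * unchanged, and service_pending_final_cpath_ref is reference-counted
 * across the old and new circuits so the precomputed final hop survives
 * whichever circuit completes first.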
*/ void rend_service_relaunch_rendezvous(origin_circuit_t *oldcirc) { origin_circuit_t *newcirc; cpath_build_state_t *newstate, *oldstate; tor_assert(oldcirc->base_.purpose == CIRCUIT_PURPOSE_S_CONNECT_REND); /* Don't relaunch the same rend circ twice. */ if (oldcirc->hs_service_side_rend_circ_has_been_relaunched) { log_info(LD_REND, "Rendezvous circuit to %s has already been relaunched; " "not relaunching it again.", oldcirc->build_state ? safe_str(extend_info_describe(oldcirc->build_state->chosen_exit)) : "*unknown*"); return; } oldcirc->hs_service_side_rend_circ_has_been_relaunched = 1; if (!oldcirc->build_state || oldcirc->build_state->failure_count > MAX_REND_FAILURES || oldcirc->build_state->expiry_time < time(NULL)) { log_info(LD_REND, "Attempt to build circuit to %s for rendezvous has failed " "too many times or expired; giving up.", oldcirc->build_state ? safe_str(extend_info_describe(oldcirc->build_state->chosen_exit)) : "*unknown*"); return; } oldstate = oldcirc->build_state; tor_assert(oldstate); if (oldstate->service_pending_final_cpath_ref == NULL) { log_info(LD_REND,"Skipping relaunch of circ that failed on its first hop. " "Initiator will retry."); return; } log_info(LD_REND,"Reattempting rendezvous circuit to '%s'", safe_str(extend_info_describe(oldstate->chosen_exit))); /* You'd think Single Onion Services would want to retry the rendezvous * using a direct connection. But if it's blocked by a firewall, or the * service is IPv6-only, or the rend point avoiding becoming a one-hop * proxy, we need a 3-hop connection. */ newcirc = circuit_launch_by_extend_info(CIRCUIT_PURPOSE_S_CONNECT_REND, oldstate->chosen_exit, CIRCLAUNCH_NEED_CAPACITY|CIRCLAUNCH_IS_INTERNAL); if (!newcirc) { log_warn(LD_REND,"Couldn't relaunch rendezvous circuit to '%s'.", safe_str(extend_info_describe(oldstate->chosen_exit))); return; } newstate = newcirc->build_state; tor_assert(newstate); newstate->failure_count = oldstate->failure_count+1; newstate->expiry_time = oldstate->expiry_time; newstate->service_pending_final_cpath_ref = oldstate->service_pending_final_cpath_ref; ++(newstate->service_pending_final_cpath_ref->refcount); newcirc->rend_data = rend_data_dup(oldcirc->rend_data); } /** Launch a circuit to serve as an introduction point for the service * <b>service</b> at the introduction point <b>nickname</b> */ static int rend_service_launch_establish_intro(rend_service_t *service, rend_intro_point_t *intro) { origin_circuit_t *launched; int flags = CIRCLAUNCH_NEED_UPTIME|CIRCLAUNCH_IS_INTERNAL; const or_options_t *options = get_options(); extend_info_t *launch_ei = intro->extend_info; extend_info_t *direct_ei = NULL; /* Are we in single onion mode? */ if (rend_service_allow_non_anonymous_connection(options)) { /* Do we have a descriptor for the node? * We've either just chosen it from the consensus, or we've just reviewed * our intro points to see which ones are still valid, and deleted the ones * that aren't in the consensus any more. */ const node_t *node = node_get_by_id(launch_ei->identity_digest); if (BUG(!node)) { /* The service has kept an intro point after it went missing from the * consensus. If we did anything else here, it would be a consensus * distinguisher. Which are less of an issue for single onion services, * but still a bug. */ return -1; } /* Can we connect to the node directly? If so, replace launch_ei * (a multi-hop extend_info) with one suitable for direct connection. 
*/ if (rend_service_use_direct_connection_node(options, node)) { direct_ei = extend_info_from_node(node, 1); if (BUG(!direct_ei)) { /* rend_service_use_direct_connection_node and extend_info_from_node * disagree about which addresses on this node are permitted. This * should never happen. Avoiding the connection is a safe response. */ return -1; } flags = flags | CIRCLAUNCH_ONEHOP_TUNNEL; launch_ei = direct_ei; } } /* launch_ei is either intro->extend_info, or has been replaced with a valid * extend_info for single onion service direct connection. */ tor_assert(launch_ei); /* We must have the same intro when making a direct connection. */ tor_assert(tor_memeq(intro->extend_info->identity_digest, launch_ei->identity_digest, DIGEST_LEN)); log_info(LD_REND, "Launching circuit to introduction point %s%s%s for service %s", safe_str_client(extend_info_describe(intro->extend_info)), direct_ei ? " via direct address " : "", direct_ei ? safe_str_client(extend_info_describe(direct_ei)) : "", service->service_id); rep_hist_note_used_internal(time(NULL), 1, 0); ++service->n_intro_circuits_launched; launched = circuit_launch_by_extend_info(CIRCUIT_PURPOSE_S_ESTABLISH_INTRO, launch_ei, flags); if (!launched) { log_info(LD_REND, "Can't launch circuit to establish introduction at %s%s%s.", safe_str_client(extend_info_describe(intro->extend_info)), direct_ei ? " via direct address " : "", direct_ei ? safe_str_client(extend_info_describe(direct_ei)) : "" ); extend_info_free(direct_ei); return -1; } /* We must have the same exit node even if cannibalized or direct connection. */ tor_assert(tor_memeq(intro->extend_info->identity_digest, launched->build_state->chosen_exit->identity_digest, DIGEST_LEN)); launched->rend_data = rend_data_service_create(service->service_id, service->pk_digest, NULL, service->auth_type); launched->intro_key = crypto_pk_dup_key(intro->intro_key); if (launched->base_.state == CIRCUIT_STATE_OPEN) rend_service_intro_has_opened(launched); extend_info_free(direct_ei); return 0; } /** Return the number of introduction points that are established for the * given service. */ static unsigned int count_established_intro_points(const rend_service_t *service) { unsigned int num = 0; SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro, num += intro->circuit_established ); return num; } /** Return the number of introduction points that are or are being * established for the given service. This function iterates over all * circuit and count those that are linked to the service and are waiting * for the intro point to respond. */ static unsigned int count_intro_point_circuits(const rend_service_t *service) { unsigned int num_ipos = 0; SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) { if (!circ->marked_for_close && circ->state == CIRCUIT_STATE_OPEN && (circ->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO || circ->purpose == CIRCUIT_PURPOSE_S_INTRO)) { origin_circuit_t *oc = TO_ORIGIN_CIRCUIT(circ); if (oc->rend_data && rend_circuit_pk_digest_eq(oc, (uint8_t *) service->pk_digest)) { num_ipos++; } } } SMARTLIST_FOREACH_END(circ); return num_ipos; } /* Given a buffer of at least RELAY_PAYLOAD_SIZE bytes in <b>cell_body_out</b>, write the body of a legacy ESTABLISH_INTRO cell in it. Use <b>intro_key</b> as the intro point auth key, and <b>rend_circ_nonce</b> as the circuit crypto material. On success, fill <b>cell_body_out</b> and return the number of bytes written. On fail, return -1. 
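
   An illustrative layout of the body assembled below (field sizes taken
   from the code, not from the rend-spec document):

     [ KL: 2 bytes ][ PK: KL bytes, ASN.1-encoded intro key ]
     [ HS: 20-byte digest of rend_circ_nonce | "INTRODUCE" ]
     [ SIG: RSA signature of everything above, made with the intro key ]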
*/
STATIC ssize_t
encode_establish_intro_cell_legacy(char *cell_body_out,
                                   size_t cell_body_out_len,
                                   crypto_pk_t *intro_key,
                                   char *rend_circ_nonce)
{
  int retval = -1;
  int r;
  int len = 0;
  char auth[DIGEST_LEN + 9];

  tor_assert(intro_key);
  tor_assert(rend_circ_nonce);

  /* Build the payload for a RELAY_ESTABLISH_INTRO cell. */
  r = crypto_pk_asn1_encode(intro_key, cell_body_out+2,
                            RELAY_PAYLOAD_SIZE-2);
  if (r < 0) {
    log_warn(LD_BUG, "Internal error; failed to establish intro point.");
    goto err;
  }
  len = r;
  set_uint16(cell_body_out, htons((uint16_t)len));
  len += 2;
  memcpy(auth, rend_circ_nonce, DIGEST_LEN);
  memcpy(auth+DIGEST_LEN, "INTRODUCE", 9);
  if (crypto_digest(cell_body_out+len, auth, DIGEST_LEN+9))
    goto err;
  len += 20;
  note_crypto_pk_op(REND_SERVER);
  r = crypto_pk_private_sign_digest(intro_key, cell_body_out+len,
                                    cell_body_out_len - len,
                                    cell_body_out, len);
  if (r<0) {
    log_warn(LD_BUG, "Internal error: couldn't sign introduction request.");
    goto err;
  }
  len += r;

  retval = len;

 err:
  memwipe(auth, 0, sizeof(auth));

  return retval;
}

/** Called when we're done building a circuit to an introduction point:
 *  sends a RELAY_ESTABLISH_INTRO cell.
 */
void
rend_service_intro_has_opened(origin_circuit_t *circuit)
{
  rend_service_t *service;
  char buf[RELAY_PAYLOAD_SIZE];
  char serviceid[REND_SERVICE_ID_LEN_BASE32+1];
  unsigned int expiring_nodes_len, num_ip_circuits, valid_ip_circuits = 0;
  int reason = END_CIRC_REASON_TORPROTOCOL;
  const char *rend_pk_digest;

  tor_assert(circuit->base_.purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO);
  assert_circ_anonymity_ok(circuit, get_options());
  tor_assert(circuit->cpath);
  tor_assert(circuit->rend_data);
  /* XXX: This is version 2 specific (only one supported). */
  rend_pk_digest = (char *) rend_data_get_pk_digest(circuit->rend_data,
                                                    NULL);

  base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1,
                rend_pk_digest, REND_SERVICE_ID_LEN);

  service = rend_service_get_by_pk_digest(rend_pk_digest);
  if (!service) {
    log_warn(LD_REND, "Unrecognized service ID %s on introduction circuit %u.",
             safe_str_client(serviceid), (unsigned)circuit->base_.n_circ_id);
    reason = END_CIRC_REASON_NOSUCHSERVICE;
    goto err;
  }

  /* Take the current number of expiring nodes and the current number of
   * IP circuits and compute how many valid IP circuits we have. */
  expiring_nodes_len = (unsigned int) smartlist_len(service->expiring_nodes);
  num_ip_circuits = count_intro_point_circuits(service);
  /* Let's avoid an underflow. valid_ip_circuits is initialized to 0 in
   * case this condition turns out false, because that means all circuits
   * are expiring so we need to keep this circuit. */
  if (num_ip_circuits > expiring_nodes_len) {
    valid_ip_circuits = num_ip_circuits - expiring_nodes_len;
  }

  /* If we already have enough introduction circuits for this service,
   * redefine this one as a general circuit or close it, depending.
   * Subtract the number of expiring nodes here because those circuits
   * are still open. */
  if (valid_ip_circuits > service->n_intro_points_wanted) {
    const or_options_t *options = get_options();
    /* Remove the intro point associated with this circuit; it's being
     * repurposed or closed, so clean up its memory. */
    rend_intro_point_t *intro = find_intro_point(circuit);
    if (intro != NULL) {
      smartlist_remove(service->intro_nodes, intro);
      rend_intro_point_free(intro);
    }
    if (options->ExcludeNodes) {
      /* XXXX in some future version, we can test whether the transition
         is allowed or not given the actual nodes in the circuit.  But
         for now, in this case, we might as well close the thing.
*/ log_info(LD_CIRC|LD_REND, "We have just finished an introduction " "circuit, but we already have enough. Closing it."); reason = END_CIRC_REASON_NONE; goto err; } else { tor_assert(circuit->build_state->is_internal); log_info(LD_CIRC|LD_REND, "We have just finished an introduction " "circuit, but we already have enough. Redefining purpose to " "general; leaving as internal."); circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_C_GENERAL); { rend_data_free(circuit->rend_data); circuit->rend_data = NULL; } { crypto_pk_t *intro_key = circuit->intro_key; circuit->intro_key = NULL; crypto_pk_free(intro_key); } circuit_has_opened(circuit); goto done; } } log_info(LD_REND, "Established circuit %u as introduction point for service %s", (unsigned)circuit->base_.n_circ_id, serviceid); circuit_log_path(LOG_INFO, LD_REND, circuit); /* Send the ESTABLISH_INTRO cell */ { ssize_t len; len = encode_establish_intro_cell_legacy(buf, sizeof(buf), circuit->intro_key, circuit->cpath->prev->rend_circ_nonce); if (len < 0) { reason = END_CIRC_REASON_INTERNAL; goto err; } if (relay_send_command_from_edge(0, TO_CIRCUIT(circuit), RELAY_COMMAND_ESTABLISH_INTRO, buf, len, circuit->cpath->prev)<0) { log_info(LD_GENERAL, "Couldn't send introduction request for service %s on circuit %u", serviceid, (unsigned)circuit->base_.n_circ_id); goto done; } } /* We've attempted to use this circuit */ pathbias_count_use_attempt(circuit); goto done; err: circuit_mark_for_close(TO_CIRCUIT(circuit), reason); done: memwipe(buf, 0, sizeof(buf)); memwipe(serviceid, 0, sizeof(serviceid)); return; } /** Called when we get an INTRO_ESTABLISHED cell; mark the circuit as a * live introduction point, and note that the service descriptor is * now out-of-date. */ int rend_service_intro_established(origin_circuit_t *circuit, const uint8_t *request, size_t request_len) { rend_service_t *service; rend_intro_point_t *intro; char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; (void) request; (void) request_len; tor_assert(circuit->rend_data); /* XXX: This is version 2 specific (only supported one for now). */ const char *rend_pk_digest = (char *) rend_data_get_pk_digest(circuit->rend_data, NULL); if (circuit->base_.purpose != CIRCUIT_PURPOSE_S_ESTABLISH_INTRO) { log_warn(LD_PROTOCOL, "received INTRO_ESTABLISHED cell on non-intro circuit."); goto err; } service = rend_service_get_by_pk_digest(rend_pk_digest); if (!service) { log_warn(LD_REND, "Unknown service on introduction circuit %u.", (unsigned)circuit->base_.n_circ_id); goto err; } base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32 + 1, rend_pk_digest, REND_SERVICE_ID_LEN); /* We've just successfully established a intro circuit to one of our * introduction point, account for it. */ intro = find_intro_point(circuit); if (intro == NULL) { log_warn(LD_REND, "Introduction circuit established without a rend_intro_point_t " "object for service %s on circuit %u", safe_str_client(serviceid), (unsigned)circuit->base_.n_circ_id); goto err; } intro->circuit_established = 1; /* We might not have every introduction point ready but at this point we * know that the descriptor needs to be uploaded. 
*/ service->desc_is_dirty = time(NULL); circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_S_INTRO); log_info(LD_REND, "Received INTRO_ESTABLISHED cell on circuit %u for service %s", (unsigned)circuit->base_.n_circ_id, serviceid); /* Getting a valid INTRODUCE_ESTABLISHED means we've successfully * used the circ */ pathbias_mark_use_success(circuit); return 0; err: circuit_mark_for_close(TO_CIRCUIT(circuit), END_CIRC_REASON_TORPROTOCOL); return -1; } /** Called once a circuit to a rendezvous point is established: sends a * RELAY_COMMAND_RENDEZVOUS1 cell. */ void rend_service_rendezvous_has_opened(origin_circuit_t *circuit) { rend_service_t *service; char buf[RELAY_PAYLOAD_SIZE]; crypt_path_t *hop; char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; char hexcookie[9]; int reason; const char *rend_cookie, *rend_pk_digest; tor_assert(circuit->base_.purpose == CIRCUIT_PURPOSE_S_CONNECT_REND); tor_assert(circuit->cpath); tor_assert(circuit->build_state); assert_circ_anonymity_ok(circuit, get_options()); tor_assert(circuit->rend_data); /* XXX: This is version 2 specific (only one supported). */ rend_pk_digest = (char *) rend_data_get_pk_digest(circuit->rend_data, NULL); rend_cookie = circuit->rend_data->rend_cookie; /* Declare the circuit dirty to avoid reuse, and for path-bias */ if (!circuit->base_.timestamp_dirty) circuit->base_.timestamp_dirty = time(NULL); /* This may be redundant */ pathbias_count_use_attempt(circuit); hop = circuit->build_state->service_pending_final_cpath_ref->cpath; base16_encode(hexcookie,9, rend_cookie,4); base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1, rend_pk_digest, REND_SERVICE_ID_LEN); log_info(LD_REND, "Done building circuit %u to rendezvous with " "cookie %s for service %s", (unsigned)circuit->base_.n_circ_id, hexcookie, serviceid); circuit_log_path(LOG_INFO, LD_REND, circuit); /* Clear the 'in-progress HS circ has timed out' flag for * consistency with what happens on the client side; this line has * no effect on Tor's behaviour. */ circuit->hs_circ_has_timed_out = 0; /* If hop is NULL, another rend circ has already connected to this * rend point. Close this circ. */ if (hop == NULL) { log_info(LD_REND, "Another rend circ has already reached this rend point; " "closing this rend circ."); reason = END_CIRC_REASON_NONE; goto err; } /* Remove our final cpath element from the reference, so that no * other circuit will try to use it. Store it in * pending_final_cpath for now to ensure that it will be freed if * our rendezvous attempt fails. */ circuit->build_state->pending_final_cpath = hop; circuit->build_state->service_pending_final_cpath_ref->cpath = NULL; service = rend_service_get_by_pk_digest(rend_pk_digest); if (!service) { log_warn(LD_GENERAL, "Internal error: unrecognized service ID on " "rendezvous circuit."); reason = END_CIRC_REASON_INTERNAL; goto err; } /* All we need to do is send a RELAY_RENDEZVOUS1 cell... 
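 *
 * Its payload, assembled just below, is:
 *
 *   [ RC: REND_COOKIE_LEN ][ g^y: DH_KEY_LEN ][ KH: DIGEST_LEN ]
 *
 * i.e. the client's rendezvous cookie, our half of the DH handshake,
 * and the handshake digest the client uses to authenticate the join.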
*/
  memcpy(buf, rend_cookie, REND_COOKIE_LEN);
  if (crypto_dh_get_public(hop->rend_dh_handshake_state,
                           buf+REND_COOKIE_LEN, DH_KEY_LEN)<0) {
    log_warn(LD_GENERAL,"Couldn't get DH public key.");
    reason = END_CIRC_REASON_INTERNAL;
    goto err;
  }
  memcpy(buf+REND_COOKIE_LEN+DH_KEY_LEN, hop->rend_circ_nonce,
         DIGEST_LEN);

  /* Send the cell */
  if (relay_send_command_from_edge(0, TO_CIRCUIT(circuit),
                                   RELAY_COMMAND_RENDEZVOUS1,
                                   buf, REND_COOKIE_LEN+DH_KEY_LEN+DIGEST_LEN,
                                   circuit->cpath->prev)<0) {
    log_warn(LD_GENERAL, "Couldn't send RENDEZVOUS1 cell.");
    goto done;
  }

  crypto_dh_free(hop->rend_dh_handshake_state);
  hop->rend_dh_handshake_state = NULL;

  /* Append the cpath entry. */
  hop->state = CPATH_STATE_OPEN;
  /* Set the windows to default. These are the windows
   * that the service thinks the client has. */
  hop->package_window = circuit_initial_package_window();
  hop->deliver_window = CIRCWINDOW_START;

  onion_append_to_cpath(&circuit->cpath, hop);
  circuit->build_state->pending_final_cpath = NULL; /* prevent double-free */

  /* Change the circuit purpose. */
  circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_S_REND_JOINED);

  goto done;

 err:
  circuit_mark_for_close(TO_CIRCUIT(circuit), reason);
 done:
  memwipe(buf, 0, sizeof(buf));
  memwipe(serviceid, 0, sizeof(serviceid));
  memwipe(hexcookie, 0, sizeof(hexcookie));

  return;
}

/*
 * Manage introduction points
 */

/** Return the (possibly non-open) introduction circuit ending at
 * <b>intro</b> for the service whose public key is <b>pk_digest</b>.
 * Return NULL if no such circuit is found.
 */
static origin_circuit_t *
find_intro_circuit(rend_intro_point_t *intro, const char *pk_digest)
{
  origin_circuit_t *circ = NULL;

  tor_assert(intro);
  while ((circ = circuit_get_next_by_pk_and_purpose(circ,
                     (uint8_t *) pk_digest, CIRCUIT_PURPOSE_S_INTRO))) {
    if (tor_memeq(circ->build_state->chosen_exit->identity_digest,
                  intro->extend_info->identity_digest, DIGEST_LEN) &&
        circ->rend_data) {
      return circ;
    }
  }

  circ = NULL;
  while ((circ = circuit_get_next_by_pk_and_purpose(circ,
                     (uint8_t *) pk_digest,
                     CIRCUIT_PURPOSE_S_ESTABLISH_INTRO))) {
    if (tor_memeq(circ->build_state->chosen_exit->identity_digest,
                  intro->extend_info->identity_digest, DIGEST_LEN) &&
        circ->rend_data) {
      return circ;
    }
  }
  return NULL;
}

/** Return the expiring introduction point, if any, corresponding to the
 * circuit <b>circ</b> of the given <b>service</b>. Return NULL if not
 * found. */
static rend_intro_point_t *
find_expiring_intro_point(rend_service_t *service, origin_circuit_t *circ)
{
  tor_assert(service);
  tor_assert(TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
             TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_INTRO);

  SMARTLIST_FOREACH(service->expiring_nodes, rend_intro_point_t *,
                    intro_point,
    if (crypto_pk_eq_keys(intro_point->intro_key, circ->intro_key)) {
      return intro_point;
  });

  return NULL;
}

/** Return a pointer to the rend_intro_point_t corresponding to the
 * service-side introduction circuit <b>circ</b>.
*/
static rend_intro_point_t *
find_intro_point(origin_circuit_t *circ)
{
  const char *serviceid;
  rend_service_t *service = NULL;

  tor_assert(TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
             TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_INTRO);
  tor_assert(circ->rend_data);
  serviceid = rend_data_get_address(circ->rend_data);

  SMARTLIST_FOREACH(rend_service_list, rend_service_t *, s,
    if (tor_memeq(s->service_id, serviceid, REND_SERVICE_ID_LEN_BASE32)) {
      service = s;
      break;
    });

  if (service == NULL)
    return NULL;

  SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro_point,
    if (crypto_pk_eq_keys(intro_point->intro_key, circ->intro_key)) {
      return intro_point;
  });

  return NULL;
}

/** Upload the rend_encoded_v2_service_descriptor_t's in <b>descs</b>
 * associated with the rend_service_descriptor_t <b>renddesc</b> to
 * the responsible hidden service directories OR the hidden service
 * directories specified by <b>hs_dirs</b>; <b>service_id</b> and
 * <b>seconds_valid</b> are only passed for logging purposes.
 */
void
directory_post_to_hs_dir(rend_service_descriptor_t *renddesc,
                         smartlist_t *descs, smartlist_t *hs_dirs,
                         const char *service_id, int seconds_valid)
{
  int i, j, failed_upload = 0;
  smartlist_t *responsible_dirs = smartlist_new();
  smartlist_t *successful_uploads = smartlist_new();
  routerstatus_t *hs_dir;
  for (i = 0; i < smartlist_len(descs); i++) {
    rend_encoded_v2_service_descriptor_t *desc = smartlist_get(descs, i);
    /** If any HSDirs are specified, they should be used instead of
     *  the responsible directories */
    if (hs_dirs && smartlist_len(hs_dirs) > 0) {
      smartlist_add_all(responsible_dirs, hs_dirs);
    } else {
      /* Determine responsible dirs. */
      if (hid_serv_get_responsible_directories(responsible_dirs,
                                               desc->desc_id) < 0) {
        log_warn(LD_REND, "Could not determine the responsible hidden service "
                          "directories to post descriptors to.");
        control_event_hs_descriptor_upload(service_id,
                                           "UNKNOWN",
                                           "UNKNOWN");
        goto done;
      }
    }
    for (j = 0; j < smartlist_len(responsible_dirs); j++) {
      char desc_id_base32[REND_DESC_ID_V2_LEN_BASE32 + 1];
      char *hs_dir_ip;
      const node_t *node;
      rend_data_t *rend_data;
      hs_dir = smartlist_get(responsible_dirs, j);
      if (smartlist_contains_digest(renddesc->successful_uploads,
                                    hs_dir->identity_digest))
        /* Don't upload descriptor if we succeeded in doing so last time. */
        continue;
      node = node_get_by_id(hs_dir->identity_digest);
      if (!node || !node_has_descriptor(node)) {
        log_info(LD_REND, "Not launching upload for v2 descriptor to "
                          "hidden service directory %s; we don't have its "
                          "router descriptor. Queuing for later upload.",
                 safe_str_client(routerstatus_describe(hs_dir)));
        failed_upload = -1;
        continue;
      }
      /* Send publish request. */

      /* We need the service ID to identify which service did the upload
       * request. Lookup is made in rend_service_desc_has_uploaded().
*/ rend_data = rend_data_client_create(service_id, desc->desc_id, NULL, REND_NO_AUTH); directory_initiate_command_routerstatus_rend(hs_dir, DIR_PURPOSE_UPLOAD_RENDDESC_V2, ROUTER_PURPOSE_GENERAL, DIRIND_ANONYMOUS, NULL, desc->desc_str, strlen(desc->desc_str), 0, rend_data, NULL); rend_data_free(rend_data); base32_encode(desc_id_base32, sizeof(desc_id_base32), desc->desc_id, DIGEST_LEN); hs_dir_ip = tor_dup_ip(hs_dir->addr); log_info(LD_REND, "Launching upload for v2 descriptor for " "service '%s' with descriptor ID '%s' with validity " "of %d seconds to hidden service directory '%s' on " "%s:%d.", safe_str_client(service_id), safe_str_client(desc_id_base32), seconds_valid, hs_dir->nickname, hs_dir_ip, hs_dir->or_port); control_event_hs_descriptor_upload(service_id, hs_dir->identity_digest, desc_id_base32); tor_free(hs_dir_ip); /* Remember successful upload to this router for next time. */ if (!smartlist_contains_digest(successful_uploads, hs_dir->identity_digest)) smartlist_add(successful_uploads, hs_dir->identity_digest); } smartlist_clear(responsible_dirs); } if (!failed_upload) { if (renddesc->successful_uploads) { SMARTLIST_FOREACH(renddesc->successful_uploads, char *, c, tor_free(c);); smartlist_free(renddesc->successful_uploads); renddesc->successful_uploads = NULL; } renddesc->all_uploads_performed = 1; } else { /* Remember which routers worked this time, so that we don't upload the * descriptor to them again. */ if (!renddesc->successful_uploads) renddesc->successful_uploads = smartlist_new(); SMARTLIST_FOREACH(successful_uploads, const char *, c, { if (!smartlist_contains_digest(renddesc->successful_uploads, c)) { char *hsdir_id = tor_memdup(c, DIGEST_LEN); smartlist_add(renddesc->successful_uploads, hsdir_id); } }); } done: smartlist_free(responsible_dirs); smartlist_free(successful_uploads); } /** Encode and sign an up-to-date service descriptor for <b>service</b>, * and upload it/them to the responsible hidden service directories. */ static void upload_service_descriptor(rend_service_t *service) { time_t now = time(NULL); int rendpostperiod; char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; int uploaded = 0; rendpostperiod = get_options()->RendPostPeriod; networkstatus_t *c = networkstatus_get_latest_consensus(); if (c && smartlist_len(c->routerstatus_list) > 0) { int seconds_valid, i, j, num_descs; smartlist_t *descs = smartlist_new(); smartlist_t *client_cookies = smartlist_new(); /* Either upload a single descriptor (including replicas) or one * descriptor for each authorized client in case of authorization * type 'stealth'. */ num_descs = service->auth_type == REND_STEALTH_AUTH ? smartlist_len(service->clients) : 1; for (j = 0; j < num_descs; j++) { crypto_pk_t *client_key = NULL; rend_authorized_client_t *client = NULL; smartlist_clear(client_cookies); switch (service->auth_type) { case REND_NO_AUTH: /* Do nothing here. */ break; case REND_BASIC_AUTH: SMARTLIST_FOREACH(service->clients, rend_authorized_client_t *, cl, smartlist_add(client_cookies, cl->descriptor_cookie)); break; case REND_STEALTH_AUTH: client = smartlist_get(service->clients, j); client_key = client->client_key; smartlist_add(client_cookies, client->descriptor_cookie); break; } /* Encode the current descriptor. 
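 *
 * rend_encode_v2_descriptors() appends one encoded descriptor per
 * replica to <b>descs</b> and returns the number of seconds those
 * descriptors remain valid.  That validity window drives the
 * next_upload_time computation further down: roughly, upload again one
 * RendPostPeriod from now, but no later than when the current
 * descriptors stop being valid.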
*/ seconds_valid = rend_encode_v2_descriptors(descs, service->desc, now, 0, service->auth_type, client_key, client_cookies); if (seconds_valid < 0) { log_warn(LD_BUG, "Internal error: couldn't encode service " "descriptor; not uploading."); smartlist_free(descs); smartlist_free(client_cookies); return; } rend_get_service_id(service->desc->pk, serviceid); if (get_options()->PublishHidServDescriptors) { /* Post the current descriptors to the hidden service directories. */ log_info(LD_REND, "Launching upload for hidden service %s", serviceid); directory_post_to_hs_dir(service->desc, descs, NULL, serviceid, seconds_valid); } /* Free memory for descriptors. */ for (i = 0; i < smartlist_len(descs); i++) rend_encoded_v2_service_descriptor_free(smartlist_get(descs, i)); smartlist_clear(descs); /* Update next upload time. */ if (seconds_valid - REND_TIME_PERIOD_OVERLAPPING_V2_DESCS > rendpostperiod) service->next_upload_time = now + rendpostperiod; else if (seconds_valid < REND_TIME_PERIOD_OVERLAPPING_V2_DESCS) service->next_upload_time = now + seconds_valid + 1; else service->next_upload_time = now + seconds_valid - REND_TIME_PERIOD_OVERLAPPING_V2_DESCS + 1; /* Post also the next descriptors, if necessary. */ if (seconds_valid < REND_TIME_PERIOD_OVERLAPPING_V2_DESCS) { seconds_valid = rend_encode_v2_descriptors(descs, service->desc, now, 1, service->auth_type, client_key, client_cookies); if (seconds_valid < 0) { log_warn(LD_BUG, "Internal error: couldn't encode service " "descriptor; not uploading."); smartlist_free(descs); smartlist_free(client_cookies); return; } if (get_options()->PublishHidServDescriptors) { directory_post_to_hs_dir(service->desc, descs, NULL, serviceid, seconds_valid); } /* Free memory for descriptors. */ for (i = 0; i < smartlist_len(descs); i++) rend_encoded_v2_service_descriptor_free(smartlist_get(descs, i)); smartlist_clear(descs); } } smartlist_free(descs); smartlist_free(client_cookies); uploaded = 1; if (get_options()->PublishHidServDescriptors) { log_info(LD_REND, "Successfully uploaded v2 rend descriptors!"); } else { log_info(LD_REND, "Successfully stored created v2 rend descriptors!"); } } /* If not uploaded, try again in one minute. */ if (!uploaded) service->next_upload_time = now + 60; /* Unmark dirty flag of this service. */ service->desc_is_dirty = 0; } /** Return the number of INTRODUCE2 cells this hidden service has received * from this intro point. */ static int intro_point_accepted_intro_count(rend_intro_point_t *intro) { return intro->accepted_introduce2_count; } /** Return non-zero iff <b>intro</b> should 'expire' now (i.e. we * should stop publishing it in new descriptors and eventually close * it). */ static int intro_point_should_expire_now(rend_intro_point_t *intro, time_t now) { tor_assert(intro != NULL); if (intro->time_published == -1) { /* Don't expire an intro point if we haven't even published it yet. */ return 0; } if (intro_point_accepted_intro_count(intro) >= intro->max_introductions) { /* This intro point has been used too many times. Expire it now. */ return 1; } if (intro->time_to_expire == -1) { /* This intro point has been published, but we haven't picked an * expiration time for it. Pick one now. */ int intro_point_lifetime_seconds = crypto_rand_int_range(INTRO_POINT_LIFETIME_MIN_SECONDS, INTRO_POINT_LIFETIME_MAX_SECONDS); /* Start the expiration timer now, rather than when the intro * point was first published. There shouldn't be much of a time * difference. 
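* (As an illustration of the draw below: crypto_rand_int_range() returns a
 * value from the half-open interval [min, max), so with the stock
 * INTRO_POINT_LIFETIME_* bounds -- 18 and 24 hours at the time of
 * writing -- the point is scheduled to expire between 18 hours and just
 * under 24 hours from now.)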
*/ intro->time_to_expire = now + intro_point_lifetime_seconds; return 0; } /* This intro point has a time to expire set already. Use it. */ return (now >= intro->time_to_expire); } /** Iterate over intro points in the given service and remove the invalid * ones. For an intro point object to be considered invalid, the circuit * _and_ node need to have disappeared. * * If the intro point should expire, it's placed into the expiring_nodes * list of the service and removed from the active intro nodes list. * * If <b>exclude_nodes</b> is not NULL, add the valid nodes to it. * * If <b>retry_nodes</b> is not NULL, add the valid node to it if the * circuit disappeared but the node is still in the consensus. */ static void remove_invalid_intro_points(rend_service_t *service, smartlist_t *exclude_nodes, smartlist_t *retry_nodes, time_t now) { tor_assert(service); /* Remove any expired nodes that don't have a circuit. */ SMARTLIST_FOREACH_BEGIN(service->expiring_nodes, rend_intro_point_t *, intro) { origin_circuit_t *intro_circ = find_intro_circuit(intro, service->pk_digest); if (intro_circ) { continue; } /* No more circuit; clean up the intro point object. */ SMARTLIST_DEL_CURRENT(service->expiring_nodes, intro); rend_intro_point_free(intro); } SMARTLIST_FOREACH_END(intro); SMARTLIST_FOREACH_BEGIN(service->intro_nodes, rend_intro_point_t *, intro) { /* Find the introduction point node object. */ const node_t *node = node_get_by_id(intro->extend_info->identity_digest); /* Find the intro circuit; this might be NULL. */ origin_circuit_t *intro_circ = find_intro_circuit(intro, service->pk_digest); /* Add the valid node to the exclusion list so we don't try to establish * an introduction point to it again. */ if (node && exclude_nodes) { smartlist_add(exclude_nodes, (void*) node); } /* First, make sure we still have a valid circuit for this intro point. * If we don't, we'll give up on it and make a new one. */ if (intro_circ == NULL) { log_info(LD_REND, "Attempting to retry on %s as intro point for %s" " (circuit disappeared).", safe_str_client(extend_info_describe(intro->extend_info)), safe_str_client(service->service_id)); /* We've lost the circuit for this intro point, flag it so it can be * accounted for when considering uploading a descriptor. */ intro->circuit_established = 0; /* Node is gone or we've reached our maximum circuit creation retry * count; clean up everything, we'll find a new one. */ if (node == NULL || intro->circuit_retries >= MAX_INTRO_POINT_CIRCUIT_RETRIES) { rend_intro_point_free(intro); SMARTLIST_DEL_CURRENT(service->intro_nodes, intro); /* We've just killed the intro point, nothing left to do. */ continue; } /* The intro point is still alive so let's try to use it again because * we have a published descriptor containing it. Keep the intro point * in the intro_nodes list because it's still valid; we are rebuilding * a circuit to it. */ if (retry_nodes) { smartlist_add(retry_nodes, intro); } } /* else, the circuit is valid so in both cases, node being alive or not, * we leave the circuit and intro point object as is. Closing the * circuit here would leak new consensus timing and freeing the intro * point object would make the intro circuit unusable. */ /* Now, check if the intro point should expire. If it does, queue it so * it can be cleaned up once it has been replaced properly. 
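* The full lifecycle is thus: a point lives in intro_nodes, moves to
 * expiring_nodes here, and is finally closed and freed in
 * rend_service_desc_has_uploaded() once a descriptor that no longer lists
 * it has been published.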
*/ if (intro_point_should_expire_now(intro, now)) { log_info(LD_REND, "Expiring %s as intro point for %s.", safe_str_client(extend_info_describe(intro->extend_info)), safe_str_client(service->service_id)); smartlist_add(service->expiring_nodes, intro); SMARTLIST_DEL_CURRENT(service->intro_nodes, intro); /* The intro point is expired and we need a new one, so don't consider it * a valid established intro point anymore. */ intro->circuit_established = 0; } } SMARTLIST_FOREACH_END(intro); } /** A new descriptor has been successfully uploaded for the given * <b>rend_data</b>. Remove and free the expiring nodes from the associated * service. */ void rend_service_desc_has_uploaded(const rend_data_t *rend_data) { rend_service_t *service; const char *onion_address; tor_assert(rend_data); onion_address = rend_data_get_address(rend_data); service = rend_service_get_by_service_id(onion_address); if (service == NULL) { return; } SMARTLIST_FOREACH_BEGIN(service->expiring_nodes, rend_intro_point_t *, intro) { origin_circuit_t *intro_circ = find_intro_circuit(intro, service->pk_digest); if (intro_circ != NULL) { circuit_mark_for_close(TO_CIRCUIT(intro_circ), END_CIRC_REASON_FINISHED); } SMARTLIST_DEL_CURRENT(service->expiring_nodes, intro); rend_intro_point_free(intro); } SMARTLIST_FOREACH_END(intro); } /** Don't try to build more than this many circuits before giving up * for a while. Dynamically calculated based on the configured number of * introduction points for the service, n_intro_points_wanted. */ static int rend_max_intro_circs_per_period(unsigned int n_intro_points_wanted) { /* Allow all but one of the initial connections to fail and be * retried. (If all fail, we *want* to wait, because something is broken.) */ tor_assert(n_intro_points_wanted <= NUM_INTRO_POINTS_MAX); return (int)(2*n_intro_points_wanted + NUM_INTRO_POINTS_EXTRA); } /** For every service, check how many intro points it currently has, and: * - Invalidate introduction points based on specific criteria, see * remove_invalid_intro_points comments. * - Pick new intro points as necessary. * - Launch circuits to any new intro points. * * This is called once a second by the main loop. */ void rend_consider_services_intro_points(void) { int i; time_t now; const or_options_t *options = get_options(); /* Are we in single onion mode? */ const int allow_direct = rend_service_allow_non_anonymous_connection( get_options()); /* List of nodes we need to _exclude_ when choosing a new node to * establish an intro point to. */ smartlist_t *exclude_nodes; /* List of nodes we need to retry building a circuit to, because the * node is valid but its circuit died. */ smartlist_t *retry_nodes; if (!have_completed_a_circuit()) return; exclude_nodes = smartlist_new(); retry_nodes = smartlist_new(); now = time(NULL); SMARTLIST_FOREACH_BEGIN(rend_service_list, rend_service_t *, service) { int r; /* Number of intro points we want to open and add to the intro nodes * list of the service. */ unsigned int n_intro_points_to_open; /* Use an unsigned len so we can compare values; otherwise gcc is * not happy about a mismatched signed comparison. */ unsigned int intro_nodes_len; /* Different services are allowed to have the same introduction point as * long as they are on different circuits, which is why we clear this * list. */ smartlist_clear(exclude_nodes); smartlist_clear(retry_nodes); /* Clean up the invalid intro points and save the node objects, if any, * in the exclude_nodes and retry_nodes lists. 
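* As a worked example of the launch budget enforced just below: with the
 * default of 3 wanted intro points and NUM_INTRO_POINTS_EXTRA == 2,
 * rend_max_intro_circs_per_period() allows 2*3 + 2 = 8 circuit launches
 * per INTRO_CIRC_RETRY_PERIOD.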
*/ remove_invalid_intro_points(service, exclude_nodes, retry_nodes, now); /* This retry period is important here so we don't stress circuit * creation. */ if (now > service->intro_period_started + INTRO_CIRC_RETRY_PERIOD) { /* One period has elapsed; we can try building circuits again. */ service->intro_period_started = now; service->n_intro_circuits_launched = 0; } else if (service->n_intro_circuits_launched >= rend_max_intro_circs_per_period( service->n_intro_points_wanted)) { /* We have failed too many times in this period; wait for the next * one before we try to initiate any more connections. */ continue; } /* Let's try to rebuild circuits to the nodes we want to retry on. */ SMARTLIST_FOREACH_BEGIN(retry_nodes, rend_intro_point_t *, intro) { r = rend_service_launch_establish_intro(service, intro); if (r < 0) { log_warn(LD_REND, "Error launching circuit to node %s for service %s.", safe_str_client(extend_info_describe(intro->extend_info)), safe_str_client(service->service_id)); /* Unable to launch a circuit to that intro point; remove it from * the valid list so we can create a new one. */ smartlist_remove(service->intro_nodes, intro); rend_intro_point_free(intro); continue; } intro->circuit_retries++; } SMARTLIST_FOREACH_END(intro); /* Avoid a mismatched signed comparison below. */ intro_nodes_len = (unsigned int) smartlist_len(service->intro_nodes); /* Quiescent state: we have at least the wanted number of nodes for * this service. Proceed to the next service. We can have more nodes * because we launch extra preemptive circuits if our intro nodes list was * originally empty, for performance reasons. */ if (intro_nodes_len >= service->n_intro_points_wanted) { continue; } /* Number of intro points we want to open, which is the wanted amount minus * the current amount of valid nodes. We know that this won't underflow * because of the check above. */ n_intro_points_to_open = service->n_intro_points_wanted - intro_nodes_len; if (intro_nodes_len == 0) { /* We want to end up with n_intro_points_wanted intro points, but if * we have no intro points at all (chances are they all cycled or we * are starting up), we launch NUM_INTRO_POINTS_EXTRA extra circuits * and use the first n_intro_points_wanted that complete. See proposal * #155, section 4 for the rationale of this, which is purely for * performance. * * The ones after the first n_intro_points_to_open will be converted * to 'general' internal circuits in rend_service_intro_has_opened(), * and then we'll drop them from the list of intro points. */ n_intro_points_to_open += NUM_INTRO_POINTS_EXTRA; } for (i = 0; i < (int) n_intro_points_to_open; i++) { const node_t *node; rend_intro_point_t *intro; router_crn_flags_t flags = CRN_NEED_UPTIME|CRN_NEED_DESC; if (get_options()->AllowInvalid_ & ALLOW_INVALID_INTRODUCTION) flags |= CRN_ALLOW_INVALID; router_crn_flags_t direct_flags = flags; direct_flags |= CRN_PREF_ADDR; direct_flags |= CRN_DIRECT_CONN; node = router_choose_random_node(exclude_nodes, options->ExcludeNodes, allow_direct ? 
direct_flags : flags); /* If we are in single onion mode, retry node selection for a 3-hop * path. */ if (allow_direct && !node) { log_info(LD_REND, "Unable to find an intro point that we can connect to " "directly for %s, falling back to a 3-hop path.", safe_str_client(service->service_id)); node = router_choose_random_node(exclude_nodes, options->ExcludeNodes, flags); } if (!node) { log_warn(LD_REND, "We only have %d introduction points established for %s; " "wanted %u.", smartlist_len(service->intro_nodes), safe_str_client(service->service_id), n_intro_points_to_open); break; } /* Add the chosen node to the exclusion list in order to avoid picking * it again in the next iteration. */ smartlist_add(exclude_nodes, (void*)node); intro = tor_malloc_zero(sizeof(rend_intro_point_t)); /* extend_info is for clients, so we want the multi-hop primary ORPort, * even if we are a single onion service and intend to connect to it * directly ourselves. */ intro->extend_info = extend_info_from_node(node, 0); if (BUG(intro->extend_info == NULL)) { break; } intro->intro_key = crypto_pk_new(); const int fail = crypto_pk_generate_key(intro->intro_key); tor_assert(!fail); intro->time_published = -1; intro->time_to_expire = -1; intro->max_introductions = crypto_rand_int_range(INTRO_POINT_MIN_LIFETIME_INTRODUCTIONS, INTRO_POINT_MAX_LIFETIME_INTRODUCTIONS); smartlist_add(service->intro_nodes, intro); log_info(LD_REND, "Picked router %s as an intro point for %s.", safe_str_client(node_describe(node)), safe_str_client(service->service_id)); /* Establish a new introduction circuit to our chosen intro point. */ r = rend_service_launch_establish_intro(service, intro); if (r < 0) { log_warn(LD_REND, "Error launching circuit to node %s for service %s.", safe_str_client(extend_info_describe(intro->extend_info)), safe_str_client(service->service_id)); /* This function will be called again by the main loop, so this intro * point without an intro circuit will be retried or removed after * a maximum number of attempts. */ } } } SMARTLIST_FOREACH_END(service); smartlist_free(exclude_nodes); smartlist_free(retry_nodes); } #define MIN_REND_INITIAL_POST_DELAY (30) #define MIN_REND_INITIAL_POST_DELAY_TESTING (5) /** Regenerate and upload rendezvous service descriptors for all * services, if necessary. If the descriptor has been dirty enough * for long enough, definitely upload; else only upload when the * periodic timeout has expired. * * For the first upload, pick a random time between now and two periods * from now, and pick it independently for each service. */ void rend_consider_services_upload(time_t now) { int i; rend_service_t *service; const or_options_t *options = get_options(); int rendpostperiod = options->RendPostPeriod; int rendinitialpostdelay = (options->TestingTorNetwork ? MIN_REND_INITIAL_POST_DELAY_TESTING : MIN_REND_INITIAL_POST_DELAY); for (i=0; i < smartlist_len(rend_service_list); ++i) { service = smartlist_get(rend_service_list, i); if (!service->next_upload_time) { /* never been uploaded yet */ /* The fixed lower bound of rendinitialpostdelay seconds ensures that * the descriptor is stable before being published. See comment below. */ service->next_upload_time = now + rendinitialpostdelay + crypto_rand_int(2*rendpostperiod); /* Single Onion Services prioritise availability over hiding their * startup time, as their IP address is publicly discoverable anyway. 
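* (Worked example of the smearing above: the first upload lands uniformly
 * in [now + rendinitialpostdelay, now + rendinitialpostdelay +
 * 2*rendpostperiod), so with the stock 30-second delay and a hypothetical
 * one-hour RendPostPeriod that is a two-hour window. Single onion
 * services skip the smearing below and post after the fixed delay only.)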
*/ if (rend_service_reveal_startup_time(options)) { service->next_upload_time = now + rendinitialpostdelay; } } /* Have all introduction points been established? */ unsigned int intro_points_ready = count_established_intro_points(service) >= service->n_intro_points_wanted; if (intro_points_ready && (service->next_upload_time < now || (service->desc_is_dirty && service->desc_is_dirty < now-rendinitialpostdelay))) { /* If it's time, or if the directory servers have a wrong service * descriptor and ours has been stable for rendinitialpostdelay seconds, * upload a new one of each format. */ rend_service_update_descriptor(service); upload_service_descriptor(service); } } } /** True if the list of available router descriptors might have changed so * that we should have a look whether we can republish previously failed * rendezvous service descriptors. */ static int consider_republishing_rend_descriptors = 1; /** Called when our internal view of the directory has changed, so that we * might have router descriptors of hidden service directories available that * we did not have before. */ void rend_hsdir_routers_changed(void) { consider_republishing_rend_descriptors = 1; } /** Consider republication of v2 rendezvous service descriptors that failed * previously, but without regenerating descriptor contents. */ void rend_consider_descriptor_republication(void) { int i; rend_service_t *service; if (!consider_republishing_rend_descriptors) return; consider_republishing_rend_descriptors = 0; if (!get_options()->PublishHidServDescriptors) return; for (i=0; i < smartlist_len(rend_service_list); ++i) { service = smartlist_get(rend_service_list, i); if (service->desc && !service->desc->all_uploads_performed) { /* If we failed in uploading a descriptor last time, try again *without* * updating the descriptor's contents. */ upload_service_descriptor(service); } } } /** Log the status of introduction points for all rendezvous services * at log severity <b>severity</b>. */ void rend_service_dump_stats(int severity) { int i,j; rend_service_t *service; rend_intro_point_t *intro; const char *safe_name; origin_circuit_t *circ; for (i=0; i < smartlist_len(rend_service_list); ++i) { service = smartlist_get(rend_service_list, i); tor_log(severity, LD_GENERAL, "Service configured in %s:", rend_service_escaped_dir(service)); for (j=0; j < smartlist_len(service->intro_nodes); ++j) { intro = smartlist_get(service->intro_nodes, j); safe_name = safe_str_client(intro->extend_info->nickname); circ = find_intro_circuit(intro, service->pk_digest); if (!circ) { tor_log(severity, LD_GENERAL, " Intro point %d at %s: no circuit", j, safe_name); continue; } tor_log(severity, LD_GENERAL, " Intro point %d at %s: circuit is %s", j, safe_name, circuit_state_to_string(circ->base_.state)); } } } #ifdef HAVE_SYS_UN_H /** Given <b>ports</b>, a smartlist containing rend_service_port_config_t, * add the given <b>p</b>, an AF_UNIX port, to the list. Return 0 on success; * otherwise return -ENOSYS if AF_UNIX is not supported (see the function in * the #else statement below). */ static int add_unix_port(smartlist_t *ports, rend_service_port_config_t *p) { tor_assert(ports); tor_assert(p); tor_assert(p->is_unix_addr); smartlist_add(ports, p); return 0; } /** Given <b>conn</b>, set it to use the given port <b>p</b> values. Return 0 * on success; otherwise return -ENOSYS if AF_UNIX is not supported (see the * function in the #else statement below). 
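* A mapping of this kind typically comes from a torrc line such as
 * "HiddenServicePort 80 unix:/var/run/hs.sock"; the socket path shown is
 * purely illustrative.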
*/ static int set_unix_port(edge_connection_t *conn, rend_service_port_config_t *p) { tor_assert(conn); tor_assert(p); tor_assert(p->is_unix_addr); conn->base_.socket_family = AF_UNIX; tor_addr_make_unspec(&conn->base_.addr); conn->base_.port = 1; conn->base_.address = tor_strdup(p->unix_addr); return 0; } #else /* defined(HAVE_SYS_UN_H) */ static int set_unix_port(edge_connection_t *conn, rend_service_port_config_t *p) { (void) conn; (void) p; return -ENOSYS; } static int add_unix_port(smartlist_t *ports, rend_service_port_config_t *p) { (void) ports; (void) p; return -ENOSYS; } #endif /* HAVE_SYS_UN_H */ /** Given <b>conn</b>, a rendezvous exit stream, look up the hidden service for * 'circ', and look up the port and address based on conn-\>port. * Assign the actual conn-\>addr and conn-\>port. Return -2 on failure * for which the circuit should be closed, -1 on other failure, * or 0 for success. */ int rend_service_set_connection_addr_port(edge_connection_t *conn, origin_circuit_t *circ) { rend_service_t *service; char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; smartlist_t *matching_ports; rend_service_port_config_t *chosen_port; unsigned int warn_once = 0; const char *rend_pk_digest; tor_assert(circ->base_.purpose == CIRCUIT_PURPOSE_S_REND_JOINED); tor_assert(circ->rend_data); log_debug(LD_REND,"beginning to hunt for addr/port"); /* XXX: This is version 2 specific (only one supported). */ rend_pk_digest = (char *) rend_data_get_pk_digest(circ->rend_data, NULL); base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1, rend_pk_digest, REND_SERVICE_ID_LEN); service = rend_service_get_by_pk_digest(rend_pk_digest); if (!service) { log_warn(LD_REND, "Couldn't find any service associated with pk %s on " "rendezvous circuit %u; closing.", serviceid, (unsigned)circ->base_.n_circ_id); return -2; } if (service->max_streams_per_circuit > 0) { /* Enforce the streams-per-circuit limit, and refuse to provide a * mapping if this circuit will exceed the limit. */ #define MAX_STREAM_WARN_INTERVAL 600 static struct ratelim_t stream_ratelim = RATELIM_INIT(MAX_STREAM_WARN_INTERVAL); if (circ->rend_data->nr_streams >= service->max_streams_per_circuit) { log_fn_ratelim(&stream_ratelim, LOG_WARN, LD_REND, "Maximum streams per circuit limit reached on rendezvous " "circuit %u; %s. Circuit has %d out of %d streams.", (unsigned)circ->base_.n_circ_id, service->max_streams_close_circuit ? "closing circuit" : "ignoring open stream request", circ->rend_data->nr_streams, service->max_streams_per_circuit); return service->max_streams_close_circuit ? -2 : -1; } } matching_ports = smartlist_new(); SMARTLIST_FOREACH(service->ports, rend_service_port_config_t *, p, { if (conn->base_.port != p->virtual_port) { continue; } if (!(p->is_unix_addr)) { smartlist_add(matching_ports, p); } else { if (add_unix_port(matching_ports, p)) { if (!warn_once) { /* Unix port not supported so warn only once. */ log_warn(LD_REND, "Saw AF_UNIX virtual port mapping for port %d on service " "%s, which is unsupported on this platform. 
Ignoring it.", conn->base_.port, serviceid); } warn_once++; } } }); chosen_port = smartlist_choose(matching_ports); smartlist_free(matching_ports); if (chosen_port) { if (!(chosen_port->is_unix_addr)) { /* Get a non-AF_UNIX connection ready for connection_exit_connect() */ tor_addr_copy(&conn->base_.addr, &chosen_port->real_addr); conn->base_.port = chosen_port->real_port; } else { if (set_unix_port(conn, chosen_port)) { /* It should be impossible to end up here; otherwise we managed to add * a Unix port without AF_UNIX support. */ tor_assert(0); } } return 0; } log_info(LD_REND, "No virtual port mapping exists for port %d on service %s", conn->base_.port, serviceid); if (service->allow_unknown_ports) return -1; else return -2; } /* Are HiddenServiceSingleHopMode and HiddenServiceNonAnonymousMode consistent? */ static int rend_service_non_anonymous_mode_consistent(const or_options_t *options) { /* !! is used to make these options boolean */ return (!! options->HiddenServiceSingleHopMode == !! options->HiddenServiceNonAnonymousMode); } /* Do the options allow onion services to make direct (non-anonymous) * connections to introduction or rendezvous points? * Must only be called after options_validate_single_onion() has successfully * checked onion service option consistency. * Returns true if tor is in HiddenServiceSingleHopMode. */ int rend_service_allow_non_anonymous_connection(const or_options_t *options) { tor_assert(rend_service_non_anonymous_mode_consistent(options)); return options->HiddenServiceSingleHopMode ? 1 : 0; } /* Do the options allow us to reveal the exact startup time of the onion * service? * Single Onion Services prioritise availability over hiding their * startup time, as their IP address is publicly discoverable anyway. * Must only be called after options_validate_single_onion() has successfully * checked onion service option consistency. * Returns true if tor is in non-anonymous hidden service mode. */ int rend_service_reveal_startup_time(const or_options_t *options) { tor_assert(rend_service_non_anonymous_mode_consistent(options)); return rend_service_non_anonymous_mode_enabled(options); } /* Is non-anonymous mode enabled using the HiddenServiceNonAnonymousMode * config option? * Must only be called after options_validate_single_onion() has successfully * checked onion service option consistency. */ int rend_service_non_anonymous_mode_enabled(const or_options_t *options) { tor_assert(rend_service_non_anonymous_mode_consistent(options)); return options->HiddenServiceNonAnonymousMode ? 1 : 0; }
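
/* A minimal illustrative sketch of how the three mode predicates above
 * relate on a configuration that has already passed
 * options_validate_single_onion(); the helper name and log message are
 * invented for the example, and the block is kept under #if 0 so it is
 * never compiled. */
#if 0
static void
log_onion_service_mode(const or_options_t *options)
{
  /* On a consistent configuration both torrc options are equal booleans,
   * so these two predicates always agree. */
  int single_hop = rend_service_allow_non_anonymous_connection(options);
  int reveal = rend_service_reveal_startup_time(options);
  log_info(LD_REND, "single-hop circuits allowed: %d, "
           "startup time revealed: %d", single_hop, reveal);
}
#endif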
/* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. * Copyright (c) 2007-2016, The Tor Project, Inc. */ /* See LICENSE for licensing information */ /** * \file rendservice.c * \brief The hidden-service side of rendezvous functionality. **/ #define RENDSERVICE_PRIVATE #include "or.h" #include "circpathbias.h" #include "circuitbuild.h" #include "circuitlist.h" #include "circuituse.h" #include "config.h" #include "control.h" #include "directory.h" #include "hs_common.h" #include "main.h" #include "networkstatus.h" #include "nodelist.h" #include "policies.h" #include "rendclient.h" #include "rendcommon.h" #include "rendservice.h" #include "router.h" #include "relay.h" #include "rephist.h" #include "replaycache.h" #include "routerlist.h" #include "routerparse.h" #include "routerset.h" struct rend_service_t; static origin_circuit_t *find_intro_circuit(rend_intro_point_t *intro, const char *pk_digest); static rend_intro_point_t *find_intro_point(origin_circuit_t *circ); static rend_intro_point_t *find_expiring_intro_point( struct rend_service_t *service, origin_circuit_t *circ); static extend_info_t *find_rp_for_intro( const rend_intro_cell_t *intro, char **err_msg_out); static int intro_point_accepted_intro_count(rend_intro_point_t *intro); static int intro_point_should_expire_now(rend_intro_point_t *intro, time_t now); static int rend_service_derive_key_digests(struct rend_service_t *s); static int rend_service_load_keys(struct rend_service_t *s); static int rend_service_load_auth_keys(struct rend_service_t *s, const char *hfname); static struct rend_service_t *rend_service_get_by_pk_digest( const char* digest); static struct rend_service_t *rend_service_get_by_service_id(const char *id); static const char *rend_service_escaped_dir( const struct rend_service_t *s); static ssize_t rend_service_parse_intro_for_v0_or_v1( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out); static ssize_t rend_service_parse_intro_for_v2( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out); static ssize_t rend_service_parse_intro_for_v3( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out); static int rend_service_check_private_dir(const or_options_t *options, const rend_service_t *s, int create); static int rend_service_check_private_dir_impl(const or_options_t *options, const rend_service_t *s, int create); static const smartlist_t* rend_get_service_list( const smartlist_t* substitute_service_list); static smartlist_t* rend_get_service_list_mutable( smartlist_t* substitute_service_list); /** Represents the mapping from a virtual port of a rendezvous service to * a real port on some IP. */ struct rend_service_port_config_s { /* The incoming HS virtual port we're mapping */ uint16_t virtual_port; /* Is this an AF_UNIX port? */ unsigned int is_unix_addr:1; /* The outgoing TCP port to use, if !is_unix_addr */ uint16_t real_port; /* The outgoing IPv4 or IPv6 address to use, if !is_unix_addr */ tor_addr_t real_addr; /* The socket path to connect to, if is_unix_addr */ char unix_addr[FLEXIBLE_ARRAY_MEMBER]; }; /** Try to maintain this many intro points per service by default. */ #define NUM_INTRO_POINTS_DEFAULT 3 /** Maximum number of intro points per service. */ #define NUM_INTRO_POINTS_MAX 10 /** Number of extra intro points we launch if our set of intro nodes is * empty. See proposal 155, section 4. */ #define NUM_INTRO_POINTS_EXTRA 2 /** If we can't build our intro circuits, don't retry for this long. 
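* (Five minutes; rend_consider_services_intro_points() combines this
 * period with rend_max_intro_circs_per_period() to bound how many intro
 * circuits a service may launch before backing off.)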
*/ #define INTRO_CIRC_RETRY_PERIOD (60*5) /** How many times will a hidden service operator attempt to connect to * a requested rendezvous point before giving up? */ #define MAX_REND_FAILURES 1 /** How many seconds should we spend trying to connect to a requested * rendezvous point before giving up? */ #define MAX_REND_TIMEOUT 30 /* Hidden service directory file names: * new file names should be added to rend_service_add_filenames_to_list() * for sandboxing purposes. */ static const char *private_key_fname = "private_key"; static const char *hostname_fname = "hostname"; static const char *client_keys_fname = "client_keys"; static const char *sos_poison_fname = "onion_service_non_anonymous"; /** A list of rend_service_t's for services run on this OP. */ static smartlist_t *rend_service_list = NULL; /* Like rend_get_service_list_mutable, but returns a read-only list. */ static const smartlist_t* rend_get_service_list(const smartlist_t* substitute_service_list) { /* It is safe to cast away the const here, because * rend_get_service_list_mutable does not actually modify the list. */ return rend_get_service_list_mutable((smartlist_t*)substitute_service_list); } /* Return a mutable list of hidden services. * If substitute_service_list is not NULL, return it. * Otherwise, check if the global rend_service_list is non-NULL, and if so, * return it. * Otherwise, log a BUG message and return NULL. */ static smartlist_t* rend_get_service_list_mutable(smartlist_t* substitute_service_list) { if (substitute_service_list) { return substitute_service_list; } /* If no special service list is provided, then just use the global one. */ if (BUG(!rend_service_list)) { /* No global HS list, which is a programmer error. */ return NULL; } return rend_service_list; } /** Tells if onion service <b>s</b> is ephemeral. */ static unsigned int rend_service_is_ephemeral(const struct rend_service_t *s) { return (s->directory == NULL); } /** Returns an escaped string representation of the service, <b>s</b>. */ static const char * rend_service_escaped_dir(const struct rend_service_t *s) { return rend_service_is_ephemeral(s) ? "[EPHEMERAL]" : escaped(s->directory); } /** Return the number of rendezvous services we have configured. */ int num_rend_services(void) { if (!rend_service_list) return 0; return smartlist_len(rend_service_list); } /** Helper: free storage held by a single service authorized client entry. */ void rend_authorized_client_free(rend_authorized_client_t *client) { if (!client) return; if (client->client_key) crypto_pk_free(client->client_key); if (client->client_name) memwipe(client->client_name, 0, strlen(client->client_name)); tor_free(client->client_name); memwipe(client->descriptor_cookie, 0, sizeof(client->descriptor_cookie)); tor_free(client); } /** Helper for strmap_free. */ static void rend_authorized_client_strmap_item_free(void *authorized_client) { rend_authorized_client_free(authorized_client); } /** Release the storage held by <b>service</b>. 
*/ STATIC void rend_service_free(rend_service_t *service) { if (!service) return; tor_free(service->directory); if (service->ports) { SMARTLIST_FOREACH(service->ports, rend_service_port_config_t*, p, rend_service_port_config_free(p)); smartlist_free(service->ports); } if (service->private_key) crypto_pk_free(service->private_key); if (service->intro_nodes) { SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro, rend_intro_point_free(intro);); smartlist_free(service->intro_nodes); } if (service->expiring_nodes) { SMARTLIST_FOREACH(service->expiring_nodes, rend_intro_point_t *, intro, rend_intro_point_free(intro);); smartlist_free(service->expiring_nodes); } rend_service_descriptor_free(service->desc); if (service->clients) { SMARTLIST_FOREACH(service->clients, rend_authorized_client_t *, c, rend_authorized_client_free(c);); smartlist_free(service->clients); } if (service->accepted_intro_dh_parts) { replaycache_free(service->accepted_intro_dh_parts); } tor_free(service); } /** Release all the storage held in rend_service_list. */ void rend_service_free_all(void) { if (!rend_service_list) return; SMARTLIST_FOREACH(rend_service_list, rend_service_t*, ptr, rend_service_free(ptr)); smartlist_free(rend_service_list); rend_service_list = NULL; } /** Validate <b>service</b> and add it to <b>service_list</b>, or to * the global rend_service_list if <b>service_list</b> is NULL. * Return 0 on success. On failure, free <b>service</b> and return -1. * Takes ownership of <b>service</b>. */ static int rend_add_service(smartlist_t *service_list, rend_service_t *service) { int i; rend_service_port_config_t *p; tor_assert(service); smartlist_t *s_list = rend_get_service_list_mutable(service_list); /* We must have a service list, even if it's a temporary one, so we can * check for duplicate services */ if (BUG(!s_list)) { return -1; } service->intro_nodes = smartlist_new(); service->expiring_nodes = smartlist_new(); if (service->max_streams_per_circuit < 0) { log_warn(LD_CONFIG, "Hidden service (%s) configured with negative max " "streams per circuit.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } if (service->max_streams_close_circuit < 0 || service->max_streams_close_circuit > 1) { log_warn(LD_CONFIG, "Hidden service (%s) configured with invalid " "max streams handling.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } if (service->auth_type != REND_NO_AUTH && (!service->clients || smartlist_len(service->clients) == 0)) { log_warn(LD_CONFIG, "Hidden service (%s) with client authorization but no " "clients.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } if (!service->ports || !smartlist_len(service->ports)) { log_warn(LD_CONFIG, "Hidden service (%s) with no ports configured.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } else { int dupe = 0; /* XXX This duplicate check has two problems: * * a) It's O(n^2), but the same comment from the bottom of * rend_config_services() should apply. * * b) We only compare directory paths as strings, so we can't * detect two distinct paths that specify the same directory * (which can arise from symlinks, case-insensitivity, bind * mounts, etc.). * * It also can't detect that two separate Tor instances are trying * to use the same HiddenServiceDir; for that, we would need a * lock file. But this is enough to detect a simple mistake that * at least one person has actually made. 
*/ tor_assert(s_list); if (!rend_service_is_ephemeral(service)) { /* Skip dupe for ephemeral services. */ SMARTLIST_FOREACH(s_list, rend_service_t*, ptr, dupe = dupe || !strcmp(ptr->directory, service->directory)); if (dupe) { log_warn(LD_REND, "Another hidden service is already configured for " "directory %s.", rend_service_escaped_dir(service)); rend_service_free(service); return -1; } } log_debug(LD_REND,"Configuring service with directory %s", rend_service_escaped_dir(service)); for (i = 0; i < smartlist_len(service->ports); ++i) { p = smartlist_get(service->ports, i); if (!(p->is_unix_addr)) { log_debug(LD_REND, "Service maps port %d to %s", p->virtual_port, fmt_addrport(&p->real_addr, p->real_port)); } else { #ifdef HAVE_SYS_UN_H log_debug(LD_REND, "Service maps port %d to socket at \"%s\"", p->virtual_port, p->unix_addr); #else log_warn(LD_BUG, "Service maps port %d to an AF_UNIX socket, but we " "have no AF_UNIX support on this platform. This is " "probably a bug.", p->virtual_port); rend_service_free(service); return -1; #endif /* defined(HAVE_SYS_UN_H) */ } } /* The service passed all the checks */ tor_assert(s_list); smartlist_add(s_list, service); return 0; } /* NOTREACHED */ } /** Return a new rend_service_port_config_t with its path set to * <b>socket_path</b> or empty if <b>socket_path</b> is NULL */ static rend_service_port_config_t * rend_service_port_config_new(const char *socket_path) { if (!socket_path) return tor_malloc_zero(sizeof(rend_service_port_config_t) + 1); const size_t pathlen = strlen(socket_path) + 1; rend_service_port_config_t *conf = tor_malloc_zero(sizeof(rend_service_port_config_t) + pathlen); memcpy(conf->unix_addr, socket_path, pathlen); conf->is_unix_addr = 1; return conf; } /** Parses a virtual-port to real-port/socket mapping separated by * the provided separator and returns a new rend_service_port_config_t, * or NULL and an optional error string on failure. * * The format is: VirtualPort SEP (IP|RealPort|IP:RealPort|'socket':path)? * * IP defaults to 127.0.0.1; RealPort defaults to VirtualPort. */ rend_service_port_config_t * rend_service_parse_port_config(const char *string, const char *sep, char **err_msg_out) { smartlist_t *sl; int virtport; int realport = 0; uint16_t p; tor_addr_t addr; rend_service_port_config_t *result = NULL; unsigned int is_unix_addr = 0; const char *socket_path = NULL; char *err_msg = NULL; char *addrport = NULL; sl = smartlist_new(); smartlist_split_string(sl, string, sep, SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 2); if (smartlist_len(sl) < 1 || BUG(smartlist_len(sl) > 2)) { err_msg = tor_strdup("Bad syntax in hidden service port configuration."); goto err; } virtport = (int)tor_parse_long(smartlist_get(sl,0), 10, 1, 65535, NULL,NULL); if (!virtport) { tor_asprintf(&err_msg, "Missing or invalid port %s in hidden service " "port configuration", escaped(smartlist_get(sl,0))); goto err; } if (smartlist_len(sl) == 1) { /* No addr:port part; use default. */ realport = virtport; tor_addr_from_ipv4h(&addr, 0x7F000001u); /* 127.0.0.1 */ } else { int ret; const char *addrport_element = smartlist_get(sl,1); const char *rest = NULL; int is_unix; ret = port_cfg_line_extract_addrport(addrport_element, &addrport, &is_unix, &rest); if (ret < 0) { tor_asprintf(&err_msg, "Couldn't process address <%s> from hidden " "service configuration", addrport_element); goto err; } if (is_unix) { socket_path = addrport; is_unix_addr = 1; } else if (strchr(addrport, ':') || strchr(addrport, '.')) { /* else try it as an IP:port pair if it has a : or . 
in it */ if (tor_addr_port_lookup(addrport, &addr, &p)<0) { err_msg = tor_strdup("Unparseable address in hidden service port " "configuration."); goto err; } realport = p?p:virtport; } else { /* No addr:port, no addr -- must be port. */ realport = (int)tor_parse_long(addrport, 10, 1, 65535, NULL, NULL); if (!realport) { tor_asprintf(&err_msg, "Unparseable or out-of-range port %s in " "hidden service port configuration.", escaped(addrport)); goto err; } tor_addr_from_ipv4h(&addr, 0x7F000001u); /* Default to 127.0.0.1 */ } } /* Allow room for unix_addr */ result = rend_service_port_config_new(socket_path); result->virtual_port = virtport; result->is_unix_addr = is_unix_addr; if (!is_unix_addr) { result->real_port = realport; tor_addr_copy(&result->real_addr, &addr); result->unix_addr[0] = '\0'; } err: tor_free(addrport); if (err_msg_out != NULL) { *err_msg_out = err_msg; } else { tor_free(err_msg); } SMARTLIST_FOREACH(sl, char *, c, tor_free(c)); smartlist_free(sl); return result; } /** Release all storage held in a rend_service_port_config_t. */ void rend_service_port_config_free(rend_service_port_config_t *p) { tor_free(p); } /* Check the directory for <b>service</b>, and add the service to * <b>service_list</b>, or to the global list if <b>service_list</b> is NULL. * Only add the service to the list if <b>validate_only</b> is false. * If <b>validate_only</b> is true, free the service. * If <b>service</b> is NULL, ignore it, and return 0. * Returns 0 on success, and -1 on failure. * Takes ownership of <b>service</b>, either freeing it, or adding it to the * global service list. */ STATIC int rend_service_check_dir_and_add(smartlist_t *service_list, const or_options_t *options, rend_service_t *service, int validate_only) { if (!service) { /* It is ok for a service to be NULL; this means there are no services. */ return 0; } if (rend_service_check_private_dir(options, service, !validate_only) < 0) { rend_service_free(service); return -1; } smartlist_t *s_list = rend_get_service_list_mutable(service_list); /* We must have a service list, even if it's a temporary one, so we can * check for duplicate services. */ if (BUG(!s_list)) { return -1; } return rend_add_service(s_list, service); } /* If this is a reload and there were hidden services configured before, * keep the introduction points that are still needed and close the * other ones. */ STATIC void prune_services_on_reload(smartlist_t *old_service_list, smartlist_t *new_service_list) { origin_circuit_t *ocirc = NULL; smartlist_t *surviving_services = NULL; tor_assert(old_service_list); tor_assert(new_service_list); /* This contains all _existing_ services that survive the reload, that is, * those that haven't been removed from the configuration. The difference * between this list and the new service list is that the new list can * possibly contain newly configured services that have no introduction * points opened yet and no key material loaded or generated. */ surviving_services = smartlist_new(); /* Preserve the existing ephemeral services. * * This is the ephemeral service equivalent of the "Copy introduction * points to new services" block, except there's no copy required since * the service structure isn't regenerated. * * After this is done, all ephemeral services will be: * * Removed from old_service_list, so the equivalent non-ephemeral code * will not attempt to preserve them. * * Added to the new_service_list (that previously only had the * services listed in the configuration). 
* * Added to surviving_services, which is the list of services that * will NOT have their intro points closed. */ SMARTLIST_FOREACH_BEGIN(old_service_list, rend_service_t *, old) { if (rend_service_is_ephemeral(old)) { SMARTLIST_DEL_CURRENT(old_service_list, old); smartlist_add(surviving_services, old); smartlist_add(new_service_list, old); } } SMARTLIST_FOREACH_END(old); /* Copy introduction points to new services. This is O(n^2), but it's only * called on reconfigure, so it's OK performance-wise. */ SMARTLIST_FOREACH_BEGIN(new_service_list, rend_service_t *, new) { SMARTLIST_FOREACH_BEGIN(old_service_list, rend_service_t *, old) { /* Skip ephemeral services, as we only want to copy introduction points * from current services to newly configured ones that already exist. * The same directory means it's the same service. */ if (rend_service_is_ephemeral(new) || rend_service_is_ephemeral(old) || strcmp(old->directory, new->directory)) { continue; } smartlist_add_all(new->intro_nodes, old->intro_nodes); smartlist_clear(old->intro_nodes); smartlist_add_all(new->expiring_nodes, old->expiring_nodes); smartlist_clear(old->expiring_nodes); /* This regular service will survive the intro-point-closing step below. */ smartlist_add(surviving_services, old); break; } SMARTLIST_FOREACH_END(old); } SMARTLIST_FOREACH_END(new); /* For every service introduction circuit we can find, see if we have a * matching surviving configured service. If not, close the circuit. */ while ((ocirc = circuit_get_next_service_intro_circ(ocirc))) { int keep_it = 0; tor_assert(ocirc->rend_data); SMARTLIST_FOREACH_BEGIN(surviving_services, const rend_service_t *, s) { if (rend_circuit_pk_digest_eq(ocirc, (uint8_t *) s->pk_digest)) { /* Keep this circuit, as we have a matching configured service. */ keep_it = 1; break; } } SMARTLIST_FOREACH_END(s); if (keep_it) { continue; } log_info(LD_REND, "Closing intro point %s for service %s.", safe_str_client(extend_info_describe( ocirc->build_state->chosen_exit)), safe_str_client(rend_data_get_address(ocirc->rend_data))); /* The reason is FINISHED because the service has been removed and thus * the circuit is considered old/unneeded. */ circuit_mark_for_close(TO_CIRCUIT(ocirc), END_CIRC_REASON_FINISHED); } smartlist_free(surviving_services); } /** Set up rend_service_list, based on the values of HiddenServiceDir and * HiddenServicePort in <b>options</b>. Return 0 on success and -1 on * failure. (If <b>validate_only</b> is set, parse, warn and return as * normal, but don't actually change the configured services.) 
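*
 * For reference, a minimal torrc fragment consumed by this parser looks
 * like the following (directory and ports are illustrative only):
 *
 *   HiddenServiceDir /var/lib/tor/my_service
 *   HiddenServicePort 80 127.0.0.1:8080
 *   HiddenServiceNumIntroductionPoints 3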
*/ int rend_config_services(const or_options_t *options, int validate_only) { config_line_t *line; rend_service_t *service = NULL; rend_service_port_config_t *portcfg; smartlist_t *old_service_list = NULL; smartlist_t *temp_service_list = NULL; int ok = 0; int rv = -1; /* Use a temporary service list, so that we can check the new services' * consistency with each other */ temp_service_list = smartlist_new(); for (line = options->RendConfigLines; line; line = line->next) { if (!strcasecmp(line->key, "HiddenServiceDir")) { /* register the service we just finished parsing * this code registers every service except the last one parsed, * which is registered below the loop */ if (rend_service_check_dir_and_add(temp_service_list, options, service, validate_only) < 0) { service = NULL; goto free_and_return; } service = tor_malloc_zero(sizeof(rend_service_t)); service->directory = tor_strdup(line->value); service->ports = smartlist_new(); service->intro_period_started = time(NULL); service->n_intro_points_wanted = NUM_INTRO_POINTS_DEFAULT; continue; } if (!service) { log_warn(LD_CONFIG, "%s with no preceding HiddenServiceDir directive", line->key); goto free_and_return; } if (!strcasecmp(line->key, "HiddenServicePort")) { char *err_msg = NULL; portcfg = rend_service_parse_port_config(line->value, " ", &err_msg); if (!portcfg) { if (err_msg) log_warn(LD_CONFIG, "%s", err_msg); tor_free(err_msg); goto free_and_return; } tor_assert(!err_msg); smartlist_add(service->ports, portcfg); } else if (!strcasecmp(line->key, "HiddenServiceAllowUnknownPorts")) { service->allow_unknown_ports = (int)tor_parse_long(line->value, 10, 0, 1, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceAllowUnknownPorts should be 0 or 1, not %s", line->value); goto free_and_return; } log_info(LD_CONFIG, "HiddenServiceAllowUnknownPorts=%d for %s", (int)service->allow_unknown_ports, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceDirGroupReadable")) { service->dir_group_readable = (int)tor_parse_long(line->value, 10, 0, 1, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceDirGroupReadable should be 0 or 1, not %s", line->value); goto free_and_return; } log_info(LD_CONFIG, "HiddenServiceDirGroupReadable=%d for %s", service->dir_group_readable, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceMaxStreams")) { service->max_streams_per_circuit = (int)tor_parse_long(line->value, 10, 0, 65535, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceMaxStreams should be between 0 and %d, not %s", 65535, line->value); goto free_and_return; } log_info(LD_CONFIG, "HiddenServiceMaxStreams=%d for %s", service->max_streams_per_circuit, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceMaxStreamsCloseCircuit")) { service->max_streams_close_circuit = (int)tor_parse_long(line->value, 10, 0, 1, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceMaxStreamsCloseCircuit should be 0 or 1, " "not %s", line->value); goto free_and_return; } log_info(LD_CONFIG, "HiddenServiceMaxStreamsCloseCircuit=%d for %s", (int)service->max_streams_close_circuit, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceNumIntroductionPoints")) { service->n_intro_points_wanted = (unsigned int) tor_parse_long(line->value, 10, 0, NUM_INTRO_POINTS_MAX, &ok, NULL); if (!ok) { log_warn(LD_CONFIG, "HiddenServiceNumIntroductionPoints " "should be between %d and %d, not %s", 0, NUM_INTRO_POINTS_MAX, line->value); goto 
free_and_return; } log_info(LD_CONFIG, "HiddenServiceNumIntroductionPoints=%d for %s", service->n_intro_points_wanted, rend_service_escaped_dir(service)); } else if (!strcasecmp(line->key, "HiddenServiceAuthorizeClient")) { /* Parse auth type and comma-separated list of client names and add a * rend_authorized_client_t for each client to the service's list * of authorized clients. */ smartlist_t *type_names_split, *clients; const char *authname; int num_clients; if (service->auth_type != REND_NO_AUTH) { log_warn(LD_CONFIG, "Got multiple HiddenServiceAuthorizeClient " "lines for a single service."); goto free_and_return; } type_names_split = smartlist_new(); smartlist_split_string(type_names_split, line->value, " ", 0, 2); if (smartlist_len(type_names_split) < 1) { log_warn(LD_BUG, "HiddenServiceAuthorizeClient has no value. This " "should have been prevented when parsing the " "configuration."); goto free_and_return; } authname = smartlist_get(type_names_split, 0); if (!strcasecmp(authname, "basic")) { service->auth_type = REND_BASIC_AUTH; } else if (!strcasecmp(authname, "stealth")) { service->auth_type = REND_STEALTH_AUTH; } else { log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains " "unrecognized auth-type '%s'. Only 'basic' or 'stealth' " "are recognized.", (char *) smartlist_get(type_names_split, 0)); SMARTLIST_FOREACH(type_names_split, char *, cp, tor_free(cp)); smartlist_free(type_names_split); goto free_and_return; } service->clients = smartlist_new(); if (smartlist_len(type_names_split) < 2) { log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains " "auth-type '%s', but no client names.", service->auth_type == REND_BASIC_AUTH ? "basic" : "stealth"); SMARTLIST_FOREACH(type_names_split, char *, cp, tor_free(cp)); smartlist_free(type_names_split); continue; } clients = smartlist_new(); smartlist_split_string(clients, smartlist_get(type_names_split, 1), ",", SPLIT_SKIP_SPACE, 0); SMARTLIST_FOREACH(type_names_split, char *, cp, tor_free(cp)); smartlist_free(type_names_split); /* Remove duplicate client names. */ num_clients = smartlist_len(clients); smartlist_sort_strings(clients); smartlist_uniq_strings(clients); if (smartlist_len(clients) < num_clients) { log_info(LD_CONFIG, "HiddenServiceAuthorizeClient contains %d " "duplicate client name(s); removing.", num_clients - smartlist_len(clients)); num_clients = smartlist_len(clients); } SMARTLIST_FOREACH_BEGIN(clients, const char *, client_name) { rend_authorized_client_t *client; if (!rend_valid_client_name(client_name)) { log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains an " "illegal client name: '%s'. Names must be " "between 1 and %d characters and contain " "only [A-Za-z0-9+_-].", client_name, REND_CLIENTNAME_MAX_LEN); SMARTLIST_FOREACH(clients, char *, cp, tor_free(cp)); smartlist_free(clients); goto free_and_return; } client = tor_malloc_zero(sizeof(rend_authorized_client_t)); client->client_name = tor_strdup(client_name); smartlist_add(service->clients, client); log_debug(LD_REND, "Adding client name '%s'", client_name); } SMARTLIST_FOREACH_END(client_name); SMARTLIST_FOREACH(clients, char *, cp, tor_free(cp)); smartlist_free(clients); /* Ensure maximum number of clients. 
*/ if ((service->auth_type == REND_BASIC_AUTH && smartlist_len(service->clients) > 512) || (service->auth_type == REND_STEALTH_AUTH && smartlist_len(service->clients) > 16)) { log_warn(LD_CONFIG, "HiddenServiceAuthorizeClient contains %d " "client authorization entries, but only a " "maximum of %d entries is allowed for " "authorization type '%s'.", smartlist_len(service->clients), service->auth_type == REND_BASIC_AUTH ? 512 : 16, service->auth_type == REND_BASIC_AUTH ? "basic" : "stealth"); goto free_and_return; } } else { tor_assert(!strcasecmp(line->key, "HiddenServiceVersion")); if (strcmp(line->value, "2")) { log_warn(LD_CONFIG, "The only supported HiddenServiceVersion is 2."); goto free_and_return; } } } /* Register the final service after we have finished parsing all services; * this code only registers the last service, other services are registered * within the loop. It is OK for this service to be NULL; it is ignored. */ if (rend_service_check_dir_and_add(temp_service_list, options, service, validate_only) < 0) { service = NULL; goto free_and_return; } service = NULL; /* Free the newly added services if validating. */ if (validate_only) { rv = 0; goto free_and_return; } /* Otherwise, use the newly added services as the new service list. * Since we have now replaced the global service list, from this point on we * must succeed, or die trying. */ old_service_list = rend_service_list; rend_service_list = temp_service_list; temp_service_list = NULL; /* If this is a reload and there were hidden services configured before, * keep the introduction points that are still needed and close the * other ones. */ if (old_service_list && !validate_only) { prune_services_on_reload(old_service_list, rend_service_list); /* All remaining services in the old list have been removed from the * configuration, so clean them up safely. */ SMARTLIST_FOREACH(old_service_list, rend_service_t *, s, rend_service_free(s)); smartlist_free(old_service_list); } return 0; free_and_return: rend_service_free(service); SMARTLIST_FOREACH(temp_service_list, rend_service_t *, ptr, rend_service_free(ptr)); smartlist_free(temp_service_list); return rv; } /** Add the ephemeral service <b>pk</b>/<b>ports</b> if possible, using * client authorization <b>auth_type</b> and an optional list of * rend_authorized_client_t in <b>auth_clients</b>, with * <b>max_streams_per_circuit</b> streams allowed per rendezvous circuit, * and circuit closure on max streams being exceeded set by * <b>max_streams_close_circuit</b>. * * Ownership of pk, ports, and auth_clients is passed to this routine. * Regardless of success/failure, callers should not touch these values * after calling this routine, and may assume that correct cleanup has * been done on failure. * * Return an appropriate rend_service_add_ephemeral_status_t. */ rend_service_add_ephemeral_status_t rend_service_add_ephemeral(crypto_pk_t *pk, smartlist_t *ports, int max_streams_per_circuit, int max_streams_close_circuit, rend_auth_type_t auth_type, smartlist_t *auth_clients, char **service_id_out) { *service_id_out = NULL; /* Allocate the service structure, and initialize the key and key-derived * parameters. */ rend_service_t *s = tor_malloc_zero(sizeof(rend_service_t)); s->directory = NULL; /* This indicates the service is ephemeral. 
*/ s->private_key = pk; s->auth_type = auth_type; s->clients = auth_clients; s->ports = ports; s->intro_period_started = time(NULL); s->n_intro_points_wanted = NUM_INTRO_POINTS_DEFAULT; s->max_streams_per_circuit = max_streams_per_circuit; s->max_streams_close_circuit = max_streams_close_circuit; if (rend_service_derive_key_digests(s) < 0) { rend_service_free(s); return RSAE_BADPRIVKEY; } if (!s->ports || smartlist_len(s->ports) == 0) { log_warn(LD_CONFIG, "At least one VIRTPORT/TARGET must be specified."); rend_service_free(s); return RSAE_BADVIRTPORT; } if (s->auth_type != REND_NO_AUTH && (!s->clients || smartlist_len(s->clients) == 0)) { log_warn(LD_CONFIG, "At least one authorized client must be specified."); rend_service_free(s); return RSAE_BADAUTH; } /* Enforcing pk/id uniqueness should be done by rend_service_load_keys(), but * it's not, see #14828. */ if (rend_service_get_by_pk_digest(s->pk_digest)) { log_warn(LD_CONFIG, "Onion Service private key collides with an " "existing service."); rend_service_free(s); return RSAE_ADDREXISTS; } if (rend_service_get_by_service_id(s->service_id)) { log_warn(LD_CONFIG, "Onion Service id collides with an existing service."); rend_service_free(s); return RSAE_ADDREXISTS; } /* Initialize the service. */ if (rend_add_service(NULL, s)) { return RSAE_INTERNAL; } *service_id_out = tor_strdup(s->service_id); log_debug(LD_CONFIG, "Added ephemeral Onion Service: %s", s->service_id); return RSAE_OKAY; } /** Remove the ephemeral service <b>service_id</b> if possible. Returns 0 on * success, and -1 on failure. */ int rend_service_del_ephemeral(const char *service_id) { rend_service_t *s; if (!rend_valid_service_id(service_id)) { log_warn(LD_CONFIG, "Requested malformed Onion Service id for removal."); return -1; } if ((s = rend_service_get_by_service_id(service_id)) == NULL) { log_warn(LD_CONFIG, "Requested non-existent Onion Service id for " "removal."); return -1; } if (!rend_service_is_ephemeral(s)) { log_warn(LD_CONFIG, "Requested non-ephemeral Onion Service for removal."); return -1; } /* Kill the intro point circuit for the Onion Service, and remove it from * the list. Closing existing connections is the application's problem. * * XXX: As with the comment in rend_config_services(), a nice abstraction * would be ideal here, but for now just duplicate the code. */ SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) { if (!circ->marked_for_close && (circ->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO || circ->purpose == CIRCUIT_PURPOSE_S_INTRO)) { origin_circuit_t *oc = TO_ORIGIN_CIRCUIT(circ); tor_assert(oc->rend_data); if (!rend_circuit_pk_digest_eq(oc, (uint8_t *) s->pk_digest)) { continue; } log_debug(LD_REND, "Closing intro point %s for service %s.", safe_str_client(extend_info_describe( oc->build_state->chosen_exit)), rend_data_get_address(oc->rend_data)); circuit_mark_for_close(circ, END_CIRC_REASON_FINISHED); } } SMARTLIST_FOREACH_END(circ); smartlist_remove(rend_service_list, s); rend_service_free(s); log_debug(LD_CONFIG, "Removed ephemeral Onion Service: %s", service_id); return 0; } /** Replace the old value of <b>service</b>-\>desc with one that reflects * the other fields in service. 
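* The advertised intro-protocol versions form a bitmask, so the
 * (1 << 2) + (1 << 3) assignment below encodes support for versions 2
 * and 3 (decimal 12).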
*/ static void rend_service_update_descriptor(rend_service_t *service) { rend_service_descriptor_t *d; origin_circuit_t *circ; int i; rend_service_descriptor_free(service->desc); service->desc = NULL; d = service->desc = tor_malloc_zero(sizeof(rend_service_descriptor_t)); d->pk = crypto_pk_dup_key(service->private_key); d->timestamp = time(NULL); d->timestamp -= d->timestamp % 3600; /* Round down to nearest hour */ d->intro_nodes = smartlist_new(); /* Support intro protocols 2 and 3. */ d->protocols = (1 << 2) + (1 << 3); for (i = 0; i < smartlist_len(service->intro_nodes); ++i) { rend_intro_point_t *intro_svc = smartlist_get(service->intro_nodes, i); rend_intro_point_t *intro_desc; /* This intro point won't be listed in the descriptor... */ intro_svc->listed_in_last_desc = 0; circ = find_intro_circuit(intro_svc, service->pk_digest); if (!circ || circ->base_.purpose != CIRCUIT_PURPOSE_S_INTRO) { /* This intro point's circuit isn't finished yet. Don't list it. */ continue; } /* ...unless this intro point is listed in the descriptor. */ intro_svc->listed_in_last_desc = 1; /* We have an entirely established intro circuit. Publish it in * our descriptor. */ intro_desc = tor_malloc_zero(sizeof(rend_intro_point_t)); intro_desc->extend_info = extend_info_dup(intro_svc->extend_info); if (intro_svc->intro_key) intro_desc->intro_key = crypto_pk_dup_key(intro_svc->intro_key); smartlist_add(d->intro_nodes, intro_desc); if (intro_svc->time_published == -1) { /* We are publishing this intro point in a descriptor for the * first time -- note the current time in the service's copy of * the intro point. */ intro_svc->time_published = time(NULL); } } } /* Allocate and return a string containing the path to file_name in * service->directory. Asserts that service has a directory. * This function will never return NULL. * The caller must free this path. */ static char * rend_service_path(const rend_service_t *service, const char *file_name) { char *file_path = NULL; tor_assert(service->directory); /* Can never fail: asserts rather than leaving file_path NULL. */ tor_asprintf(&file_path, "%s%s%s", service->directory, PATH_SEPARATOR, file_name); return file_path; } /* Allocate and return a string containing the path to the single onion * service poison file in service->directory. Asserts that service has a * directory. * The caller must free this path. */ STATIC char * rend_service_sos_poison_path(const rend_service_t *service) { return rend_service_path(service, sos_poison_fname); } /** Return True if hidden services <b>service</b> has been poisoned by single * onion mode. */ static int service_is_single_onion_poisoned(const rend_service_t *service) { char *poison_fname = NULL; file_status_t fstatus; /* Passing a NULL service is a bug */ if (BUG(!service)) { return 0; } if (rend_service_is_ephemeral(service)) { return 0; } poison_fname = rend_service_sos_poison_path(service); fstatus = file_status(poison_fname); tor_free(poison_fname); /* If this fname is occupied, the hidden service has been poisoned. * fstatus can be FN_ERROR if the service directory does not exist, in that * case, there is obviously no private key. */ if (fstatus == FN_FILE || fstatus == FN_EMPTY) { return 1; } return 0; } /* Return 1 if the private key file for service exists and has a non-zero size, * and 0 otherwise. 
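 * (file_status() reports a non-empty regular file as FN_FILE and an empty
 * one as FN_EMPTY; only FN_FILE counts here, since an empty key file could
 * never have held a usable key.)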
*/ static int rend_service_private_key_exists(const rend_service_t *service) { char *private_key_path = rend_service_path(service, private_key_fname); const file_status_t private_key_status = file_status(private_key_path); tor_free(private_key_path); /* Only non-empty regular private key files could have been used before. * fstatus can be FN_ERROR if the service directory does not exist, in that * case, there is obviously no private key. */ return private_key_status == FN_FILE; } /** Check the single onion service poison state of the directory for s: * - If the service is poisoned, and we are in Single Onion Mode, * return 0, * - If the service is not poisoned, and we are not in Single Onion Mode, * return 0, * - Otherwise, the poison state is invalid: the service was created in one * mode, and is being used in the other, return -1. * Hidden service directories without keys are always considered consistent. * They will be poisoned after their directory is created (if needed). */ STATIC int rend_service_verify_single_onion_poison(const rend_service_t* s, const or_options_t* options) { /* Passing a NULL service is a bug */ if (BUG(!s)) { return -1; } /* Ephemeral services are checked at ADD_ONION time */ if (BUG(rend_service_is_ephemeral(s))) { return -1; } /* Service is expected to have a directory */ if (BUG(!s->directory)) { return -1; } /* Services without keys are always ok - their keys will only ever be used * in the current mode */ if (!rend_service_private_key_exists(s)) { return 0; } /* The key has been used before in a different mode */ if (service_is_single_onion_poisoned(s) != rend_service_non_anonymous_mode_enabled(options)) { return -1; } /* The key exists and is consistent with the current mode */ return 0; } /*** Helper for rend_service_poison_new_single_onion_dir(). Add a file to * the hidden service directory for s that marks it as a single onion service. * Tor must be in single onion mode before calling this function, and the * service directory must already have been created. * Returns 0 when a directory is successfully poisoned, or if it is already * poisoned. Returns -1 on a failure to read the directory or write the poison * file, or if there is an existing private key file in the directory. (The * service should have been poisoned when the key was created.) */ static int poison_new_single_onion_hidden_service_dir_impl(const rend_service_t *service, const or_options_t* options) { /* Passing a NULL service is a bug */ if (BUG(!service)) { return -1; } /* We must only poison directories if we're in Single Onion mode */ tor_assert(rend_service_non_anonymous_mode_enabled(options)); int fd; int retval = -1; char *poison_fname = NULL; if (rend_service_is_ephemeral(service)) { log_info(LD_REND, "Ephemeral HS started in non-anonymous mode."); return 0; } /* Make sure we're only poisoning new hidden service directories */ if (rend_service_private_key_exists(service)) { log_warn(LD_BUG, "Tried to single onion poison a service directory after " "the private key was created."); return -1; } /* Make sure the directory was created before calling this function. */ if (BUG(rend_service_check_private_dir_impl(options, service, 0) < 0)) return -1; poison_fname = rend_service_sos_poison_path(service); switch (file_status(poison_fname)) { case FN_DIR: case FN_ERROR: log_warn(LD_FS, "Can't read single onion poison file \"%s\"", poison_fname); goto done; case FN_FILE: /* single onion poison file already exists. NOP. */ case FN_EMPTY: /* single onion poison file already exists. NOP. 
*/
      log_debug(LD_FS,
                "Tried to re-poison a single onion poisoned file \"%s\"",
                poison_fname);
      break;
    case FN_NOENT:
      fd = tor_open_cloexec(poison_fname, O_RDWR|O_CREAT|O_TRUNC, 0600);
      if (fd < 0) {
        log_warn(LD_FS, "Could not create single onion poison file %s",
                 poison_fname);
        goto done;
      }
      close(fd);
      break;
    default:
      tor_assert(0);
  }

  retval = 0;

 done:
  tor_free(poison_fname);
  return retval;
}

/** We just got launched in Single Onion Mode. That's a non-anonymous mode for
 * hidden services. If s is new, we should mark its hidden service
 * directory appropriately so that it is never launched as a location-private
 * hidden service. (New directories don't have private key files.)
 * Return 0 on success, -1 on failure. */
STATIC int
rend_service_poison_new_single_onion_dir(const rend_service_t *s,
                                         const or_options_t* options)
{
  /* Passing a NULL service is a bug */
  if (BUG(!s)) {
    return -1;
  }

  /* We must only poison directories if we're in Single Onion mode */
  tor_assert(rend_service_non_anonymous_mode_enabled(options));

  /* Ephemeral services aren't allowed in non-anonymous mode */
  if (BUG(rend_service_is_ephemeral(s))) {
    return -1;
  }

  /* Service is expected to have a directory */
  if (BUG(!s->directory)) {
    return -1;
  }

  if (!rend_service_private_key_exists(s)) {
    if (poison_new_single_onion_hidden_service_dir_impl(s, options) < 0) {
      return -1;
    }
  }

  return 0;
}

/** Load and/or generate private keys for all hidden services, possibly
 * including keys for client authorization.
 * If a <b>service_list</b> is provided, treat it as the list of hidden
 * services (used in unittests). Otherwise, require that rend_service_list is
 * not NULL.
 * Return 0 on success, -1 on failure. */
int
rend_service_load_all_keys(const smartlist_t *service_list)
{
  /* Use service_list for unit tests */
  const smartlist_t *s_list = rend_get_service_list(service_list);
  if (BUG(!s_list)) {
    return -1;
  }

  SMARTLIST_FOREACH_BEGIN(s_list, rend_service_t *, s) {
    if (s->private_key)
      continue;
    log_info(LD_REND, "Loading hidden-service keys from %s",
             rend_service_escaped_dir(s));

    if (rend_service_load_keys(s) < 0)
      return -1;
  } SMARTLIST_FOREACH_END(s);

  return 0;
}

/** Add to <b>lst</b> every filename used by <b>s</b>. */
static void
rend_service_add_filenames_to_list(smartlist_t *lst, const rend_service_t *s)
{
  tor_assert(lst);
  tor_assert(s);
  tor_assert(s->directory);
  smartlist_add(lst, rend_service_path(s, private_key_fname));
  smartlist_add(lst, rend_service_path(s, hostname_fname));
  smartlist_add(lst, rend_service_path(s, client_keys_fname));
  smartlist_add(lst, rend_service_sos_poison_path(s));
}

/** Add to <b>open_lst</b> every filename used by a configured hidden service,
 * and to <b>stat_lst</b> every directory used by a configured hidden
 * service. */
void
rend_services_add_filenames_to_lists(smartlist_t *open_lst,
                                     smartlist_t *stat_lst)
{
  if (!rend_service_list)
    return;
  SMARTLIST_FOREACH_BEGIN(rend_service_list, rend_service_t *, s) {
    if (!rend_service_is_ephemeral(s)) {
      rend_service_add_filenames_to_list(open_lst, s);
      smartlist_add_strdup(stat_lst, s->directory);
    }
  } SMARTLIST_FOREACH_END(s);
}

/** Derive all rend_service_t internal material based on the service's key.
 * Returns 0 on success, -1 on failure.
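 *
 * Schematically, for a v2 onion service (editor's sketch; the helpers
 * called below are defined elsewhere in the codebase):
 *
 *   pk_digest  = SHA1(DER(public key))       (crypto_pk_get_digest)
 *   service_id = base32(pk_digest[0..9])     (rend_get_service_id)
 *
 * so the "<service_id>.onion" address is a truncated hash of the
 * service's RSA public key.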
*/ static int rend_service_derive_key_digests(struct rend_service_t *s) { if (rend_get_service_id(s->private_key, s->service_id)<0) { log_warn(LD_BUG, "Internal error: couldn't encode service ID."); return -1; } if (crypto_pk_get_digest(s->private_key, s->pk_digest)<0) { log_warn(LD_BUG, "Couldn't compute hash of public key."); return -1; } return 0; } /* Implements the directory check from rend_service_check_private_dir, * without doing the single onion poison checks. */ static int rend_service_check_private_dir_impl(const or_options_t *options, const rend_service_t *s, int create) { cpd_check_t check_opts = CPD_NONE; if (create) { check_opts |= CPD_CREATE; } else { check_opts |= CPD_CHECK_MODE_ONLY; check_opts |= CPD_CHECK; } if (s->dir_group_readable) { check_opts |= CPD_GROUP_READ; } /* Check/create directory */ if (check_private_dir(s->directory, check_opts, options->User) < 0) { log_warn(LD_REND, "Checking service directory %s failed.", s->directory); return -1; } return 0; } /** Make sure that the directory for <b>s</b> is private, using the config in * <b>options</b>. * If <b>create</b> is true: * - if the directory exists, change permissions if needed, * - if the directory does not exist, create it with the correct permissions. * If <b>create</b> is false: * - if the directory exists, check permissions, * - if the directory does not exist, check if we think we can create it. * Return 0 on success, -1 on failure. */ static int rend_service_check_private_dir(const or_options_t *options, const rend_service_t *s, int create) { /* Passing a NULL service is a bug */ if (BUG(!s)) { return -1; } /* Check/create directory */ if (rend_service_check_private_dir_impl(options, s, create) < 0) { return -1; } /* Check if the hidden service key exists, and was created in a different * single onion service mode, and refuse to launch if it has. * This is safe to call even when create is false, as it ignores missing * keys and directories: they are always valid. */ if (rend_service_verify_single_onion_poison(s, options) < 0) { /* We can't use s->service_id here, as the key may not have been loaded */ log_warn(LD_GENERAL, "We are configured with " "HiddenServiceNonAnonymousMode %d, but the hidden " "service key in directory %s was created in %s mode. " "This is not allowed.", rend_service_non_anonymous_mode_enabled(options) ? 1 : 0, rend_service_escaped_dir(s), rend_service_non_anonymous_mode_enabled(options) ? "an anonymous" : "a non-anonymous" ); return -1; } /* Poison new single onion directories immediately after they are created, * so that we never accidentally launch non-anonymous hidden services * thinking they are anonymous. Any keys created later will end up with the * correct poisoning state. */ if (create && rend_service_non_anonymous_mode_enabled(options)) { static int logged_warning = 0; if (rend_service_poison_new_single_onion_dir(s, options) < 0) { log_warn(LD_GENERAL,"Failed to mark new hidden services as non-anonymous" "."); return -1; } if (!logged_warning) { /* The keys for these services are linked to the server IP address */ log_notice(LD_REND, "The configured onion service directories have been " "used in single onion mode. They can not be used for " "anonymous hidden services."); logged_warning = 1; } } return 0; } /** Load and/or generate private keys for the hidden service <b>s</b>, * possibly including keys for client authorization. Return 0 on success, -1 * on failure. 
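 * On success, the service directory holds at least the private key file
 * (private_key_fname) and a hostname file containing the line
 * "<service_id>.onion"; when client authorization is configured, a
 * client_keys file is regenerated as well (see
 * rend_service_load_auth_keys() below).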
*/ static int rend_service_load_keys(rend_service_t *s) { char *fname = NULL; char buf[128]; /* Make sure the directory was created and single onion poisoning was * checked before calling this function */ if (BUG(rend_service_check_private_dir(get_options(), s, 0) < 0)) goto err; /* Load key */ fname = rend_service_path(s, private_key_fname); s->private_key = init_key_from_file(fname, 1, LOG_ERR, 0); if (!s->private_key) goto err; if (rend_service_derive_key_digests(s) < 0) goto err; tor_free(fname); /* Create service file */ fname = rend_service_path(s, hostname_fname); tor_snprintf(buf, sizeof(buf),"%s.onion\n", s->service_id); if (write_str_to_file(fname,buf,0)<0) { log_warn(LD_CONFIG, "Could not write onion address to hostname file."); goto err; } #ifndef _WIN32 if (s->dir_group_readable) { /* Also verify hostname file created with group read. */ if (chmod(fname, 0640)) log_warn(LD_FS,"Unable to make hidden hostname file %s group-readable.", fname); } #endif /* If client authorization is configured, load or generate keys. */ if (s->auth_type != REND_NO_AUTH) { if (rend_service_load_auth_keys(s, fname) < 0) { goto err; } } int r = 0; goto done; err: r = -1; done: memwipe(buf, 0, sizeof(buf)); tor_free(fname); return r; } /** Load and/or generate client authorization keys for the hidden service * <b>s</b>, which stores its hostname in <b>hfname</b>. Return 0 on success, * -1 on failure. */ static int rend_service_load_auth_keys(rend_service_t *s, const char *hfname) { int r = 0; char *cfname = NULL; char *client_keys_str = NULL; strmap_t *parsed_clients = strmap_new(); FILE *cfile, *hfile; open_file_t *open_cfile = NULL, *open_hfile = NULL; char desc_cook_out[3*REND_DESC_COOKIE_LEN_BASE64+1]; char service_id[16+1]; char buf[1500]; /* Load client keys and descriptor cookies, if available. */ cfname = rend_service_path(s, client_keys_fname); client_keys_str = read_file_to_str(cfname, RFTS_IGNORE_MISSING, NULL); if (client_keys_str) { if (rend_parse_client_keys(parsed_clients, client_keys_str) < 0) { log_warn(LD_CONFIG, "Previously stored client_keys file could not " "be parsed."); goto err; } else { log_info(LD_CONFIG, "Parsed %d previously stored client entries.", strmap_size(parsed_clients)); } } /* Prepare client_keys and hostname files. */ if (!(cfile = start_writing_to_stdio_file(cfname, OPEN_FLAGS_REPLACE | O_TEXT, 0600, &open_cfile))) { log_warn(LD_CONFIG, "Could not open client_keys file %s", escaped(cfname)); goto err; } if (!(hfile = start_writing_to_stdio_file(hfname, OPEN_FLAGS_REPLACE | O_TEXT, 0600, &open_hfile))) { log_warn(LD_CONFIG, "Could not open hostname file %s", escaped(hfname)); goto err; } /* Either use loaded keys for configured clients or generate new * ones if a client is new. */ SMARTLIST_FOREACH_BEGIN(s->clients, rend_authorized_client_t *, client) { rend_authorized_client_t *parsed = strmap_get(parsed_clients, client->client_name); int written; size_t len; /* Copy descriptor cookie from parsed entry or create new one. */ if (parsed) { memcpy(client->descriptor_cookie, parsed->descriptor_cookie, REND_DESC_COOKIE_LEN); } else { crypto_rand((char *) client->descriptor_cookie, REND_DESC_COOKIE_LEN); } /* For compatibility with older tor clients, this does not * truncate the padding characters, unlike rend_auth_encode_cookie. 
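 * (A REND_DESC_COOKIE_LEN == 16 byte cookie base64-encodes to 22
 * significant characters plus "==" padding; the padded form is what goes
 * into client_keys, while rend_auth_encode_cookie() emits the stripped
 * form used for the hostname file further down.)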
*/ if (base64_encode(desc_cook_out, 3*REND_DESC_COOKIE_LEN_BASE64+1, (char *) client->descriptor_cookie, REND_DESC_COOKIE_LEN, 0) < 0) { log_warn(LD_BUG, "Could not base64-encode descriptor cookie."); goto err; } /* Copy client key from parsed entry or create new one if required. */ if (parsed && parsed->client_key) { client->client_key = crypto_pk_dup_key(parsed->client_key); } else if (s->auth_type == REND_STEALTH_AUTH) { /* Create private key for client. */ crypto_pk_t *prkey = NULL; if (!(prkey = crypto_pk_new())) { log_warn(LD_BUG,"Error constructing client key"); goto err; } if (crypto_pk_generate_key(prkey)) { log_warn(LD_BUG,"Error generating client key"); crypto_pk_free(prkey); goto err; } if (crypto_pk_check_key(prkey) <= 0) { log_warn(LD_BUG,"Generated client key seems invalid"); crypto_pk_free(prkey); goto err; } client->client_key = prkey; } /* Add entry to client_keys file. */ written = tor_snprintf(buf, sizeof(buf), "client-name %s\ndescriptor-cookie %s\n", client->client_name, desc_cook_out); if (written < 0) { log_warn(LD_BUG, "Could not write client entry."); goto err; } if (client->client_key) { char *client_key_out = NULL; if (crypto_pk_write_private_key_to_string(client->client_key, &client_key_out, &len) != 0) { log_warn(LD_BUG, "Internal error: " "crypto_pk_write_private_key_to_string() failed."); goto err; } if (rend_get_service_id(client->client_key, service_id)<0) { log_warn(LD_BUG, "Internal error: couldn't encode service ID."); /* * len is string length, not buffer length, but last byte is NUL * anyway. */ memwipe(client_key_out, 0, len); tor_free(client_key_out); goto err; } written = tor_snprintf(buf + written, sizeof(buf) - written, "client-key\n%s", client_key_out); memwipe(client_key_out, 0, len); tor_free(client_key_out); if (written < 0) { log_warn(LD_BUG, "Could not write client entry."); goto err; } } else { strlcpy(service_id, s->service_id, sizeof(service_id)); } if (fputs(buf, cfile) < 0) { log_warn(LD_FS, "Could not append client entry to file: %s", strerror(errno)); goto err; } /* Add line to hostname file. This is not the same encoding as in * client_keys. */ char *encoded_cookie = rend_auth_encode_cookie(client->descriptor_cookie, s->auth_type); if (!encoded_cookie) { log_warn(LD_BUG, "Could not base64-encode descriptor cookie."); goto err; } tor_snprintf(buf, sizeof(buf), "%s.onion %s # client: %s\n", service_id, encoded_cookie, client->client_name); memwipe(encoded_cookie, 0, strlen(encoded_cookie)); tor_free(encoded_cookie); if (fputs(buf, hfile)<0) { log_warn(LD_FS, "Could not append host entry to file: %s", strerror(errno)); goto err; } } SMARTLIST_FOREACH_END(client); finish_writing_to_file(open_cfile); finish_writing_to_file(open_hfile); goto done; err: r = -1; if (open_cfile) abort_writing_to_file(open_cfile); if (open_hfile) abort_writing_to_file(open_hfile); done: if (client_keys_str) { memwipe(client_keys_str, 0, strlen(client_keys_str)); tor_free(client_keys_str); } strmap_free(parsed_clients, rend_authorized_client_strmap_item_free); if (cfname) { memwipe(cfname, 0, strlen(cfname)); tor_free(cfname); } /* Clear stack buffers that held key-derived material. */ memwipe(buf, 0, sizeof(buf)); memwipe(desc_cook_out, 0, sizeof(desc_cook_out)); memwipe(service_id, 0, sizeof(service_id)); return r; } /** Return the service whose public key has a digest of <b>digest</b>, or * NULL if no such service exists. 
*/ static rend_service_t * rend_service_get_by_pk_digest(const char* digest) { SMARTLIST_FOREACH(rend_service_list, rend_service_t*, s, if (tor_memeq(s->pk_digest,digest,DIGEST_LEN)) return s); return NULL; } /** Return the service whose service id is <b>id</b>, or NULL if no such * service exists. */ static struct rend_service_t * rend_service_get_by_service_id(const char *id) { tor_assert(strlen(id) == REND_SERVICE_ID_LEN_BASE32); SMARTLIST_FOREACH(rend_service_list, rend_service_t*, s, { if (tor_memeq(s->service_id, id, REND_SERVICE_ID_LEN_BASE32)) return s; }); return NULL; } /** Return 1 if any virtual port in <b>service</b> wants a circuit * to have good uptime. Else return 0. */ static int rend_service_requires_uptime(rend_service_t *service) { int i; rend_service_port_config_t *p; for (i=0; i < smartlist_len(service->ports); ++i) { p = smartlist_get(service->ports, i); if (smartlist_contains_int_as_string(get_options()->LongLivedPorts, p->virtual_port)) return 1; } return 0; } /** Check client authorization of a given <b>descriptor_cookie</b> of * length <b>cookie_len</b> for <b>service</b>. Return 1 for success * and 0 for failure. */ static int rend_check_authorization(rend_service_t *service, const char *descriptor_cookie, size_t cookie_len) { rend_authorized_client_t *auth_client = NULL; tor_assert(service); tor_assert(descriptor_cookie); if (!service->clients) { log_warn(LD_BUG, "Can't check authorization for a service that has no " "authorized clients configured."); return 0; } if (cookie_len != REND_DESC_COOKIE_LEN) { log_info(LD_REND, "Descriptor cookie is %lu bytes, but we expected " "%lu bytes. Dropping cell.", (unsigned long)cookie_len, (unsigned long)REND_DESC_COOKIE_LEN); return 0; } /* Look up client authorization by descriptor cookie. */ SMARTLIST_FOREACH(service->clients, rend_authorized_client_t *, client, { if (tor_memeq(client->descriptor_cookie, descriptor_cookie, REND_DESC_COOKIE_LEN)) { auth_client = client; break; } }); if (!auth_client) { char descriptor_cookie_base64[3*REND_DESC_COOKIE_LEN_BASE64]; base64_encode(descriptor_cookie_base64, sizeof(descriptor_cookie_base64), descriptor_cookie, REND_DESC_COOKIE_LEN, 0); log_info(LD_REND, "No authorization found for descriptor cookie '%s'! " "Dropping cell!", descriptor_cookie_base64); return 0; } /* Allow the request. */ log_info(LD_REND, "Client %s authorized for service %s.", auth_client->client_name, service->service_id); return 1; } /* Can this service make a direct connection to ei? * It must be a single onion service, and the firewall rules must allow ei. */ static int rend_service_use_direct_connection(const or_options_t* options, const extend_info_t* ei) { /* We'll connect directly all reachable addresses, whether preferred or not. * The prefer_ipv6 argument to fascist_firewall_allows_address_addr is * ignored, because pref_only is 0. */ return (rend_service_allow_non_anonymous_connection(options) && fascist_firewall_allows_address_addr(&ei->addr, ei->port, FIREWALL_OR_CONNECTION, 0, 0)); } /* Like rend_service_use_direct_connection, but to a node. */ static int rend_service_use_direct_connection_node(const or_options_t* options, const node_t* node) { /* We'll connect directly all reachable addresses, whether preferred or not. */ return (rend_service_allow_non_anonymous_connection(options) && fascist_firewall_allows_node(node, FIREWALL_OR_CONNECTION, 0)); } /****** * Handle cells ******/ /** Respond to an INTRODUCE2 cell by launching a circuit to the chosen * rendezvous point. 
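 *
 * Processing outline, mirroring the stages in the body below:
 *   1. Sanity-check the circuit purpose and look up the service and the
 *      intro point the cell arrived on.
 *   2. Early-parse the cell (key digest + ciphertext) and check the
 *      RSA-encrypted part against the intro point's replay cache.
 *   3. Decrypt with the intro key, then parse and validate the plaintext.
 *   4. Find the rendezvous point and check the DH public key against the
 *      service-wide replay cache (and any client authorization data).
 *   5. Complete the DH handshake and launch a circuit to the rendezvous
 *      point, retrying up to MAX_REND_FAILURES times.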
*/ int rend_service_receive_introduction(origin_circuit_t *circuit, const uint8_t *request, size_t request_len) { /* Global status stuff */ int status = 0, result; const or_options_t *options = get_options(); char *err_msg = NULL; int err_msg_severity = LOG_WARN; const char *stage_descr = NULL, *rend_pk_digest; int reason = END_CIRC_REASON_TORPROTOCOL; /* Service/circuit/key stuff we can learn before parsing */ char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; rend_service_t *service = NULL; rend_intro_point_t *intro_point = NULL; crypto_pk_t *intro_key = NULL; /* Parsed cell */ rend_intro_cell_t *parsed_req = NULL; /* Rendezvous point */ extend_info_t *rp = NULL; /* XXX not handled yet */ char buf[RELAY_PAYLOAD_SIZE]; char keys[DIGEST_LEN+CPATH_KEY_MATERIAL_LEN]; /* Holds KH, Df, Db, Kf, Kb */ int i; crypto_dh_t *dh = NULL; origin_circuit_t *launched = NULL; crypt_path_t *cpath = NULL; char hexcookie[9]; int circ_needs_uptime; time_t now = time(NULL); time_t elapsed; int replay; /* Do some initial validation and logging before we parse the cell */ if (circuit->base_.purpose != CIRCUIT_PURPOSE_S_INTRO) { log_warn(LD_PROTOCOL, "Got an INTRODUCE2 over a non-introduction circuit %u.", (unsigned) circuit->base_.n_circ_id); goto err; } assert_circ_anonymity_ok(circuit, options); tor_assert(circuit->rend_data); /* XXX: This is version 2 specific (only one supported). */ rend_pk_digest = (char *) rend_data_get_pk_digest(circuit->rend_data, NULL); /* We'll use this in a bazillion log messages */ base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1, rend_pk_digest, REND_SERVICE_ID_LEN); /* look up service depending on circuit. */ service = rend_service_get_by_pk_digest(rend_pk_digest); if (!service) { log_warn(LD_BUG, "Internal error: Got an INTRODUCE2 cell on an intro " "circ for an unrecognized service %s.", escaped(serviceid)); goto err; } intro_point = find_intro_point(circuit); if (intro_point == NULL) { intro_point = find_expiring_intro_point(service, circuit); if (intro_point == NULL) { log_warn(LD_BUG, "Internal error: Got an INTRODUCE2 cell on an " "intro circ (for service %s) with no corresponding " "rend_intro_point_t.", escaped(serviceid)); goto err; } } log_info(LD_REND, "Received INTRODUCE2 cell for service %s on circ %u.", escaped(serviceid), (unsigned)circuit->base_.n_circ_id); /* use intro key instead of service key. */ intro_key = circuit->intro_key; tor_free(err_msg); stage_descr = NULL; stage_descr = "early parsing"; /* Early parsing pass (get pk, ciphertext); type 2 is INTRODUCE2 */ parsed_req = rend_service_begin_parse_intro(request, request_len, 2, &err_msg); if (!parsed_req) { goto log_error; } else if (err_msg) { log_info(LD_REND, "%s on circ %u.", err_msg, (unsigned)circuit->base_.n_circ_id); tor_free(err_msg); } /* make sure service replay caches are present */ if (!service->accepted_intro_dh_parts) { service->accepted_intro_dh_parts = replaycache_new(REND_REPLAY_TIME_INTERVAL, REND_REPLAY_TIME_INTERVAL); } if (!intro_point->accepted_intro_rsa_parts) { intro_point->accepted_intro_rsa_parts = replaycache_new(0, 0); } /* check for replay of PK-encrypted portion. */ replay = replaycache_add_test_and_elapsed( intro_point->accepted_intro_rsa_parts, parsed_req->ciphertext, parsed_req->ciphertext_len, &elapsed); if (replay) { log_warn(LD_REND, "Possible replay detected! We received an " "INTRODUCE2 cell with same PK-encrypted part %d " "seconds ago. 
Dropping cell.", (int)elapsed); goto err; } stage_descr = "decryption"; /* Now try to decrypt it */ result = rend_service_decrypt_intro(parsed_req, intro_key, &err_msg); if (result < 0) { goto log_error; } else if (err_msg) { log_info(LD_REND, "%s on circ %u.", err_msg, (unsigned)circuit->base_.n_circ_id); tor_free(err_msg); } stage_descr = "late parsing"; /* Parse the plaintext */ result = rend_service_parse_intro_plaintext(parsed_req, &err_msg); if (result < 0) { goto log_error; } else if (err_msg) { log_info(LD_REND, "%s on circ %u.", err_msg, (unsigned)circuit->base_.n_circ_id); tor_free(err_msg); } stage_descr = "late validation"; /* Validate the parsed plaintext parts */ result = rend_service_validate_intro_late(parsed_req, &err_msg); if (result < 0) { goto log_error; } else if (err_msg) { log_info(LD_REND, "%s on circ %u.", err_msg, (unsigned)circuit->base_.n_circ_id); tor_free(err_msg); } stage_descr = NULL; /* Increment INTRODUCE2 counter */ ++(intro_point->accepted_introduce2_count); /* Find the rendezvous point */ rp = find_rp_for_intro(parsed_req, &err_msg); if (!rp) { err_msg_severity = LOG_PROTOCOL_WARN; goto log_error; } /* Check if we'd refuse to talk to this router */ if (options->StrictNodes && routerset_contains_extendinfo(options->ExcludeNodes, rp)) { log_warn(LD_REND, "Client asked to rendezvous at a relay that we " "exclude, and StrictNodes is set. Refusing service."); reason = END_CIRC_REASON_INTERNAL; /* XXX might leak why we refused */ goto err; } base16_encode(hexcookie, 9, (const char *)(parsed_req->rc), 4); /* Check whether there is a past request with the same Diffie-Hellman, * part 1. */ replay = replaycache_add_test_and_elapsed( service->accepted_intro_dh_parts, parsed_req->dh, DH_KEY_LEN, &elapsed); if (replay) { /* A Tor client will send a new INTRODUCE1 cell with the same rend * cookie and DH public key as its previous one if its intro circ * times out while in state CIRCUIT_PURPOSE_C_INTRODUCE_ACK_WAIT . * If we received the first INTRODUCE1 cell (the intro-point relay * converts it into an INTRODUCE2 cell), we are already trying to * connect to that rend point (and may have already succeeded); * drop this cell. */ log_info(LD_REND, "We received an " "INTRODUCE2 cell with same first part of " "Diffie-Hellman handshake %d seconds ago. Dropping " "cell.", (int) elapsed); goto err; } /* If the service performs client authorization, check included auth data. */ if (service->clients) { if (parsed_req->version == 3 && parsed_req->u.v3.auth_len > 0) { if (rend_check_authorization(service, (const char*)parsed_req->u.v3.auth_data, parsed_req->u.v3.auth_len)) { log_info(LD_REND, "Authorization data in INTRODUCE2 cell are valid."); } else { log_info(LD_REND, "The authorization data that are contained in " "the INTRODUCE2 cell are invalid. Dropping cell."); reason = END_CIRC_REASON_CONNECTFAILED; goto err; } } else { log_info(LD_REND, "INTRODUCE2 cell does not contain authentication " "data, but we require client authorization. Dropping cell."); reason = END_CIRC_REASON_CONNECTFAILED; goto err; } } /* Try DH handshake... 
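 *
 * The shared secret computed below fills keys[] with DIGEST_LEN +
 * CPATH_KEY_MATERIAL_LEN bytes: the first DIGEST_LEN bytes become the
 * rendezvous circuit nonce (KH), and the remainder seeds the circuit
 * crypto (Df, Db, Kf, Kb) via circuit_init_cpath_crypto() further down.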
*/ dh = crypto_dh_new(DH_TYPE_REND); if (!dh || crypto_dh_generate_public(dh)<0) { log_warn(LD_BUG,"Internal error: couldn't build DH state " "or generate public key."); reason = END_CIRC_REASON_INTERNAL; goto err; } if (crypto_dh_compute_secret(LOG_PROTOCOL_WARN, dh, (char *)(parsed_req->dh), DH_KEY_LEN, keys, DIGEST_LEN+CPATH_KEY_MATERIAL_LEN)<0) { log_warn(LD_BUG, "Internal error: couldn't complete DH handshake"); reason = END_CIRC_REASON_INTERNAL; goto err; } circ_needs_uptime = rend_service_requires_uptime(service); /* help predict this next time */ rep_hist_note_used_internal(now, circ_needs_uptime, 1); /* Launch a circuit to the client's chosen rendezvous point. */ for (i=0;i<MAX_REND_FAILURES;i++) { int flags = CIRCLAUNCH_NEED_CAPACITY | CIRCLAUNCH_IS_INTERNAL; if (circ_needs_uptime) flags |= CIRCLAUNCH_NEED_UPTIME; /* A Single Onion Service only uses a direct connection if its * firewall rules permit direct connections to the address. */ if (rend_service_use_direct_connection(options, rp)) { flags = flags | CIRCLAUNCH_ONEHOP_TUNNEL; } launched = circuit_launch_by_extend_info( CIRCUIT_PURPOSE_S_CONNECT_REND, rp, flags); if (launched) break; } if (!launched) { /* give up */ log_warn(LD_REND, "Giving up launching first hop of circuit to rendezvous " "point %s for service %s.", safe_str_client(extend_info_describe(rp)), serviceid); reason = END_CIRC_REASON_CONNECTFAILED; goto err; } log_info(LD_REND, "Accepted intro; launching circuit to %s " "(cookie %s) for service %s.", safe_str_client(extend_info_describe(rp)), hexcookie, serviceid); tor_assert(launched->build_state); /* Fill in the circuit's state. */ launched->rend_data = rend_data_service_create(service->service_id, rend_pk_digest, parsed_req->rc, service->auth_type); launched->build_state->service_pending_final_cpath_ref = tor_malloc_zero(sizeof(crypt_path_reference_t)); launched->build_state->service_pending_final_cpath_ref->refcount = 1; launched->build_state->service_pending_final_cpath_ref->cpath = cpath = tor_malloc_zero(sizeof(crypt_path_t)); cpath->magic = CRYPT_PATH_MAGIC; launched->build_state->expiry_time = now + MAX_REND_TIMEOUT; cpath->rend_dh_handshake_state = dh; dh = NULL; if (circuit_init_cpath_crypto(cpath,keys+DIGEST_LEN,1)<0) goto err; memcpy(cpath->rend_circ_nonce, keys, DIGEST_LEN); goto done; log_error: if (!err_msg) { if (stage_descr) { tor_asprintf(&err_msg, "unknown %s error for INTRODUCE2", stage_descr); } else { err_msg = tor_strdup("unknown error for INTRODUCE2"); } } log_fn(err_msg_severity, LD_REND, "%s on circ %u", err_msg, (unsigned)circuit->base_.n_circ_id); err: status = -1; if (dh) crypto_dh_free(dh); if (launched) { circuit_mark_for_close(TO_CIRCUIT(launched), reason); } tor_free(err_msg); done: memwipe(keys, 0, sizeof(keys)); memwipe(buf, 0, sizeof(buf)); memwipe(serviceid, 0, sizeof(serviceid)); memwipe(hexcookie, 0, sizeof(hexcookie)); /* Free the parsed cell */ rend_service_free_intro(parsed_req); /* Free rp */ extend_info_free(rp); return status; } /** Given a parsed and decrypted INTRODUCE2, find the rendezvous point or * return NULL and an error string if we can't. Return a newly allocated * extend_info_t* for the rendezvous point. 
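 *
 * How the RP is identified depends on the cell version: v0/v1 cells carry
 * only a relay nickname, which must be resolved against our current view
 * of the network, while v2/v3 cells embed a full extend_info (address,
 * port, identity digest, and onion key) that can be duplicated directly.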
*/
static extend_info_t *
find_rp_for_intro(const rend_intro_cell_t *intro,
                  char **err_msg_out)
{
  extend_info_t *rp = NULL;
  char *err_msg = NULL;
  const char *rp_nickname = NULL;
  const node_t *node = NULL;

  if (!intro) {
    if (err_msg_out)
      err_msg = tor_strdup("Bad parameters to find_rp_for_intro()");

    goto err;
  }

  if (intro->version == 0 || intro->version == 1) {
    rp_nickname = (const char *)(intro->u.v0_v1.rp);

    node = node_get_by_nickname(rp_nickname, 0);
    if (!node) {
      if (err_msg_out) {
        tor_asprintf(&err_msg,
                     "Couldn't find router %s named in INTRODUCE2 cell",
                     escaped_safe_str_client(rp_nickname));
      }
      goto err;
    }
    /* Are we in single onion mode? */
    const int allow_direct =
      rend_service_allow_non_anonymous_connection(get_options());
    rp = extend_info_from_node(node, allow_direct);
    if (!rp) {
      if (err_msg_out) {
        tor_asprintf(&err_msg,
                     "Couldn't build extend_info_t for router %s named "
                     "in INTRODUCE2 cell",
                     escaped_safe_str_client(rp_nickname));
      }
      goto err;
    }
  } else if (intro->version == 2) {
    rp = extend_info_dup(intro->u.v2.extend_info);
  } else if (intro->version == 3) {
    rp = extend_info_dup(intro->u.v3.extend_info);
  } else {
    if (err_msg_out) {
      tor_asprintf(&err_msg,
                   "Unknown version %d in INTRODUCE2 cell",
                   (int)(intro->version));
    }
    goto err;
  }

  /* rp is always set here: extend_info_dup guarantees a non-NULL result, and
   * the other cases goto err. */
  tor_assert(rp);

  /* Make sure the RP we are being asked to connect to is _not_ a private
   * address unless it's allowed. Let's avoid building a circuit to our
   * second middle node only to fail right afterwards when extending to the
   * RP. */
  if (!extend_info_addr_is_allowed(&rp->addr)) {
    if (err_msg_out) {
      tor_asprintf(&err_msg,
                   "Relay IP in INTRODUCE2 cell is private address.");
    }
    extend_info_free(rp);
    rp = NULL;
    goto err;
  }
  goto done;

 err:
  if (err_msg_out)
    *err_msg_out = err_msg;
  else
    tor_free(err_msg);

 done:
  return rp;
}

/** Free a parsed INTRODUCE1 or INTRODUCE2 cell that was allocated by
 * rend_service_begin_parse_intro(). */
void
rend_service_free_intro(rend_intro_cell_t *request)
{
  if (!request) {
    return;
  }

  /* Free ciphertext */
  tor_free(request->ciphertext);
  request->ciphertext_len = 0;

  /* Have plaintext? */
  if (request->plaintext) {
    /* Zero it out just to be safe */
    memwipe(request->plaintext, 0, request->plaintext_len);
    tor_free(request->plaintext);
    request->plaintext_len = 0;
  }

  /* Have parsed plaintext? */
  if (request->parsed) {
    switch (request->version) {
      case 0:
      case 1:
        /*
         * Nothing more to do; these formats have no further pointers
         * in them.
         */
        break;
      case 2:
        extend_info_free(request->u.v2.extend_info);
        request->u.v2.extend_info = NULL;
        break;
      case 3:
        if (request->u.v3.auth_data) {
          memwipe(request->u.v3.auth_data, 0, request->u.v3.auth_len);
          tor_free(request->u.v3.auth_data);
        }
        extend_info_free(request->u.v3.extend_info);
        request->u.v3.extend_info = NULL;
        break;
      default:
        log_info(LD_BUG,
                 "rend_service_free_intro() saw unknown protocol "
                 "version %d.",
                 request->version);
    }
  }

  /* Zero it out to make sure sensitive stuff doesn't hang around in memory */
  memwipe(request, 0, sizeof(*request));

  tor_free(request);
}

/** Parse an INTRODUCE1 or INTRODUCE2 cell into a newly allocated
 * rend_intro_cell_t structure. Free it with rend_service_free_intro()
 * when finished. The type parameter should be 1 or 2 to indicate whether
 * this is INTRODUCE1 or INTRODUCE2. This parses only the non-encrypted
 * parts; after this, call rend_service_decrypt_intro() with a key, then
 * rend_service_parse_intro_plaintext() to finish parsing.
The optional * err_msg_out parameter is set to a string suitable for log output * if parsing fails. This function does some validation, but only * that which depends solely on the contents of the cell and the * key; it can be unit-tested. Further validation is done in * rend_service_validate_intro(). */ rend_intro_cell_t * rend_service_begin_parse_intro(const uint8_t *request, size_t request_len, uint8_t type, char **err_msg_out) { rend_intro_cell_t *rv = NULL; char *err_msg = NULL; if (!request || request_len <= 0) goto err; if (!(type == 1 || type == 2)) goto err; /* First, check that the cell is long enough to be a sensible INTRODUCE */ /* min key length plus digest length plus nickname length */ if (request_len < (DIGEST_LEN + REND_COOKIE_LEN + (MAX_NICKNAME_LEN + 1) + DH_KEY_LEN + 42)) { if (err_msg_out) { tor_asprintf(&err_msg, "got a truncated INTRODUCE%d cell", (int)type); } goto err; } /* Allocate a new parsed cell structure */ rv = tor_malloc_zero(sizeof(*rv)); /* Set the type */ rv->type = type; /* Copy in the ID */ memcpy(rv->pk, request, DIGEST_LEN); /* Copy in the ciphertext */ rv->ciphertext = tor_malloc(request_len - DIGEST_LEN); memcpy(rv->ciphertext, request + DIGEST_LEN, request_len - DIGEST_LEN); rv->ciphertext_len = request_len - DIGEST_LEN; goto done; err: rend_service_free_intro(rv); rv = NULL; if (err_msg_out && !err_msg) { tor_asprintf(&err_msg, "unknown INTRODUCE%d error", (int)type); } done: if (err_msg_out) *err_msg_out = err_msg; else tor_free(err_msg); return rv; } /** Parse the version-specific parts of a v0 or v1 INTRODUCE1 or INTRODUCE2 * cell */ static ssize_t rend_service_parse_intro_for_v0_or_v1( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out) { const char *rp_nickname, *endptr; size_t nickname_field_len, ver_specific_len; if (intro->version == 1) { ver_specific_len = MAX_HEX_NICKNAME_LEN + 2; rp_nickname = ((const char *)buf) + 1; nickname_field_len = MAX_HEX_NICKNAME_LEN + 1; } else if (intro->version == 0) { ver_specific_len = MAX_NICKNAME_LEN + 1; rp_nickname = (const char *)buf; nickname_field_len = MAX_NICKNAME_LEN + 1; } else { if (err_msg_out) tor_asprintf(err_msg_out, "rend_service_parse_intro_for_v0_or_v1() called with " "bad version %d on INTRODUCE%d cell (this is a bug)", intro->version, (int)(intro->type)); goto err; } if (plaintext_len < ver_specific_len) { if (err_msg_out) tor_asprintf(err_msg_out, "short plaintext of encrypted part in v1 INTRODUCE%d " "cell (%lu bytes, needed %lu)", (int)(intro->type), (unsigned long)plaintext_len, (unsigned long)ver_specific_len); goto err; } endptr = memchr(rp_nickname, 0, nickname_field_len); if (!endptr || endptr == rp_nickname) { if (err_msg_out) { tor_asprintf(err_msg_out, "couldn't find a nul-padded nickname in " "INTRODUCE%d cell", (int)(intro->type)); } goto err; } if ((intro->version == 0 && !is_legal_nickname(rp_nickname)) || (intro->version == 1 && !is_legal_nickname_or_hexdigest(rp_nickname))) { if (err_msg_out) { tor_asprintf(err_msg_out, "bad nickname in INTRODUCE%d cell", (int)(intro->type)); } goto err; } memcpy(intro->u.v0_v1.rp, rp_nickname, endptr - rp_nickname + 1); return ver_specific_len; err: return -1; } /** Parse the version-specific parts of a v2 INTRODUCE1 or INTRODUCE2 cell */ static ssize_t rend_service_parse_intro_for_v2( rend_intro_cell_t *intro, const uint8_t *buf, size_t plaintext_len, char **err_msg_out) { unsigned int klen; extend_info_t *extend_info = NULL; ssize_t ver_specific_len; /* * We accept version 3 too so that the v3 
parser can call this with
 * an adjusted buffer for the latter part of a v3 cell, which is
 * identical to a v2 cell.
 */
  if (!(intro->version == 2 || intro->version == 3)) {
    if (err_msg_out)
      tor_asprintf(err_msg_out,
                   "rend_service_parse_intro_for_v2() called with "
                   "bad version %d on INTRODUCE%d cell (this is a bug)",
                   intro->version,
                   (int)(intro->type));
    goto err;
  }

  /* 7 == version, IP and port, DIGEST_LEN == id, 2 == key length */
  if (plaintext_len < 7 + DIGEST_LEN + 2) {
    if (err_msg_out) {
      tor_asprintf(err_msg_out,
                   "truncated plaintext of encrypted part of "
                   "version %d INTRODUCE%d cell",
                   intro->version,
                   (int)(intro->type));
    }

    goto err;
  }

  extend_info = tor_malloc_zero(sizeof(extend_info_t));
  tor_addr_from_ipv4n(&extend_info->addr, get_uint32(buf + 1));
  extend_info->port = ntohs(get_uint16(buf + 5));
  memcpy(extend_info->identity_digest, buf + 7, DIGEST_LEN);
  extend_info->nickname[0] = '$';
  base16_encode(extend_info->nickname + 1, sizeof(extend_info->nickname) - 1,
                extend_info->identity_digest, DIGEST_LEN);
  klen = ntohs(get_uint16(buf + 7 + DIGEST_LEN));

  /* 7 == version, IP and port, DIGEST_LEN == id, 2 == key length */
  if (plaintext_len < 7 + DIGEST_LEN + 2 + klen) {
    if (err_msg_out) {
      tor_asprintf(err_msg_out,
                   "truncated plaintext of encrypted part of "
                   "version %d INTRODUCE%d cell",
                   intro->version,
                   (int)(intro->type));
    }

    goto err;
  }

  extend_info->onion_key =
    crypto_pk_asn1_decode((const char *)(buf + 7 + DIGEST_LEN + 2), klen);
  if (!extend_info->onion_key) {
    if (err_msg_out) {
      tor_asprintf(err_msg_out,
                   "error decoding onion key in version %d "
                   "INTRODUCE%d cell",
                   intro->version,
                   (int)(intro->type));
    }

    goto err;
  }
  if (128 != crypto_pk_keysize(extend_info->onion_key)) {
    if (err_msg_out) {
      tor_asprintf(err_msg_out,
                   "invalid onion key size in version %d INTRODUCE%d cell",
                   intro->version,
                   (int)(intro->type));
    }

    goto err;
  }

  ver_specific_len = 7+DIGEST_LEN+2+klen;

  if (intro->version == 2)
    intro->u.v2.extend_info = extend_info;
  else
    intro->u.v3.extend_info = extend_info;

  return ver_specific_len;

 err:
  extend_info_free(extend_info);

  return -1;
}

/** Parse the version-specific parts of a v3 INTRODUCE1 or INTRODUCE2 cell
 */
static ssize_t
rend_service_parse_intro_for_v3(
    rend_intro_cell_t *intro,
    const uint8_t *buf,
    size_t plaintext_len,
    char **err_msg_out)
{
  ssize_t adjust, v2_ver_specific_len, ts_offset;

  /* This should only be called on v3 cells */
  if (intro->version != 3) {
    if (err_msg_out)
      tor_asprintf(err_msg_out,
                   "rend_service_parse_intro_for_v3() called with "
                   "bad version %d on INTRODUCE%d cell (this is a bug)",
                   intro->version,
                   (int)(intro->type));
    goto err;
  }

  /*
   * Check that we have at least enough to get auth_len:
   *
   * 1 octet for version, 1 for auth_type, 2 for auth_len
   */
  if (plaintext_len < 4) {
    if (err_msg_out) {
      tor_asprintf(err_msg_out,
                   "truncated plaintext of encrypted part of "
                   "version %d INTRODUCE%d cell",
                   intro->version,
                   (int)(intro->type));
    }

    goto err;
  }

  /*
   * The rend_client_send_introduction() function over in rendclient.c is
   * broken (i.e., fails to match the spec) in such a way that we can't
   * change it without breaking the protocol. Specifically, it doesn't
   * emit auth_len when auth-type is REND_NO_AUTH, so everything is off
   * by two bytes after that. Calculate ts_offset and do everything from
   * the timestamp on relative to that to handle this dain bramage.
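   *
   * Concretely, the layout handled below is (field sizes in bytes):
   *
   *   version(1) auth_type(1) [auth_len(2) auth_data(auth_len)]
   *   timestamp(4) <v2-style body>
   *
   * where the bracketed fields are absent when auth_type is REND_NO_AUTH;
   * hence the ts_offset computation.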
   */
  intro->u.v3.auth_type = buf[1];
  if (intro->u.v3.auth_type != REND_NO_AUTH) {
    intro->u.v3.auth_len = ntohs(get_uint16(buf + 2));
    ts_offset = 4 + intro->u.v3.auth_len;
  } else {
    intro->u.v3.auth_len = 0;
    ts_offset = 2;
  }

  /* Check that auth len makes sense for this auth type */
  if (intro->u.v3.auth_type == REND_BASIC_AUTH ||
      intro->u.v3.auth_type == REND_STEALTH_AUTH) {
    if (intro->u.v3.auth_len != REND_DESC_COOKIE_LEN) {
      if (err_msg_out) {
        tor_asprintf(err_msg_out,
                     "wrong auth data size %d for INTRODUCE%d cell, "
                     "should be %d",
                     (int)(intro->u.v3.auth_len),
                     (int)(intro->type),
                     REND_DESC_COOKIE_LEN);
      }

      goto err;
    }
  }

  /* Check that we actually have everything up through the timestamp */
  if (plaintext_len < (size_t)(ts_offset)+4) {
    if (err_msg_out) {
      tor_asprintf(err_msg_out,
                   "truncated plaintext of encrypted part of "
                   "version %d INTRODUCE%d cell",
                   intro->version,
                   (int)(intro->type));
    }

    goto err;
  }

  if (intro->u.v3.auth_type != REND_NO_AUTH &&
      intro->u.v3.auth_len > 0) {
    /* Okay, we can go ahead and copy auth_data */
    intro->u.v3.auth_data = tor_malloc(intro->u.v3.auth_len);
    /*
     * We know we had an auth_len field in this case, so 4 is
     * always right.
     */
    memcpy(intro->u.v3.auth_data, buf + 4, intro->u.v3.auth_len);
  }

  /*
   * From here on, the format is as in v2, so we call the v2 parser with
   * adjusted buffer and length. We are 4 + ts_offset octets in, but the
   * v2 parser expects to skip over a version byte at the start, so we
   * adjust by 3 + ts_offset.
   */
  adjust = 3 + ts_offset;

  v2_ver_specific_len =
    rend_service_parse_intro_for_v2(intro,
                                    buf + adjust, plaintext_len - adjust,
                                    err_msg_out);

  /* Success in v2 parser */
  if (v2_ver_specific_len >= 0)
    return v2_ver_specific_len + adjust;
  /* Failure in v2 parser; it will have provided an err_msg */
  else
    return v2_ver_specific_len;

 err:
  return -1;
}

/** Table of parser functions for version-specific parts of an INTRODUCE1
 * or INTRODUCE2 cell. */
static ssize_t
  (*intro_version_handlers[])(
    rend_intro_cell_t *,
    const uint8_t *,
    size_t,
    char **) =
{ rend_service_parse_intro_for_v0_or_v1,
  rend_service_parse_intro_for_v0_or_v1,
  rend_service_parse_intro_for_v2,
  rend_service_parse_intro_for_v3 };

/** Decrypt the encrypted part of an INTRODUCE1 or INTRODUCE2 cell,
 * return 0 if successful, or < 0 and write an error message to
 * *err_msg_out if provided.
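 *
 * Decryption uses crypto_pk_private_hybrid_decrypt(): Tor's hybrid scheme
 * RSA(OAEP)-decrypts the leading block to recover a symmetric key plus the
 * start of the payload and decrypts the remainder symmetrically, which is
 * why the plaintext may be longer than a single RSA block.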
*/ int rend_service_decrypt_intro( rend_intro_cell_t *intro, crypto_pk_t *key, char **err_msg_out) { char *err_msg = NULL; uint8_t key_digest[DIGEST_LEN]; char service_id[REND_SERVICE_ID_LEN_BASE32+1]; ssize_t key_len; uint8_t buf[RELAY_PAYLOAD_SIZE]; int result, status = -1; if (!intro || !key) { if (err_msg_out) { err_msg = tor_strdup("rend_service_decrypt_intro() called with bad " "parameters"); } status = -2; goto err; } /* Make sure we have ciphertext */ if (!(intro->ciphertext) || intro->ciphertext_len <= 0) { if (err_msg_out) { tor_asprintf(&err_msg, "rend_intro_cell_t was missing ciphertext for " "INTRODUCE%d cell", (int)(intro->type)); } status = -3; goto err; } /* Check that this cell actually matches this service key */ /* first DIGEST_LEN bytes of request is intro or service pk digest */ crypto_pk_get_digest(key, (char *)key_digest); if (tor_memneq(key_digest, intro->pk, DIGEST_LEN)) { if (err_msg_out) { base32_encode(service_id, REND_SERVICE_ID_LEN_BASE32 + 1, (char*)(intro->pk), REND_SERVICE_ID_LEN); tor_asprintf(&err_msg, "got an INTRODUCE%d cell for the wrong service (%s)", (int)(intro->type), escaped(service_id)); } status = -4; goto err; } /* Make sure the encrypted part is long enough to decrypt */ key_len = crypto_pk_keysize(key); if (intro->ciphertext_len < key_len) { if (err_msg_out) { tor_asprintf(&err_msg, "got an INTRODUCE%d cell with a truncated PK-encrypted " "part", (int)(intro->type)); } status = -5; goto err; } /* Decrypt the encrypted part */ note_crypto_pk_op(REND_SERVER); result = crypto_pk_private_hybrid_decrypt( key, (char *)buf, sizeof(buf), (const char *)(intro->ciphertext), intro->ciphertext_len, PK_PKCS1_OAEP_PADDING, 1); if (result < 0) { if (err_msg_out) { tor_asprintf(&err_msg, "couldn't decrypt INTRODUCE%d cell", (int)(intro->type)); } status = -6; goto err; } intro->plaintext_len = result; intro->plaintext = tor_malloc(intro->plaintext_len); memcpy(intro->plaintext, buf, intro->plaintext_len); status = 0; goto done; err: if (err_msg_out && !err_msg) { tor_asprintf(&err_msg, "unknown INTRODUCE%d error decrypting encrypted part", intro ? (int)(intro->type) : -1); } done: if (err_msg_out) *err_msg_out = err_msg; else tor_free(err_msg); /* clean up potentially sensitive material */ memwipe(buf, 0, sizeof(buf)); memwipe(key_digest, 0, sizeof(key_digest)); memwipe(service_id, 0, sizeof(service_id)); return status; } /** Parse the plaintext of the encrypted part of an INTRODUCE1 or * INTRODUCE2 cell, return 0 if successful, or < 0 and write an error * message to *err_msg_out if provided. 
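 *
 * Whatever the version, the plaintext must end with the version-invariant
 * fields, so after the version-specific parser consumes its part the
 * remaining layout has to be exactly:
 *
 *   [rend cookie (REND_COOKIE_LEN)][DH public key (DH_KEY_LEN)]
 *
 * Anything longer or shorter is rejected.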
*/ int rend_service_parse_intro_plaintext( rend_intro_cell_t *intro, char **err_msg_out) { char *err_msg = NULL; ssize_t ver_specific_len, ver_invariant_len; uint8_t version; int status = -1; if (!intro) { if (err_msg_out) { err_msg = tor_strdup("rend_service_parse_intro_plaintext() called with NULL " "rend_intro_cell_t"); } status = -2; goto err; } /* Check that we have plaintext */ if (!(intro->plaintext) || intro->plaintext_len <= 0) { if (err_msg_out) { err_msg = tor_strdup("rend_intro_cell_t was missing plaintext"); } status = -3; goto err; } /* In all formats except v0, the first byte is a version number */ version = intro->plaintext[0]; /* v0 has no version byte (stupid...), so handle it as a fallback */ if (version > 3) version = 0; /* Copy the version into the parsed cell structure */ intro->version = version; /* Call the version-specific parser from the table */ ver_specific_len = intro_version_handlers[version](intro, intro->plaintext, intro->plaintext_len, &err_msg); if (ver_specific_len < 0) { status = -4; goto err; } /** The rendezvous cookie and Diffie-Hellman stuff are version-invariant * and at the end of the plaintext of the encrypted part of the cell. */ ver_invariant_len = intro->plaintext_len - ver_specific_len; if (ver_invariant_len < REND_COOKIE_LEN + DH_KEY_LEN) { tor_asprintf(&err_msg, "decrypted plaintext of INTRODUCE%d cell was truncated (%ld bytes)", (int)(intro->type), (long)(intro->plaintext_len)); status = -5; goto err; } else if (ver_invariant_len > REND_COOKIE_LEN + DH_KEY_LEN) { tor_asprintf(&err_msg, "decrypted plaintext of INTRODUCE%d cell was too long (%ld bytes)", (int)(intro->type), (long)(intro->plaintext_len)); status = -6; goto err; } else { memcpy(intro->rc, intro->plaintext + ver_specific_len, REND_COOKIE_LEN); memcpy(intro->dh, intro->plaintext + ver_specific_len + REND_COOKIE_LEN, DH_KEY_LEN); } /* Flag it as being fully parsed */ intro->parsed = 1; status = 0; goto done; err: if (err_msg_out && !err_msg) { tor_asprintf(&err_msg, "unknown INTRODUCE%d error parsing encrypted part", intro ? (int)(intro->type) : -1); } done: if (err_msg_out) *err_msg_out = err_msg; else tor_free(err_msg); return status; } /** Do validity checks on a parsed intro cell after decryption; some of * these are not done in rend_service_parse_intro_plaintext() itself because * they depend on a lot of other state and would make it hard to unit test. * Returns >= 0 if successful or < 0 if the intro cell is invalid, and * optionally writes out an error message for logging. If an err_msg * pointer is provided, it is the caller's responsibility to free any * provided message. */ int rend_service_validate_intro_late(const rend_intro_cell_t *intro, char **err_msg_out) { int status = 0; if (!intro) { if (err_msg_out) *err_msg_out = tor_strdup("NULL intro cell passed to " "rend_service_validate_intro_late()"); status = -1; goto err; } if (intro->version == 3 && intro->parsed) { if (!(intro->u.v3.auth_type == REND_NO_AUTH || intro->u.v3.auth_type == REND_BASIC_AUTH || intro->u.v3.auth_type == REND_STEALTH_AUTH)) { /* This is an informative message, not an error, as in the old code */ if (err_msg_out) tor_asprintf(err_msg_out, "unknown authorization type %d", intro->u.v3.auth_type); } } err: return status; } /** Called when we fail building a rendezvous circuit at some point other * than the last hop: launches a new circuit to the same rendezvous point. 
*/
void
rend_service_relaunch_rendezvous(origin_circuit_t *oldcirc)
{
  origin_circuit_t *newcirc;
  cpath_build_state_t *newstate, *oldstate;

  tor_assert(oldcirc->base_.purpose == CIRCUIT_PURPOSE_S_CONNECT_REND);

  /* Don't relaunch the same rend circ twice. */
  if (oldcirc->hs_service_side_rend_circ_has_been_relaunched) {
    log_info(LD_REND, "Rendezvous circuit to %s has already been relaunched; "
             "not relaunching it again.",
             oldcirc->build_state ?
             safe_str(extend_info_describe(oldcirc->build_state->chosen_exit))
             : "*unknown*");
    return;
  }
  oldcirc->hs_service_side_rend_circ_has_been_relaunched = 1;

  if (!oldcirc->build_state ||
      oldcirc->build_state->failure_count > MAX_REND_FAILURES ||
      oldcirc->build_state->expiry_time < time(NULL)) {
    log_info(LD_REND,
             "Attempt to build circuit to %s for rendezvous has failed "
             "too many times or expired; giving up.",
             oldcirc->build_state ?
             safe_str(extend_info_describe(oldcirc->build_state->chosen_exit))
             : "*unknown*");
    return;
  }

  oldstate = oldcirc->build_state;
  tor_assert(oldstate);

  if (oldstate->service_pending_final_cpath_ref == NULL) {
    log_info(LD_REND,"Skipping relaunch of circ that failed on its first hop. "
             "Initiator will retry.");
    return;
  }

  log_info(LD_REND,"Reattempting rendezvous circuit to '%s'",
           safe_str(extend_info_describe(oldstate->chosen_exit)));

  /* You'd think Single Onion Services would want to retry the rendezvous
   * using a direct connection. But if it's blocked by a firewall, or the
   * service is IPv6-only, or the rend point is avoiding becoming a one-hop
   * proxy, we need a 3-hop connection. */
  newcirc = circuit_launch_by_extend_info(CIRCUIT_PURPOSE_S_CONNECT_REND,
                            oldstate->chosen_exit,
                            CIRCLAUNCH_NEED_CAPACITY|CIRCLAUNCH_IS_INTERNAL);

  if (!newcirc) {
    log_warn(LD_REND,"Couldn't relaunch rendezvous circuit to '%s'.",
             safe_str(extend_info_describe(oldstate->chosen_exit)));
    return;
  }
  newstate = newcirc->build_state;
  tor_assert(newstate);
  newstate->failure_count = oldstate->failure_count+1;
  newstate->expiry_time = oldstate->expiry_time;
  newstate->service_pending_final_cpath_ref =
    oldstate->service_pending_final_cpath_ref;
  ++(newstate->service_pending_final_cpath_ref->refcount);

  newcirc->rend_data = rend_data_dup(oldcirc->rend_data);
}

/** Launch a circuit to serve as an introduction point for the service
 * <b>service</b> at the introduction point <b>intro</b>.
 */
static int
rend_service_launch_establish_intro(rend_service_t *service,
                                    rend_intro_point_t *intro)
{
  origin_circuit_t *launched;
  int flags = CIRCLAUNCH_NEED_UPTIME|CIRCLAUNCH_IS_INTERNAL;
  const or_options_t *options = get_options();
  extend_info_t *launch_ei = intro->extend_info;
  extend_info_t *direct_ei = NULL;

  /* Are we in single onion mode? */
  if (rend_service_allow_non_anonymous_connection(options)) {
    /* Do we have a descriptor for the node?
     * We've either just chosen it from the consensus, or we've just reviewed
     * our intro points to see which ones are still valid, and deleted the
     * ones that aren't in the consensus any more. */
    const node_t *node = node_get_by_id(launch_ei->identity_digest);
    if (BUG(!node)) {
      /* The service has kept an intro point after it went missing from the
       * consensus. If we did anything else here, it would be a consensus
       * distinguisher. That is less of an issue for single onion services,
       * but still a bug. */
      return -1;
    }
    /* Can we connect to the node directly? If so, replace launch_ei
     * (a multi-hop extend_info) with one suitable for direct connection.
*/
    if (rend_service_use_direct_connection_node(options, node)) {
      direct_ei = extend_info_from_node(node, 1);
      if (BUG(!direct_ei)) {
        /* rend_service_use_direct_connection_node and extend_info_from_node
         * disagree about which addresses on this node are permitted. This
         * should never happen. Avoiding the connection is a safe response. */
        return -1;
      }
      flags = flags | CIRCLAUNCH_ONEHOP_TUNNEL;
      launch_ei = direct_ei;
    }
  }
  /* launch_ei is either intro->extend_info, or has been replaced with a valid
   * extend_info for single onion service direct connection. */
  tor_assert(launch_ei);
  /* We must use the same intro point when making a direct connection. */
  tor_assert(tor_memeq(intro->extend_info->identity_digest,
                       launch_ei->identity_digest, DIGEST_LEN));

  log_info(LD_REND,
           "Launching circuit to introduction point %s%s%s for service %s",
           safe_str_client(extend_info_describe(intro->extend_info)),
           direct_ei ? " via direct address " : "",
           direct_ei ? safe_str_client(extend_info_describe(direct_ei)) : "",
           service->service_id);

  rep_hist_note_used_internal(time(NULL), 1, 0);

  ++service->n_intro_circuits_launched;
  launched = circuit_launch_by_extend_info(CIRCUIT_PURPOSE_S_ESTABLISH_INTRO,
                                           launch_ei, flags);

  if (!launched) {
    log_info(LD_REND,
             "Can't launch circuit to establish introduction at %s%s%s.",
             safe_str_client(extend_info_describe(intro->extend_info)),
             direct_ei ? " via direct address " : "",
             direct_ei ? safe_str_client(extend_info_describe(direct_ei)) : ""
             );
    extend_info_free(direct_ei);
    return -1;
  }

  /* We must have the same exit node, even if the circuit was cannibalized or
   * uses a direct connection. */
  tor_assert(tor_memeq(intro->extend_info->identity_digest,
                       launched->build_state->chosen_exit->identity_digest,
                       DIGEST_LEN));

  launched->rend_data = rend_data_service_create(service->service_id,
                                                 service->pk_digest, NULL,
                                                 service->auth_type);
  launched->intro_key = crypto_pk_dup_key(intro->intro_key);
  if (launched->base_.state == CIRCUIT_STATE_OPEN)
    rend_service_intro_has_opened(launched);
  extend_info_free(direct_ei);
  return 0;
}

/** Return the number of introduction points that are established for the
 * given service. */
static unsigned int
count_established_intro_points(const rend_service_t *service)
{
  unsigned int num = 0;

  SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro,
                    num += intro->circuit_established
  );
  return num;
}

/** Return the number of introduction points that are or are being
 * established for the given service. This function iterates over all
 * circuits and counts those that are linked to the service and are either
 * established or waiting for the intro point to respond. */
static unsigned int
count_intro_point_circuits(const rend_service_t *service)
{
  unsigned int num_ipos = 0;
  SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, circ) {
    if (!circ->marked_for_close &&
        circ->state == CIRCUIT_STATE_OPEN &&
        (circ->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
         circ->purpose == CIRCUIT_PURPOSE_S_INTRO)) {
      origin_circuit_t *oc = TO_ORIGIN_CIRCUIT(circ);
      if (oc->rend_data &&
          rend_circuit_pk_digest_eq(oc, (uint8_t *) service->pk_digest)) {
        num_ipos++;
      }
    }
  }
  SMARTLIST_FOREACH_END(circ);
  return num_ipos;
}

/* Given a buffer of at least RELAY_PAYLOAD_SIZE bytes in
   <b>cell_body_out</b>, write the body of a legacy ESTABLISH_INTRO cell in
   it. Use <b>intro_key</b> as the intro point auth key, and
   <b>rend_circ_nonce</b> as the circuit crypto material. On success, fill
   <b>cell_body_out</b> and return the number of bytes written. On failure,
   return -1.
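
   The resulting cell body layout (as assembled below) is:

     KL(2) | PK(KL) | HASH(20) | SIG

   where KL is the length of the ASN.1-encoded intro key PK, HASH is
   SHA1(rend_circ_nonce | "INTRODUCE"), and SIG is the RSA signature of
   everything preceding it.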
 */
STATIC ssize_t
encode_establish_intro_cell_legacy(char *cell_body_out,
                                   size_t cell_body_out_len,
                                   crypto_pk_t *intro_key,
                                   char *rend_circ_nonce)
{
  int retval = -1;
  int r;
  int len = 0;
  char auth[DIGEST_LEN + 9];

  tor_assert(intro_key);
  tor_assert(rend_circ_nonce);

  /* Build the payload for a RELAY_ESTABLISH_INTRO cell. */
  r = crypto_pk_asn1_encode(intro_key, cell_body_out+2,
                            RELAY_PAYLOAD_SIZE-2);
  if (r < 0) {
    log_warn(LD_BUG, "Internal error; failed to establish intro point.");
    goto err;
  }
  len = r;
  set_uint16(cell_body_out, htons((uint16_t)len));
  len += 2;
  memcpy(auth, rend_circ_nonce, DIGEST_LEN);
  memcpy(auth+DIGEST_LEN, "INTRODUCE", 9);
  if (crypto_digest(cell_body_out+len, auth, DIGEST_LEN+9))
    goto err;
  len += DIGEST_LEN; /* The digest written above is DIGEST_LEN bytes. */
  note_crypto_pk_op(REND_SERVER);
  r = crypto_pk_private_sign_digest(intro_key, cell_body_out+len,
                                    cell_body_out_len - len,
                                    cell_body_out, len);
  if (r<0) {
    log_warn(LD_BUG, "Internal error: couldn't sign introduction request.");
    goto err;
  }
  len += r;

  retval = len;

 err:
  memwipe(auth, 0, sizeof(auth));
  return retval;
}

/** Called when we're done building a circuit to an introduction point:
 *  sends a RELAY_ESTABLISH_INTRO cell.
 */
void
rend_service_intro_has_opened(origin_circuit_t *circuit)
{
  rend_service_t *service;
  char buf[RELAY_PAYLOAD_SIZE];
  char serviceid[REND_SERVICE_ID_LEN_BASE32+1];
  unsigned int expiring_nodes_len, num_ip_circuits, valid_ip_circuits = 0;
  int reason = END_CIRC_REASON_TORPROTOCOL;
  const char *rend_pk_digest;

  tor_assert(circuit->base_.purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO);
  assert_circ_anonymity_ok(circuit, get_options());
  tor_assert(circuit->cpath);
  tor_assert(circuit->rend_data);
  /* XXX: This is version 2 specific (the only version supported). */
  rend_pk_digest = (char *) rend_data_get_pk_digest(circuit->rend_data, NULL);

  base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1,
                rend_pk_digest, REND_SERVICE_ID_LEN);

  service = rend_service_get_by_pk_digest(rend_pk_digest);
  if (!service) {
    log_warn(LD_REND, "Unrecognized service ID %s on introduction circuit %u.",
             safe_str_client(serviceid), (unsigned)circuit->base_.n_circ_id);
    reason = END_CIRC_REASON_NOSUCHSERVICE;
    goto err;
  }

  /* Take the current number of expiring nodes and the current number of IP
   * circuits, and compute how many valid IP circuits we have. */
  expiring_nodes_len = (unsigned int) smartlist_len(service->expiring_nodes);
  num_ip_circuits = count_intro_point_circuits(service);
  /* Avoid an underflow: valid_ip_circuits is initialized to 0, so if this
   * condition turns out false it means that all circuits are expiring and
   * we need to keep this circuit. */
  if (num_ip_circuits > expiring_nodes_len) {
    valid_ip_circuits = num_ip_circuits - expiring_nodes_len;
  }

  /* If we already have enough introduction circuits for this service,
   * redefine this one as a general circuit or close it, depending.
   * Subtract the number of expiring nodes here because those circuits are
   * still open. */
  if (valid_ip_circuits > service->n_intro_points_wanted) {
    const or_options_t *options = get_options();
    /* Remove the intro point associated with this circuit; it's being
     * repurposed or closed, thus clean up its memory. */
    rend_intro_point_t *intro = find_intro_point(circuit);
    if (intro != NULL) {
      smartlist_remove(service->intro_nodes, intro);
      rend_intro_point_free(intro);
    }

    if (options->ExcludeNodes) {
      /* XXXX in some future version, we can test whether the transition is
         allowed or not given the actual nodes in the circuit.  But for now,
         in this case, we might as well close the thing.
 */
      log_info(LD_CIRC|LD_REND, "We have just finished an introduction "
               "circuit, but we already have enough. Closing it.");
      reason = END_CIRC_REASON_NONE;
      goto err;
    } else {
      tor_assert(circuit->build_state->is_internal);
      log_info(LD_CIRC|LD_REND, "We have just finished an introduction "
               "circuit, but we already have enough. Redefining purpose to "
               "general; leaving as internal.");

      circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_C_GENERAL);

      {
        rend_data_free(circuit->rend_data);
        circuit->rend_data = NULL;
      }
      {
        crypto_pk_t *intro_key = circuit->intro_key;
        circuit->intro_key = NULL;
        crypto_pk_free(intro_key);
      }

      circuit_has_opened(circuit);
      goto done;
    }
  }

  log_info(LD_REND,
           "Established circuit %u as introduction point for service %s",
           (unsigned)circuit->base_.n_circ_id, serviceid);
  circuit_log_path(LOG_INFO, LD_REND, circuit);

  /* Send the ESTABLISH_INTRO cell */
  {
    ssize_t len;
    len = encode_establish_intro_cell_legacy(buf, sizeof(buf),
                     circuit->intro_key,
                     circuit->cpath->prev->rend_circ_nonce);
    if (len < 0) {
      reason = END_CIRC_REASON_INTERNAL;
      goto err;
    }

    if (relay_send_command_from_edge(0, TO_CIRCUIT(circuit),
                                     RELAY_COMMAND_ESTABLISH_INTRO,
                                     buf, len, circuit->cpath->prev)<0) {
      log_info(LD_GENERAL,
               "Couldn't send introduction request for service %s on "
               "circuit %u", serviceid, (unsigned)circuit->base_.n_circ_id);
      goto done;
    }
  }

  /* We've attempted to use this circuit */
  pathbias_count_use_attempt(circuit);

  goto done;

 err:
  circuit_mark_for_close(TO_CIRCUIT(circuit), reason);
 done:
  memwipe(buf, 0, sizeof(buf));
  memwipe(serviceid, 0, sizeof(serviceid));

  return;
}

/** Called when we get an INTRO_ESTABLISHED cell; mark the circuit as a
 * live introduction point, and note that the service descriptor is
 * now out-of-date. */
int
rend_service_intro_established(origin_circuit_t *circuit,
                               const uint8_t *request,
                               size_t request_len)
{
  rend_service_t *service;
  rend_intro_point_t *intro;
  char serviceid[REND_SERVICE_ID_LEN_BASE32+1];
  (void) request;
  (void) request_len;
  tor_assert(circuit->rend_data);
  /* XXX: This is version 2 specific (the only version supported for now). */
  const char *rend_pk_digest =
    (char *) rend_data_get_pk_digest(circuit->rend_data, NULL);

  /* Encode the service ID early so the log messages below can use it. */
  base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32 + 1,
                rend_pk_digest, REND_SERVICE_ID_LEN);

  if (circuit->base_.purpose != CIRCUIT_PURPOSE_S_ESTABLISH_INTRO) {
    log_warn(LD_PROTOCOL,
             "received INTRO_ESTABLISHED cell on non-intro circuit.");
    goto err;
  }

  service = rend_service_get_by_pk_digest(rend_pk_digest);
  if (!service) {
    log_warn(LD_REND, "Unknown service on introduction circuit %u.",
             (unsigned)circuit->base_.n_circ_id);
    goto err;
  }

  /* We've just successfully established an intro circuit to one of our
   * introduction points, account for it. */
  intro = find_intro_point(circuit);
  if (intro == NULL) {
    log_warn(LD_REND,
             "Introduction circuit established without a rend_intro_point_t "
             "object for service %s on circuit %u",
             safe_str_client(serviceid), (unsigned)circuit->base_.n_circ_id);
    goto err;
  }
  intro->circuit_established = 1;
  /* We might not have every introduction point ready but at this point we
   * know that the descriptor needs to be uploaded.
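   * Setting desc_is_dirty below records when the descriptor became stale;
   * rend_consider_services_upload() compares that timestamp against the
   * initial post delay to decide when to publish a fresh descriptor.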
*/ service->desc_is_dirty = time(NULL); circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_S_INTRO); base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32 + 1, rend_pk_digest, REND_SERVICE_ID_LEN); log_info(LD_REND, "Received INTRO_ESTABLISHED cell on circuit %u for service %s", (unsigned)circuit->base_.n_circ_id, serviceid); /* Getting a valid INTRODUCE_ESTABLISHED means we've successfully * used the circ */ pathbias_mark_use_success(circuit); return 0; err: circuit_mark_for_close(TO_CIRCUIT(circuit), END_CIRC_REASON_TORPROTOCOL); return -1; } /** Called once a circuit to a rendezvous point is established: sends a * RELAY_COMMAND_RENDEZVOUS1 cell. */ void rend_service_rendezvous_has_opened(origin_circuit_t *circuit) { rend_service_t *service; char buf[RELAY_PAYLOAD_SIZE]; crypt_path_t *hop; char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; char hexcookie[9]; int reason; const char *rend_cookie, *rend_pk_digest; tor_assert(circuit->base_.purpose == CIRCUIT_PURPOSE_S_CONNECT_REND); tor_assert(circuit->cpath); tor_assert(circuit->build_state); assert_circ_anonymity_ok(circuit, get_options()); tor_assert(circuit->rend_data); /* XXX: This is version 2 specific (only one supported). */ rend_pk_digest = (char *) rend_data_get_pk_digest(circuit->rend_data, NULL); rend_cookie = circuit->rend_data->rend_cookie; /* Declare the circuit dirty to avoid reuse, and for path-bias */ if (!circuit->base_.timestamp_dirty) circuit->base_.timestamp_dirty = time(NULL); /* This may be redundant */ pathbias_count_use_attempt(circuit); hop = circuit->build_state->service_pending_final_cpath_ref->cpath; base16_encode(hexcookie,9, rend_cookie,4); base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1, rend_pk_digest, REND_SERVICE_ID_LEN); log_info(LD_REND, "Done building circuit %u to rendezvous with " "cookie %s for service %s", (unsigned)circuit->base_.n_circ_id, hexcookie, serviceid); circuit_log_path(LOG_INFO, LD_REND, circuit); /* Clear the 'in-progress HS circ has timed out' flag for * consistency with what happens on the client side; this line has * no effect on Tor's behaviour. */ circuit->hs_circ_has_timed_out = 0; /* If hop is NULL, another rend circ has already connected to this * rend point. Close this circ. */ if (hop == NULL) { log_info(LD_REND, "Another rend circ has already reached this rend point; " "closing this rend circ."); reason = END_CIRC_REASON_NONE; goto err; } /* Remove our final cpath element from the reference, so that no * other circuit will try to use it. Store it in * pending_final_cpath for now to ensure that it will be freed if * our rendezvous attempt fails. */ circuit->build_state->pending_final_cpath = hop; circuit->build_state->service_pending_final_cpath_ref->cpath = NULL; service = rend_service_get_by_pk_digest(rend_pk_digest); if (!service) { log_warn(LD_GENERAL, "Internal error: unrecognized service ID on " "rendezvous circuit."); reason = END_CIRC_REASON_INTERNAL; goto err; } /* All we need to do is send a RELAY_RENDEZVOUS1 cell... 
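   * Its payload, assembled just below, is:
   *   rendezvous cookie    [REND_COOKIE_LEN octets]
   *   our DH public key    [DH_KEY_LEN octets]
   *   handshake digest     [DIGEST_LEN octets]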
 */
  memcpy(buf, rend_cookie, REND_COOKIE_LEN);
  if (crypto_dh_get_public(hop->rend_dh_handshake_state,
                           buf+REND_COOKIE_LEN, DH_KEY_LEN)<0) {
    log_warn(LD_GENERAL,"Couldn't get DH public key.");
    reason = END_CIRC_REASON_INTERNAL;
    goto err;
  }
  memcpy(buf+REND_COOKIE_LEN+DH_KEY_LEN, hop->rend_circ_nonce,
         DIGEST_LEN);

  /* Send the cell */
  if (relay_send_command_from_edge(0, TO_CIRCUIT(circuit),
                                   RELAY_COMMAND_RENDEZVOUS1,
                                   buf, REND_COOKIE_LEN+DH_KEY_LEN+DIGEST_LEN,
                                   circuit->cpath->prev)<0) {
    log_warn(LD_GENERAL, "Couldn't send RENDEZVOUS1 cell.");
    goto done;
  }

  crypto_dh_free(hop->rend_dh_handshake_state);
  hop->rend_dh_handshake_state = NULL;

  /* Append the cpath entry. */
  hop->state = CPATH_STATE_OPEN;
  /* Set the windows to default. These are the windows
   * that the service thinks the client has. */
  hop->package_window = circuit_initial_package_window();
  hop->deliver_window = CIRCWINDOW_START;

  onion_append_to_cpath(&circuit->cpath, hop);
  circuit->build_state->pending_final_cpath = NULL; /* prevent double-free */

  /* Change the circuit purpose. */
  circuit_change_purpose(TO_CIRCUIT(circuit), CIRCUIT_PURPOSE_S_REND_JOINED);

  goto done;

 err:
  circuit_mark_for_close(TO_CIRCUIT(circuit), reason);
 done:
  memwipe(buf, 0, sizeof(buf));
  memwipe(serviceid, 0, sizeof(serviceid));
  memwipe(hexcookie, 0, sizeof(hexcookie));

  return;
}

/*
 * Manage introduction points
 */

/** Return the (possibly non-open) introduction circuit ending at
 * <b>intro</b> for the service whose public key is <b>pk_digest</b>.
 * Return NULL if no such circuit is found.
 */
static origin_circuit_t *
find_intro_circuit(rend_intro_point_t *intro, const char *pk_digest)
{
  origin_circuit_t *circ = NULL;

  tor_assert(intro);
  while ((circ = circuit_get_next_by_pk_and_purpose(circ,
                                      (uint8_t *) pk_digest,
                                      CIRCUIT_PURPOSE_S_INTRO))) {
    if (tor_memeq(circ->build_state->chosen_exit->identity_digest,
                  intro->extend_info->identity_digest, DIGEST_LEN) &&
        circ->rend_data) {
      return circ;
    }
  }

  circ = NULL;
  while ((circ = circuit_get_next_by_pk_and_purpose(circ,
                                      (uint8_t *) pk_digest,
                                      CIRCUIT_PURPOSE_S_ESTABLISH_INTRO))) {
    if (tor_memeq(circ->build_state->chosen_exit->identity_digest,
                  intro->extend_info->identity_digest, DIGEST_LEN) &&
        circ->rend_data) {
      return circ;
    }
  }
  return NULL;
}

/** Return the corresponding introduction point using the circuit <b>circ</b>
 * found in the <b>service</b>. NULL is returned if not found. */
static rend_intro_point_t *
find_expiring_intro_point(rend_service_t *service, origin_circuit_t *circ)
{
  tor_assert(service);
  tor_assert(TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
             TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_INTRO);

  SMARTLIST_FOREACH(service->expiring_nodes, rend_intro_point_t *,
                    intro_point,
    if (crypto_pk_eq_keys(intro_point->intro_key, circ->intro_key)) {
      return intro_point;
  });

  return NULL;
}

/** Return a pointer to the rend_intro_point_t corresponding to the
 * service-side introduction circuit <b>circ</b>.
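 *
 * The lookup first resolves the service by the onion address stored in the
 * circuit's rend_data, then matches the circuit's intro key against each of
 * the service's intro points.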
 */
static rend_intro_point_t *
find_intro_point(origin_circuit_t *circ)
{
  const char *serviceid;
  rend_service_t *service = NULL;

  tor_assert(TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_ESTABLISH_INTRO ||
             TO_CIRCUIT(circ)->purpose == CIRCUIT_PURPOSE_S_INTRO);
  tor_assert(circ->rend_data);
  serviceid = rend_data_get_address(circ->rend_data);

  SMARTLIST_FOREACH(rend_service_list, rend_service_t *, s,
    if (tor_memeq(s->service_id, serviceid, REND_SERVICE_ID_LEN_BASE32)) {
      service = s;
      break;
    });

  if (service == NULL)
    return NULL;

  SMARTLIST_FOREACH(service->intro_nodes, rend_intro_point_t *, intro_point,
    if (crypto_pk_eq_keys(intro_point->intro_key, circ->intro_key)) {
      return intro_point;
  });

  return NULL;
}

/** Upload the rend_encoded_v2_service_descriptor_t's in <b>descs</b>
 * associated with the rend_service_descriptor_t <b>renddesc</b> to
 * the responsible hidden service directories OR the hidden service
 * directories specified by <b>hs_dirs</b>; <b>service_id</b> and
 * <b>seconds_valid</b> are only passed for logging purposes. */
void
directory_post_to_hs_dir(rend_service_descriptor_t *renddesc,
                         smartlist_t *descs, smartlist_t *hs_dirs,
                         const char *service_id, int seconds_valid)
{
  int i, j, failed_upload = 0;
  smartlist_t *responsible_dirs = smartlist_new();
  smartlist_t *successful_uploads = smartlist_new();
  routerstatus_t *hs_dir;
  for (i = 0; i < smartlist_len(descs); i++) {
    rend_encoded_v2_service_descriptor_t *desc = smartlist_get(descs, i);
    /* If any HSDirs are specified, they should be used instead of
     * the responsible directories. */
    if (hs_dirs && smartlist_len(hs_dirs) > 0) {
      smartlist_add_all(responsible_dirs, hs_dirs);
    } else {
      /* Determine responsible dirs. */
      if (hid_serv_get_responsible_directories(responsible_dirs,
                                               desc->desc_id) < 0) {
        log_warn(LD_REND, "Could not determine the responsible hidden service "
                          "directories to post descriptors to.");
        control_event_hs_descriptor_upload(service_id,
                                           "UNKNOWN",
                                           "UNKNOWN");
        goto done;
      }
    }
    for (j = 0; j < smartlist_len(responsible_dirs); j++) {
      char desc_id_base32[REND_DESC_ID_V2_LEN_BASE32 + 1];
      char *hs_dir_ip;
      const node_t *node;
      rend_data_t *rend_data;
      hs_dir = smartlist_get(responsible_dirs, j);
      if (smartlist_contains_digest(renddesc->successful_uploads,
                                    hs_dir->identity_digest))
        /* Don't upload descriptor if we succeeded in doing so last time. */
        continue;
      node = node_get_by_id(hs_dir->identity_digest);
      if (!node || !node_has_descriptor(node)) {
        log_info(LD_REND, "Not launching upload for v2 descriptor to "
                          "hidden service directory %s; we don't have its "
                          "router descriptor. Queuing for later upload.",
                 safe_str_client(routerstatus_describe(hs_dir)));
        failed_upload = -1;
        continue;
      }
      /* Send publish request. */

      /* We need the service ID to identify which service did the upload
       * request. Lookup is made in rend_service_desc_has_uploaded().
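       * The service ID travels inside the rend_data object attached to the
       * directory request below; that is why a fresh rend_data is created
       * for each upload.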
*/ rend_data = rend_data_client_create(service_id, desc->desc_id, NULL, REND_NO_AUTH); directory_initiate_command_routerstatus_rend(hs_dir, DIR_PURPOSE_UPLOAD_RENDDESC_V2, ROUTER_PURPOSE_GENERAL, DIRIND_ANONYMOUS, NULL, desc->desc_str, strlen(desc->desc_str), 0, rend_data, NULL); rend_data_free(rend_data); base32_encode(desc_id_base32, sizeof(desc_id_base32), desc->desc_id, DIGEST_LEN); hs_dir_ip = tor_dup_ip(hs_dir->addr); log_info(LD_REND, "Launching upload for v2 descriptor for " "service '%s' with descriptor ID '%s' with validity " "of %d seconds to hidden service directory '%s' on " "%s:%d.", safe_str_client(service_id), safe_str_client(desc_id_base32), seconds_valid, hs_dir->nickname, hs_dir_ip, hs_dir->or_port); control_event_hs_descriptor_upload(service_id, hs_dir->identity_digest, desc_id_base32); tor_free(hs_dir_ip); /* Remember successful upload to this router for next time. */ if (!smartlist_contains_digest(successful_uploads, hs_dir->identity_digest)) smartlist_add(successful_uploads, hs_dir->identity_digest); } smartlist_clear(responsible_dirs); } if (!failed_upload) { if (renddesc->successful_uploads) { SMARTLIST_FOREACH(renddesc->successful_uploads, char *, c, tor_free(c);); smartlist_free(renddesc->successful_uploads); renddesc->successful_uploads = NULL; } renddesc->all_uploads_performed = 1; } else { /* Remember which routers worked this time, so that we don't upload the * descriptor to them again. */ if (!renddesc->successful_uploads) renddesc->successful_uploads = smartlist_new(); SMARTLIST_FOREACH(successful_uploads, const char *, c, { if (!smartlist_contains_digest(renddesc->successful_uploads, c)) { char *hsdir_id = tor_memdup(c, DIGEST_LEN); smartlist_add(renddesc->successful_uploads, hsdir_id); } }); } done: smartlist_free(responsible_dirs); smartlist_free(successful_uploads); } /** Encode and sign an up-to-date service descriptor for <b>service</b>, * and upload it/them to the responsible hidden service directories. */ static void upload_service_descriptor(rend_service_t *service) { time_t now = time(NULL); int rendpostperiod; char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; int uploaded = 0; rendpostperiod = get_options()->RendPostPeriod; networkstatus_t *c = networkstatus_get_latest_consensus(); if (c && smartlist_len(c->routerstatus_list) > 0) { int seconds_valid, i, j, num_descs; smartlist_t *descs = smartlist_new(); smartlist_t *client_cookies = smartlist_new(); /* Either upload a single descriptor (including replicas) or one * descriptor for each authorized client in case of authorization * type 'stealth'. */ num_descs = service->auth_type == REND_STEALTH_AUTH ? smartlist_len(service->clients) : 1; for (j = 0; j < num_descs; j++) { crypto_pk_t *client_key = NULL; rend_authorized_client_t *client = NULL; smartlist_clear(client_cookies); switch (service->auth_type) { case REND_NO_AUTH: /* Do nothing here. */ break; case REND_BASIC_AUTH: SMARTLIST_FOREACH(service->clients, rend_authorized_client_t *, cl, smartlist_add(client_cookies, cl->descriptor_cookie)); break; case REND_STEALTH_AUTH: client = smartlist_get(service->clients, j); client_key = client->client_key; smartlist_add(client_cookies, client->descriptor_cookie); break; } /* Encode the current descriptor. 
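       * rend_encode_v2_descriptors() returns the number of seconds the
       * encoded descriptors remain valid; that value drives the
       * next_upload_time computation further down.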
*/ seconds_valid = rend_encode_v2_descriptors(descs, service->desc, now, 0, service->auth_type, client_key, client_cookies); if (seconds_valid < 0) { log_warn(LD_BUG, "Internal error: couldn't encode service " "descriptor; not uploading."); smartlist_free(descs); smartlist_free(client_cookies); return; } rend_get_service_id(service->desc->pk, serviceid); if (get_options()->PublishHidServDescriptors) { /* Post the current descriptors to the hidden service directories. */ log_info(LD_REND, "Launching upload for hidden service %s", serviceid); directory_post_to_hs_dir(service->desc, descs, NULL, serviceid, seconds_valid); } /* Free memory for descriptors. */ for (i = 0; i < smartlist_len(descs); i++) rend_encoded_v2_service_descriptor_free(smartlist_get(descs, i)); smartlist_clear(descs); /* Update next upload time. */ if (seconds_valid - REND_TIME_PERIOD_OVERLAPPING_V2_DESCS > rendpostperiod) service->next_upload_time = now + rendpostperiod; else if (seconds_valid < REND_TIME_PERIOD_OVERLAPPING_V2_DESCS) service->next_upload_time = now + seconds_valid + 1; else service->next_upload_time = now + seconds_valid - REND_TIME_PERIOD_OVERLAPPING_V2_DESCS + 1; /* Post also the next descriptors, if necessary. */ if (seconds_valid < REND_TIME_PERIOD_OVERLAPPING_V2_DESCS) { seconds_valid = rend_encode_v2_descriptors(descs, service->desc, now, 1, service->auth_type, client_key, client_cookies); if (seconds_valid < 0) { log_warn(LD_BUG, "Internal error: couldn't encode service " "descriptor; not uploading."); smartlist_free(descs); smartlist_free(client_cookies); return; } if (get_options()->PublishHidServDescriptors) { directory_post_to_hs_dir(service->desc, descs, NULL, serviceid, seconds_valid); } /* Free memory for descriptors. */ for (i = 0; i < smartlist_len(descs); i++) rend_encoded_v2_service_descriptor_free(smartlist_get(descs, i)); smartlist_clear(descs); } } smartlist_free(descs); smartlist_free(client_cookies); uploaded = 1; if (get_options()->PublishHidServDescriptors) { log_info(LD_REND, "Successfully uploaded v2 rend descriptors!"); } else { log_info(LD_REND, "Successfully stored created v2 rend descriptors!"); } } /* If not uploaded, try again in one minute. */ if (!uploaded) service->next_upload_time = now + 60; /* Unmark dirty flag of this service. */ service->desc_is_dirty = 0; } /** Return the number of INTRODUCE2 cells this hidden service has received * from this intro point. */ static int intro_point_accepted_intro_count(rend_intro_point_t *intro) { return intro->accepted_introduce2_count; } /** Return non-zero iff <b>intro</b> should 'expire' now (i.e. we * should stop publishing it in new descriptors and eventually close * it). */ static int intro_point_should_expire_now(rend_intro_point_t *intro, time_t now) { tor_assert(intro != NULL); if (intro->time_published == -1) { /* Don't expire an intro point if we haven't even published it yet. */ return 0; } if (intro_point_accepted_intro_count(intro) >= intro->max_introductions) { /* This intro point has been used too many times. Expire it now. */ return 1; } if (intro->time_to_expire == -1) { /* This intro point has been published, but we haven't picked an * expiration time for it. Pick one now. */ int intro_point_lifetime_seconds = crypto_rand_int_range(INTRO_POINT_LIFETIME_MIN_SECONDS, INTRO_POINT_LIFETIME_MAX_SECONDS); /* Start the expiration timer now, rather than when the intro * point was first published. There shouldn't be much of a time * difference. 
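     * In other words, time_to_expire is measured from now rather than from
     * intro->time_published.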
 */
    intro->time_to_expire = now + intro_point_lifetime_seconds;
    return 0;
  }

  /* This intro point has a time to expire set already. Use it. */
  return (now >= intro->time_to_expire);
}

/** Iterate over intro points in the given service and remove the invalid
 * ones. For an intro point object to be considered invalid, the circuit
 * _and_ node need to have disappeared.
 *
 * If the intro point should expire, it's placed into the expiring_nodes
 * list of the service and removed from the active intro nodes list.
 *
 * If <b>exclude_nodes</b> is not NULL, add the valid nodes to it.
 *
 * If <b>retry_nodes</b> is not NULL, add the valid node to it if the
 * circuit disappeared but the node is still in the consensus. */
static void
remove_invalid_intro_points(rend_service_t *service,
                            smartlist_t *exclude_nodes,
                            smartlist_t *retry_nodes, time_t now)
{
  tor_assert(service);

  /* Remove any expired nodes that don't have a circuit. */
  SMARTLIST_FOREACH_BEGIN(service->expiring_nodes, rend_intro_point_t *,
                          intro) {
    origin_circuit_t *intro_circ =
      find_intro_circuit(intro, service->pk_digest);
    if (intro_circ) {
      continue;
    }
    /* No more circuit; clean up the intro point object. */
    SMARTLIST_DEL_CURRENT(service->expiring_nodes, intro);
    rend_intro_point_free(intro);
  } SMARTLIST_FOREACH_END(intro);

  SMARTLIST_FOREACH_BEGIN(service->intro_nodes, rend_intro_point_t *,
                          intro) {
    /* Find the introduction point node object. */
    const node_t *node =
      node_get_by_id(intro->extend_info->identity_digest);
    /* Find the intro circuit; this might be NULL. */
    origin_circuit_t *intro_circ =
      find_intro_circuit(intro, service->pk_digest);

    /* Add the valid node to the exclusion list so we don't try to establish
     * an introduction point to it again. */
    if (node && exclude_nodes) {
      smartlist_add(exclude_nodes, (void*) node);
    }

    /* First, make sure we still have a valid circuit for this intro point.
     * If we don't, we'll give up on it and make a new one. */
    if (intro_circ == NULL) {
      log_info(LD_REND, "Attempting to retry on %s as intro point for %s"
               " (circuit disappeared).",
               safe_str_client(extend_info_describe(intro->extend_info)),
               safe_str_client(service->service_id));
      /* We've lost the circuit for this intro point; flag it so it can be
       * accounted for when considering uploading a descriptor. */
      intro->circuit_established = 0;

      /* The node is gone, or we've reached our maximum circuit creation
       * retry count: clean up everything, we'll find a new one. */
      if (node == NULL ||
          intro->circuit_retries >= MAX_INTRO_POINT_CIRCUIT_RETRIES) {
        rend_intro_point_free(intro);
        SMARTLIST_DEL_CURRENT(service->intro_nodes, intro);
        /* We've just killed the intro point, nothing left to do. */
        continue;
      }

      /* The intro point is still alive so let's try to use it again because
       * we have a published descriptor containing it. Keep the intro point
       * in the intro_nodes list because it's still valid; we are rebuilding
       * a circuit to it. */
      if (retry_nodes) {
        smartlist_add(retry_nodes, intro);
      }
    }
    /* else, the circuit is valid so in both cases, node being alive or not,
     * we leave the circuit and intro point object as is. Closing the
     * circuit here would leak new consensus timing and freeing the intro
     * point object would make the intro circuit unusable. */

    /* Now, check if the intro point should expire. If it does, queue it so
     * it can be cleaned up once it has been replaced properly.
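     * Expired intro points are parked on service->expiring_nodes and are
     * finally closed and freed in rend_service_desc_has_uploaded(), once a
     * descriptor that no longer lists them has been published.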
 */
    if (intro_point_should_expire_now(intro, now)) {
      log_info(LD_REND, "Expiring %s as intro point for %s.",
               safe_str_client(extend_info_describe(intro->extend_info)),
               safe_str_client(service->service_id));

      smartlist_add(service->expiring_nodes, intro);
      SMARTLIST_DEL_CURRENT(service->intro_nodes, intro);
      /* The intro point has expired and we need a new one, so don't
       * consider it a valid established intro point anymore. */
      intro->circuit_established = 0;
    }
  } SMARTLIST_FOREACH_END(intro);
}

/** A new descriptor has been successfully uploaded for the given
 * <b>rend_data</b>. Remove and free the expiring nodes from the associated
 * service. */
void
rend_service_desc_has_uploaded(const rend_data_t *rend_data)
{
  rend_service_t *service;
  const char *onion_address;

  tor_assert(rend_data);

  onion_address = rend_data_get_address(rend_data);

  service = rend_service_get_by_service_id(onion_address);
  if (service == NULL) {
    return;
  }

  SMARTLIST_FOREACH_BEGIN(service->expiring_nodes, rend_intro_point_t *,
                          intro) {
    origin_circuit_t *intro_circ =
      find_intro_circuit(intro, service->pk_digest);
    if (intro_circ != NULL) {
      circuit_mark_for_close(TO_CIRCUIT(intro_circ),
                             END_CIRC_REASON_FINISHED);
    }
    SMARTLIST_DEL_CURRENT(service->expiring_nodes, intro);
    rend_intro_point_free(intro);
  } SMARTLIST_FOREACH_END(intro);
}

/** Don't try to build more than this many circuits before giving up
 * for a while. Dynamically calculated based on the configured number of
 * introduction points for the service, n_intro_points_wanted. */
static int
rend_max_intro_circs_per_period(unsigned int n_intro_points_wanted)
{
  /* Allow all but one of the initial connections to fail and be
   * retried. (If all fail, we *want* to wait, because something is
   * broken.) */
  tor_assert(n_intro_points_wanted <= NUM_INTRO_POINTS_MAX);
  return (int)(2*n_intro_points_wanted + NUM_INTRO_POINTS_EXTRA);
}

/** For every service, check how many intro points it currently has, and:
 *  - Invalidate introduction points based on specific criteria; see
 *    remove_invalid_intro_points comments.
 *  - Pick new intro points as necessary.
 *  - Launch circuits to any new intro points.
 *
 * This is called once a second by the main loop.
 */
void
rend_consider_services_intro_points(void)
{
  int i;
  time_t now;
  const or_options_t *options = get_options();
  /* Are we in single onion mode? */
  const int allow_direct = rend_service_allow_non_anonymous_connection(
                                                                get_options());
  /* List of nodes we need to _exclude_ when choosing a new node to
   * establish an intro point to. */
  smartlist_t *exclude_nodes;
  /* List of nodes on which we need to retry building a circuit, because
   * the node is valid but the circuit died. */
  smartlist_t *retry_nodes;

  if (!have_completed_a_circuit())
    return;

  exclude_nodes = smartlist_new();
  retry_nodes = smartlist_new();
  now = time(NULL);

  SMARTLIST_FOREACH_BEGIN(rend_service_list, rend_service_t *, service) {
    int r;
    /* Number of intro points we want to open and add to the intro nodes
     * list of the service. */
    unsigned int n_intro_points_to_open;
    /* Use an unsigned len so we can compare values without gcc complaining
     * about mismatched signed comparisons. */
    unsigned int intro_nodes_len;
    /* Different services are allowed to have the same introduction point
     * as long as they are on different circuits, which is why we clear
     * this list. */
    smartlist_clear(exclude_nodes);
    smartlist_clear(retry_nodes);

    /* Clean up the invalid intro points and save the node objects, if any,
     * in the exclude_nodes and retry_nodes lists.
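     * Both lists were cleared just above, so entries never carry over from
     * one service to the next.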
 */
    remove_invalid_intro_points(service, exclude_nodes, retry_nodes, now);

    /* This retry period is important here so we don't stress circuit
     * creation. */
    if (now > service->intro_period_started + INTRO_CIRC_RETRY_PERIOD) {
      /* One period has elapsed; we can try building circuits again. */
      service->intro_period_started = now;
      service->n_intro_circuits_launched = 0;
    } else if (service->n_intro_circuits_launched >=
               rend_max_intro_circs_per_period(
                                      service->n_intro_points_wanted)) {
      /* We have failed too many times in this period; wait for the next
       * one before we try to initiate any more connections. */
      continue;
    }

    /* Let's try to rebuild circuits on the nodes we want to retry on. */
    SMARTLIST_FOREACH_BEGIN(retry_nodes, rend_intro_point_t *, intro) {
      r = rend_service_launch_establish_intro(service, intro);
      if (r < 0) {
        log_warn(LD_REND, "Error launching circuit to node %s for service %s.",
                 safe_str_client(extend_info_describe(intro->extend_info)),
                 safe_str_client(service->service_id));
        /* Unable to launch a circuit to that intro point, remove it from
         * the valid list so we can create a new one. */
        smartlist_remove(service->intro_nodes, intro);
        rend_intro_point_free(intro);
        continue;
      }
      intro->circuit_retries++;
    } SMARTLIST_FOREACH_END(intro);

    /* Avoid a mismatched signed comparison below. */
    intro_nodes_len = (unsigned int) smartlist_len(service->intro_nodes);

    /* Quiescent state: we have at least as many intro nodes as wanted for
     * this service. Proceed to the next service. We can have more nodes
     * because we launch extra preemptive circuits if our intro nodes list
     * was originally empty, for performance reasons. */
    if (intro_nodes_len >= service->n_intro_points_wanted) {
      continue;
    }

    /* The number of intro points we want to open, which is the wanted
     * number minus the current number of valid nodes. We know that this
     * won't underflow because of the check above. */
    n_intro_points_to_open = service->n_intro_points_wanted - intro_nodes_len;
    if (intro_nodes_len == 0) {
      /* We want to end up with n_intro_points_wanted intro points, but if
       * we have no intro points at all (chances are they all cycled or we
       * are starting up), we launch NUM_INTRO_POINTS_EXTRA extra circuits
       * and use the first n_intro_points_wanted that complete. See proposal
       * #155, section 4 for the rationale of this, which is purely for
       * performance.
       *
       * The ones after the first n_intro_points_to_open will be converted
       * to 'general' internal circuits in rend_service_intro_has_opened(),
       * and then we'll drop them from the list of intro points. */
      n_intro_points_to_open += NUM_INTRO_POINTS_EXTRA;
    }

    for (i = 0; i < (int) n_intro_points_to_open; i++) {
      const node_t *node;
      rend_intro_point_t *intro;
      router_crn_flags_t flags = CRN_NEED_UPTIME|CRN_NEED_DESC;
      if (get_options()->AllowInvalid_ & ALLOW_INVALID_INTRODUCTION)
        flags |= CRN_ALLOW_INVALID;
      router_crn_flags_t direct_flags = flags;
      direct_flags |= CRN_PREF_ADDR;
      direct_flags |= CRN_DIRECT_CONN;

      node = router_choose_random_node(exclude_nodes,
                                       options->ExcludeNodes,
                                       allow_direct ?
                                       direct_flags : flags);
      /* If we are in single onion mode, retry node selection for a 3-hop
       * path. */
      if (allow_direct && !node) {
        log_info(LD_REND,
                 "Unable to find an intro point that we can connect to "
                 "directly for %s, falling back to a 3-hop path.",
                 safe_str_client(service->service_id));
        node = router_choose_random_node(exclude_nodes,
                                         options->ExcludeNodes, flags);
      }

      if (!node) {
        log_warn(LD_REND,
                 "We only have %d introduction points established for %s; "
                 "wanted %u.",
                 smartlist_len(service->intro_nodes),
                 safe_str_client(service->service_id),
                 n_intro_points_to_open);
        break;
      }

      /* Add the chosen node to the exclusion list in order to avoid
       * picking it again in the next iteration. */
      smartlist_add(exclude_nodes, (void*)node);
      intro = tor_malloc_zero(sizeof(rend_intro_point_t));
      /* extend_info is for clients, so we want the multi-hop primary
       * ORPort, even if we are a single onion service and intend to
       * connect to it directly ourselves. */
      intro->extend_info = extend_info_from_node(node, 0);
      if (BUG(intro->extend_info == NULL)) {
        break;
      }
      intro->intro_key = crypto_pk_new();
      const int fail = crypto_pk_generate_key(intro->intro_key);
      tor_assert(!fail);
      intro->time_published = -1;
      intro->time_to_expire = -1;
      intro->max_introductions =
        crypto_rand_int_range(INTRO_POINT_MIN_LIFETIME_INTRODUCTIONS,
                              INTRO_POINT_MAX_LIFETIME_INTRODUCTIONS);
      smartlist_add(service->intro_nodes, intro);
      log_info(LD_REND, "Picked router %s as an intro point for %s.",
               safe_str_client(node_describe(node)),
               safe_str_client(service->service_id));

      /* Establish a new introduction circuit to our chosen intro point. */
      r = rend_service_launch_establish_intro(service, intro);
      if (r < 0) {
        log_warn(LD_REND, "Error launching circuit to node %s for service %s.",
                 safe_str_client(extend_info_describe(intro->extend_info)),
                 safe_str_client(service->service_id));
        /* This function will be called again by the main loop, so this
         * intro point without an intro circuit will be retried or removed
         * after a maximum number of attempts. */
      }
    }
  } SMARTLIST_FOREACH_END(service);

  smartlist_free(exclude_nodes);
  smartlist_free(retry_nodes);
}

#define MIN_REND_INITIAL_POST_DELAY (30)
#define MIN_REND_INITIAL_POST_DELAY_TESTING (5)

/** Regenerate and upload rendezvous service descriptors for all
 * services, if necessary. If the descriptor has been dirty enough
 * for long enough, definitely upload; else only upload when the
 * periodic timeout has expired.
 *
 * For the first upload, pick a random time between now and two periods
 * from now, and pick it independently for each service.
 */
void
rend_consider_services_upload(time_t now)
{
  int i;
  rend_service_t *service;
  const or_options_t *options = get_options();
  int rendpostperiod = options->RendPostPeriod;
  int rendinitialpostdelay = (options->TestingTorNetwork ?
                              MIN_REND_INITIAL_POST_DELAY_TESTING :
                              MIN_REND_INITIAL_POST_DELAY);

  for (i=0; i < smartlist_len(rend_service_list); ++i) {
    service = smartlist_get(rend_service_list, i);
    if (!service->next_upload_time) { /* never been uploaded yet */
      /* The fixed lower bound of rendinitialpostdelay seconds ensures that
       * the descriptor is stable before being published. See comment
       * below. */
      service->next_upload_time =
        now + rendinitialpostdelay + crypto_rand_int(2*rendpostperiod);
      /* Single Onion Services prioritise availability over hiding their
       * startup time, as their IP address is publicly discoverable anyway.
 */
      if (rend_service_reveal_startup_time(options)) {
        service->next_upload_time = now + rendinitialpostdelay;
      }
    }
    /* Have all introduction points been established? */
    unsigned int intro_points_ready =
      count_established_intro_points(service) >=
        service->n_intro_points_wanted;
    if (intro_points_ready &&
        (service->next_upload_time < now ||
        (service->desc_is_dirty &&
         service->desc_is_dirty < now-rendinitialpostdelay))) {
      /* If it's time, or if the directory servers have a wrong service
       * descriptor and ours has been stable for rendinitialpostdelay
       * seconds, upload a new one of each format. */
      rend_service_update_descriptor(service);
      upload_service_descriptor(service);
    }
  }
}

/** True if the list of available router descriptors might have changed so
 * that we should check whether we can republish previously failed
 * rendezvous service descriptors. */
static int consider_republishing_rend_descriptors = 1;

/** Called when our internal view of the directory has changed, so that we
 * might have router descriptors of hidden service directories available
 * that we did not have before. */
void
rend_hsdir_routers_changed(void)
{
  consider_republishing_rend_descriptors = 1;
}

/** Consider republication of v2 rendezvous service descriptors that failed
 * previously, but without regenerating descriptor contents. */
void
rend_consider_descriptor_republication(void)
{
  int i;
  rend_service_t *service;

  if (!consider_republishing_rend_descriptors)
    return;
  consider_republishing_rend_descriptors = 0;
  if (!get_options()->PublishHidServDescriptors)
    return;

  for (i=0; i < smartlist_len(rend_service_list); ++i) {
    service = smartlist_get(rend_service_list, i);
    if (service->desc && !service->desc->all_uploads_performed) {
      /* If we failed in uploading a descriptor last time, try again
       * *without* updating the descriptor's contents. */
      upload_service_descriptor(service);
    }
  }
}

/** Log the status of introduction points for all rendezvous services
 * at log severity <b>severity</b>. */
void
rend_service_dump_stats(int severity)
{
  int i,j;
  rend_service_t *service;
  rend_intro_point_t *intro;
  const char *safe_name;
  origin_circuit_t *circ;

  for (i=0; i < smartlist_len(rend_service_list); ++i) {
    service = smartlist_get(rend_service_list, i);
    tor_log(severity, LD_GENERAL, "Service configured in %s:",
            rend_service_escaped_dir(service));
    for (j=0; j < smartlist_len(service->intro_nodes); ++j) {
      intro = smartlist_get(service->intro_nodes, j);
      safe_name = safe_str_client(intro->extend_info->nickname);

      circ = find_intro_circuit(intro, service->pk_digest);
      if (!circ) {
        tor_log(severity, LD_GENERAL, "  Intro point %d at %s: no circuit",
                j, safe_name);
        continue;
      }
      tor_log(severity, LD_GENERAL, "  Intro point %d at %s: circuit is %s",
              j, safe_name, circuit_state_to_string(circ->base_.state));
    }
  }
}

#ifdef HAVE_SYS_UN_H

/** Given <b>ports</b>, a smartlist containing rend_service_port_config_t,
 * add the given <b>p</b>, an AF_UNIX port, to the list. Return 0 on
 * success else return -ENOSYS if AF_UNIX is not supported (see function in
 * the #else statement below). */
static int
add_unix_port(smartlist_t *ports, rend_service_port_config_t *p)
{
  tor_assert(ports);
  tor_assert(p);
  tor_assert(p->is_unix_addr);

  smartlist_add(ports, p);
  return 0;
}

/** Given <b>conn</b> set it to use the given port <b>p</b> values. Return 0
 * on success else return -ENOSYS if AF_UNIX is not supported (see function
 * in the #else statement below).
*/ static int set_unix_port(edge_connection_t *conn, rend_service_port_config_t *p) { tor_assert(conn); tor_assert(p); tor_assert(p->is_unix_addr); conn->base_.socket_family = AF_UNIX; tor_addr_make_unspec(&conn->base_.addr); conn->base_.port = 1; conn->base_.address = tor_strdup(p->unix_addr); return 0; } #else /* defined(HAVE_SYS_UN_H) */ static int set_unix_port(edge_connection_t *conn, rend_service_port_config_t *p) { (void) conn; (void) p; return -ENOSYS; } static int add_unix_port(smartlist_t *ports, rend_service_port_config_t *p) { (void) ports; (void) p; return -ENOSYS; } #endif /* HAVE_SYS_UN_H */ /** Given <b>conn</b>, a rendezvous exit stream, look up the hidden service for * 'circ', and look up the port and address based on conn-\>port. * Assign the actual conn-\>addr and conn-\>port. Return -2 on failure * for which the circuit should be closed, -1 on other failure, * or 0 for success. */ int rend_service_set_connection_addr_port(edge_connection_t *conn, origin_circuit_t *circ) { rend_service_t *service; char serviceid[REND_SERVICE_ID_LEN_BASE32+1]; smartlist_t *matching_ports; rend_service_port_config_t *chosen_port; unsigned int warn_once = 0; const char *rend_pk_digest; tor_assert(circ->base_.purpose == CIRCUIT_PURPOSE_S_REND_JOINED); tor_assert(circ->rend_data); log_debug(LD_REND,"beginning to hunt for addr/port"); /* XXX: This is version 2 specific (only one supported). */ rend_pk_digest = (char *) rend_data_get_pk_digest(circ->rend_data, NULL); base32_encode(serviceid, REND_SERVICE_ID_LEN_BASE32+1, rend_pk_digest, REND_SERVICE_ID_LEN); service = rend_service_get_by_pk_digest(rend_pk_digest); if (!service) { log_warn(LD_REND, "Couldn't find any service associated with pk %s on " "rendezvous circuit %u; closing.", serviceid, (unsigned)circ->base_.n_circ_id); return -2; } if (service->max_streams_per_circuit > 0) { /* Enforce the streams-per-circuit limit, and refuse to provide a * mapping if this circuit will exceed the limit. */ #define MAX_STREAM_WARN_INTERVAL 600 static struct ratelim_t stream_ratelim = RATELIM_INIT(MAX_STREAM_WARN_INTERVAL); if (circ->rend_data->nr_streams >= service->max_streams_per_circuit) { log_fn_ratelim(&stream_ratelim, LOG_WARN, LD_REND, "Maximum streams per circuit limit reached on rendezvous " "circuit %u; %s. Circuit has %d out of %d streams.", (unsigned)circ->base_.n_circ_id, service->max_streams_close_circuit ? "closing circuit" : "ignoring open stream request", circ->rend_data->nr_streams, service->max_streams_per_circuit); return service->max_streams_close_circuit ? -2 : -1; } } matching_ports = smartlist_new(); SMARTLIST_FOREACH(service->ports, rend_service_port_config_t *, p, { if (conn->base_.port != p->virtual_port) { continue; } if (!(p->is_unix_addr)) { smartlist_add(matching_ports, p); } else { if (add_unix_port(matching_ports, p)) { if (!warn_once) { /* Unix port not supported so warn only once. */ log_warn(LD_REND, "Saw AF_UNIX virtual port mapping for port %d on service " "%s, which is unsupported on this platform. 
Ignoring it.", conn->base_.port, serviceid); } warn_once++; } } }); chosen_port = smartlist_choose(matching_ports); smartlist_free(matching_ports); if (chosen_port) { if (!(chosen_port->is_unix_addr)) { /* Get a non-AF_UNIX connection ready for connection_exit_connect() */ tor_addr_copy(&conn->base_.addr, &chosen_port->real_addr); conn->base_.port = chosen_port->real_port; } else { if (set_unix_port(conn, chosen_port)) { /* Simply impossible to end up here else we were able to add a Unix * port without AF_UNIX support... ? */ tor_assert(0); } } return 0; } log_info(LD_REND, "No virtual port mapping exists for port %d on service %s", conn->base_.port, serviceid); if (service->allow_unknown_ports) return -1; else return -2; } /* Are HiddenServiceSingleHopMode and HiddenServiceNonAnonymousMode consistent? */ static int rend_service_non_anonymous_mode_consistent(const or_options_t *options) { /* !! is used to make these options boolean */ return (!! options->HiddenServiceSingleHopMode == !! options->HiddenServiceNonAnonymousMode); } /* Do the options allow onion services to make direct (non-anonymous) * connections to introduction or rendezvous points? * Must only be called after options_validate_single_onion() has successfully * checked onion service option consistency. * Returns true if tor is in HiddenServiceSingleHopMode. */ int rend_service_allow_non_anonymous_connection(const or_options_t *options) { tor_assert(rend_service_non_anonymous_mode_consistent(options)); return options->HiddenServiceSingleHopMode ? 1 : 0; } /* Do the options allow us to reveal the exact startup time of the onion * service? * Single Onion Services prioritise availability over hiding their * startup time, as their IP address is publicly discoverable anyway. * Must only be called after options_validate_single_onion() has successfully * checked onion service option consistency. * Returns true if tor is in non-anonymous hidden service mode. */ int rend_service_reveal_startup_time(const or_options_t *options) { tor_assert(rend_service_non_anonymous_mode_consistent(options)); return rend_service_non_anonymous_mode_enabled(options); } /* Is non-anonymous mode enabled using the HiddenServiceNonAnonymousMode * config option? * Must only be called after options_validate_single_onion() has successfully * checked onion service option consistency. */ int rend_service_non_anonymous_mode_enabled(const or_options_t *options) { tor_assert(rend_service_non_anonymous_mode_consistent(options)); return options->HiddenServiceNonAnonymousMode ? 1 : 0; }